diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 442018cc6..9e05737f4 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,8 +1,9 @@ variables: rpm_release: "0.$CI_PIPELINE_ID" specfile: $project.spec - other_repos: $yum_repo_url/redhawk-dependencies - redhawk_version: '2.0.9' + other_repos: $s3_repo_url/redhawk-dependencies/redhawk-dependencies + redhawk_version: '2.2.1-rc2' + redhawk_deps: 'develop-2.2' stages: - package-redhawk-codegen @@ -33,7 +34,7 @@ stages: --build-arg "arch=$arch" --build-arg "spec_file=$specfile" --build-arg "rpm_release=$rpm_release" - --build-arg "other_repos=$other_repos/$dist/$arch" + --build-arg "other_repos=${other_repos}/yum/${redhawk_deps}/$dist/$arch" --build-arg "local_repo=$local_repo" . # Create a yum repository from the packages we just built and any packages we've built in a previous stage - id=$(docker create -it ${docker_registry}redhawk-rpmbuild:$proj_lower-$safe_ref-$dist-$arch bash -lc 'mkdir -p /tmp/repo; @@ -44,11 +45,14 @@ stages: createrepo .; tar czf repo.tar.gz *') - docker start -a $id - #Cleanup any previous output we've inherited + # Cleanup any previous output we've inherited - cd $CI_PROJECT_DIR - rm -rf output && mkdir output - docker cp $id:/tmp/repo/repo.tar.gz output/repo.tar.gz - docker rm -f $id || true + - cd output + - tar xf repo.tar.gz + - cd $CI_PROJECT_DIR artifacts: name: $CI_JOB_NAME paths: @@ -58,18 +62,6 @@ stages: - master - /^(\d+\.)?(\d+)?(\.\d+)$/ -.deploy-common: &deploy-common - image: ${docker_registry}centos:7 - stage: deploy - dependencies: [] - script: - - if [ -n "$jenkins_url" ]; then - curl --insecure -X POST $jenkins_url/job/$jenkins_job/buildWithParameters?pipeline_id=$CI_PIPELINE_ID --user $jenkins_user:$jenkins_api_token; - fi - except: - - master - - /^(\d+\.)?(\d+)?(\.\d+)$/ - redhawk:el6-i386: stage: package-redhawk-codegen variables: @@ -261,11 +253,19 @@ frontend:el7: local_repo: output <<: *rpmbuild -#Trigger a Jenkins job to aggregate artifacts there deploy: - 
variables: - jenkins_job: redhawk/job/core-framework-$CI_COMMIT_REF_NAME - <<: *deploy-common + image: ${docker_registry}centos:7 + stage: deploy + dependencies: [] + before_script: + - echo "Deploying to $jenkins_url/job/$CI_PROJECT_NAMESPACE/job/$CI_PROJECT_NAME-`basename $CI_COMMIT_REF_NAME`" + script: + - if [ -n "$jenkins_url" ]; then + curl --insecure -X POST $jenkins_url/job/$CI_PROJECT_NAMESPACE/job/$CI_PROJECT_NAME-`basename $CI_COMMIT_REF_NAME`/buildWithParameters?pipeline_id=$CI_PIPELINE_ID --user $jenkins_user:$jenkins_api_token; + fi + except: + - /^(\d+\.)?(\d+)?(\.\d+)$/ + - master #Trigger separate tests pipeline test-trigger: @@ -277,6 +277,7 @@ test-trigger: -F token=$test_token -F "variables[triggering_ref]=$CI_COMMIT_SHA" -F "variables[triggering_ref_name]=$CI_COMMIT_REF_NAME" + -F "variables[triggering_ref_slug]=$CI_COMMIT_REF_SLUG" $test_url only: - branches @@ -287,14 +288,87 @@ test-trigger:release: image: ${docker_registry}centos:7 stage: deploy dependencies: [] + variables: + redhawk_version: '2.2.0' script: - curl --insecure -X POST -F ref=$redhawk_version -F token=$test_token -F "variables[triggering_ref]=$CI_COMMIT_SHA" -F "variables[triggering_ref_name]=$CI_COMMIT_REF_NAME" + -F "variables[triggering_ref_slug]=$CI_COMMIT_REF_SLUG" $test_url only: - tags - #Skip on tag of following format except: - /^(\d+\.)?(\d+)?(\.\d+)$/ + +.s3: &s3 + image: ${docker_registry}utils/s3cmd:el6-createrepo + stage: deploy + tags: + - s3 + script: + - CI_COMMIT_REF_SLUG_NO_RC=${CI_COMMIT_REF_SLUG%-rc[0-9]*}; + - mkdir -p $arch + - for file in `find output -name *.rpm`; do + cp $file $arch; + done + - createrepo $arch + - /usr/bin/s3cmd sync -F -v -P --delete-removed $arch s3://$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/yum/$CI_COMMIT_REF_SLUG/$dist/ + - if [ "$CI_COMMIT_REF_SLUG_NO_RC" != "$CI_COMMIT_REF_SLUG" ]; then + /usr/bin/s3cmd sync -F -v -P --delete-removed $arch s3://$CI_PROJECT_NAMESPACE/$CI_PROJECT_NAME/yum/$CI_COMMIT_REF_SLUG_NO_RC/$dist/; + fi + 
# Pull down dependencies to create aggregated "cf-and-deps" repo + - mkdir -p cf-and-deps/$arch + - curl ${s3_repo_url}/redhawk-dependencies/redhawk-dependencies/yum/${redhawk_deps}/$dist/$arch/redhawk-dependencies.repo > /etc/yum.repos.d/redhawk-dependencies.repo + - reposync --download_path=tmp + --newest-only + --repoid=redhawk-dependencies + - for file in `find tmp -name *.rpm`; do + cp $file cf-and-deps/$arch; + done + - for file in `find output -name *.rpm`; do + cp $file cf-and-deps/$arch; + done + - createrepo cf-and-deps/$arch + - cd cf-and-deps + - /usr/bin/s3cmd sync -F -v -P --delete-removed $arch s3://$CI_PROJECT_NAMESPACE/cf-and-deps/yum/$CI_COMMIT_REF_SLUG/$dist/ + +deploy-el7: + variables: + dist: el7 + arch: x86_64 + dependencies: + - redhawk:el7 + - bulkio:el7 + - burstio:el7 + - frontend:el7 + - gpp:el7 + - redhawk-codegen:el7 + <<: *s3 + +deploy-el6: + variables: + dist: el6 + arch: x86_64 + dependencies: + - redhawk:el6 + - bulkio:el6 + - burstio:el6 + - frontend:el6 + - gpp:el6 + - redhawk-codegen:el6 + <<: *s3 + +deploy-el6-i386: + variables: + dist: el6 + arch: i686 + dependencies: + - redhawk:el6-i386 + - bulkio:el6-i386 + - burstio:el6-i386 + - frontend:el6-i386 + - gpp:el6-i386 + - redhawk-codegen:el6-i386 + <<: *s3 diff --git a/GPP/.gitignore b/GPP/.gitignore index 2f78cf5b6..4ebbe450c 100644 --- a/GPP/.gitignore +++ b/GPP/.gitignore @@ -1,2 +1,12 @@ +*.o *.pyc - +.deps/ +Makefile +Makefile.in +aclocal.m4 +autom4te.cache/ +config.* +configure +depcomp +install-sh +missing diff --git a/GPP/GPP.prf.xml b/GPP/GPP.prf.xml index 51dfb675b..4463af5c6 100644 --- a/GPP/GPP.prf.xml +++ b/GPP/GPP.prf.xml @@ -69,15 +69,12 @@ with this program. If not, see http://www.gnu.org/licenses/. - If provided, all component output will be redirected to this file. The GPP will not delete or rotate these logs. The provided value may contain environment variables or reference component exec-params with @EXEC_PARAM@. 
For example, this would be a valid value $SDRROOT/logs/@COMPONENT_IDENTIFIER@.log - - DCE:e4e86070-a121-45d4-a144-00386f2188e3 @@ -86,6 +83,7 @@ with this program. If not, see http://www.gnu.org/licenses/. Required + Data rate being allocated. See data_size for unit of measurement. @@ -114,11 +112,13 @@ Optional Requires the IP address to be addressable from the interface. Optional + Requires this specific interface. Optional + @@ -153,35 +153,85 @@ Optional - - + + + + + + 0.0 Mbps - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + False @@ -189,7 +239,9 @@ Optional 0 Mbps - + + + 0 @@ -200,7 +252,6 @@ Optional - The Multicast NIC interface associated with this GPP (e.g. eth1). If not provided no multicast allocations are permitted. @@ -251,7 +302,6 @@ Optional - Percentage of total Multicast NIC this GPP can use for capacity management 80 @@ -260,13 +310,11 @@ Optional - When queired, returns the list of vlans on this host. When used as an allocation, defines the list of VLANS the component requires. - 80.0 @@ -296,7 +344,6 @@ Optional - Identifier of component or device that generated this message @@ -331,7 +378,6 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - cycle time between updates of metric capture, calculations and threshold evaluations. 500 @@ -339,57 +385,61 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - Report reason why the GPP had it's usage state set to BUSY. - + + Select a cache directory other than the default. + + + + + + Select a working directory other than the default. 
+ + + + The thresholds that cause a failure for allocations + + false + 10 % - - 80 % - - 10 MB - - 900 MB/s - - The percentage of file handles remaining to the GPP that triggers a threshold condition 3 % - - The percentage of threads available to the GPP that triggers a threshold condition 3 % - - + + + 10 + MB - Amount of RAM in the GPP not in use (measured) MiB @@ -403,6 +453,16 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr + + MB + + + + + MB + + + Equal to "processor_cores" x "loadCapacityPerCore". @@ -435,42 +495,28 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - The current load average, as reported by /proc/loadavg. Each core on a computer can have a load average between 0.0 and 1.0. This differs greatly from CPU percentage (as reported by top). Load averages differ in two significant ways: 1) they measure the trend of CPU utlization, and 2) they include all demand for the CPU not only how much was active at the time of measurement. Load averages do not include any processes or threads waiting on I/O, networking, databases, or anything else not demanding the CPU. - - - - - - - - - - - - + + + - - + 0.1 - - list of cpu ids that are being monitored for loadavg and idle utilization. - The current number of threads for the GPP @@ -486,7 +532,6 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - The current number of threads running on the system @@ -502,7 +547,6 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - @@ -513,8 +557,6 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - - @@ -535,14 +577,16 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - - + + + + + + The context specification for the exec_directive_class. See numa library manpage for socket(numa node) and cpu list specifications. 
For cgroup/cpuset option then a pre-existing cgroup name is required. 0 - - The classification of the affinity policy to apply. @@ -553,35 +597,23 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - - determines if the specified affinity policy (exec_directive_value, exec_directive_class) is inherited by RH resources started from this GPP. false - - list of cpu ids to black list when making affinity requests. see numa library manpage for cpu list specifications. - - If no affinity specification is provide during deployment, then enabling this will deploy resources on next available processor socket. (force_override will ignore this) false - - controls if affinity requests are processed by the GPP. true - - - - - + \ No newline at end of file diff --git a/GPP/GPP.scd.xml b/GPP/GPP.scd.xml index 1a022e95e..df6b1aa54 100644 --- a/GPP/GPP.scd.xml +++ b/GPP/GPP.scd.xml @@ -66,4 +66,4 @@ with this program. If not, see http://www.gnu.org/licenses/. - + \ No newline at end of file diff --git a/GPP/GPP.spd.xml b/GPP/GPP.spd.xml index 6d9b8aa43..7027a8c5d 100644 --- a/GPP/GPP.spd.xml +++ b/GPP/GPP.spd.xml @@ -1,5 +1,4 @@ - - + - + + Redhawk GPP null diff --git a/GPP/GPP.spec b/GPP/GPP.spec index ce24d7da0..82c1546b4 100644 --- a/GPP/GPP.spec +++ b/GPP/GPP.spec @@ -31,8 +31,8 @@ Prefix: %{_prefix} %define _infodir %{_prefix}/info Name: GPP -Version: 2.0.9 -Release: 1%{?dist} +Version: 2.2.1 +Release: 2%{?dist} Summary: REDHAWK GPP Group: Applications/Engineering @@ -41,10 +41,8 @@ URL: http://redhawksdr.org/ Source: %{name}-%{version}.tar.gz Vendor: REDHAWK -BuildRoot: %{_tmppath}/%{name}-root - -Requires(post): redhawk >= 2.0 -BuildRequires: redhawk-devel >= 2.0 +Requires(post): redhawk = %{version} +BuildRequires: redhawk-devel = %{version} BuildRequires: numactl-devel >= 2.0 Obsoletes: %{name} < 2.0 @@ -126,6 +124,12 @@ find %{_prefix}/dev/nodes -type d -name 'DevMgr_*' -uid 0 -exec chown -R redhawk %changelog +* Wed Jun 28 2017 Ryan Bauman 
- 2.1.2-1 +- Update for 2.1.2-rc1 + +* Wed Jun 28 2017 Ryan Bauman - 2.1.1-2 +- Bump for 2.1.1-rc2 + * Fri Jan 9 2015 1.11.0-1 - Update for cpp GPP diff --git a/GPP/HEADER b/GPP/HEADER new file mode 100644 index 000000000..3df23d2b5 --- /dev/null +++ b/GPP/HEADER @@ -0,0 +1,17 @@ +This file is protected by Copyright. Please refer to the COPYRIGHT file +distributed with this source distribution. + +This file is part of REDHAWK GPP. + +REDHAWK GPP is free software: you can redistribute it and/or modify it under +the terms of the GNU Lesser General Public License as published by the Free +Software Foundation, either version 3 of the License, or (at your option) any +later version. + +REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +details. + +You should have received a copy of the GNU Lesser General Public License +along with this program. If not, see http://www.gnu.org/licenses/. 
\ No newline at end of file diff --git a/GPP/build.sh b/GPP/build.sh index 118b4cce5..1fecacc9c 100755 --- a/GPP/build.sh +++ b/GPP/build.sh @@ -24,9 +24,9 @@ if [ "$1" = "rpm" ]; then if [ -e GPP.spec ]; then mydir=`dirname $0` tmpdir=`mktemp -d` - cp -r ${mydir} ${tmpdir}/GPP-2.0.9 - tar czf ${tmpdir}/GPP-2.0.9.tar.gz --exclude=".svn" --exclude=".git" -C ${tmpdir} GPP-2.0.9 - rpmbuild -ta ${tmpdir}/GPP-2.0.9.tar.gz + cp -r ${mydir} ${tmpdir}/GPP-2.2.1 + tar czf ${tmpdir}/GPP-2.2.1.tar.gz --exclude=".svn" -C ${tmpdir} GPP-2.2.1 + rpmbuild -ta ${tmpdir}/GPP-2.2.1.tar.gz rm -rf $tmpdir else echo "Missing RPM spec file in" `pwd` diff --git a/GPP/cpp/.gitignore b/GPP/cpp/.gitignore index bc1a67473..c2b075fa7 100644 --- a/GPP/cpp/.gitignore +++ b/GPP/cpp/.gitignore @@ -1,13 +1,3 @@ -.deps .dirstamp GPP -Makefile -Makefile.in -aclocal.m4 -autom4te.cache/ compile -config.* -configure -depcomp -install-sh -missing diff --git a/GPP/cpp/.md5sums b/GPP/cpp/.md5sums index e9a105a73..14d039834 100644 --- a/GPP/cpp/.md5sums +++ b/GPP/cpp/.md5sums @@ -1,11 +1,11 @@ -9a98d99db1969fd0d9d15997f4e17a2b GPP_base.cpp +8580ff5ad75dd6c08534c1b70d2d531d GPP_base.cpp 63f09d1ebc0f9bc23cde5f0ba62bfe92 main.cpp -c99df9fa4ab0cd042eac42fcee6441c3 reconf +222775e71f94ed287b5265eb43029af4 reconf 0d4ac7556a5e6a8ae5c1af00b1e60294 GPP.cpp -380674a5e3e6aad2c33ae49b11604261 GPP_base.h +d9201d8ccb494530596eb36fea85b5fb GPP_base.h ed52e5c18685dbea09f982b35fb4b244 configure.ac 27c738bb146931b195b29a3f05a7dccb Makefile.am 32a7fdea0a2b8c7378333703d831ed8d GPP.h 9ed0b24c3ac3d024b71659ce764c1fdc Makefile.am.ide -fbe69e2e989cfda455bc149083a5ff58 struct_props.h -0d1975802982b41325f73129696f8a63 build.sh +39bc4c07bada48e93495cce426a74e2a struct_props.h +82a9e0259f502aa6171972509a8cca50 build.sh diff --git a/GPP/cpp/GPP.cpp b/GPP/cpp/GPP.cpp index 1ce845f6b..f1b3769fb 100644 --- a/GPP/cpp/GPP.cpp +++ b/GPP/cpp/GPP.cpp @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -71,25 
+72,19 @@ #include #endif -#include "ossie/Events.h" -#include "ossie/affinity.h" - +#include +#include +#include +#include +#include #include "GPP.h" #include "utils/affinity.h" #include "utils/SymlinkReader.h" -#include "utils/ReferenceWrapper.h" #include "parsers/PidProcStatParser.h" #include "states/ProcStat.h" #include "states/ProcMeminfo.h" #include "statistics/CpuUsageStats.h" -#include "reports/NicThroughputThresholdMonitor.h" -#include "reports/FreeMemoryThresholdMonitor.h" - -#define PROCESSOR_NAME "DCE:fefb9c66-d14a-438d-ad59-2cfd1adb272b" -#define OS_NAME "DCE:4a23ad60-0b25-4121-a630-68803a498f75" -#define OS_VERSION "DCE:0f3a9a37-a342-43d8-9b7f-78dc6da74192" - class SigChildThread : public ThreadedComponent { @@ -120,6 +115,8 @@ class RedirectedIO : public ThreadedComponent { }; +static const uint64_t MB_TO_BYTES = 1024*1024; + uint64_t conv_units( const std::string &units ) { uint64_t unit_m=1024*1024; if ( units == "Kb" ) unit_m = 1e3; @@ -127,7 +124,7 @@ uint64_t conv_units( const std::string &units ) { if ( units == "Gb" ) unit_m = 1e9; if ( units == "Tb" ) unit_m = 1e12; if ( units == "KB" ) unit_m = 1024; - if ( units == "MB" || units == "MiB" ) unit_m = 1024*1024; + if ( units == "MB" || units == "MiB" ) unit_m = MB_TO_BYTES; if ( units == "GB" ) unit_m = 1024*1024*1024; if ( units == "TB" ) unit_m = (uint64_t)1024*1024*1024*1024; return unit_m; @@ -203,11 +200,12 @@ namespace rh_logger { // // proc_redirect class and helpers // -class FindRedirect : public std::binary_function< GPP_i::proc_redirect, int, bool > { +class FindRedirect : public std::binary_function< GPP_i::ProcRedirectPtr, int, bool > { public: - bool operator() ( const GPP_i::proc_redirect &a, const int &pid ) const { - return a.pid == pid; + // bool operator() ( const GPP_i::proc_redirect &a, const int &pid ) const { + bool operator() ( const GPP_i::ProcRedirectPtr a, const int &pid ) const { + return a->pid == pid; }; }; @@ -409,6 +407,7 @@ void GPP_i::_init() { // 
_handle_io_redirects = false; _componentOutputLog =""; + epfd=epoll_create(400); // // add our local set affinity method that performs numa library calls @@ -443,25 +442,26 @@ void GPP_i::_init() { // default cycle time setting for updating data model, metrics and state threshold_cycle_time = 500; + thresholds.ignore = false; // // Add property change listeners and allocation modifiers // // Add property change listeners for affinity information... - addPropertyChangeListener( "affinity", this, &GPP_i::_affinity_changed ); + addPropertyListener(affinity, this, &GPP_i::_affinity_changed); // add property change listener - addPropertyChangeListener("reserved_capacity_per_component", this, &GPP_i::reservedChanged); + addPropertyListener(reserved_capacity_per_component, this, &GPP_i::reservedChanged); // add property change listener - addPropertyChangeListener("DCE:c80f6c5a-e3ea-4f57-b0aa-46b7efac3176", this, &GPP_i::_component_output_changed); + addPropertyListener(componentOutputLog, this, &GPP_i::_component_output_changed); // add property change listener - addPropertyChangeListener("DCE:89be90ae-6a83-4399-a87d-5f4ae30ef7b1", this, &GPP_i::mcastnicThreshold_changed); + addPropertyListener(mcastnicThreshold, this, &GPP_i::mcastnicThreshold_changed); // add property change listener thresholds - addPropertyChangeListener("thresholds", this, &GPP_i::thresholds_changed); + addPropertyListener(thresholds, this, &GPP_i::thresholds_changed); utilization_entry_struct cpu; cpu.description = "CPU cores"; @@ -471,9 +471,6 @@ void GPP_i::_init() { cpu.maximum = 0; utilization.push_back(cpu); - // shadow property to allow for disabling of values - __thresholds = thresholds; - setPropertyQueryImpl(this->component_monitor, this, &GPP_i::get_component_monitor); // tie allocation modifier callbacks to identifiers @@ -494,7 +491,43 @@ void GPP_i::_init() { setAllocationImpl("DCE:8dcef419-b440-4bcf-b893-cab79b6024fb", this, &GPP_i::allocate_memCapacity, &GPP_i::deallocate_memCapacity); 
//setAllocationImpl("diskCapacity", this, &GPP_i::allocate_diskCapacity, &GPP_i::deallocate_diskCapacity); + + // check reservation allocations + setAllocationImpl(this->redhawk__reservation_request, this, &GPP_i::allocate_reservation_request, &GPP_i::deallocate_reservation_request); + +} + +void GPP_i::constructor() +{ + // Get the initial working directory + char buf[PATH_MAX+1]; + getcwd(buf, sizeof(buf)); + std::string path = buf; + + // If a working directory was given, change to that + if (!workingDirectory.empty()) { + if (chdir(workingDirectory.c_str())) { + RH_ERROR(_baseLog, "Cannot change working directory to " << workingDirectory); + workingDirectory = ""; + } + } + // Otherwise, default to the initial working directory + if (workingDirectory.empty()) { + workingDirectory = path; + } + + // If no cache directory given, use initial working directory + if (cacheDirectory.empty()) { + cacheDirectory = path; + } + + RH_DEBUG(_baseLog, "Working directory: " << workingDirectory); + RH_DEBUG(_baseLog, "Cache directory: " << cacheDirectory); + + shmCapacity = redhawk::shm::getSystemTotalMemory() / MB_TO_BYTES; + // Initialize system and user CPU ticks + ProcStat::GetTicks(_systemTicks, _userTicks); } @@ -532,7 +565,7 @@ void GPP_i::update_grp_child_pids() { catch(...){ std::stringstream errstr; errstr << "Unable to process id: "<_baseLog, __FUNCTION__ << ": " << errstr.str() ); continue; } } @@ -574,20 +607,20 @@ void GPP_i::update_grp_child_pids() { } catch ( ... ) { std::stringstream errstr; errstr << "Invalid line format in stat file, pid :" << _pid << " field number " << fcnt << " line " << line ; - LOG_WARN(GPP_i, __FUNCTION__ << ": " << errstr.str() ); + RH_WARN(this->_baseLog, __FUNCTION__ << ": " << errstr.str() ); continue; } } catch ( ... 
) { std::stringstream errstr; errstr << "Unable to read "<_baseLog, __FUNCTION__ << ": " << errstr.str() ); continue; } if ( fcnt < 37 ) { std::stringstream errstr; errstr << "Insufficient fields proc//stat: "<=37 received=" << fcnt << ")"; - LOG_DEBUG(GPP_i, __FUNCTION__ << ": " << errstr.str() ); + RH_DEBUG(this->_baseLog, __FUNCTION__ << ": " << errstr.str() ); continue; } parsed_stat[pid] = tmp; @@ -631,7 +664,7 @@ std::vector GPP_i::get_component_monitor() { if ((grp_children.find(_pid.pid) == grp_children.end()) or (parsed_stat.find(_pid.pid) == parsed_stat.end())) { std::stringstream errstr; errstr << "Could not find /proc/"<<_pid.pid<<"/stat. The process corresponding to component "<<_pid.identifier<<" is no longer there"; - LOG_WARN(GPP_i, __FUNCTION__ << ": " << errstr.str() ); + RH_WARN(this->_baseLog, __FUNCTION__ << ": " << errstr.str() ); continue; } component_monitor_struct tmp; @@ -682,7 +715,7 @@ void GPP_i::process_ODM(const CORBA::Any &data) { i=std::find_if( i, pids.end(), std::bind2nd( FindApp(), appId ) ); if ( i != pids.end() ) { i->app_started = true; - LOG_TRACE(GPP_i, "Monitor_Processes.. APP STARTED:" << i->pid << " app: " << i->appName ); + RH_TRACE(this->_baseLog, "Monitor_Processes.. APP STARTED:" << i->pid << " app: " << i->appName ); i++; } } @@ -695,49 +728,68 @@ void GPP_i::process_ODM(const CORBA::Any &data) { i=std::find_if( i, pids.end(), std::bind2nd( FindApp(), appId ) ); if ( i != pids.end() ) { i->app_started = false; - LOG_TRACE(GPP_i, "Monitor_Processes.. APP STOPPED :" << i->pid << " app: " << i->appName ); + RH_TRACE(this->_baseLog, "Monitor_Processes.. 
APP STOPPED :" << i->pid << " app: " << i->appName ); i++; } } } } + const StandardEvent::DomainManagementObjectRemovedEventType* app_removed; + if (data >>= app_removed) { + if (app_removed->sourceCategory == StandardEvent::APPLICATION) { + WriteLock rlock(pidLock); + std::string producerId(app_removed->producerId); + for (ApplicationReservationMap::iterator app_it=applicationReservations.begin(); app_it!=applicationReservations.end(); app_it++) { + if (app_it->first == producerId) { + applicationReservations.erase(app_it); + break; + } + } + } + } } int GPP_i::_setupExecPartitions( const CpuList &bl_cpus ) { #if HAVE_LIBNUMA - // fill in the exec partitions for each numa node identified on the system - std::string nodestr("all"); - struct bitmask *node_mask = numa_parse_nodestring((char *)nodestr.c_str()); + if ( gpp::affinity::check_numa() == true ) { + // fill in the exec partitions for each numa node identified on the system + std::string nodestr("all"); + struct bitmask *node_mask = numa_parse_nodestring((char *)nodestr.c_str()); - bitmask *cpu_mask = numa_allocate_cpumask(); + bitmask *cpu_mask = numa_allocate_cpumask(); - // for each node bit set in the mask then get cpu list - int nbytes = numa_bitmask_nbytes(node_mask); - for (int i=0; i < nbytes*8; i++ ){ - if ( numa_bitmask_isbitset( node_mask, i ) ) { - numa_node_to_cpus( i, cpu_mask ); + // for each node bit set in the mask then get cpu list + int nbytes = numa_bitmask_nbytes(node_mask); + for (int i=0; i < nbytes*8; i++ ){ + if ( numa_bitmask_isbitset( node_mask, i ) ) { + numa_node_to_cpus( i, cpu_mask ); - // foreach cpu identified add to list - int nb = numa_bitmask_nbytes(cpu_mask); - CpuUsageStats::CpuList cpus; - for (int j=0; j < nb*8; j++ ){ - int count = std::count( bl_cpus.begin(), bl_cpus.end(), j ); - if ( numa_bitmask_isbitset( cpu_mask, j ) && count == 0 ) { - cpus.push_back( j ); - } + // foreach cpu identified add to list + int nb = numa_bitmask_nbytes(cpu_mask); + 
CpuUsageStats::CpuList cpus; + for (int j=0; j < nb*8; j++ ){ + int count = std::count( bl_cpus.begin(), bl_cpus.end(), j ); + if ( numa_bitmask_isbitset( cpu_mask, j ) && count == 0 ) { + cpus.push_back( j ); + } + } + CpuUsageStats cpu_usage(cpus); + exec_socket soc; + soc.id = i; + soc.cpus = cpus; + soc.stats = cpu_usage; + if (!thresholds.ignore && (thresholds.cpu_idle >= 0.0)) { + soc.idle_threshold = thresholds.cpu_idle; + } else { + soc.idle_threshold = 0.0; + } + soc.load_capacity.max = cpus.size() * 1.0; + soc.load_capacity.measured = 0.0; + soc.load_capacity.allocated = 0.0; + execPartitions.push_back( soc ); + } } - CpuUsageStats cpu_usage(cpus); - exec_socket soc; - soc.id = i; - soc.cpus = cpus; - soc.stats = cpu_usage; - soc.idle_threshold = __thresholds.cpu_idle; - soc.load_capacity.max = cpus.size() * 1.0; - soc.load_capacity.measured = 0.0; - soc.load_capacity.allocated = 0.0; - execPartitions.push_back( soc ); - } } #endif @@ -746,13 +798,13 @@ int GPP_i::_setupExecPartitions( const CpuList &bl_cpus ) { ExecPartitionList::iterator iter = execPartitions.begin(); std::ostringstream ss; ss << boost::format("%-6s %-4s %-7s %-7s %-7s ") % "SOCKET" % "CPUS" % "USER" % "SYSTEM" % "IDLE" ; - LOG_INFO(GPP_i, ss.str() ); + RH_TRACE(this->_baseLog, ss.str() ); ss.clear(); ss.str(""); for ( ; iter != execPartitions.end(); iter++ ) { iter->update(); iter->update(); ss << boost::format("%-6d %-4d %-7.2f %-7.2f %-7.2f ") % iter->id % iter->stats.get_ncpus() % iter->stats.get_user_percent() % iter->stats.get_system_percent() % iter->stats.get_idle_percent() ; - LOG_INFO(GPP_i, ss.str() ); + RH_TRACE(this->_baseLog, ss.str() ); ss.clear(); ss.str(""); } @@ -774,25 +826,30 @@ GPP_i::initializeNetworkMonitor() data_model.push_back( nic_facade ); + _allNicsThresholdMonitor = boost::make_shared("nics", "NIC_THROUGHPUT"); + threshold_monitors.push_back(_allNicsThresholdMonitor); + std::vector nic_devices( nic_facade->get_devices() ); std::vector filtered_devices( 
nic_facade->get_filtered_devices() ); for( size_t i=0; i(modified_thresholds.nic_usage), - boost::bind(&NicFacade::get_throughput_by_device, nic_facade, nic_devices[i]) ) ); - - // monitors that affect busy state... - for ( size_t ii=0; ii < filtered_devices.size(); ii++ ) { - if ( nic_devices[i] == filtered_devices[ii] ) { - nic_monitors.push_back(nic_m); - break; - } + // Only use the filtered set of devices, which we can get away with + // here because it cannot be updated after this method runs. In the + // future, if the available NICs can be dynamically changed, all of the + // possible NICs will need to be created here and selectively marked as + // active/inactive (distinct from threshold enable/disable). + const std::string& nic = nic_devices[i]; + if (std::find(filtered_devices.begin(), filtered_devices.end(), nic) == filtered_devices.end()) { + RH_INFO(_baseLog, __FUNCTION__ << ": Skipping interface (" << nic << ")"); + continue; } - addThresholdMonitor(nic_m); + + RH_INFO(_baseLog, __FUNCTION__ << ": Adding interface (" << nic << ")"); + ThresholdMonitorPtr nic_m = boost::make_shared(nic, "NIC_THROUGHPUT", this, &GPP_i::_nicThresholdCheck); + nic_m->add_listener(this, &GPP_i::_nicThresholdStateChanged); + _allNicsThresholdMonitor->add_monitor(nic_m); } + } void @@ -800,7 +857,7 @@ GPP_i::initializeResourceMonitors() { // add cpu utilization calculator - RH_NL_INFO("GPP", " initialize CPU Montior --- wl size " << wl_cpus.size()); + RH_NL_INFO("GPP", " initialize CPU Monitor --- wl size " << wl_cpus.size()); // request a system monitor for this GPP system_monitor.reset( new SystemMonitor( wl_cpus ) ); @@ -819,30 +876,188 @@ GPP_i::initializeResourceMonitors() data_model.push_back( process_limits ); // observer to monitor when cpu idle pass threshold value - addThresholdMonitor( ThresholdMonitorPtr( new CpuThresholdMonitor(_identifier, &modified_thresholds.cpu_idle, - *(system_monitor->getCpuStats()), false ))); + _cpuIdleThresholdMonitor = 
boost::make_shared("cpu", "CPU_IDLE", this, &GPP_i::_cpuIdleThresholdCheck); + _cpuIdleThresholdMonitor->add_listener(this, &GPP_i::_cpuIdleThresholdStateChanged); + threshold_monitors.push_back(_cpuIdleThresholdMonitor); + + _loadAvgThresholdMonitor = boost::make_shared("cpu", "LOAD_AVG", this, &GPP_i::_loadAvgThresholdCheck); + _loadAvgThresholdMonitor->add_listener(this, &GPP_i::_loadAvgThresholdStateChanged); + threshold_monitors.push_back(_loadAvgThresholdMonitor); - // add available memory monitor, mem_free defaults to MB - addThresholdMonitor( ThresholdMonitorPtr( new FreeMemoryThresholdMonitor(_identifier, - MakeCref(modified_thresholds.mem_free), - ConversionWrapper(memCapacity, mem_cap_units, std::multiplies() ) ))); + _freeMemThresholdMonitor = boost::make_shared("physical_ram", "MEMORY_FREE", this, &GPP_i::_freeMemThresholdCheck); + _freeMemThresholdMonitor->add_listener(this, &GPP_i::_freeMemThresholdStateChanged); + threshold_monitors.push_back(_freeMemThresholdMonitor); + + _threadThresholdMonitor = boost::make_shared("ulimit", "THREADS", this, &GPP_i::_threadThresholdCheck); + _threadThresholdMonitor->add_listener(this, &GPP_i::_threadThresholdStateChanged); + threshold_monitors.push_back(_threadThresholdMonitor); + + _fileThresholdMonitor = boost::make_shared("ulimit", "OPEN_FILES", this, &GPP_i::_fileThresholdCheck); + _fileThresholdMonitor->add_listener(this, &GPP_i::_fileThresholdStateChanged); + threshold_monitors.push_back(_fileThresholdMonitor); + + _shmThresholdMonitor = boost::make_shared("shm", "SHM_FREE", this, &GPP_i::_shmThresholdCheck); + _shmThresholdMonitor->add_listener(this, &GPP_i::_shmThresholdStateChanged); + threshold_monitors.push_back(_shmThresholdMonitor); } -void -GPP_i::addThresholdMonitor( ThresholdMonitorPtr t ) +// +// Threshold policy and event handling +// + +bool GPP_i::_cpuIdleThresholdCheck(ThresholdMonitor* monitor) { - t->attach_listener( boost::bind(&GPP_i::send_threshold_event, this, _1) ); - 
threshold_monitors.push_back( t ); + double sys_idle = system_monitor->get_idle_percent(); + double sys_idle_avg = system_monitor->get_idle_average(); + RH_TRACE(_baseLog, "Update CPU idle threshold monitor, threshold=" << modified_thresholds.cpu_idle + << " current=" << sys_idle << " average=" << sys_idle_avg); + return (sys_idle < modified_thresholds.cpu_idle) && (sys_idle_avg < modified_thresholds.cpu_idle); } -void -GPP_i::setShadowThresholds( const thresholds_struct &nv ) { - if ( nv.cpu_idle >= 0.0 ) __thresholds.cpu_idle = nv.cpu_idle; - if ( nv.load_avg >= 0.0 ) __thresholds.load_avg = nv.load_avg; - if ( nv.mem_free >= 0 ) __thresholds.mem_free = nv.mem_free; - if ( nv.nic_usage >= 0 ) __thresholds.nic_usage = nv.nic_usage; - if ( nv.files_available >= 0.0 ) __thresholds.files_available = nv.files_available; - if ( nv.threads >= 0.0 ) __thresholds.threads = nv.threads; +void GPP_i::_cpuIdleThresholdStateChanged(ThresholdMonitor* monitor) +{ + _sendThresholdMessage(monitor, system_monitor->get_idle_percent(), modified_thresholds.cpu_idle); +} + +bool GPP_i::_loadAvgThresholdCheck(ThresholdMonitor* monitor) +{ + double load_avg = system_monitor->get_loadavg(); + RH_TRACE(_baseLog, "Update load average threshold monitor, threshold=" << modified_thresholds.load_avg + << " measured=" << load_avg); + return (load_avg > modified_thresholds.load_avg); +} + +void GPP_i::_loadAvgThresholdStateChanged(ThresholdMonitor* monitor) +{ + _sendThresholdMessage(monitor, system_monitor->get_loadavg(), modified_thresholds.load_avg); +} + +bool GPP_i::_freeMemThresholdCheck(ThresholdMonitor* monitor) +{ + int64_t mem_free = system_monitor->get_mem_free(); + RH_TRACE(_baseLog, "Update free memory threshold monitor, threshold=" << modified_thresholds.mem_free + << " measured=" << mem_free); + return (mem_free < modified_thresholds.mem_free); +} + +void GPP_i::_freeMemThresholdStateChanged(ThresholdMonitor* monitor) +{ + _sendThresholdMessage(monitor, 
system_monitor->get_mem_free(), modified_thresholds.mem_free); +} + +bool GPP_i::_threadThresholdCheck(ThresholdMonitor* monitor) +{ + int gpp_max_threads = gpp_limits.max_threads * modified_thresholds.threads; + if (gpp_limits.max_threads != -1) { + RH_TRACE(_baseLog, "Update thread threshold monitor (GPP), threshold=" << gpp_max_threads + << " measured=" << gpp_limits.current_threads); + if (gpp_limits.current_threads > gpp_max_threads) { + return true; + } + } + int sys_max_threads = sys_limits.max_threads * modified_thresholds.threads; + if (sys_limits.max_threads != -1) { + RH_TRACE(_baseLog, "Update thread threshold monitor (system), threshold=" << sys_max_threads + << " measured=" << sys_limits.current_threads); + if (sys_limits.current_threads > sys_max_threads) { + return true; + } + } + + return false; +} + +void GPP_i::_threadThresholdStateChanged(ThresholdMonitor* monitor) +{ + _sendThresholdMessage(monitor, gpp_limits.current_threads, gpp_limits.max_threads * modified_thresholds.threads); +} + +bool GPP_i::_fileThresholdCheck(ThresholdMonitor* monitor) +{ + int gpp_max_open_files = gpp_limits.max_open_files * modified_thresholds.files_available; + int sys_max_open_files = sys_limits.max_open_files * modified_thresholds.files_available; + RH_TRACE(_baseLog, "Update file threshold monitor (GPP), threshold=" << gpp_max_open_files + << " measured=" << gpp_limits.current_open_files); + RH_TRACE(_baseLog, "Update file threshold monitor (system), threshold=" << sys_max_open_files + << " measured=" << sys_limits.current_open_files); + if (gpp_limits.current_open_files > gpp_max_open_files) { + return true; + } else if (sys_limits.current_open_files > sys_max_open_files) { + return true; + } + return false; +} + +void GPP_i::_fileThresholdStateChanged(ThresholdMonitor* monitor) +{ + _sendThresholdMessage(monitor, gpp_limits.current_open_files, + gpp_limits.max_open_files * modified_thresholds.files_available); +} + +bool 
GPP_i::_shmThresholdCheck(ThresholdMonitor* monitor) +{ + RH_TRACE(_baseLog, "Update shared memory threshold monitor, threshold=" << modified_thresholds.shm_free + << " measured=" << shmFree); + return shmFree < modified_thresholds.shm_free; +} + +void GPP_i::_shmThresholdStateChanged(ThresholdMonitor* monitor) +{ + _sendThresholdMessage(monitor, shmFree, modified_thresholds.shm_free); +} + +bool GPP_i::_nicThresholdCheck(ThresholdMonitor* monitor) +{ + const std::string& nic = monitor->get_resource_id(); + float measured = nic_facade->get_throughput_by_device(nic); + RH_TRACE(_baseLog, "Update NIC threshold monitor " << nic + << " threshold=" << modified_thresholds.nic_usage + << " measured=" << measured); + return measured >= modified_thresholds.nic_usage; +} + +void GPP_i::_nicThresholdStateChanged(ThresholdMonitor* monitor) +{ + std::string nic = monitor->get_resource_id(); + float measured = nic_facade->get_throughput_by_device(nic); + _sendThresholdMessage(monitor, measured, modified_thresholds.nic_usage); +} + +template <class T1, class T2> +void GPP_i::_sendThresholdMessage(ThresholdMonitor* monitor, const T1& measured, const T2& threshold) +{ + bool exceeded = monitor->is_threshold_exceeded(); + + threshold_event_struct message; + message.source_id = _identifier; + message.resource_id = monitor->get_resource_id(); + message.threshold_class = monitor->get_threshold_class(); + if (exceeded) { + message.type = enums::threshold_event::type::Threshold_Exceeded; + } else { + message.type = enums::threshold_event::type::Threshold_Not_Exceeded; + } + if (monitor->is_enabled()) { + message.threshold_value = boost::lexical_cast<std::string>(threshold); + } else { + message.threshold_value = ""; + } + message.measured_value = boost::lexical_cast<std::string>(measured); + + std::stringstream sstr; + sstr << message.threshold_class << " threshold "; + if (!exceeded) { + sstr << "not "; + } + sstr << "exceeded " + << "(resource_id=" << message.resource_id + << " threshold_value=" << message.threshold_value + << " 
measured_value=" << message.measured_value << ")"; + message.message = sstr.str(); + + message.timestamp = time(NULL); + + send_threshold_event(message); } // @@ -860,7 +1075,7 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc odm_consumer = mymgr->Subscriber("ODM_Channel"); odm_consumer->setDataArrivedListener(this, &GPP_i::process_ODM); } catch ( ... ) { - LOG_WARN(GPP_i, "Unable to register with EventChannelManager, disabling domain event notification."); + RH_WARN(this->_baseLog, "Unable to register with EventChannelManager, disabling domain event notification."); } // @@ -869,10 +1084,10 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc if ( componentOutputLog != "" ) { _componentOutputLog =__ExpandEnvVars(componentOutputLog); _handle_io_redirects = true; - LOG_INFO(GPP_i, "Turning on Component Output Redirection file: " << _componentOutputLog ); + RH_INFO(this->_baseLog, "Turning on Component Output Redirection file: " << _componentOutputLog ); } else { - LOG_INFO(GPP_i, "Component Output Redirection is DISABLED." << componentOutputLog ); + RH_INFO(this->_baseLog, "Component Output Redirection is DISABLED." 
<< componentOutputLog ); } // @@ -924,14 +1139,14 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc // const SystemMonitor::Report &rpt = system_monitor->getReport(); - // thresholds can be individually disabled, use shadow thresholds for actual calculations and conditions - setShadowThresholds( thresholds ); - // // load average attributes // loadTotal = loadCapacityPerCore * (float)processor_cores; - loadCapacity = loadTotal * ((double)__thresholds.load_avg / 100.0); + loadCapacity = loadTotal; + if (!thresholds.ignore && thresholds.load_avg >= 0.0) { + loadCapacity *= thresholds.load_avg / 100.0; + } loadFree = loadCapacity; idle_capacity_modifier = 100.0 * reserved_capacity_per_component/((float)processor_cores); @@ -940,19 +1155,25 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc // memInitVirtFree=rpt.virtual_memory_free; // assume current state to be total available int64_t init_mem_free = (int64_t) memInitVirtFree; - memInitCapacityPercent = (double)( (int64_t)init_mem_free - (int64_t)(__thresholds.mem_free*thresh_mem_free_units) )/ (double)init_mem_free; - if ( memInitCapacityPercent < 0.0 ) memInitCapacityPercent = 100.0; + if (!thresholds.ignore && thresholds.mem_free >= 0) { + memInitCapacityPercent = (double)( (int64_t)init_mem_free - (int64_t)(thresholds.mem_free*thresh_mem_free_units) )/ (double)init_mem_free; + if (memInitCapacityPercent < 0.0) { + memInitCapacityPercent = 100.0; + } + } else { + memInitCapacityPercent = 100.0; + } memFree = init_mem_free / mem_free_units; memCapacity = ((int64_t)( init_mem_free * memInitCapacityPercent)) / mem_cap_units ; memCapacityThreshold = memCapacity; + shmFree = redhawk::shm::getSystemFreeMemory() / MB_TO_BYTES; + // // set initial modified thresholds // modified_thresholds = thresholds; - modified_thresholds.mem_free = __thresholds.mem_free*thresh_mem_free_units; - modified_thresholds.load_avg = loadTotal * ( (double)__thresholds.load_avg 
/ 100.0); - modified_thresholds.cpu_idle = __thresholds.cpu_idle; + thresholds_changed(thresholds, thresholds); loadAverage.onemin = rpt.load.one_min; loadAverage.fivemin = rpt.load.five_min; @@ -973,12 +1194,6 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc gpp_limits.current_open_files = pid_rpt.files; gpp_limits.max_open_files = pid_rpt.files_limit; - // enable monitors to push out state change events.. - MonitorSequence::iterator iter=threshold_monitors.begin(); - for( ; iter != threshold_monitors.end(); iter++ ) { - if ( *iter ) (*iter)->enable_dispatch(); - } - // // setup mcast interface allocations, used by older systems -- need to deprecate // @@ -1004,44 +1219,97 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc } -void GPP_i::thresholds_changed(const thresholds_struct *ov, const thresholds_struct *nv) { - - if ( !(nv->mem_free < 0 ) && ov->mem_free != nv->mem_free ) { - LOG_DEBUG(GPP_i, __FUNCTION__ << " THRESHOLDS.MEM_FREE CHANGED old/new " << ov->mem_free << "/" << nv->mem_free ); - WriteLock wlock(pidLock); - int64_t init_mem_free = (int64_t) memInitVirtFree; +void GPP_i::thresholds_changed(const thresholds_struct& ov, const thresholds_struct& nv) +{ + WriteLock wlock(monitorLock); + if (nv.ignore || (nv.mem_free < 0)) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.MEM_FREE DISABLED"); + modified_thresholds.mem_free = 0; + _freeMemThresholdMonitor->disable(); + } else { + if (ov.mem_free != nv.mem_free) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.MEM_FREE CHANGED old/new " << ov.mem_free << "/" << nv.mem_free); + } + int64_t init_mem_free = (int64_t) memInitVirtFree; // type cast required for correct calc on 32bit os - memInitCapacityPercent = (double)( (int64_t)init_mem_free - (int64_t)(nv->mem_free*thresh_mem_free_units) )/ (double) init_mem_free; + memInitCapacityPercent = (double)( (int64_t)init_mem_free - (int64_t)(nv.mem_free*thresh_mem_free_units) )/ (double) 
init_mem_free; if ( memInitCapacityPercent < 0.0 ) memInitCapacityPercent = 100.0; - memCapacity = ((int64_t)( init_mem_free * memInitCapacityPercent) ) / mem_cap_units ; + memCapacity = ((int64_t)( init_mem_free * memInitCapacityPercent) ) / mem_cap_units; memCapacityThreshold = memCapacity; - modified_thresholds.mem_free = nv->mem_free*thresh_mem_free_units; + modified_thresholds.mem_free = nv.mem_free*thresh_mem_free_units; + _freeMemThresholdMonitor->enable(); } - - if ( !(nv->load_avg < 0.0) && !(fabs(ov->load_avg - nv->load_avg ) < std::numeric_limits::epsilon()) ) { - LOG_DEBUG(GPP_i, __FUNCTION__ << " THRESHOLDS.LOAD_AVG CHANGED old/new " << ov->load_avg << "/" << nv->load_avg ); - WriteLock wlock(pidLock); - loadCapacity = loadTotal * ((double)nv->load_avg / 100.0); + if (nv.ignore || (nv.load_avg < 0.0)) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.LOAD_AVG DISABLED"); + modified_thresholds.load_avg = loadTotal; + _loadAvgThresholdMonitor->disable(); + } else { + if (fabs(ov.load_avg - nv.load_avg) >= std::numeric_limits::epsilon()) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.LOAD_AVG CHANGED old/new " << ov.load_avg << "/" << nv.load_avg); + } + loadCapacity = loadTotal * ((double)nv.load_avg / 100.0); loadFree = loadCapacity; - modified_thresholds.load_avg = loadTotal * ( (double)nv->load_avg / 100.0); + modified_thresholds.load_avg = loadTotal * ( (double)nv.load_avg / 100.0); + _loadAvgThresholdMonitor->enable(); } - if ( !(nv->cpu_idle < 0.0) && !(fabs(ov->cpu_idle - nv->cpu_idle ) < std::numeric_limits::epsilon())) { - LOG_DEBUG(GPP_i, __FUNCTION__ << " THRESHOLDS.CPU_IDLE CHANGED old/new " << ov->cpu_idle << "/" << nv->cpu_idle ); - WriteLock wlock(pidLock); - modified_thresholds.cpu_idle = nv->cpu_idle; + if (nv.ignore || (nv.cpu_idle < 0.0)) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.CPU_IDLE DISABLED"); + modified_thresholds.cpu_idle = 0.0; + _cpuIdleThresholdMonitor->disable(); + } else { + if (fabs(ov.cpu_idle - 
nv.cpu_idle) >= std::numeric_limits::epsilon()) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.CPU_IDLE CHANGED old/new " << ov.cpu_idle << "/" << nv.cpu_idle); + } + modified_thresholds.cpu_idle = nv.cpu_idle; + _cpuIdleThresholdMonitor->enable(); } + if (nv.ignore || (nv.nic_usage < 0)) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.NIC_USAGE DISABLED"); + _allNicsThresholdMonitor->disable(); + } else { + if (ov.nic_usage != nv.nic_usage) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.NIC_USAGE CHANGED old/new " << ov.nic_usage << "/" << nv.nic_usage); + } + modified_thresholds.nic_usage = nv.nic_usage; + _allNicsThresholdMonitor->enable(); + } - if ( !(nv->nic_usage < 0) && !(fabs(ov->nic_usage - nv->nic_usage ) < std::numeric_limits::epsilon())) { - LOG_DEBUG(GPP_i, __FUNCTION__ << " THRESHOLDS.NIC_USAGE CHANGED old/new " << ov->nic_usage << "/" << nv->nic_usage ); - WriteLock wlock(monitorLock); - modified_thresholds.nic_usage = nv->nic_usage; + if (nv.ignore || (nv.shm_free < 0)) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.SHM_FREE DISABLED"); + _shmThresholdMonitor->disable(); + } else { + if (ov.shm_free != nv.shm_free) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.SHM_FREE CHANGED old/new " + << ov.shm_free << "/" << nv.shm_free); + } + modified_thresholds.shm_free = nv.shm_free; + _shmThresholdMonitor->enable(); } - setShadowThresholds( *nv ); + if (nv.ignore || (nv.threads < 0.0)) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.THREADS DISABLED"); + _threadThresholdMonitor->disable(); + } else { + if (fabs(ov.threads - nv.threads) >= std::numeric_limits::epsilon()) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.THREADS CHANGED old/new " << ov.threads << "/" << nv.threads); + } + modified_thresholds.threads = 1.0 - (nv.threads * .01); + _threadThresholdMonitor->enable(); + } + if (nv.ignore || (nv.files_available < 0.0)) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.FILES_AVAILABLE DISABLED"); + 
_fileThresholdMonitor->disable(); + } else { + if (fabs(ov.files_available - nv.files_available) >= std::numeric_limits::epsilon()) { + RH_DEBUG(_baseLog, __FUNCTION__ << " THRESHOLDS.FILES_AVAILABLE CHANGED old/new " << ov.files_available << "/" << nv.files_available); + } + modified_thresholds.files_available = 1.0 - (nv.files_available * .01); + _fileThresholdMonitor->enable(); + } } void GPP_i::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) { @@ -1060,7 +1328,6 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::execute (const char* name, const CF: CF::ExecutableDevice::InvalidParameters, CF::ExecutableDevice::InvalidOptions, CF::InvalidFileName, CF::ExecutableDevice::ExecuteFail) { - boost::recursive_mutex::scoped_lock lock; try { @@ -1070,47 +1337,46 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::execute (const char* name, const CF: { std::stringstream errstr; errstr << "Error acquiring lock (errno=" << e.native_error() << " msg=\"" << e.what() << "\")"; - LOG_ERROR(GPP_i, __FUNCTION__ << ": " << errstr.str() ); + RH_ERROR(this->_baseLog, __FUNCTION__ << ": " << errstr.str() ); throw CF::Device::InvalidState(errstr.str().c_str()); } - std::vector prepend_args; - std::string naming_context_ior; - CF::Properties variable_parameters; - variable_parameters = parameters; - redhawk::PropertyMap& tmp_params = redhawk::PropertyMap::cast(variable_parameters); + redhawk::PropertyMap tmp_params(parameters); float reservation_value = -1; if (tmp_params.find("RH::GPP::MODIFIED_CPU_RESERVATION_VALUE") != tmp_params.end()) { - double reservation_value_d; - if (!tmp_params["RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"].getValue(reservation_value)) { - if (tmp_params["RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"].getValue(reservation_value_d)) { - reservation_value = reservation_value_d; - } else { - reservation_value = -1; - } + try { + reservation_value = tmp_params["RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"].toFloat(); + } catch (const std::exception&) { + 
reservation_value = -1; } tmp_params.erase("RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"); } - naming_context_ior = tmp_params["NAMING_CONTEXT_IOR"].toString(); + + std::string component_id = tmp_params.get("COMPONENT_IDENTIFIER", std::string()).toString(); + if (applicationReservations.find(component_id) != applicationReservations.end()) { + applicationReservations.erase(component_id); + } + + std::string naming_context_ior = tmp_params.get("NAMING_CONTEXT_IOR", std::string()).toString(); std::string app_id; - std::string component_id = tmp_params["COMPONENT_IDENTIFIER"].toString(); - std::string name_binding = tmp_params["NAME_BINDING"].toString(); - CF::Application_var _app = CF::Application::_nil(); - CORBA::Object_var obj = ossie::corba::Orb()->string_to_object(naming_context_ior.c_str()); - if (CORBA::is_nil(obj)) { - LOG_WARN(GPP_i, "Invalid application registrar IOR"); - } else { - CF::ApplicationRegistrar_var _appRegistrar = CF::ApplicationRegistrar::_nil(); - _appRegistrar = CF::ApplicationRegistrar::_narrow(obj); - if (CORBA::is_nil(_appRegistrar)) { - LOG_WARN(GPP_i, "Invalid application registrar IOR"); + if (!naming_context_ior.empty()) { + CORBA::Object_var obj = ossie::corba::Orb()->string_to_object(naming_context_ior.c_str()); + if (CORBA::is_nil(obj)) { + RH_WARN(_baseLog, "Invalid application registrar IOR"); } else { - _app = _appRegistrar->app(); - if (not CORBA::is_nil(_app)) { - app_id = ossie::corba::returnString(_app->identifier()); + CF::ApplicationRegistrar_var app_registrar = CF::ApplicationRegistrar::_narrow(obj); + if (CORBA::is_nil(app_registrar)) { + RH_WARN(_baseLog, "Invalid application registrar IOR"); + } else { + CF::Application_var application = app_registrar->app(); + if (!CORBA::is_nil(application)) { + app_id = ossie::corba::returnString(application->identifier()); + } } } } + + std::vector prepend_args; if (useScreen) { std::string ld_lib_path(getenv("LD_LIBRARY_PATH")); setenv("GPP_LD_LIBRARY_PATH",ld_lib_path.c_str(),1); @@ 
-1146,6 +1412,8 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::execute (const char* name, const CF: prepend_args.push_back("-m"); prepend_args.push_back("-c"); prepend_args.push_back(binary_location+"gpp.screenrc"); + + std::string name_binding = tmp_params.get("NAME_BINDING", std::string()).toString(); if ((not component_id.empty()) and (not name_binding.empty())) { if (component_id.find("DCE:") != std::string::npos) { component_id = component_id.substr(4, std::string::npos); @@ -1160,6 +1428,60 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::execute (const char* name, const CF: prepend_args.push_back(waveform_name+"."+name_binding); } } + bool useDocker = false; + if (tmp_params.find("__DOCKER_IMAGE__") != tmp_params.end()) { + std::string image_name = tmp_params["__DOCKER_IMAGE__"].toString(); + LOG_DEBUG(GPP_i, __FUNCTION__ << "Component specified a Docker image: " << image_name); + std::string target = GPP_i::find_exec("docker"); + if(!target.empty()) { + char buffer[128]; + std::string result = ""; + std::string docker_query = target + " image -q " + image_name; + FILE* pipe = popen(docker_query.c_str(), "r"); + if (!pipe) + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EINVAL, "Could not run popen"); + try { + while (!feof(pipe)) { + if (fgets(buffer, 128, pipe) != NULL) { + result += buffer; + } + } + } catch (...) 
{ + pclose(pipe); + throw; + } + pclose(pipe); + if (result.empty()) { + CF::Properties invalidParameters; + invalidParameters.length(invalidParameters.length() + 1); + invalidParameters[invalidParameters.length() -1].id = "__DOCKER_IMAGE__"; + invalidParameters[invalidParameters.length() -1].value <<= image_name.c_str(); + throw CF::ExecutableDevice::InvalidParameters(invalidParameters); + } + std::string container_name(component_id); + std::replace(container_name.begin(), container_name.end(), ':', '-'); + prepend_args.push_back(target); + prepend_args.push_back("run"); + prepend_args.push_back("--sig-proxy=true"); + prepend_args.push_back("--rm"); + prepend_args.push_back("--name"); + prepend_args.push_back(container_name); + prepend_args.push_back("--net=host"); + prepend_args.push_back("-v"); + prepend_args.push_back(docker_omniorb_cfg+":/etc/omniORB.cfg"); + if ( tmp_params.find("__DOCKER_ARGS__") != tmp_params.end()) { + std::string docker_args_raw = tmp_params["__DOCKER_ARGS__"].toString(); + std::vector<std::string> docker_args; + boost::split(docker_args, docker_args_raw, boost::is_any_of(" ")); + BOOST_FOREACH( const std::string& arg, docker_args) { + prepend_args.push_back(arg); + } + } + prepend_args.push_back(image_name); + LOG_DEBUG(GPP_i, __FUNCTION__ << "Component will launch within a Docker container using this image: " << image_name); + useDocker = true; + } + } CF::ExecutableDevice::ProcessID_Type ret_pid; try { ret_pid = do_execute(name, options, tmp_params, prepend_args); @@ -1200,7 +1522,7 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const invalidOptions[invalidOptions.length() - 1].value = options[i].value; } else - LOG_WARN(GPP_i, "Received a PRIORITY_ID execute option...ignoring.") + RH_WARN(this->_baseLog, "Received a PRIORITY_ID execute option...ignoring.") } if (options[i].id == CF::ExecutableDevice::STACK_SIZE_ID) { CORBA::TypeCode_var atype = options[i].value.type(); @@ -1210,7 +1532,7 @@ 
CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const invalidOptions[invalidOptions.length() - 1].value = options[i].value; } else - LOG_WARN(GPP_i, "Received a STACK_SIZE_ID execute option...ignoring.") + RH_WARN(this->_baseLog, "Received a STACK_SIZE_ID execute option...ignoring.") } } @@ -1219,10 +1541,17 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const } // retrieve current working directory - tmp = getcwd(NULL, 200); - if (tmp != NULL) { - path = std::string(tmp); - free(tmp); + if (this->cacheDirectory.empty()) { + tmp = getcwd(NULL, 200); + if (tmp != NULL) { + path = std::string(tmp); + free(tmp); + } + } else { + path = this->cacheDirectory; + if (!path.compare(path.length()-1, 1, "/")) { + path = path.erase(path.length()-1); + } } // append relative path of the executable @@ -1237,7 +1566,7 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const // change permissions to 7-- if (chmod(path.c_str(), S_IRWXU) != 0) { - LOG_ERROR(GPP_i, "Unable to change permission on executable"); + RH_ERROR(this->_baseLog, "Unable to change permission on executable"); throw CF::ExecutableDevice::ExecuteFail(CF::CF_EACCES, "Unable to change permission on executable"); } @@ -1264,15 +1593,15 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const } args.push_back(path); - LOG_DEBUG(GPP_i, "Building param list for process " << path); + RH_DEBUG(this->_baseLog, "Building param list for process " << path); for (CORBA::ULong i = 0; i < parameters.length(); ++i) { - LOG_DEBUG(GPP_i, "id=" << ossie::corba::returnString(parameters[i].id) << " value=" << ossie::any_to_string(parameters[i].value)); + RH_DEBUG(this->_baseLog, "id=" << ossie::corba::returnString(parameters[i].id) << " value=" << ossie::any_to_string(parameters[i].value)); CORBA::TypeCode_var atype = parameters[i].value.type(); args.push_back(ossie::corba::returnString(parameters[i].id)); 
args.push_back(ossie::any_to_string(parameters[i].value)); } - LOG_DEBUG(GPP_i, "Forking process " << path); + RH_DEBUG(this->_baseLog, "Forking process " << path); std::vector argv(args.size() + 1, NULL); for (std::size_t i = 0; i < args.size(); ++i) { @@ -1285,17 +1614,31 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const // setup to capture stdout and stderr from children. int comp_fd[2]; + std::string rfname; if ( _handle_io_redirects ) { - if ( pipe( comp_fd ) == -1 ) { - LOG_ERROR(GPP_i, "Failure to create redirected IO for:" << path); - throw CF::ExecutableDevice::ExecuteFail(CF::CF_EPERM, "Failure to create redirected IO for component"); - } + rfname=__ExpandEnvVars(componentOutputLog); + rfname=__ExpandProperties(rfname, parameters ); + if ( rfname == _componentOutputLog ) { + RH_TRACE(this->_baseLog, "Redirect to common file for :" << path << " file: " << rfname ); + if ( pipe( comp_fd ) == -1 ) { + RH_ERROR(this->_baseLog, "Failure to create redirected IO for:" << path); + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EPERM, "Failure to create redirected IO for component"); + } - if ( fcntl( comp_fd[0], F_SETFD, FD_CLOEXEC ) == -1 ) { - LOG_ERROR(GPP_i, "Failure to support redirected IO for:" << path); - throw CF::ExecutableDevice::ExecuteFail(CF::CF_EPERM, "Failure to support redirected IO for component"); - } - + if ( fcntl( comp_fd[0], F_SETFD, FD_CLOEXEC ) == -1 ) { + RH_ERROR(this->_baseLog, "Failure to support redirected IO for:" << path); + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EPERM, "Failure to support redirected IO for component"); + } + } + else { // per process logging + RH_TRACE(this->_baseLog, "Redirect per process for :" << path << " file: " << rfname ); + comp_fd[0]=-1; + comp_fd[1] = open(rfname.c_str(), O_RDWR | O_CREAT | O_APPEND , S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ); + if ( comp_fd[1] == -1 ) { + RH_ERROR(this->_baseLog, "Failure to create redirected IO for:" << path); 
+ throw CF::ExecutableDevice::ExecuteFail(CF::CF_EPERM, "Failure to create redirected IO for component"); + } + } } // fork child process @@ -1356,7 +1699,7 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const exit(-1); } - close(comp_fd[0]); + if ( comp_fd[0] != -1 ) close(comp_fd[0]); close(comp_fd[1]); } @@ -1389,7 +1732,7 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const exit(returnval); } else if (pid < 0 ){ - LOG_ERROR(GPP_i, "Error forking child process (errno: " << errno << " msg=\"" << strerror(errno) << "\")" ); + RH_ERROR(this->_baseLog, "Error forking child process (errno: " << errno << " msg=\"" << strerror(errno) << "\")" ); switch (errno) { case E2BIG: throw CF::ExecutableDevice::ExecuteFail(CF::CF_E2BIG, @@ -1423,16 +1766,20 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const if ( _handle_io_redirects ) { close(comp_fd[1]); - LOG_TRACE(GPP_i, "Adding Task for IO Redirection PID:" << pid << " : stdout "<< comp_fd[0] ); + RH_TRACE(this->_baseLog, "Adding Task for IO Redirection PID:" << pid << " : stdout "<< comp_fd[0] ); WriteLock wlock(fdsLock); - // trans form file name if contains env or exec param expansion - std::string rfname=__ExpandEnvVars(componentOutputLog); - rfname=__ExpandProperties(rfname, parameters ); - redirectedFds.push_front( proc_redirect( rfname, pid, comp_fd[0] ) ); + if ( comp_fd[0] != -1 ) { + ProcRedirectPtr rd = ProcRedirectPtr( new proc_redirect( rfname, pid, comp_fd[0] ) ); + redirectedFds.push_front( rd ); + epoll_event event; + event.data.ptr = (void*)(rd.get()); + event.events = EPOLLIN; + int ret __attribute__((unused)) = epoll_ctl (epfd, EPOLL_CTL_ADD, comp_fd[0], &event); + } } - LOG_DEBUG(GPP_i, "Execute success: name:" << name << " : "<< path); + RH_DEBUG(this->_baseLog, "Execute success: name:" << name << " : "<< path); return pid; } @@ -1440,8 +1787,7 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* 
name, const void GPP_i::terminate (CF::ExecutableDevice::ProcessID_Type processId) throw (CORBA::SystemException, CF::ExecutableDevice::InvalidProcess, CF::Device::InvalidState) { - LOG_TRACE(GPP_i, " Terminate request, processID: " << processId); - component_description comp; + RH_TRACE(this->_baseLog, " Terminate request, processID: " << processId); try { markPidTerminated( processId ); ExecutableDevice_impl::terminate(processId); @@ -1451,116 +1797,36 @@ void GPP_i::terminate (CF::ExecutableDevice::ProcessID_Type processId) throw (CO removeProcess(processId); } -bool GPP_i::_component_cleanup( const int child_pid, const int exit_status ) { - - - bool ret=false; - component_description comp; - try { - comp = getComponentDescription(child_pid); - ret=true; - if ( !comp.terminated ) { - // release of component can exit process before terminate is called - if ( WIFEXITED(exit_status) == 0 ) { - LOG_ERROR(GPP_i, " Unexpected Component Failure, App/Identifier/Process: " << - comp.appName << "/" << comp.identifier << "/" << comp.terminated << "/" << child_pid << - " STATUS==" << WIFEXITED(exit_status) << "," << WEXITSTATUS(exit_status) << - "," <get_threshold_value(); - actual += monitor->get_measured_value(); - - LOG_TRACE(GPP_i, __FUNCTION__ << ": NicThreshold: " << monitor->get_resource_id() << " exceeded " << monitor->is_threshold_exceeded() << " threshold=" << monitor->get_threshold() << " measured=" << monitor->get_measured()); - if ( monitor->is_threshold_exceeded() ) nic_exceeded++; - } - - if ( nic_monitors.size() != 0 && nic_monitors.size() == nic_exceeded ) { - std::ostringstream oss; - oss << "Threshold (cumulative) : " << threshold << " Actual (cumulative) : " << actual; - _setReason( "NIC USAGE ", oss.str() ); - retval = true; - } - - return retval; -} - -bool GPP_i::_check_thread_limits( const thresholds_struct &thresholds) -{ - float _tthreshold = 1 - __thresholds.threads * .01; - - if (gpp_limits.max_threads != -1) { - // - // check current process 
 limits - // - LOG_TRACE(GPP_i, "_gpp_check_limits threads (cur/max): " << gpp_limits.current_threads << "/" << gpp_limits.max_threads ); - if (gpp_limits.current_threads>(gpp_limits.max_threads*_tthreshold)) { - LOG_WARN(GPP_i, "GPP process thread limit threshold exceeded, count/threshold: " << gpp_limits.current_threads << "/" << (gpp_limits.max_threads*_tthreshold) ); - return true; - } + component_description comp; + try { + comp = getComponentDescription(pid); + } catch (...) { + // pass.. could be a pid from a popen or system commands.. + return false; } - if ( sys_limits.max_threads != -1 ) { - // - // check current system limits - // - LOG_TRACE(GPP_i, "_sys_check_limits threads (cur/max): " << sys_limits.current_threads << "/" << sys_limits.max_threads ); - if (sys_limits.current_threads>( sys_limits.max_threads *_tthreshold)) { - LOG_WARN(GPP_i, "SYSTEM thread limit threshold exceeded, count/threshold: " << sys_limits.current_threads << "/" << (sys_limits.max_threads*_tthreshold) ); - return true; - } + if (!comp.terminated) { + // release of component can exit process before terminate is called + if (WIFEXITED(status) && (WEXITSTATUS(status) != 0)) { + RH_ERROR(this->_baseLog, "Unexpected component exit with non-zero status " << WEXITSTATUS(status) + << ", App/Identifier/Process: " << comp.appName << "/" << comp.identifier << "/" << pid); + sendChildNotification(comp.identifier, comp.appName); + } else if (WIFSIGNALED(status)) { + RH_ERROR(this->_baseLog, "Unexpected component termination with signal " << WTERMSIG(status) + << ", App/Identifier/Process: " << comp.appName << "/" << comp.identifier << "/" << pid); + sendChildNotification(comp.identifier, comp.appName); + } } - return false; -} -bool GPP_i::_check_file_limits( const thresholds_struct &thresholds) -{ - float _fthreshold = 1 - __thresholds.files_available * .01; + removeProcess(pid); - if (gpp_limits.max_open_files != -1) { - // - // check current process limits - // - LOG_TRACE(GPP_i, 
"_gpp_check_limits threads (cur/max): " << gpp_limits.current_open_files << "/" << gpp_limits.max_open_files ); - if (gpp_limits.current_open_files>(gpp_limits.max_open_files*_fthreshold)) { - LOG_WARN(GPP_i, "GPP process thread limit threshold exceeded, count/threshold: " << gpp_limits.current_open_files << "/" << (gpp_limits.max_open_files*_fthreshold) ); - return true; - } - } + // Ensure that if the process created a shared memory heap, it gets removed + // to avoid wasting shared memory + _cleanupProcessShm(pid); - if ( sys_limits.max_open_files != -1 ) { - // - // check current system limits - // - LOG_TRACE(GPP_i, "_sys_check_limits threads (cur/max): " << sys_limits.current_open_files << "/" << sys_limits.max_open_files ); - if (sys_limits.current_open_files>( sys_limits.max_open_files *_fthreshold)) { - LOG_WARN(GPP_i, "SYSTEM thread limit threshold exceeded, count/threshold: " << sys_limits.current_open_files << "/" << (sys_limits.max_open_files*_fthreshold) ); - return true; - } - } - return false; + return true; } @@ -1572,6 +1838,21 @@ bool GPP_i::_check_file_limits( const thresholds_struct &thresholds) void GPP_i::updateUsageState() { + // allow for global ignore of thresholds + if ( thresholds.ignore == true ) { + _resetBusyReason(); + RH_TRACE(_baseLog, "Ignoring threshold checks "); + if (getPids().size() == 0) { + RH_TRACE(_baseLog, "Usage State IDLE (trigger) pids === 0... "); + setUsageState(CF::Device::IDLE); + } + else { + RH_TRACE(_baseLog, "Usage State ACTIVE..... 
"); + setUsageState(CF::Device::ACTIVE); + } + return; + } + double sys_idle = system_monitor->get_idle_percent(); double sys_idle_avg = system_monitor->get_idle_average(); double sys_load = system_monitor->get_loadavg(); @@ -1581,122 +1862,117 @@ void GPP_i::updateUsageState() double max_allowable_load = utilization[0].maximum; double subscribed = utilization[0].subscribed; - - { - std::stringstream oss; - ReadLock rlock(monitorLock); - NicMonitorSequence::iterator iter=nic_monitors.begin(); - for( ; iter != nic_monitors.end(); iter++ ) { - NicMonitorPtr m = *iter; - oss << " Nic: " << m->get_resource_id() << " exceeded " << m->is_threshold_exceeded() << " threshold=" << m->get_threshold() << " measured=" << m->get_measured() << std::endl; - } - - LOG_DEBUG(GPP_i, "USAGE STATE: " << std::endl << - " CPU: threshold " << modified_thresholds.cpu_idle << " Actual: " << sys_idle << " Avg: " << sys_idle_avg << std::endl << - " MEM: threshold " << modified_thresholds.mem_free << " Actual: " << mem_free << std::endl << - " LOAD: threshold " << modified_thresholds.load_avg << " Actual: " << sys_load << std::endl << - " RESRV: threshold " << max_allowable_load << " Actual: " << subscribed << std::endl << - " Ingress threshold: " << mcastnicIngressThresholdValue << " capacity: " << mcastnicIngressCapacity << std::endl << - " Egress threshold: " << mcastnicEgressThresholdValue << " capacity: " << mcastnicEgressCapacity << std::endl << - " Threads threshold: " << gpp_limits.max_threads << " Actual: " << gpp_limits.current_threads << std::endl << - " NIC: " << std::endl << oss.str() - ); - } + uint64_t all_nics_threshold = 0; + double all_nics_throughput = 0.0; + + ReadLock rlock(monitorLock); + + std::stringstream nic_message; + std::vector filtered_nics = nic_facade->get_filtered_devices(); + for (size_t index = 0; index < filtered_nics.size(); ++index) { + const std::string& nic = filtered_nics[index]; + double throughput = nic_facade->get_throughput_by_device(nic); + 
nic_message << " Nic: " << nic + << " threshold=" << modified_thresholds.nic_usage + << " measured=" << throughput << std::endl; + + all_nics_threshold += modified_thresholds.nic_usage; + all_nics_throughput += throughput; + } + + RH_TRACE(_baseLog, "USAGE STATE: " << std::endl << + " CPU: threshold " << modified_thresholds.cpu_idle << " Actual: " << sys_idle << " Avg: " << sys_idle_avg << std::endl << + " MEM: threshold " << modified_thresholds.mem_free << " Actual: " << mem_free << std::endl << + " LOAD: threshold " << modified_thresholds.load_avg << " Actual: " << sys_load << std::endl << + " RESRV: threshold " << max_allowable_load << " Actual: " << subscribed << std::endl << + " Ingress threshold: " << mcastnicIngressThresholdValue << " capacity: " << mcastnicIngressCapacity << std::endl << + " Egress threshold: " << mcastnicEgressThresholdValue << " capacity: " << mcastnicEgressCapacity << std::endl << + " Threads threshold: " << gpp_limits.max_threads << " Actual: " << gpp_limits.current_threads << std::endl << + " NIC: " << std::endl << nic_message.str() + ); - if (!(thresholds.cpu_idle < 0) && !(thresholds.load_avg < 0)) { - if (sys_idle < modified_thresholds.cpu_idle) { - if ( sys_idle_avg < modified_thresholds.cpu_idle) { - std::ostringstream oss; - oss << "Threshold: " << modified_thresholds.cpu_idle << " Actual/Average: " << sys_idle << "/" << sys_idle_avg ; - _setReason( "CPU IDLE", oss.str() ); - setUsageState(CF::Device::BUSY); - return; - } - } + if (_cpuIdleThresholdMonitor->is_threshold_exceeded()) { + std::ostringstream oss; + oss << "Threshold: " << modified_thresholds.cpu_idle << " Actual/Average: " << sys_idle << "/" << sys_idle_avg ; + _setBusyReason("CPU IDLE", oss.str()); } - - if ( !(thresholds.mem_free < 0) && (mem_free < modified_thresholds.mem_free)) { - std::ostringstream oss; - oss << "Threshold: " << modified_thresholds.mem_free << " Actual: " << mem_free; - _setReason( "FREE MEMORY", oss.str() ); - setUsageState(CF::Device::BUSY); 
+ else if (_freeMemThresholdMonitor->is_threshold_exceeded()) { + std::ostringstream oss; + oss << "Threshold: " << modified_thresholds.mem_free << " Actual: " << mem_free; + _setBusyReason("FREE MEMORY", oss.str()); } - - else if ( !(thresholds.cpu_idle < 0) && !(thresholds.load_avg < 0) && ( sys_load > modified_thresholds.load_avg )) { + else if (_loadAvgThresholdMonitor->is_threshold_exceeded()) { std::ostringstream oss; oss << "Threshold: " << modified_thresholds.load_avg << " Actual: " << sys_load; - _setReason( "LOAD AVG", oss.str() ); - setUsageState(CF::Device::BUSY); + _setBusyReason("LOAD AVG", oss.str()); } - else if ( reserved_capacity_per_component != 0 && (subscribed > max_allowable_load) ) { + else if ((reserved_capacity_per_component != 0) && (subscribed > max_allowable_load)) { std::ostringstream oss; oss << "Threshold: " << max_allowable_load << " Actual(subscribed) " << subscribed; - _setReason( "RESERVATION CAPACITY", oss.str() ); - setUsageState(CF::Device::BUSY); + _setBusyReason("RESERVATION CAPACITY", oss.str()); } - else if ( !(thresholds.nic_usage < 0) && _check_nic_thresholds() ) { - setUsageState(CF::Device::BUSY); + else if (_shmThresholdMonitor->is_threshold_exceeded()) { + std::ostringstream oss; + oss << "Threshold: " << modified_thresholds.shm_free << " Actual: " << shmFree; + _setBusyReason("SHARED MEMORY", oss.str()); + } + else if (_allNicsThresholdMonitor->is_threshold_exceeded()) { + std::ostringstream oss; + oss << "Threshold (cumulative) : " << all_nics_threshold << " Actual (cumulative) : " << all_nics_throughput; + _setBusyReason("NIC USAGE ", oss.str()); } - else if (!(thresholds.threads < 0) && _check_thread_limits(thresholds)) { + else if (_threadThresholdMonitor->is_threshold_exceeded()) { std::ostringstream oss; oss << "Threshold: " << gpp_limits.max_threads << " Actual: " << gpp_limits.current_threads; - _setReason( "ULIMIT (MAX_THREADS)", oss.str() ); - setUsageState(CF::Device::BUSY); + _setBusyReason("ULIMIT 
(MAX_THREADS)", oss.str()); } - else if (!(thresholds.files_available < 0) && _check_file_limits(thresholds)) { + else if (_fileThresholdMonitor->is_threshold_exceeded()) { std::ostringstream oss; oss << "Threshold: " << gpp_limits.max_open_files << " Actual: " << gpp_limits.current_open_files; - _setReason( "ULIMIT (MAX_FILES)", oss.str() ); - setUsageState(CF::Device::BUSY); + _setBusyReason("ULIMIT (MAX_FILES)", oss.str()); } else if (getPids().size() == 0) { - LOG_TRACE(GPP_i, "Usage State IDLE (trigger) pids === 0... "); - _resetReason(); + RH_TRACE(_baseLog, "Usage State IDLE (trigger) pids === 0... "); + _resetBusyReason(); setUsageState(CF::Device::IDLE); } else { - LOG_TRACE(GPP_i, "Usage State ACTIVE..... "); - _resetReason(); + RH_TRACE(_baseLog, "Usage State ACTIVE..... "); + _resetBusyReason(); setUsageState(CF::Device::ACTIVE); } } -void GPP_i::_resetReason() { - _setReason("",""); +void GPP_i::_resetBusyReason() { + _busy.mark = _busy.timestamp = boost::posix_time::not_a_date_time; + _busy.resource.clear(); + busy_reason.clear(); } -void GPP_i::_setReason( const std::string &reason, const std::string &event, const bool enable_timestamp ) { - - if ( reason != "" ) { - if ( reason != _busy_reason ) { - LOG_INFO(GPP_i, "GPP BUSY, REASON: " << reason << " " << event ); - _busy_timestamp = boost::posix_time::microsec_clock::local_time(); - _busy_mark = boost::posix_time::microsec_clock::local_time(); - _busy_reason = reason; - std::ostringstream oss; - oss << "(time: " << _busy_timestamp << ") REASON: " << _busy_reason << " EXCEEDED " << event; - busy_reason = oss.str(); - } - else if ( reason == _busy_reason ) { - boost::posix_time::ptime now = boost::posix_time::microsec_clock::local_time(); - boost::posix_time::time_duration dur = now - _busy_timestamp; - boost::posix_time::time_duration last_msg = now - _busy_mark; - std::ostringstream oss; - oss << "(first/duration: " << _busy_timestamp << "/" << dur << ") REASON: " << _busy_reason << " EXCEEDED " << 
event; - busy_reason = oss.str(); - if ( last_msg.total_seconds() > 2 ) { - _busy_mark = now; - LOG_INFO(GPP_i, "GPP BUSY, " << oss.str() ); - } +void GPP_i::_setBusyReason(const std::string& resource, const std::string& message) +{ + if (resource != _busy.resource) { + RH_INFO(_baseLog, "GPP BUSY, REASON: " << resource << " " << message); + _busy.timestamp = boost::posix_time::microsec_clock::local_time(); + _busy.mark = boost::posix_time::microsec_clock::local_time(); + _busy.resource = resource; + std::ostringstream oss; + oss << "(time: " << _busy.timestamp << ") REASON: " << _busy.resource << " EXCEEDED " << message; + busy_reason = oss.str(); + } else { + boost::posix_time::ptime now = boost::posix_time::microsec_clock::local_time(); + boost::posix_time::time_duration dur = now - _busy.timestamp; + boost::posix_time::time_duration last_msg = now - _busy.mark; + std::ostringstream oss; + oss << "(first/duration: " << _busy.timestamp << "/" << dur << ") REASON: " << _busy.resource << " EXCEEDED " << message; + busy_reason = oss.str(); + if ( last_msg.total_seconds() > 2 ) { + _busy.mark = now; + RH_INFO(_baseLog, "GPP BUSY, " << oss.str() ); } } - else { - _busy_timestamp = boost::posix_time::microsec_clock::local_time(); - _busy_mark = _busy_timestamp; - busy_reason = reason; - _busy_reason = reason; - } + setUsageState(CF::Device::BUSY); } /** @@ -1800,7 +2076,7 @@ int GPP_i::serviceFunction() catch( const boost::thread_resource_error& e ){ std::stringstream errstr; errstr << "Error acquiring lock (errno=" << e.native_error() << " msg=\"" << e.what() << "\")"; - LOG_ERROR(GPP_i, __FUNCTION__ << ": " << errstr.str() ); + RH_ERROR(this->_baseLog, __FUNCTION__ << ": " << errstr.str() ); } // @@ -1813,25 +2089,20 @@ int GPP_i::serviceFunction() ExecPartitionList::iterator iter = execPartitions.begin(); std::ostringstream ss; ss << boost::format("%-6s %-4s %-7s %-7s %-7s ") % "SOCKET" % "CPUS" % "USER" % "SYSTEM" % "IDLE"; - LOG_DEBUG(GPP_i, ss.str() ); + 
RH_TRACE(this->_baseLog, ss.str() ); ss.clear(); ss.str(""); for ( ; iter != execPartitions.end(); iter++ ) { ss << boost::format("%-6d %-4d %-7.2f %-7.2f %-7.2f ") % iter->id % iter->stats.get_ncpus() % iter->stats.get_user_percent() % iter->stats.get_system_percent() % iter->stats.get_idle_percent() ; - LOG_DEBUG(GPP_i, ss.str() ); + RH_TRACE(this->_baseLog, ss.str() ); ss.clear(); ss.str(""); } } // update monitors to see if thresholds are exceeded - std::for_each( threshold_monitors.begin(), threshold_monitors.end(), boost::bind( &Updateable::update, _1 ) ); - - for( size_t i=0; iidle_cap_mod = 100.0 * reserved_capacity_per_component / ((float)iter->cpus.size()); + iter->idle_cap_mod = 100.0 * reserved_capacity_per_component / ((float)iter->cpus.size()); } - } } -void GPP_i::mcastnicThreshold_changed(const CORBA::Long *oldvalue, const CORBA::Long *newvalue) { - - if( newvalue ) { - int threshold = *newvalue; +void GPP_i::mcastnicThreshold_changed(int oldvalue, int newvalue) +{ + int threshold = newvalue; if ( threshold >= 0 && threshold <= 100 ) { double origIngressThreshold = mcastnicIngressThresholdValue; double origEgressThreshold = mcastnicIngressThresholdValue; @@ -1933,10 +2193,7 @@ void GPP_i::mcastnicThreshold_changed(const CORBA::Long *oldvalue, const CORBA: mcastnicEgressCapacity = mcastnicEgressThresholdValue; mcastnicEgressFree = mcastnicEgressCapacity; } - } - } - } @@ -1950,18 +2207,18 @@ void GPP_i::_affinity_changed( const affinity_struct *ovp, const affinity_struct if ( ovp ) { const affinity_struct ov = *ovp; - LOG_DEBUG(GPP_i, "OV: " ); - LOG_DEBUG(GPP_i, "OV: ov.policy/context " << ov.exec_directive_class << "/" << ov.exec_directive_value ); - LOG_DEBUG(GPP_i, "OV: ov.blacklist size " << ov.blacklist_cpus.size() ); - LOG_DEBUG(GPP_i, "OV: ov.force_override " << ov.force_override ); - LOG_DEBUG(GPP_i, "OV: ov.disabled " << ov.disabled ); + RH_DEBUG(this->_baseLog, "OV: " ); + RH_DEBUG(this->_baseLog, "OV: ov.policy/context " << 
ov.exec_directive_class << "/" << ov.exec_directive_value ); + RH_DEBUG(this->_baseLog, "OV: ov.blacklist size " << ov.blacklist_cpus.size() ); + RH_DEBUG(this->_baseLog, "OV: ov.force_override " << ov.force_override ); + RH_DEBUG(this->_baseLog, "OV: ov.disabled " << ov.disabled ); } - LOG_DEBUG(GPP_i, "NV: " ); - LOG_DEBUG(GPP_i, "NV: nv.policy/context " << nv.exec_directive_class << "/" << nv.exec_directive_value ); - LOG_DEBUG(GPP_i, "NV: nv.blacklist size " << nv.blacklist_cpus.size() ); - LOG_DEBUG(GPP_i, "NV: nv.force_override " << nv.force_override ); - LOG_DEBUG(GPP_i, "NV: nv.disabled " << nv.disabled ); + RH_DEBUG(this->_baseLog, "NV: " ); + RH_DEBUG(this->_baseLog, "NV: nv.policy/context " << nv.exec_directive_class << "/" << nv.exec_directive_value ); + RH_DEBUG(this->_baseLog, "NV: nv.blacklist size " << nv.blacklist_cpus.size() ); + RH_DEBUG(this->_baseLog, "NV: nv.force_override " << nv.force_override ); + RH_DEBUG(this->_baseLog, "NV: nv.disabled " << nv.disabled ); // change affinity struct to affinity spec.. 
redhawk::affinity::AffinityDirective value; @@ -2049,11 +2306,11 @@ bool GPP_i::allocate_mcastegress_capacity(const CORBA::Long &value) boost::mutex::scoped_lock lock(propertySetAccess); std::string except_msg("Invalid allocation"); bool retval=false; - LOG_DEBUG(GPP_i, __FUNCTION__ << ": Allocating mcastegress allocation " << value); + RH_DEBUG(this->_baseLog, __FUNCTION__ << ": Allocating mcastegress allocation " << value); if ( mcastnicInterface == "" ) { std::string msg = "mcastnicEgressCapacity request failed because no mcastnicInterface has been configured"; - LOG_DEBUG(GPP_i, __FUNCTION__ << msg ); + RH_DEBUG(this->_baseLog, __FUNCTION__ << msg ); throw CF::Device::InvalidState(msg.c_str()); return retval; } @@ -2063,7 +2320,7 @@ bool GPP_i::allocate_mcastegress_capacity(const CORBA::Long &value) std::ostringstream os; os << "mcastnicEgressCapacity request: " << value << " failed because of insufficent capacity available, current: " << mcastnicEgressCapacity; std::string msg = os.str(); - LOG_DEBUG(GPP_i, __FUNCTION__ << msg ); + RH_DEBUG(this->_baseLog, __FUNCTION__ << msg ); CF::Properties errprops; errprops.length(1); errprops[0].id = "mcastnicEgressCapacity"; @@ -2082,7 +2339,7 @@ bool GPP_i::allocate_mcastegress_capacity(const CORBA::Long &value) void GPP_i::deallocate_mcastegress_capacity(const CORBA::Long &value) { boost::mutex::scoped_lock lock(propertySetAccess); - LOG_DEBUG(GPP_i, __FUNCTION__ << ": Deallocating mcastegress allocation " << value); + RH_DEBUG(this->_baseLog, __FUNCTION__ << ": Deallocating mcastegress allocation " << value); mcastnicEgressCapacity = value + mcastnicEgressCapacity; if ( mcastnicEgressCapacity > mcastnicEgressThresholdValue ) { @@ -2092,6 +2349,57 @@ void GPP_i::deallocate_mcastegress_capacity(const CORBA::Long &value) mcastnicEgressFree = mcastnicEgressCapacity; } +bool GPP_i::allocate_reservation_request(const redhawk__reservation_request_struct &value) +{ + if (isBusy()) { + return false; + } + 
RH_DEBUG(this->_baseLog, __FUNCTION__ << ": allocating reservation_request allocation "); + { + WriteLock rlock(pidLock); + if (applicationReservations.find(value.obj_id) != applicationReservations.end()){ + RH_INFO(_baseLog, __FUNCTION__ << ": Cannot make multiple reservations against the same application: "<& reservations = applicationReservations[value.obj_id].reservation; + for (unsigned int idx=0; idx_baseLog, __FUNCTION__ << ": Deallocating reservation_request allocation "); + for (ApplicationReservationMap::iterator app_it=applicationReservations.begin(); app_it!=applicationReservations.end(); app_it++) { + if (app_it->first == value.obj_id) { + applicationReservations.erase(app_it); + break; + } + } +} + bool GPP_i::allocate_mcastingress_capacity(const CORBA::Long &value) @@ -2099,11 +2407,11 @@ bool GPP_i::allocate_mcastingress_capacity(const CORBA::Long &value) boost::mutex::scoped_lock lock(propertySetAccess); std::string except_msg("Invalid allocation"); bool retval=false; - LOG_DEBUG(GPP_i, __FUNCTION__ << ": Allocating mcastingress allocation " << value); + RH_DEBUG(this->_baseLog, __FUNCTION__ << ": Allocating mcastingress allocation " << value); if ( mcastnicInterface == "" ) { std::string msg = "mcastnicIngressCapacity request failed because no mcastnicInterface has been configured" ; - LOG_DEBUG(GPP_i, __FUNCTION__ << msg ); + RH_DEBUG(this->_baseLog, __FUNCTION__ << msg ); throw CF::Device::InvalidState(msg.c_str()); } @@ -2112,7 +2420,7 @@ bool GPP_i::allocate_mcastingress_capacity(const CORBA::Long &value) std::ostringstream os; os << "mcastnicIngressCapacity request: " << value << " failed because of insufficent capacity available, current: " << mcastnicIngressCapacity; std::string msg = os.str(); - LOG_DEBUG(GPP_i, __FUNCTION__ << msg ); + RH_DEBUG(this->_baseLog, __FUNCTION__ << msg ); CF::Properties errprops; errprops.length(1); errprops[0].id = "mcastnicIngressCapacity"; @@ -2132,7 +2440,7 @@ bool 
GPP_i::allocate_mcastingress_capacity(const CORBA::Long &value) void GPP_i::deallocate_mcastingress_capacity(const CORBA::Long &value) { boost::mutex::scoped_lock lock(propertySetAccess); - LOG_DEBUG(GPP_i, __FUNCTION__ << ": Deallocating mcastingress deallocation " << value); + RH_DEBUG(this->_baseLog, __FUNCTION__ << ": Deallocating mcastingress deallocation " << value); mcastnicIngressCapacity = value + mcastnicIngressCapacity; if ( mcastnicIngressCapacity > mcastnicIngressThresholdValue ) { @@ -2149,10 +2457,10 @@ bool GPP_i::allocateCapacity_nic_allocation(const nic_allocation_struct &alloc) WriteLock wlock(nicLock); std::string except_msg("Invalid allocation"); bool success=false; - LOG_TRACE(GPP_i, __FUNCTION__ << ": Allocating nic_allocation (identifier=" << alloc.identifier << ")"); + RH_TRACE(this->_baseLog, __FUNCTION__ << ": Allocating nic_allocation (identifier=" << alloc.identifier << ")"); try { - LOG_TRACE(GPP_i, __FUNCTION__ << ": ALLOCATION: { identifier: \"" << alloc.identifier << "\", data_rate: " << alloc.data_rate << ", data_size: " << alloc.data_size << ", multicast_support: \"" << alloc.multicast_support << "\", ip_addressable: \"" << alloc.ip_addressable << "\", interface: \"" << alloc.interface << "\" }"); + RH_TRACE(this->_baseLog, __FUNCTION__ << ": ALLOCATION: { identifier: \"" << alloc.identifier << "\", data_rate: " << alloc.data_rate << ", data_size: " << alloc.data_size << ", multicast_support: \"" << alloc.multicast_support << "\", ip_addressable: \"" << alloc.ip_addressable << "\", interface: \"" << alloc.interface << "\" }"); success = nic_facade->allocate_capacity(alloc); if( success ) @@ -2165,7 +2473,7 @@ bool GPP_i::allocateCapacity_nic_allocation(const nic_allocation_struct &alloc) status = nic_allocation_status[i]; // need to check if processor socket servicing interface has enough idle capacity if ( _check_exec_partition( status.interface ) == true ) { - LOG_TRACE(GPP_i, __FUNCTION__ << ": SUCCESS: { identifier: \"" << 
status.identifier << "\", data_rate: " << status.data_rate << ", data_size: " << status.data_size << ", multicast_support: \"" << status.multicast_support << "\", ip_addressable: \"" << status.ip_addressable << "\", interface: \"" << status.interface << "\" }"); + RH_TRACE(this->_baseLog, __FUNCTION__ << ": SUCCESS: { identifier: \"" << status.identifier << "\", data_rate: " << status.data_rate << ", data_size: " << status.data_size << ", multicast_support: \"" << status.multicast_support << "\", ip_addressable: \"" << status.ip_addressable << "\", interface: \"" << status.interface << "\" }"); break; } else { @@ -2201,9 +2509,9 @@ bool GPP_i::allocateCapacity_nic_allocation(const nic_allocation_struct &alloc) void GPP_i::deallocateCapacity_nic_allocation(const nic_allocation_struct &alloc) { WriteLock wlock(nicLock); - LOG_TRACE(GPP_i, __FUNCTION__ << ": Deallocating nic_allocation (identifier=" << alloc.identifier << ")"); + RH_TRACE(this->_baseLog, __FUNCTION__ << ": Deallocating nic_allocation (identifier=" << alloc.identifier << ")"); try { - LOG_DEBUG(GPP_i, __FUNCTION__ << ": { identifier: \"" << alloc.identifier << "\", data_rate: " << alloc.data_rate << ", data_size: " << alloc.data_size << ", multicast_support: \"" << alloc.multicast_support << "\", ip_addressable: \"" << alloc.ip_addressable << "\", interface: \"" << alloc.interface << "\" }"); + RH_DEBUG(this->_baseLog, __FUNCTION__ << ": { identifier: \"" << alloc.identifier << "\", data_rate: " << alloc.data_rate << ", data_size: " << alloc.data_size << ", multicast_support: \"" << alloc.multicast_support << "\", ip_addressable: \"" << alloc.ip_addressable << "\", interface: \"" << alloc.interface << "\" }"); nic_facade->deallocate_capacity(alloc); } catch( ... 
) @@ -2216,16 +2524,6 @@ void GPP_i::deallocateCapacity_nic_allocation(const nic_allocation_struct &alloc } } -void GPP_i::deallocateCapacity (const CF::Properties& capacities) throw (CF::Device::InvalidState, CF::Device::InvalidCapacity, CORBA::SystemException) -{ - GPP_base::deallocateCapacity(capacities); -} -CORBA::Boolean GPP_i::allocateCapacity (const CF::Properties& capacities) throw (CF::Device::InvalidState, CF::Device::InvalidCapacity, CF::Device::InsufficientCapacity, CORBA::SystemException) -{ - bool retval = GPP_base::allocateCapacity(capacities); - return retval; -} - bool GPP_i::allocate_diskCapacity(const double &value) { @@ -2245,19 +2543,19 @@ bool GPP_i::allocate_memCapacity(const CORBA::LongLong &value) { if (isBusy()) { return false; } - LOG_DEBUG(GPP_i, "allocate memory (REQUEST) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); + RH_DEBUG(this->_baseLog, "allocate memory (REQUEST) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); if ( value > memCapacity or value > memCapacityThreshold ) return false; memCapacity -= value; - LOG_DEBUG(GPP_i, "allocate memory (SUCCESS) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); + RH_DEBUG(this->_baseLog, "allocate memory (SUCCESS) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); return true; } void GPP_i::deallocate_memCapacity(const CORBA::LongLong &value) { - LOG_DEBUG(GPP_i, "deallocate memory (REQUEST) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); + RH_DEBUG(this->_baseLog, "deallocate memory (REQUEST) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); memCapacity += value; - LOG_DEBUG(GPP_i, "deallocate memory (SUCCESS) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); + RH_DEBUG(this->_baseLog, "deallocate memory (SUCCESS) value: " << value << " memCapacity: " << 
memCapacity << " memFree:" << memFree ); if ( memCapacity > memCapacityThreshold ) { memCapacity = memCapacityThreshold; } @@ -2277,12 +2575,12 @@ bool GPP_i::allocate_loadCapacity(const double &value) { // get current system load and calculated reservation load if ( reserved_capacity_per_component == 0.0 ) { - LOG_DEBUG(GPP_i, "allocate load capacity, (REQUEST) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + RH_DEBUG(this->_baseLog, "allocate load capacity, (REQUEST) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); // get system monitor report... double load_threshold = modified_thresholds.load_avg; double sys_load = system_monitor->get_loadavg(); if ( sys_load + value > load_threshold ) { - LOG_WARN(GPP_i, "Allocate load capacity would exceed measured system load, current loadavg: " << sys_load << " requested: " << value << " threshold: " << load_threshold ); + RH_WARN(this->_baseLog, "Allocate load capacity would exceed measured system load, current loadavg: " << sys_load << " requested: " << value << " threshold: " << load_threshold ); } // perform classic load capacity @@ -2290,7 +2588,7 @@ bool GPP_i::allocate_loadCapacity(const double &value) { std::ostringstream os; os << " Allocate load capacity failed due to insufficient capacity, available capacity:" << loadCapacity << " requested capacity: " << value; std::string msg = os.str(); - LOG_DEBUG(GPP_i, msg ); + RH_DEBUG(this->_baseLog, msg ); CF::Properties errprops; errprops.length(1); errprops[0].id = "DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056 (loadCapacity)"; @@ -2299,18 +2597,18 @@ bool GPP_i::allocate_loadCapacity(const double &value) { } loadCapacity -= value; - LOG_DEBUG(GPP_i, "allocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + RH_DEBUG(this->_baseLog, "allocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" 
<< loadFree ); } else { // manage load capacity handled via reservation - LOG_WARN(GPP_i, "Allocate load capacity allowed, GPP using component reservations for managing load capacity." ); + RH_WARN(this->_baseLog, "Allocate load capacity allowed, GPP using component reservations for managing load capacity." ); loadCapacity -= value; if ( loadCapacity < 0.0 ) { loadCapacity = 0.0; } - LOG_DEBUG(GPP_i, "allocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + RH_DEBUG(this->_baseLog, "allocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); } @@ -2319,12 +2617,12 @@ bool GPP_i::allocate_loadCapacity(const double &value) { } void GPP_i::deallocate_loadCapacity(const double &value) { - LOG_DEBUG(GPP_i, "deallocate load capacity, (REQUEST) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + RH_DEBUG(this->_baseLog, "deallocate load capacity, (REQUEST) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); loadCapacity += value; if ( loadCapacity > loadFree ) { loadCapacity = loadFree; } - LOG_DEBUG(GPP_i, "deallocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + RH_DEBUG(this->_baseLog, "deallocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); updateThresholdMonitors(); updateUsageState(); return; @@ -2341,13 +2639,13 @@ void GPP_i::deallocate_loadCapacity(const double &value) { void GPP_i::send_threshold_event(const threshold_event_struct& message) { - LOG_INFO(GPP_i, __FUNCTION__ << ": " << message.message ); + RH_INFO(this->_baseLog, __FUNCTION__ << ": " << message.message ); MessageEvent_out->sendMessage(message); } void GPP_i::sendChildNotification(const std::string &comp_id, const std::string &app_id) { - LOG_INFO(GPP_i, "Child termination 
notification on the IDM channel : comp:" << comp_id << " app:" <_baseLog, "Child termination notification on the IDM channel : comp:" << comp_id << " app:" <update(); - LOG_TRACE(GPP_i, __FUNCTION__ << ": resource_id=" << monitor->get_resource_id() << " threshold=" << monitor->get_threshold() << " measured=" << monitor->get_measured()); - } + std::for_each(threshold_monitors.begin(), threshold_monitors.end(), boost::bind(&Updateable::update, _1)); } -void GPP_i::update() + +void GPP_i::updateProcessStats() { - // establish what the actual load is per floor_reservation - // if the actual load -per is less than the reservation, compute the different and add the difference to the cpu_idle - // read the clock from the system (start) - - int64_t user=0, system=0; - ProcStat::GetTicks( system, user); - int64_t f_start_total = system; - int64_t f_use_start_total = user; - float reservation_set = 0; - size_t nres=0; - int64_t usage=0; + // establish what the actual load is per floor_reservation + // if the actual load -per is less than the reservation, compute the + // different and add the difference to the cpu_idle + { + WriteLock rlock(pidLock); + this->update_grp_child_pids(); + } - { - WriteLock rlock(pidLock); - - this->update_grp_child_pids(); - - ProcessList::iterator i=this->pids.begin(); - for ( ; i!=pids.end(); i++) { - - if ( !i->terminated ) { + // Update system and user clocks and determine how much time has elapsed + // since the last measurement + int64_t last_system_ticks = _systemTicks; + int64_t last_user_ticks = _userTicks; + ProcStat::GetTicks(_systemTicks, _userTicks); + int64_t system_elapsed = _systemTicks - last_system_ticks; + int64_t user_elapsed = _userTicks - last_user_ticks; - // update pstat usage for each process - usage = i->get_pstat_usage(); + float inverse_load_per_core = ((float)processor_cores)/(system_elapsed); + float aggregate_usage = 0; + float non_specialized_aggregate_usage = 0; - if ( !i->app_started ) { - nres++; - if ( 
i->reservation == -1) { - reservation_set += idle_capacity_modifier; - } else { - reservation_set += 100.0 * i->reservation/((float)processor_cores); - } - } - } + ReadLock rlock(pidLock); + for (ApplicationReservationMap::iterator app_it=applicationReservations.begin(); app_it!=applicationReservations.end(); app_it++) { + app_it->second.usage = 0; } - } - LOG_TRACE(GPP_i, __FUNCTION__ << " Completed first pass, record pstats for nproc: " << nres << " res_set " << reservation_set ); + double reservation_set = 0; + size_t nres=0; + int usage_out=0; - // set number reservations that are not started - n_reservations = nres; - - // wait a little bit - usleep(500000); - - - user=0, system=0; - ProcStat::GetTicks( system, user); - int64_t f_end_total = system; - int64_t f_use_end_total = user; - float f_total = (float)(f_end_total-f_start_total); - if ( f_total <= 0.0 ) { - LOG_TRACE(GPP_i, __FUNCTION__ << std::endl<< " System Ticks end/start " << f_end_total << "/" << f_start_total << std::endl ); - f_total=1.0; - } - float inverse_load_per_core = ((float)processor_cores)/(f_total); - float aggregate_usage = 0; - float non_specialized_aggregate_usage = 0; - double percent_core; - - ReadLock rlock(pidLock); - ProcessList::iterator i=this->pids.begin(); - int usage_out=0; - for ( ; i!=pids.end(); i++, usage_out++) { - - usage = 0; - percent_core =0; - if ( !i->terminated ) { + for (ProcessList::iterator i=this->pids.begin(); i!=pids.end(); i++, usage_out++) { + if (i->terminated) { + continue; + } - // get delta from last pstat - usage = i->get_pstat_usage(); + // get delta from last pstat + int64_t usage = i->get_pstat_usage(); - percent_core = (double)usage * inverse_load_per_core; - i->core_usage = percent_core; - double res = i->reservation; + double percent_core = (double)usage * inverse_load_per_core; + i->core_usage = percent_core; + double res = i->reservation; #if 0 - // debug assist - if ( !(usage_out % 500) || usage < 0 || percent_core < 0.0 ) { - uint64_t u, 
p2, p1; - u = i->get_pstat_usage(p2,p1); - LOG_INFO(GPP_i, __FUNCTION__ << std::endl<< "PROC SPEC PID: " << i->pid << std::endl << - " usage " << usage << std::endl << - " u " << usage << std::endl << - " p2 " << p2 << std::endl << - " p1 " << p1 << std::endl << - " percent_core: " << percent_core << std::endl << - " reservation: " << i->reservation << std::endl ); - } + // debug assist + if ( !(usage_out % 500) || usage < 0 || percent_core < 0.0 ) { + uint64_t u, p2, p1; + u = i->get_pstat_usage(p2,p1); + RH_INFO(_baseLog, __FUNCTION__ << std::endl<< "PROC SPEC PID: " << i->pid << std::endl << + " usage " << usage << std::endl << + " u " << usage << std::endl << + " p2 " << p2 << std::endl << + " p1 " << p1 << std::endl << + " percent_core: " << percent_core << std::endl << + " reservation: " << i->reservation << std::endl ); + } #endif - if ( i->app_started ) { - - // if component is not using enough the add difference between minimum and current load - if ( percent_core < res ) { - reservation_set += 100.00 * ( res - percent_core)/((double)processor_cores); - } - // for components with non specific - if ( res == -1.0 ) { - non_specialized_aggregate_usage += percent_core / inverse_load_per_core; + if ( applicationReservations.find(i->appName) != applicationReservations.end()) { + if (applicationReservations[i->appName].reservation.find("cpucores") != applicationReservations[i->appName].reservation.end()) { + applicationReservations[i->appName].usage += percent_core; + } } - else { - aggregate_usage += percent_core / inverse_load_per_core; + + if (i->app_started) { + // if component is not using enough the add difference between minimum and current load + if ( percent_core < res ) { + reservation_set += 100.00 * ( res - percent_core)/((double)processor_cores); + } + // for components with non specific + if ( res == -1.0 ) { + non_specialized_aggregate_usage += percent_core / inverse_load_per_core; + } + else { + aggregate_usage += percent_core / 
inverse_load_per_core; + } + } else { + if ( applicationReservations.find(i->appName) != applicationReservations.end()) { + if (applicationReservations[i->appName].reservation.find("cpucores") != applicationReservations[i->appName].reservation.end()) { + continue; + } + } + nres++; + if ( i->reservation == -1) { + reservation_set += idle_capacity_modifier; + } else { + reservation_set += 100.0 * i->reservation/((float)processor_cores); + } } - } } - } - - LOG_TRACE(GPP_i, __FUNCTION__ << " Completed SECOND pass, record pstats for processes" ); - - aggregate_usage *= inverse_load_per_core; - non_specialized_aggregate_usage *= inverse_load_per_core; - modified_thresholds.cpu_idle = __thresholds.cpu_idle + reservation_set; - utilization[0].component_load = aggregate_usage + non_specialized_aggregate_usage; - float estimate_total = (f_use_end_total-f_use_start_total) * inverse_load_per_core; - utilization[0].system_load = (utilization[0].component_load > estimate_total) ? utilization[0].component_load : estimate_total; // for very light loads, sometimes there is a measurement mismatch because of timing - utilization[0].subscribed = (reservation_set * (float)processor_cores) / 100.0 + utilization[0].component_load; - utilization[0].maximum = processor_cores-(__thresholds.cpu_idle/100.0) * processor_cores; - - LOG_DEBUG(GPP_i, __FUNCTION__ << " LOAD and IDLE : " << std::endl << - " modified_threshold(req+res)=" << modified_thresholds.cpu_idle << std::endl << - " system: idle: " << system_monitor->get_idle_percent() << std::endl << - " idle avg: " << system_monitor->get_idle_average() << std::endl << - " threshold(req): " << __thresholds.cpu_idle << std::endl << - " idle modifier: " << idle_capacity_modifier << std::endl << - " reserved_cap_per_component: " << reserved_capacity_per_component << std::endl << - " number of reservations: " << n_reservations << std::endl << - " processes: " << pids.size() << std::endl << - " loadCapacity: " << loadCapacity << std::endl << - " 
loadTotal: " << loadTotal << std::endl << - " loadFree(Modified): " << loadFree <getReport(); - LOG_DEBUG(GPP_i, __FUNCTION__ << " SysInfo Load : " << std::endl << - " one: " << rpt.load.one_min << std::endl << - " five: " << rpt.load.five_min << std::endl << - " fifteen: " << rpt.load.fifteen_min << std::endl ); + // set number reservations that are not started + n_reservations = nres; - loadAverage.onemin = rpt.load.one_min; - loadAverage.fivemin = rpt.load.five_min; - loadAverage.fifteenmin = rpt.load.fifteen_min; + for (ApplicationReservationMap::iterator app_it=applicationReservations.begin(); app_it!=applicationReservations.end(); app_it++) { + if (app_it->second.reservation.find("cpucores") != app_it->second.reservation.end()) { + bool found_app = false; + for ( ProcessList::iterator _pid_it=this->pids.begin();_pid_it!=pids.end(); _pid_it++) { + if (applicationReservations.find(_pid_it->appName) != applicationReservations.end()) { + found_app = true; + break; + } + } + if (not found_app) { + if (app_it->second.reservation["cpucores"] == -1) { + reservation_set += idle_capacity_modifier; + } else { + reservation_set += 100.0 * app_it->second.reservation["cpucores"]/((float)processor_cores); + } + } else { + if (app_it->second.usage < app_it->second.reservation["cpucores"]) { + reservation_set += 100.00 * ( app_it->second.reservation["cpucores"] - app_it->second.usage)/((double)processor_cores); + } + } + } + } - memFree = rpt.virtual_memory_free / mem_free_units; - LOG_DEBUG(GPP_i, __FUNCTION__ << "Memory : " << std::endl << - " sys_monitor.vit_total: " << rpt.virtual_memory_total << std::endl << - " sys_monitor.vit_free: " << rpt.virtual_memory_free << std::endl << - " sys_monitor.mem_total: " << rpt.physical_memory_total << std::endl << - " sys_monitor.mem_free: " << rpt.physical_memory_free << std::endl << - " memFree: " << memFree << std::endl << - " memCapacity: " << memCapacity << std::endl << - " memCapacityThreshold: " << memCapacityThreshold << 
std::endl << - " memInitCapacityPercent: " << memInitCapacityPercent << std::endl ); + RH_TRACE(_baseLog, __FUNCTION__ << " Completed pass, record pstats for processes" ); + + aggregate_usage *= inverse_load_per_core; + non_specialized_aggregate_usage *= inverse_load_per_core; + utilization[0].component_load = aggregate_usage + non_specialized_aggregate_usage; + float estimate_total = (user_elapsed) * inverse_load_per_core; + utilization[0].system_load = std::max(utilization[0].component_load, estimate_total); // for very light loads, sometimes there is a measurement mismatch because of timing + utilization[0].subscribed = (reservation_set * (float)processor_cores) / 100.0 + utilization[0].component_load; + + // The maximum CPU utilization is in terms of cores; if a threshold is set, + // normalize it to the range [0,1] and scale the maximum by that ratio + float cpu_idle_threshold = 0.0; + utilization[0].maximum = processor_cores; + if (!thresholds.ignore && (thresholds.cpu_idle >= 0.0)) { + utilization[0].maximum *= (1.0 - thresholds.cpu_idle * 0.01); + modified_thresholds.cpu_idle = thresholds.cpu_idle + reservation_set; + cpu_idle_threshold = thresholds.cpu_idle; + } + + RH_DEBUG(_baseLog, __FUNCTION__ << " LOAD and IDLE : " << std::endl << + " modified_threshold(req+res)=" << modified_thresholds.cpu_idle << std::endl << + " system: idle: " << system_monitor->get_idle_percent() << std::endl << + " idle avg: " << system_monitor->get_idle_average() << std::endl << + " threshold(req): " << cpu_idle_threshold << std::endl << + " idle modifier: " << idle_capacity_modifier << std::endl << + " reserved_cap_per_component: " << reserved_capacity_per_component << std::endl << + " number of reservations: " << n_reservations << std::endl << + " processes: " << pids.size() << std::endl << + " loadCapacity: " << loadCapacity << std::endl << + " loadTotal: " << loadTotal << std::endl << + " loadFree(Modified): " << loadFree <update_state(); - const Limits::Contents &pid_rpt = 
process_limits->get(); - gpp_limits.current_threads = pid_rpt.threads; - gpp_limits.max_threads = pid_rpt.threads_limit; - gpp_limits.current_open_files = pid_rpt.files; - gpp_limits.max_open_files = pid_rpt.files_limit; +void GPP_i::update() +{ + updateProcessStats(); + + const SystemMonitor::Report &rpt = system_monitor->getReport(); + RH_TRACE(_baseLog, __FUNCTION__ << " SysInfo Load : " << std::endl << + " one: " << rpt.load.one_min << std::endl << + " five: " << rpt.load.five_min << std::endl << + " fifteen: " << rpt.load.fifteen_min << std::endl ); + + loadAverage.onemin = rpt.load.one_min; + loadAverage.fivemin = rpt.load.five_min; + loadAverage.fifteenmin = rpt.load.fifteen_min; + + memFree = rpt.virtual_memory_free / mem_free_units; + RH_TRACE(_baseLog, __FUNCTION__ << "Memory : " << std::endl << + " sys_monitor.vit_total: " << rpt.virtual_memory_total << std::endl << + " sys_monitor.vit_free: " << rpt.virtual_memory_free << std::endl << + " sys_monitor.mem_total: " << rpt.physical_memory_total << std::endl << + " sys_monitor.mem_free: " << rpt.physical_memory_free << std::endl << + " memFree: " << memFree << std::endl << + " memCapacity: " << memCapacity << std::endl << + " memCapacityThreshold: " << memCapacityThreshold << std::endl << + " memInitCapacityPercent: " << memInitCapacityPercent << std::endl ); + + shmFree = redhawk::shm::getSystemFreeMemory() / MB_TO_BYTES; + + // + // transfer limits to properties + // + const Limits::Contents &sys_rpt =rpt.sys_limits; + sys_limits.current_threads = sys_rpt.threads; + sys_limits.max_threads = sys_rpt.threads_limit; + sys_limits.current_open_files = sys_rpt.files; + sys_limits.max_open_files = sys_rpt.files_limit; + process_limits->update_state(); + const Limits::Contents &pid_rpt = process_limits->get(); + gpp_limits.current_threads = pid_rpt.threads; + gpp_limits.max_threads = pid_rpt.threads_limit; + gpp_limits.current_open_files = pid_rpt.files; + gpp_limits.max_open_files = pid_rpt.files_limit; } @@ 
-2566,11 +2882,11 @@ int GPP_i::sigchld_handler(int sig) FD_SET(sig_fd, &readfds); select(sig_fd+1, &readfds, NULL, NULL, &tv); if (FD_ISSET(sig_fd, &readfds)) { - LOG_TRACE(GPP_i, " Checking for signals from SIGNALFD(" << sig_fd << ") cnt:" << cnt++ ); + RH_TRACE(this->_baseLog, " Checking for signals from SIGNALFD(" << sig_fd << ") cnt:" << cnt++ ); s = read(sig_fd, &si, sizeof(struct signalfd_siginfo)); - LOG_TRACE(GPP_i, " RETURN from SIGNALFD(" << sig_fd << ") cnt/ret:" << cnt << "/" << s ); + RH_TRACE(this->_baseLog, " RETURN from SIGNALFD(" << sig_fd << ") cnt/ret:" << cnt << "/" << s ); if (s != sizeof(struct signalfd_siginfo)){ - LOG_ERROR(GPP_i, "SIGCHLD handling error ..."); + RH_ERROR(this->_baseLog, "SIGCHLD handling error ..."); break; } @@ -2585,12 +2901,12 @@ int GPP_i::sigchld_handler(int sig) // will issue a notification event message for non domain terminated resources.. ie. segfaults.. // if ( si.ssi_signo == SIGCHLD) { - LOG_TRACE(GPP_i, "Child died , pid .................................." << si.ssi_pid); + RH_TRACE(this->_baseLog, "Child died , pid .................................." << si.ssi_pid); int status; pid_t child_pid; bool reap=false; while( (child_pid = waitpid(-1, &status, WNOHANG)) > 0 ) { - LOG_TRACE(GPP_i, "WAITPID died , pid .................................." << child_pid); + RH_TRACE(this->_baseLog, "WAITPID died , pid .................................." 
<< child_pid); if ( (uint)child_pid == si.ssi_pid ) reap=true; _component_cleanup( child_pid, status ); } @@ -2599,7 +2915,7 @@ int GPP_i::sigchld_handler(int sig) } } else { - LOG_TRACE(GPP_i, "read from signalfd --> signo:" << si.ssi_signo); + RH_TRACE(this->_baseLog, "read from signalfd --> signo:" << si.ssi_signo); } } else { @@ -2608,7 +2924,7 @@ int GPP_i::sigchld_handler(int sig) } } - //LOG_TRACE(GPP_i, "sigchld_handler RETURN.........loop cnt:" << cnt); + //RH_TRACE(this->_baseLog, "sigchld_handler RETURN.........loop cnt:" << cnt); return NOOP; } @@ -2622,99 +2938,68 @@ int GPP_i::redirected_io_handler() // check we have a log file if ( _componentOutputLog == "" ) { - LOG_DEBUG(GPP_i, " Component IO redirect ON but no file specified. "); + RH_DEBUG(this->_baseLog, " Component IO redirect ON but no file specified. "); return NOOP; } - LOG_DEBUG(GPP_i, " Locking For Redirect Processing............. "); + RH_DEBUG(this->_baseLog, " Locking For Redirect Processing............. "); ReadLock lock(fdsLock); - int redirect_file = open(_componentOutputLog.c_str(), O_RDWR | O_CREAT , S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ); + int redirect_file = open(_componentOutputLog.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ); if ( redirect_file != -1 ) { if ( lseek(redirect_file, 0, SEEK_END) == -1 ) { - LOG_DEBUG(GPP_i, " Unable to SEEK To file end, file: " << _componentOutputLog); + RH_DEBUG(this->_baseLog, " Unable to SEEK To file end, file: " << _componentOutputLog); } } else { - LOG_TRACE(GPP_i, " Unable to open up componentOutputLog, fallback to /dev/null tried log: " << _componentOutputLog); + RH_TRACE(this->_baseLog, " Unable to open up componentOutputLog, fallback to /dev/null tried log: " << _componentOutputLog); redirect_file = open("/dev/null", O_RDWR, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ); } size_t size = 0; uint64_t cnt = 0; - uint64_t fopens = 0; uint64_t fcloses = 0; uint64_t 
nbytes = 0; size_t result=0; int rd_fd =0; - ProcessFds::iterator fd = redirectedFds.begin(); - for ( ; fd != redirectedFds.end() && _handle_io_redirects ; fd++ ) { - - // set default redirect to be master - rd_fd=redirect_file; - - // check if our pid is vaid - if ( fd->pid > 0 and fd->cout > -1 ) { - - // open up a specific redirect file - if ( fd->fname != "" && fd->fname != _componentOutputLog ) { - LOG_TRACE(GPP_i, " OPEN FILE - PID: " << fd->pid << " fname " << fd->fname); - rd_fd = open(fd->fname.c_str(), O_RDWR | O_CREAT , S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ); - if ( rd_fd == -1 ) { - LOG_ERROR(GPP_i, " Unable to open component output log: " << fd->fname); - rd_fd = redirect_file; - } - else { - fopens++; - if ( lseek(rd_fd, 0, SEEK_END) == -1 ) { - LOG_DEBUG(GPP_i, " Unable to SEEK To file end, file: " << fd->fname); - } - } - } - fd_set readfds; - FD_ZERO(&readfds); - FD_SET(fd->cout, &readfds); - struct timeval tv = {0, 50}; - select(fd->cout+1, &readfds, NULL, NULL, &tv); - if (FD_ISSET(fd->cout, &readfds)) { - - result=0; - size = 0; - if (ioctl (fd->cout, FIONREAD, &size) == -1) { - LOG_ERROR(GPP_i, "(redirected IO) Error requesting how much to read, PID: " << fd->pid << " FD:" << fd->cout ); - close(fd->cout); - fd->cout = -1; - } - if ( fd->cout != -1 && rd_fd != -1 ) { - LOG_TRACE(GPP_i, " SPLICE DATA From Child to Output SIZE " << size << "...... PID: " << fd->pid << " FD:" << fd->cout ); - result = splice( fd->cout, NULL, rd_fd, NULL, size,0 ); - LOG_TRACE(GPP_i, " SPLICE DATA From Child to Output RES:" << result << "... 
PID: " << fd->pid << " FD:" << fd->cout ); - } - if ( (int64_t)result == -1 ) { - LOG_ERROR(GPP_i, "(redirected IO) Error during transfer to redirected file, PID: " << fd->pid << " FD:" << fd->cout ); - close(fd->cout); - fd->cout = -1; - } - else { - nbytes += result; - cnt++; - } + size_t nfds=redirectedFds.size(); + // set default redirect to be master + rd_fd=redirect_file; + std::vector events(nfds); + int rfds = epoll_wait(epfd, events.data(), nfds, 10); + + if ( rfds > 0 ) { + for ( int i=0; i< rfds; i++ ) { + size = 0; + result=0; + proc_redirect *fd=(proc_redirect*)events[i].data.ptr; + if (ioctl (fd->cout, FIONREAD, &size) == -1) { + RH_ERROR(this->_baseLog, "(redirected IO) Error requesting how much to read, PID: " << fd->pid << " FD:" << fd->cout ); + close(fd->cout); + fd->cout = -1; + fcloses++; + } + if ( fd->cout != -1 && rd_fd != -1 ) { + result = splice( fd->cout, NULL, rd_fd, NULL, size,0 ); + RH_TRACE(this->_baseLog, " SPLICE DATA From Child to Output RES:" << result << "... 
PID: " << fd->pid << " FD:" << fd->cout ); + } + if ( (int64_t)result == -1 ) { + RH_ERROR(this->_baseLog, "(redirected IO) Error during transfer to redirected file, PID: " << fd->pid << " FD:" << fd->cout ); + close(fd->cout); + fd->cout = -1; + fcloses++; + } + else { + nbytes += result; + cnt++; + } } - - } - - /// close our per component redirected io file if we opened one - if ( rd_fd != -1 && rd_fd != redirect_file ) { - fcloses++; - close(rd_fd); - } - } - + // close file while we wait if ( redirect_file ) close(redirect_file); - LOG_DEBUG(GPP_i, " IO REDIRECT, NPROCS: "<< redirectedFds.size() << " OPEN/CLOSE " << fopens << "/" << fcloses <<" PROCESSED PROCS/Bytes " << cnt << "/" << nbytes ); + RH_DEBUG(this->_baseLog, " IO REDIRECT, NPROCS: "<< redirectedFds.size() << " CLOSED: " << fcloses <<" PROCESSED PROCS/Bytes " << cnt << "/" << nbytes ); return NOOP; } @@ -2736,7 +3021,7 @@ void GPP_i::addProcess(int pid, const std::string &appName, const std::string &i ProcessList:: iterator result = std::find_if( pids.begin(), pids.end(), std::bind2nd( FindPid(), pid ) ); if ( result != pids.end() ) return; - LOG_DEBUG(GPP_i, "START Adding Process/RES: " << pid << "/" << req_reservation << " APP:" << appName ); + RH_DEBUG(this->_baseLog, "START Adding Process/RES: " << pid << "/" << req_reservation << " APP:" << appName ); component_description tmp; tmp.appName = appName; tmp.pid = pid; @@ -2745,7 +3030,10 @@ void GPP_i::addProcess(int pid, const std::string &appName, const std::string &i tmp.core_usage = 0; tmp.parent = this; pids.push_front( tmp ); - LOG_DEBUG(GPP_i, "END Adding Process/RES: " << pid << "/" << req_reservation << " APP:" << appName ); + if (applicationReservations.find(appName) != applicationReservations.end()) { + applicationReservations[appName].component_pids.push_back(pid); + } + RH_DEBUG(this->_baseLog, "END Adding Process/RES: " << pid << "/" << req_reservation << " APP:" << appName ); } GPP_i::component_description 
GPP_i::getComponentDescription(int pid) @@ -2762,7 +3050,7 @@ void GPP_i::markPidTerminated( const int pid) ReadLock lock(pidLock); ProcessList:: iterator it = std::find_if( pids.begin(), pids.end(), std::bind2nd( FindPid(), pid ) ); if (it == pids.end()) return; - LOG_DEBUG(GPP_i, " Mark For Termination: " << it->pid << " APP:" << it->appName ); + RH_DEBUG(this->_baseLog, " Mark For Termination: " << it->pid << " APP:" << it->appName ); it->app_started= false; it->terminated = true; } @@ -2774,7 +3062,7 @@ void GPP_i::removeProcess(int pid) WriteLock wlock(pidLock); ProcessList:: iterator result = std::find_if( pids.begin(), pids.end(), std::bind2nd( FindPid(), pid ) ); if ( result != pids.end() ) { - LOG_DEBUG(GPP_i, "Monitor Process: REMOVE Process: " << result->pid << " app: " << result->appName ); + RH_DEBUG(this->_baseLog, "Monitor Process: REMOVE Process: " << result->pid << " app: " << result->appName ); pids.erase(result); } } @@ -2783,9 +3071,11 @@ void GPP_i::removeProcess(int pid) WriteLock wlock(fdsLock); ProcessFds::iterator i=std::find_if( redirectedFds.begin(), redirectedFds.end(), std::bind2nd( FindRedirect(), pid ) ); if ( i != redirectedFds.end() ) { - i->close(); - LOG_DEBUG(GPP_i, "Redirectio IO ..REMOVE Redirected pid:" << pid ); - redirectedFds.erase(i); + ProcRedirectPtr rdp=*i; + rdp->close(); + rdp.reset(); + RH_DEBUG(this->_baseLog, "Redirectio IO ..REMOVE Redirected pid:" << pid ); + redirectedFds.erase(i); } } @@ -2914,3 +3204,21 @@ int GPP_i::_get_deploy_on_partition() { if ( psoc > -1 ) { RH_NL_INFO("GPP", " Deploy resource on selected SOCKET PARTITON, socket:" << psoc ); } return psoc; } + +void GPP_i::_cleanupProcessShm(pid_t pid) +{ + const std::string heap_name = redhawk::shm::getProcessHeapName(pid); + redhawk::shm::SuperblockFile heap(heap_name); + try { + heap.open(false); + } catch (const std::exception&) { + // Ignore error, it probably doesn't exist + return; + } + RH_DEBUG(_baseLog, "Removing shared memory heap '" << 
heap_name << "'"); + try { + heap.file().unlink(); + } catch (const std::exception&) { + // Someone else removed it in the meantime + } +} diff --git a/GPP/cpp/GPP.h b/GPP/cpp/GPP.h index d8de40ad3..6f0d609bf 100644 --- a/GPP/cpp/GPP.h +++ b/GPP/cpp/GPP.h @@ -31,8 +31,6 @@ #include "statistics/Statistics.h" #include "statistics/CpuUsageStats.h" #include "reports/SystemMonitorReporting.h" -#include "reports/CpuThresholdMonitor.h" -#include "reports/NicThroughputThresholdMonitor.h" #include "NicFacade.h" #include "ossie/Events.h" @@ -80,6 +78,8 @@ class GPP_i : public GPP_base void deallocate_diskCapacity(const double &value); bool allocate_memCapacity(const CORBA::LongLong &value); void deallocate_memCapacity(const CORBA::LongLong &value); + bool allocate_reservation_request(const redhawk__reservation_request_struct &value); + void deallocate_reservation_request(const redhawk__reservation_request_struct &value); bool allocate_mcastegress_capacity(const CORBA::Long &value); void deallocate_mcastegress_capacity(const CORBA::Long &value); bool allocate_mcastingress_capacity(const CORBA::Long &value); @@ -94,7 +94,8 @@ class GPP_i : public GPP_base CF::ExecutableDevice::ProcessID_Type do_execute (const char* name, const CF::Properties& options, const CF::Properties& parameters, - const std::vector prepend_args) + const std::vector prepend_args, + const bool use_docker) throw (CF::ExecutableDevice::ExecuteFail, CF::InvalidFileName, CF::ExecutableDevice::InvalidOptions, CF::ExecutableDevice::InvalidParameters, @@ -108,8 +109,6 @@ class GPP_i : public GPP_base void sendChildNotification(const std::string &comp_id, const std::string &app_id); bool allocateCapacity_nic_allocation(const nic_allocation_struct &value); void deallocateCapacity_nic_allocation(const nic_allocation_struct &value); - void deallocateCapacity (const CF::Properties& capacities) throw (CF::Device::InvalidState, CF::Device::InvalidCapacity, CORBA::SystemException); - CORBA::Boolean allocateCapacity 
(const CF::Properties& capacities) throw (CF::Device::InvalidState, CF::Device::InvalidCapacity, CF::Device::InsufficientCapacity, CORBA::SystemException); void releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError); @@ -150,7 +149,8 @@ class GPP_i : public GPP_base void close(); }; - + typedef boost::shared_ptr ProcRedirectPtr; + struct component_description { static const int pstat_history_len=5; int pid; @@ -174,6 +174,13 @@ class GPP_i : public GPP_base int64_t get_process_time(); }; + struct application_reservation { + std::vector component_pids; + std::map reservation; + float usage; + }; + + void constructor(); protected: @@ -234,52 +241,47 @@ class GPP_i : public GPP_base void process_ODM(const CORBA::Any &data); void updateUsageState(); - void setShadowThresholds(const thresholds_struct &newVals ); - typedef boost::shared_ptr NicMonitorPtr; typedef boost::shared_ptr ThresholdMonitorPtr; typedef std::vector< uint32_t > CpuList; typedef std::vector< boost::shared_ptr > UpdateableSequence; - typedef std::vector > StateSequence; - typedef std::vector > StatisticsSequence; - typedef std::vector > ReportingSequence; typedef std::vector< ThresholdMonitorPtr > MonitorSequence; - typedef std::vector< NicMonitorPtr > NicMonitorSequence; typedef boost::shared_ptr SystemMonitorPtr; typedef std::map ProcessMap; typedef std::deque< component_description > ProcessList; - typedef std::deque< proc_redirect > ProcessFds; + typedef std::deque< ProcRedirectPtr > ProcessFds; + typedef std::map ApplicationReservationMap; void addProcess(int pid, const std::string &appName, const std::string &identifier, const float req_reservation ); void removeProcess(int pid ); - void addThresholdMonitor( ThresholdMonitorPtr threshold_monitor ); - void reservedChanged(const float *oldValue, const float *newValue); - void mcastnicThreshold_changed(const CORBA::Long *oldValue, const CORBA::Long *newValue); - void thresholds_changed(const thresholds_struct *oldValue, const 
thresholds_struct *newValue); + void reservedChanged(float oldValue, float newValue); + void mcastnicThreshold_changed(int oldValue, int newValue); + void thresholds_changed(const thresholds_struct& oldValue, const thresholds_struct& newValue); void update(); + void updateProcessStats(); ProcessList pids; size_t n_reservations; Lock pidLock; Lock fdsLock; ProcessFds redirectedFds; + int epfd; bool _handle_io_redirects; std::string _componentOutputLog; Lock nicLock; NicFacadePtr nic_facade; MonitorSequence threshold_monitors; - NicMonitorSequence nic_monitors; SystemMonitorPtr system_monitor; ProcessLimitsPtr process_limits; ExecPartitionList execPartitions; + ApplicationReservationMap applicationReservations; Lock monitorLock; UpdateableSequence data_model; - thresholds_struct __thresholds; thresholds_struct modified_thresholds; uint64_t thresh_mem_free_units; uint64_t mem_free_units; @@ -299,17 +301,20 @@ class GPP_i : public GPP_base redhawk::events::SubscriberPtr odm_consumer; // interface that receives ODM_Channel events redhawk::events::ManagerPtr mymgr; // interface to manage event channel access - std::string _busy_reason; - boost::posix_time::ptime _busy_timestamp; // time when busy reason was initially set - boost::posix_time::ptime _busy_mark; // track message output + // State tracking for busy reason + struct { + std::string resource; + boost::posix_time::ptime timestamp; // time when busy reason was initially set + boost::posix_time::ptime mark; // track message output + } _busy; private: // // set the busy reason property for the GPP.. 
// - void _resetReason(); - void _setReason( const std::string &reason, const std::string &event, const bool enable_timestamp = true ); + void _resetBusyReason(); + void _setBusyReason(const std::string& resource, const std::string& message); bool _component_cleanup( const int pid, const int exit_status ); @@ -350,7 +355,7 @@ class GPP_i : public GPP_base // // Callback when componentOutputLog is changed // - void _component_output_changed(const std::string *ov, const std::string *nv ); + void _component_output_changed(const std::string& ov, const std::string& nv); // // Set vlan list attribute @@ -372,20 +377,52 @@ class GPP_i : public GPP_base // void _init(); + ThresholdMonitorPtr _cpuIdleThresholdMonitor; + ThresholdMonitorPtr _freeMemThresholdMonitor; + ThresholdMonitorPtr _loadAvgThresholdMonitor; + ThresholdMonitorPtr _threadThresholdMonitor; + ThresholdMonitorPtr _fileThresholdMonitor; + ThresholdMonitorPtr _shmThresholdMonitor; + + boost::shared_ptr _allNicsThresholdMonitor; + + template + void _sendThresholdMessage(ThresholdMonitor* monitor, const T1& measured, const T2& threshold); + bool _shmThresholdCheck(ThresholdMonitor* monitor); + void _shmThresholdStateChanged(ThresholdMonitor* monitor); + + bool _cpuIdleThresholdCheck(ThresholdMonitor* monitor); + void _cpuIdleThresholdStateChanged(ThresholdMonitor* monitor); + + bool _loadAvgThresholdCheck(ThresholdMonitor* monitor); + void _loadAvgThresholdStateChanged(ThresholdMonitor* monitor); + + bool _freeMemThresholdCheck(ThresholdMonitor* monitor); + void _freeMemThresholdStateChanged(ThresholdMonitor* monitor); + // // check thread limits for the process and system // - bool _check_thread_limits( const thresholds_struct &threshold); + bool _threadThresholdCheck(ThresholdMonitor* monitor); + void _threadThresholdStateChanged(ThresholdMonitor* monitor); // // check file limits for the process and system // - bool _check_file_limits( const thresholds_struct &threshold); + bool 
_fileThresholdCheck(ThresholdMonitor* monitor); + void _fileThresholdStateChanged(ThresholdMonitor* monitor); // // check threshold limits for nic interfaces to determine busy state // - bool _check_nic_thresholds(); + bool _nicThresholdCheck(ThresholdMonitor* monitor); + void _nicThresholdStateChanged(ThresholdMonitor* monitor); + + void _cleanupProcessShm(pid_t pid); + + // Processor time counters + int64_t _systemTicks; + int64_t _userTicks; std::string user_id; ossie::ProcessThread _signalThread; diff --git a/GPP/cpp/GPP_base.cpp b/GPP/cpp/GPP_base.cpp index e9a3df80c..0545c2a84 100644 --- a/GPP/cpp/GPP_base.cpp +++ b/GPP/cpp/GPP_base.cpp @@ -4,15 +4,15 @@ * * This file is part of REDHAWK GPP. * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. + * REDHAWK GPP is free software: you can redistribute it and/or modify it under + * the terms of the GNU Lesser General Public License as published by the Free + * Software Foundation, either version 3 of the License, or (at your option) any + * later version. * * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more + * details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see http://www.gnu.org/licenses/. 
@@ -33,40 +33,36 @@ GPP_base::GPP_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : ExecutableDevice_impl(devMgr_ior, id, lbl, sftwrPrfl), ThreadedComponent() { - construct(); + construct(); } GPP_base::GPP_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : ExecutableDevice_impl(devMgr_ior, id, lbl, sftwrPrfl, compDev), ThreadedComponent() { - construct(); + construct(); } GPP_base::GPP_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : ExecutableDevice_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities), ThreadedComponent() { - construct(); + construct(); } GPP_base::GPP_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : ExecutableDevice_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev), ThreadedComponent() { - construct(); + construct(); } GPP_base::~GPP_base() { - if (propEvent) { - delete propEvent; + propEvent->_remove_ref(); propEvent = 0; - } - if ( MessageEvent_out ) { - delete MessageEvent_out; + MessageEvent_out->_remove_ref(); MessageEvent_out = 0; - } } void GPP_base::construct() @@ -76,7 +72,16 @@ void GPP_base::construct() propEvent = new PropertyEventSupplier("propEvent"); addPort("propEvent", propEvent); propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:9190eb70-bd1e-4556-87ee-5a259dcfee39")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:506102d6-04a9-4532-9420-a323d818ddec")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:eb08e43f-11c7-45a0-8750-edff439c8b24")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:0b57a27a-8fa2-412b-b0ae-010618b8f40e")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, 
this->getPropertyFromId("DCE:9b5bbdcb-1894-4b95-847c-787f121c05ae")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:89be90ae-6a83-4399-a87d-5f4ae30ef7b1")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:6565bffd-cb09-4927-9385-2ecac68035c7")); propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:22a60339-b66e-4309-91ae-e9bfed6f0490")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:6c000787-6fea-4765-8686-2e051e6c24b0")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056")); + propEvent->registerProperty(this->_identifier, this->naming_service_name, this->getPropertyFromId("DCE:9da85ebc-6503-48e7-af36-b77c7ad0c2b4")); this->registerPropertyChangePort(propEvent); MessageEvent_out = new MessageSupplierPort("MessageEvent_out"); addPort("MessageEvent_out", MessageEvent_out); @@ -122,15 +127,16 @@ void GPP_base::loadProperties() "readonly", "", "eq", - "property,allocation,configure"); + "allocation,configure"); addProperty(device_model, + "REDHAWK GPP", "DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", "device_model", "readonly", - "REDHAWK GPP", + "", "eq", - "property,allocation,configure"); + "allocation,property,configure"); addProperty(processor_name, "DCE:fefb9c66-d14a-438d-ad59-2cfd1adb272b", @@ -138,7 +144,7 @@ void GPP_base::loadProperties() "readonly", "", "eq", - "property,allocation,configure"); + "allocation,property,configure"); addProperty(os_name, "DCE:4a23ad60-0b25-4121-a630-68803a498f75", @@ -146,7 +152,7 @@ void GPP_base::loadProperties() "readonly", "", "eq", - "property,allocation,configure"); + "allocation,property,configure"); addProperty(os_version, "DCE:0f3a9a37-a342-43d8-9b7f-78dc6da74192", @@ -154,7 +160,7 @@ void 
GPP_base::loadProperties() "readonly", "", "eq", - "property,allocation,configure"); + "allocation,property,configure"); addProperty(hostName, "DCE:9190eb70-bd1e-4556-87ee-5a259dcfee39", @@ -174,13 +180,22 @@ void GPP_base::loadProperties() "execparam"); addProperty(componentOutputLog, + "", "DCE:c80f6c5a-e3ea-4f57-b0aa-46b7efac3176", "componentOutputLog", "readwrite", "", "external", "property"); - + + addProperty(docker_omniorb_cfg, + "docker_omniorb_cfg", + "docker_omniorb_cfg", + "readonly", + "/etc/omniORB.cfg", + "external", + "property"); + addProperty(mcastnicInterface, "", "DCE:4e416acc-3144-47eb-9e38-97f1d24f7700", @@ -209,7 +224,6 @@ void GPP_base::loadProperties() "execparam"); addProperty(mcastnicIngressCapacity, - 0, "DCE:506102d6-04a9-4532-9420-a323d818ddec", "mcastnicIngressCapacity", "readwrite", @@ -218,7 +232,6 @@ void GPP_base::loadProperties() "allocation,event"); addProperty(mcastnicEgressCapacity, - 0, "DCE:eb08e43f-11c7-45a0-8750-edff439c8b24", "mcastnicEgressCapacity", "readwrite", @@ -253,91 +266,185 @@ void GPP_base::loadProperties() "external", "configure,event"); - addProperty(mcastnicVLANs, - "DCE:65544aad-4c73-451f-93de-d4d76984025a", - "mcastnicVLANs", + addProperty(threshold_cycle_time, + 500, + "threshold_cycle_time", + "threshold_cycle_time", "readwrite", + "milliseconds", + "external", + "property"); + + addProperty(busy_reason, + "", + "busy_reason", + "busy_reason", + "readonly", "", "external", - "allocation"); + "property"); - // Set the sequence with its initial values - nic_interfaces.push_back("e.*"); - addProperty(nic_interfaces, - nic_interfaces, - "nic_interfaces", + addProperty(cacheDirectory, "", - "readwrite", + "cacheDirectory", + "cacheDirectory", + "readonly", "", "external", - "configure,property"); + "property"); - addProperty(available_nic_interfaces, - "available_nic_interfaces", + addProperty(workingDirectory, + "", + "workingDirectory", + "workingDirectory", + "readonly", "", + "external", + "property"); + + 
addProperty(memFree, + "DCE:6565bffd-cb09-4927-9385-2ecac68035c7", + "memFree", + "readonly", + "MiB", + "external", + "configure,event"); + + addProperty(memCapacity, + "DCE:8dcef419-b440-4bcf-b893-cab79b6024fb", + "memCapacity", + "readwrite", + "MiB", + "external", + "allocation"); + + addProperty(shmFree, + "shmFree", + "shmFree", + "readonly", + "MB", + "external", + "property"); + + addProperty(shmCapacity, + "shmCapacity", + "shmCapacity", + "readonly", + "MB", + "external", + "property"); + + addProperty(loadTotal, + "DCE:28b23bc8-e4c0-421b-9c52-415a24715209", + "loadTotal", "readonly", "", "external", "configure"); - addProperty(nic_allocation, - nic_allocation_struct(), - "nic_allocation", - "nic_allocation", + addProperty(loadCapacityPerCore, + 1.0, + "DCE:3bf07b37-0c00-4e2a-8275-52bd4e391f07", + "loadCapacityPerCore", + "readwrite", + "", + "gt", + "allocation,execparam"); + + addProperty(loadThreshold, + 80, + "DCE:22a60339-b66e-4309-91ae-e9bfed6f0490", + "loadThreshold", "readwrite", + "%", + "external", + "configure,event"); + + addProperty(loadFree, + "DCE:6c000787-6fea-4765-8686-2e051e6c24b0", + "loadFree", + "readonly", "", "external", - "allocation"); + "configure,event"); - addProperty(advanced, - advanced_struct(), - "advanced", + addProperty(loadCapacity, + "DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056", + "loadCapacity", + "readwrite", "", + "external", + "allocation,event"); + + addProperty(reserved_capacity_per_component, + 0.1, + "reserved_capacity_per_component", + "reserved_capacity_per_component", "readwrite", "", "external", "configure"); - addProperty(nic_allocation_status, - "nic_allocation_status", + addProperty(processor_cores, + "processor_cores", "", "readonly", "", "external", "configure"); - addProperty(nic_metrics, - "nic_metrics", + addProperty(processor_monitor_list, + "processor_monitor_list", "", "readonly", "", "external", "configure"); - addProperty(networkMonitor, - "networkMonitor", + addProperty(mcastnicVLANs, + 
"DCE:65544aad-4c73-451f-93de-d4d76984025a", + "mcastnicVLANs", + "readwrite", "", - "readonly", + "external", + "allocation"); + + // Set the sequence with its initial values + nic_interfaces.push_back("e.*"); + addProperty(nic_interfaces, + nic_interfaces, + "nic_interfaces", + "", + "readwrite", "", "external", - "configure"); + "property,configure"); - addProperty(component_monitor, - "component_monitor", + addProperty(available_nic_interfaces, + "available_nic_interfaces", "", "readonly", "", "external", - "property"); + "configure"); - addProperty(affinity, - affinity_struct(), - "affinity", + addProperty(nic_allocation, + nic_allocation_struct(), + "nic_allocation", + "nic_allocation", + "readwrite", + "", + "external", + "allocation"); + + addProperty(advanced, + advanced_struct(), + "advanced", "", "readwrite", "", "external", - "property"); + "configure"); addProperty(threshold_event, threshold_event_struct(), @@ -348,14 +455,6 @@ void GPP_base::loadProperties() "external", "message"); - addProperty(busy_reason, - "busy_reason", - "", - "readonly", - "", - "external", - "property"); - addProperty(thresholds, thresholds_struct(), "thresholds", @@ -365,17 +464,17 @@ void GPP_base::loadProperties() "external", "property"); - addProperty(threshold_cycle_time, - 500, - "threshold_cycle_time", - "threshold_cycle_time", - "readwrite", - "milliseconds", + addProperty(loadAverage, + loadAverage_struct(), + "DCE:9da85ebc-6503-48e7-af36-b77c7ad0c2b4", + "loadAverage", + "readonly", + "", "external", - "property"); - + "configure,event"); + addProperty(gpp_limits, - ulimit_struct(), + gpp_limits_struct(), "gpp_limits", "", "readonly", @@ -392,107 +491,64 @@ void GPP_base::loadProperties() "external", "property"); - addProperty(utilization, - "utilization", + addProperty(redhawk__reservation_request, + redhawk__reservation_request_struct(), + "redhawk::reservation_request", "", - "readonly", + "readwrite", "", "external", - "property"); + "allocation"); - 
addProperty(processor_cores, - "processor_cores", + addProperty(affinity, + affinity_struct(), + "affinity", "", - "readonly", + "readwrite", "", "external", - "configure"); + "property"); - addProperty(processor_monitor_list, - "processor_monitor_list", + addProperty(nic_allocation_status, + "nic_allocation_status", "", "readonly", "", "external", "configure"); - addProperty(memFree, - "DCE:6565bffd-cb09-4927-9385-2ecac68035c7", - "memFree", + addProperty(nic_metrics, + "nic_metrics", + "", "readonly", - "MiB", - "external", - "configure,event"); - - addProperty(memCapacity, - "DCE:8dcef419-b440-4bcf-b893-cab79b6024fb", - "memCapacity", - "readwrite", - "MiB", - "external", - "allocation,event"); - - addProperty(reserved_capacity_per_component, - 0.1, - "reserved_capacity_per_component", - "reserved_capacity_per_component", - "readwrite", "", "external", "configure"); - addProperty(loadTotal, - "DCE:28b23bc8-e4c0-421b-9c52-415a24715209", - "loadTotal", + addProperty(networkMonitor, + "networkMonitor", + "", "readonly", "", "external", "configure"); - addProperty(loadThreshold, - 80, - "DCE:22a60339-b66e-4309-91ae-e9bfed6f0490", - "loadThreshold", - "readwrite", - "%", - "external", - "configure,event"); - - addProperty(loadCapacityPerCore, - 1.0, - "DCE:3bf07b37-0c00-4e2a-8275-52bd4e391f07", - "loadCapacityPerCore", - "readwrite", + addProperty(utilization, + "utilization", "", - "gt", - "allocation,execparam"); - - addProperty(loadFree, - "DCE:6c000787-6fea-4765-8686-2e051e6c24b0", - "loadFree", "readonly", "", "external", - "configure,event"); + "property"); - addProperty(loadCapacity, - "DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056", - "loadCapacity", - "readwrite", + addProperty(component_monitor, + "component_monitor", "", - "external", - "allocation,event"); - - addProperty(loadAverage, - loadAverage_struct(), - "DCE:9da85ebc-6503-48e7-af36-b77c7ad0c2b4", - "loadAverage", "readonly", "", "external", "property"); - } diff --git a/GPP/cpp/GPP_base.h 
b/GPP/cpp/GPP_base.h index 5809f2c19..e46488a65 100644 --- a/GPP/cpp/GPP_base.h +++ b/GPP/cpp/GPP_base.h @@ -4,21 +4,21 @@ * * This file is part of REDHAWK GPP. * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. + * REDHAWK GPP is free software: you can redistribute it and/or modify it under + * the terms of the GNU Lesser General Public License as published by the Free + * Software Foundation, either version 3 of the License, or (at your option) any + * later version. * * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more + * details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ -#ifndef GPP_IMPL_BASE_H -#define GPP_IMPL_BASE_H +#ifndef GPP_BASE_IMPL_BASE_H +#define GPP_BASE_IMPL_BASE_H #include #include @@ -47,76 +47,114 @@ class GPP_base : public ExecutableDevice_impl, protected ThreadedComponent protected: // Member variables exposed as properties + /// Property: device_kind std::string device_kind; + /// Property: device_model std::string device_model; + /// Property: processor_name std::string processor_name; + /// Property: os_name std::string os_name; + /// Property: os_version std::string os_version; + /// Property: hostName std::string hostName; + // Property: docker_omniorb_cfg + std::string docker_omniorb_cfg; + /// Property: useScreen + bool useScreen; + /// Property: componentOutputLog std::string componentOutputLog; - bool useScreen; - advanced_struct advanced; - - std::vector nic_interfaces; - std::vector available_nic_interfaces; - nic_allocation_struct nic_allocation; + /// Property: mcastnicInterface std::string mcastnicInterface; + /// Property: mcastnicIngressTotal CORBA::Long mcastnicIngressTotal; + /// Property: mcastnicEgressTotal CORBA::Long mcastnicEgressTotal; + /// Property: mcastnicIngressCapacity CORBA::Long mcastnicIngressCapacity; + /// Property: mcastnicEgressCapacity CORBA::Long mcastnicEgressCapacity; + /// Property: mcastnicIngressFree CORBA::Long mcastnicIngressFree; + /// Property: mcastnicEgressFree CORBA::Long mcastnicEgressFree; + /// Property: mcastnicThreshold CORBA::Long mcastnicThreshold; - std::vector mcastnicVLANs; - std::vector nic_allocation_status; - std::vector nic_metrics; - std::vector networkMonitor; - std::vector component_monitor; - - // reporting struct when a threshold is broke - threshold_event_struct threshold_event; - // threshold items to watch - thresholds_struct thresholds; - /// Property to annotate why the system is busy - std::string busy_reason; - // time between cycles to refresh threshold metrics + /// Property: threshold_cycle_time CORBA::ULong threshold_cycle_time; - 
// ulimits for the GPP process - ulimit_struct gpp_limits; - // ulimits for the system as a whole - sys_limits_struct sys_limits; + /// Property: busy_reason + std::string busy_reason; + /// Property: cacheDirectory + std::string cacheDirectory; + /// Property: workingDirectory + std::string workingDirectory; /// Property: memFree CORBA::LongLong memFree; /// Property: memCapacity CORBA::LongLong memCapacity; + /// Property: shmFree + CORBA::LongLong shmFree; + /// Property: shmCapacity + CORBA::LongLong shmCapacity; /// Property: loadTotal double loadTotal; - /// Property: loadThreshold - CORBA::Long loadThreshold; /// Property: loadCapacityPerCore double loadCapacityPerCore; + /// Property: loadThreshold + CORBA::Long loadThreshold; /// Property: loadFree double loadFree; /// Property: loadCapacity double loadCapacity; + /// Property: reserved_capacity_per_component + float reserved_capacity_per_component; + /// Property: processor_cores + short processor_cores; + /// Property: processor_monitor_list + std::string processor_monitor_list; + /// Property: mcastnicVLANs + std::vector mcastnicVLANs; + /// Property: nic_interfaces + std::vector nic_interfaces; + /// Property: available_nic_interfaces + std::vector available_nic_interfaces; + /// Property: nic_allocation + nic_allocation_struct nic_allocation; + /// Property: advanced + advanced_struct advanced; + /// Message structure definition for threshold_event + threshold_event_struct threshold_event; + /// Property: thresholds + thresholds_struct thresholds; /// Property: loadAverage loadAverage_struct loadAverage; - /// Property: reserved capacity per core for reservation schema - float reserved_capacity_per_component; - /// Property processor_cores - number of cores the machine supports - short processor_cores; - /// Property processor_monitor_list - list of the cores we are watching.. 
- std::string processor_monitor_list; - // Property affinity - controls affinity processing for the GPP + /// Property: gpp_limits + gpp_limits_struct gpp_limits; + /// Property: sys_limits + sys_limits_struct sys_limits; + /// Property: redhawk__reservation_request + redhawk__reservation_request_struct redhawk__reservation_request; + /// Property: affinity affinity_struct affinity; + /// Property: nic_allocation_status + std::vector nic_allocation_status; + /// Property: nic_metrics + std::vector nic_metrics; + /// Property: networkMonitor + std::vector networkMonitor; + /// Property: utilization + std::vector utilization; + /// Property: component_monitor + std::vector component_monitor; // Ports + /// Port: propEvent PropertyEventSupplier *propEvent; + /// Port: MessageEvent_out MessageSupplierPort *MessageEvent_out; - std::vector utilization; private: void construct(); }; -#endif // GPP_IMPL_BASE_H +#endif // GPP_BASE_IMPL_BASE_H diff --git a/GPP/cpp/Makefile.am b/GPP/cpp/Makefile.am index 9db975e8e..e1967954a 100644 --- a/GPP/cpp/Makefile.am +++ b/GPP/cpp/Makefile.am @@ -26,6 +26,9 @@ xmldir = $(prefix)/dev/devices/GPP/ dist_xml_DATA = ../GPP.scd.xml ../GPP.prf.xml ../GPP.spd.xml ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +screendir = $(bindir) +screen_DATA = $(srcdir)/gpp.screenrc + all-local: GPP mkdir -p ../tests/sdr/dev/devices/GPP/cpp cp GPP ../tests/sdr/dev/devices/GPP/cpp/ diff --git a/GPP/cpp/Makefile.am.ide b/GPP/cpp/Makefile.am.ide index 9404e22f8..24b85f560 100644 --- a/GPP/cpp/Makefile.am.ide +++ b/GPP/cpp/Makefile.am.ide @@ -14,16 +14,11 @@ redhawk_SOURCES_auto += NicFacade.h redhawk_SOURCES_auto += NicInterfaceFilter.cpp redhawk_SOURCES_auto += NicInterfaceFilter.h redhawk_SOURCES_auto += main.cpp -redhawk_SOURCES_auto += reports/NicThroughputThresholdMonitor.cpp -redhawk_SOURCES_auto += reports/NicThroughputThresholdMonitor.h -redhawk_SOURCES_auto += reports/FreeMemoryThresholdMonitor.cpp -redhawk_SOURCES_auto += 
reports/FreeMemoryThresholdMonitor.h redhawk_SOURCES_auto += reports/Reporting.h +redhawk_SOURCES_auto += reports/ThresholdMonitor.cpp redhawk_SOURCES_auto += reports/ThresholdMonitor.h redhawk_SOURCES_auto += reports/SystemMonitorReporting.cpp redhawk_SOURCES_auto += reports/SystemMonitorReporting.h -redhawk_SOURCES_auto += reports/CpuThresholdMonitor.cpp -redhawk_SOURCES_auto += reports/CpuThresholdMonitor.h redhawk_SOURCES_auto += parsers/ProcStatFileParser.cpp redhawk_SOURCES_auto += parsers/ProcStatFileParser.h redhawk_SOURCES_auto += parsers/PidProcStatParser.cpp @@ -59,7 +54,6 @@ redhawk_SOURCES_auto += utils/CmdlineExecutor.cpp redhawk_SOURCES_auto += utils/CmdlineExecutor.h redhawk_SOURCES_auto += utils/EnvironmentPathParser.cpp redhawk_SOURCES_auto += utils/EnvironmentPathParser.h -redhawk_SOURCES_auto += utils/EventDispatcher.h redhawk_SOURCES_auto += utils/FileReader.cpp redhawk_SOURCES_auto += utils/FileReader.h redhawk_SOURCES_auto += utils/IOError.h diff --git a/GPP/cpp/NicFacade.cpp b/GPP/cpp/NicFacade.cpp index e8951a0aa..ec81374b0 100644 --- a/GPP/cpp/NicFacade.cpp +++ b/GPP/cpp/NicFacade.cpp @@ -28,6 +28,7 @@ #include #include +#include #if BOOST_FILESYSTEM_VERSION < 3 #define BOOST_PATH_STRING(x) (x) @@ -97,6 +98,12 @@ NicFacade::poll_nic_interfaces() const tmp << BOOST_PATH_STRING(iter->path()); boost::filesystem::path test_file( tmp.str() + "/statistics/rx_bytes" ); + std::string operstate = tmp.str()+"/operstate"; + std::ifstream fp(operstate.c_str()); + std::string _state; + std::getline(fp, _state); + if (_state==std::string("down")) continue; + if(boost::filesystem::is_regular_file(test_file)) { interfaces.push_back( BOOST_PATH_STRING(iter->path().filename()) ); diff --git a/GPP/cpp/affinity_struct.h b/GPP/cpp/affinity_struct.h deleted file mode 100644 index 8278b0006..000000000 --- a/GPP/cpp/affinity_struct.h +++ /dev/null @@ -1,110 +0,0 @@ -/* - * This file is protected by Copyright. 
Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK GPP. - * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef AFFINITY_STRUCTPROPS_H -#define AFFINITY_STRUCTPROPS_H - -#include -#include -#include - -struct affinity_struct { - affinity_struct () - { - force_override = false; - deploy_per_socket = false; - disabled = true; - }; - - static std::string getId() { - return std::string("affinity"); - }; - - std::string exec_directive_value; - std::string exec_directive_class; - bool force_override; - std::string blacklist_cpus; - bool deploy_per_socket; - bool disabled; -}; - -inline bool operator>>= (const CORBA::Any& a, affinity_struct& s) { - CF::Properties* temp; - if (!(a >>= temp)) return false; - const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); - if (props.contains("affinity::exec_directive_value")) { - if (!(props["affinity::exec_directive_value"] >>= s.exec_directive_value)) return false; - } - if (props.contains("affinity::exec_directive_class")) { - if (!(props["affinity::exec_directive_class"] >>= s.exec_directive_class)) return false; - } - if (props.contains("affinity::force_override")) { - if (!(props["affinity::force_override"] >>= s.force_override)) return false; - } - if (props.contains("affinity::blacklist_cpus")) { - if 
(!(props["affinity::blacklist_cpus"] >>= s.blacklist_cpus)) return false; - } - if (props.contains("affinity::deploy_per_socket")) { - if (!(props["affinity::deploy_per_socket"] >>= s.deploy_per_socket)) return false; - } - if (props.contains("affinity::disabled")) { - if (!(props["affinity::disabled"] >>= s.disabled)) return false; - } - return true; -} - -inline void operator<<= (CORBA::Any& a, const affinity_struct& s) { - redhawk::PropertyMap props; - - props["affinity::exec_directive_value"] = s.exec_directive_value; - - props["affinity::exec_directive_class"] = s.exec_directive_class; - - props["affinity::force_override"] = s.force_override; - - props["affinity::blacklist_cpus"] = s.blacklist_cpus; - - props["affinity::deploy_per_socket"] = s.deploy_per_socket; - - props["affinity::disabled"] = s.disabled; - a <<= props; -} - -inline bool operator== (const affinity_struct& s1, const affinity_struct& s2) { - if (s1.exec_directive_value!=s2.exec_directive_value) - return false; - if (s1.exec_directive_class!=s2.exec_directive_class) - return false; - if (s1.force_override!=s2.force_override) - return false; - if (s1.blacklist_cpus!=s2.blacklist_cpus) - return false; - if (s1.deploy_per_socket!=s2.deploy_per_socket) - return false; - if (s1.disabled!=s2.disabled) - return false; - return true; -} - -inline bool operator!= (const affinity_struct& s1, const affinity_struct& s2) { - return !(s1==s2); -} - - -#endif diff --git a/GPP/cpp/build.sh b/GPP/cpp/build.sh index 7f75c22f1..a12b32c01 100755 --- a/GPP/cpp/build.sh +++ b/GPP/cpp/build.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. @@ -25,5 +25,14 @@ if [ ! -e Makefile ]; then ./configure fi -make -j $* +if [ $# == 1 ]; then + if [ $1 == 'clean' ]; then + make distclean + else + make -j $* + fi +else + make -j $* +fi +exit $? 
diff --git a/GPP/cpp/configure.ac b/GPP/cpp/configure.ac index 573485051..4907c0181 100644 --- a/GPP/cpp/configure.ac +++ b/GPP/cpp/configure.ac @@ -17,7 +17,7 @@ * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see http://www.gnu.org/licenses/. */ -AC_INIT(GPP, 2.0.9) +AC_INIT(GPP, 2.2.1) AM_INIT_AUTOMAKE([foreign nostdinc subdir-objects]) AC_CONFIG_MACRO_DIR([m4]) @@ -33,7 +33,7 @@ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) # Dependencies export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig" -PKG_CHECK_MODULES([PROJECTDEPS], [ossie >= 1.10 omniORB4 >= 4.1.0 ]) +PKG_CHECK_MODULES([PROJECTDEPS], [ossie >= 2.2 omniORB4 >= 4.1.0 ]) OSSIE_ENABLE_LOG4CXX AX_BOOST_BASE([1.41]) AX_BOOST_SYSTEM diff --git a/GPP/cpp/reconf b/GPP/cpp/reconf index b6b303be0..22f279656 100755 --- a/GPP/cpp/reconf +++ b/GPP/cpp/reconf @@ -1,3 +1,4 @@ +#!/bin/sh # # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. @@ -17,5 +18,8 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # + +rm -f config.cache [ -d m4 ] || mkdir m4 autoreconf -i + diff --git a/GPP/cpp/reports/CpuThresholdMonitor.cpp b/GPP/cpp/reports/CpuThresholdMonitor.cpp deleted file mode 100644 index ce71e5940..000000000 --- a/GPP/cpp/reports/CpuThresholdMonitor.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK GPP. - * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#include "CpuThresholdMonitor.h" -#include "statistics/Statistics.h" -#include "utils/ReferenceWrapper.h" - -class CpuUsageAccumulatorQueryFunction -{ -public: - CpuUsageAccumulatorQueryFunction( const CpuStatistics& cpu_usage_accumulator ): - cpu_usage_accumulator_(cpu_usage_accumulator) - {} - - float operator()() const { return cpu_usage_accumulator_.get_idle_percent(); } - -private: - const CpuStatistics& cpu_usage_accumulator_; -}; - -CpuThresholdMonitor::CpuThresholdMonitor( const std::string& source_id, - const float* threshold, - const CpuStatistics & cpu_usage_accumulator, - const bool enableDispatch ): - GenericThresholdMonitor(source_id, GetResourceId(), GetMessageClass(), MakeCref(*threshold), CpuUsageAccumulatorQueryFunction(cpu_usage_accumulator), enableDispatch ) -{ - -} diff --git a/GPP/cpp/reports/CpuThresholdMonitor.h b/GPP/cpp/reports/CpuThresholdMonitor.h deleted file mode 100644 index 2821e20de..000000000 --- a/GPP/cpp/reports/CpuThresholdMonitor.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK GPP. - * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef CPU_THRESHOLD_MONITOR_H_ -#define CPU_THRESHOLD_MONITOR_H_ - -#include "ThresholdMonitor.h" -#include "statistics/Statistics.h" - -class CpuThresholdMonitor : public GenericThresholdMonitor -{ -public: - CpuThresholdMonitor( const std::string& source_id, const float* threshold, const CpuStatistics & cpu_usage_accumulator, - const bool enableDispatch=false ); - - static std::string GetResourceId(){ return "cpu"; } - static std::string GetMessageClass(){ return "CPU_IDLE"; } -}; - -#endif diff --git a/GPP/cpp/reports/FreeMemoryThresholdMonitor.cpp b/GPP/cpp/reports/FreeMemoryThresholdMonitor.cpp deleted file mode 100644 index e2cff7770..000000000 --- a/GPP/cpp/reports/FreeMemoryThresholdMonitor.cpp +++ /dev/null @@ -1,27 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK GPP. - * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. 
If not, see http://www.gnu.org/licenses/. - */ -#include "FreeMemoryThresholdMonitor.h" -#include "utils/ReferenceWrapper.h" - -FreeMemoryThresholdMonitor::FreeMemoryThresholdMonitor( const std::string& source_id, QueryFunction threshold, QueryFunction measured ): -GenericThresholdMonitor(source_id, GetResourceId(), GetMessageClass(), threshold, measured ) -{ - -} diff --git a/GPP/cpp/reports/FreeMemoryThresholdMonitor.h b/GPP/cpp/reports/FreeMemoryThresholdMonitor.h deleted file mode 100644 index 46498ee9e..000000000 --- a/GPP/cpp/reports/FreeMemoryThresholdMonitor.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK GPP. - * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -#ifndef FREE_MEMORY_THRESHOLD_MONITOR_H_ -#define FREE_MEMORY_THRESHOLD_MONITOR_H_ -#include "ThresholdMonitor.h" - -class FreeMemoryThresholdMonitor : public GenericThresholdMonitor -{ -public: - - FreeMemoryThresholdMonitor( const std::string& source_id, QueryFunction threshold, QueryFunction measured ) ; - - static std::string GetResourceId(){ return "physical_ram"; } - static std::string GetMessageClass(){ return "MEMORY_FREE"; } - -}; - -#endif diff --git a/GPP/cpp/reports/NicThroughputThresholdMonitor.cpp b/GPP/cpp/reports/NicThroughputThresholdMonitor.cpp deleted file mode 100644 index 4b297dc67..000000000 --- a/GPP/cpp/reports/NicThroughputThresholdMonitor.cpp +++ /dev/null @@ -1,26 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK GPP. - * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -#include "NicThroughputThresholdMonitor.h" -#include "../utils/ReferenceWrapper.h" - -NicThroughputThresholdMonitor::NicThroughputThresholdMonitor( const std::string& source_id, const std::string& resource_id, NicThroughputThresholdMonitor::QueryFunction threshold, NicThroughputThresholdMonitor::QueryFunction measured ): -GenericThresholdMonitor >(source_id, resource_id, GetMessageClass(), threshold, measured ) -{ -} diff --git a/GPP/cpp/reports/NicThroughputThresholdMonitor.h b/GPP/cpp/reports/NicThroughputThresholdMonitor.h deleted file mode 100644 index 30f92dfb6..000000000 --- a/GPP/cpp/reports/NicThroughputThresholdMonitor.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK GPP. - * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -#ifndef NIC_THROUGHPUT_THRESHOLD_MONITOR_H_ -#define NIC_THROUGHPUT_THRESHOLD_MONITOR_H_ - -#include "ThresholdMonitor.h" - -class NicThroughputThresholdMonitor : public GenericThresholdMonitor > -{ -public: - NicThroughputThresholdMonitor( const std::string& source_id, const std::string& resource_id, QueryFunction threshold, QueryFunction measured ); - - static std::string GetMessageClass(){ return "NIC_THROUGHPUT"; } - - bool is_threshold_exceeded() const - { - if (get_threshold_value() < 0 ) return false; - return this->GenericThresholdMonitor< float,std::greater_equal >::is_threshold_exceeded(); - } - - void update() - { - if (get_threshold_value() < 0 ) return; - this->GenericThresholdMonitor< float,std::greater_equal >::update(); - } -}; - -#endif diff --git a/GPP/cpp/reports/ThresholdMonitor.cpp b/GPP/cpp/reports/ThresholdMonitor.cpp new file mode 100644 index 000000000..d815d5869 --- /dev/null +++ b/GPP/cpp/reports/ThresholdMonitor.cpp @@ -0,0 +1,128 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK GPP. + * + * REDHAWK GPP is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include + +#include "ThresholdMonitor.h" + +ThresholdMonitor::ThresholdMonitor(const std::string& resource_id, const std::string& threshold_class): + resource_id_(resource_id), + threshold_class_(threshold_class), + enabled_(true), + prev_threshold_exceeded_(false) +{ +} + +const std::string& ThresholdMonitor::get_resource_id() const +{ + return resource_id_; +} + +const std::string& ThresholdMonitor::get_threshold_class() const +{ + return threshold_class_; +} + +bool ThresholdMonitor::is_enabled() const +{ + return enabled_; +} + +void ThresholdMonitor::enable() +{ + enabled_ = true; + update(); +} + +void ThresholdMonitor::disable() +{ + enabled_ = false; + update(); +} + +void ThresholdMonitor::update() +{ + if (enabled_) { + update_threshold(); + } + if (prev_threshold_exceeded_ != is_threshold_exceeded()) { + notification_(this); + } + prev_threshold_exceeded_ = is_threshold_exceeded(); +} + +bool ThresholdMonitor::is_threshold_exceeded() const +{ + if (!enabled_) return false; + return check_threshold(); +} + + +void FunctionThresholdMonitor::update_threshold() +{ + exceeded_ = callback_(this); +} + +bool FunctionThresholdMonitor::check_threshold() const +{ + return exceeded_; +} + + +ThresholdMonitorSet::ThresholdMonitorSet(const std::string& resource_id, const std::string& threshold_class) : + ThresholdMonitor(resource_id, threshold_class) +{ +} + +void ThresholdMonitorSet::add_monitor(const boost::shared_ptr& monitor) +{ + monitors_.push_back(monitor); +} + +void ThresholdMonitorSet::enable() +{ + std::for_each(monitors_.begin(), monitors_.end(), boost::bind(&ThresholdMonitor::enable, _1)); + ThresholdMonitor::enable(); +} + +void ThresholdMonitorSet::disable() +{ + std::for_each(monitors_.begin(), monitors_.end(), boost::bind(&ThresholdMonitor::disable, _1)); + ThresholdMonitor::disable(); +} + +void ThresholdMonitorSet::update_threshold() +{ + std::for_each(monitors_.begin(), monitors_.end(), boost::bind(&ThresholdMonitor::update, _1)); +} + 
+bool ThresholdMonitorSet::check_threshold() const +{ + if (monitors_.empty()) { + return false; + } + + for (MonitorList::const_iterator monitor = monitors_.begin(); monitor != monitors_.end(); ++monitor) { + if (!((*monitor)->is_threshold_exceeded())) { + return false; + } + } + return true; +} diff --git a/GPP/cpp/reports/ThresholdMonitor.h b/GPP/cpp/reports/ThresholdMonitor.h index 422ba8fb9..d1fdc6b94 100644 --- a/GPP/cpp/reports/ThresholdMonitor.h +++ b/GPP/cpp/reports/ThresholdMonitor.h @@ -19,149 +19,93 @@ */ #ifndef THRESHOLD_MONITOR_H_ #define THRESHOLD_MONITOR_H_ + #include -#include -#include -#include -#include + +#include + +#include #include "utils/Updateable.h" -#include "utils/EventDispatcher.h" -#include "Reporting.h" -#include "utils/ConversionWrapper.h" -#include "struct_props.h" -class ThresholdMonitor : public Updateable, public EventDispatcherMixin +class ThresholdMonitor : public Updateable { public: - ThresholdMonitor( const std::string& message_class, const std::string& resource_id, const bool enableDispatch=true): - _enable_dispatch( enableDispatch), - resource_id_(resource_id), - message_class_(message_class) - {} - - ThresholdMonitor( const std::string& source_id, const std::string& resource_id, const std::string& message_class, const bool enableDispatch=true): - _enable_dispatch(enableDispatch), - source_id_(source_id), - resource_id_(resource_id), - message_class_(message_class) - {} - - virtual void update() = 0; - //void report(){ update(); } - - virtual std::string get_threshold() const = 0; - virtual std::string get_measured() const = 0; - virtual bool is_threshold_exceeded() const = 0; - void enable_dispatch() { _enable_dispatch=true;} - void disable_dispatch() { _enable_dispatch=false;} - std::string get_source_id() const{ return source_id_; } - std::string get_resource_id() const{ return resource_id_; } - std::string get_message_class() const{ return message_class_; } + ThresholdMonitor(const std::string& resource_id, const 
std::string& threshold_class); -protected: - void dispatch_message() const - { - if ( !_enable_dispatch ) return; - - threshold_event_struct message; - message.source_id = get_source_id(); - message.resource_id = get_resource_id(); - message.threshold_class = get_message_class(); - message.type = get_message_type(); - message.threshold_value = get_threshold(); - message.measured_value = get_measured(); - message.message = get_message_string(); - message.timestamp = time(NULL); - - dispatch(message); - } - std::string get_message_type() const - { - return is_threshold_exceeded() ? "THRESHOLD_EXCEEDED" : "THRESHOLD_NOT_EXCEEDED"; - } - std::string get_message_string() const - { - std::stringstream sstr; - std::string exceeded_or_not( is_threshold_exceeded() ? "" : "not " ); + const std::string& get_resource_id() const; + const std::string& get_threshold_class() const; + + bool is_enabled() const; + virtual void enable(); + virtual void disable(); - sstr << get_message_class() << " threshold " << exceeded_or_not << "exceeded " - << "(resource_id=" << get_resource_id() - << " threshold_value=" << get_threshold() - << " measured_value=" << get_measured() << ")"; + void update(); - return sstr.str(); + bool is_threshold_exceeded() const; + + template + void add_listener(Target target, Func func) + { + notification_.add(target, func); } - bool _enable_dispatch; +protected: + virtual void update_threshold() = 0; + virtual bool check_threshold() const = 0; -private: - const std::string source_id_; const std::string resource_id_; - const std::string message_class_; + const std::string threshold_class_; + bool enabled_; + bool prev_threshold_exceeded_; + ossie::notification notification_; }; -template > -class GenericThresholdMonitor : public ThresholdMonitor +class FunctionThresholdMonitor : public ThresholdMonitor { public: - typedef DATA_TYPE DataType; - typedef boost::function< DataType() > QueryFunction; - -public: - GenericThresholdMonitor( const std::string& 
message_class, const std::string& resource_id, QueryFunction threshold, QueryFunction measured, const bool enableDispatch=true ): -ThresholdMonitor(message_class, resource_id, enableDispatch), - threshold_(threshold), - measured_(measured), - threshold_value_( threshold() ), - measured_value_( measured() ), - prev_threshold_exceeded_(false) + template + FunctionThresholdMonitor(const std::string& resource_id, const std::string& threshold_class, Func func) : + ThresholdMonitor(resource_id, threshold_class), + callback_(func), + exceeded_(false) { } - GenericThresholdMonitor( const std::string& source_id, const std::string& resource_id, const std::string& message_class, QueryFunction threshold, QueryFunction measured, const bool enableDispatch=true ): -ThresholdMonitor(source_id, resource_id, message_class, enableDispatch ), - threshold_(threshold), - measured_(measured), - threshold_value_( threshold() ), - measured_value_( measured() ), - prev_threshold_exceeded_(false) + template + FunctionThresholdMonitor(const std::string& resource_id, const std::string& threshold_class, + Target target, Func func) : + ThresholdMonitor(resource_id, threshold_class), + callback_(target, func), + exceeded_(false) { } - void update() - { - threshold_value_ = threshold_(); - measured_value_ = measured_(); - if( prev_threshold_exceeded_ != is_threshold_exceeded() ) - { - dispatch_message(); - } - - prev_threshold_exceeded_ = is_threshold_exceeded(); - } +private: + virtual void update_threshold(); + virtual bool check_threshold() const; - std::string get_threshold() const{ return boost::lexical_cast(threshold_value_); } - std::string get_measured() const{ return boost::lexical_cast(measured_value_); } + redhawk::callback callback_; + bool exceeded_; +}; - bool is_threshold_exceeded() const - { - return COMPARISON_FUNCTION()( get_measured_value(), get_threshold_value() ); - } +class ThresholdMonitorSet : public ThresholdMonitor +{ +public: + ThresholdMonitorSet(const std::string& 
resource_id, const std::string& threshold_class); + + void add_monitor(const boost::shared_ptr& monitor); - DataType get_threshold_value() const { return threshold_value_; } - DataType get_measured_value() const { return measured_value_; } + virtual void enable(); + virtual void disable(); private: - QueryFunction threshold_; - QueryFunction measured_; + virtual void update_threshold(); + virtual bool check_threshold() const; - DataType threshold_value_; - DataType measured_value_; - bool prev_threshold_exceeded_; + typedef std::vector< boost::shared_ptr > MonitorList; + MonitorList monitors_; }; - - #endif diff --git a/GPP/cpp/struct_props.h b/GPP/cpp/struct_props.h index 75e5f3f37..30ecc02c3 100644 --- a/GPP/cpp/struct_props.h +++ b/GPP/cpp/struct_props.h @@ -4,15 +4,15 @@ * * This file is part of REDHAWK GPP. * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. + * REDHAWK GPP is free software: you can redistribute it and/or modify it under + * the terms of the GNU Lesser General Public License as published by the Free + * Software Foundation, either version 3 of the License, or (at your option) any + * later version. * * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more + * details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see http://www.gnu.org/licenses/. 
@@ -28,19 +28,26 @@ #include #include -#include "affinity_struct.h" +#include struct nic_allocation_struct { nic_allocation_struct () { + identifier = ""; data_rate = 0.0; data_size = 1; multicast_support = "False"; - }; + ip_addressable = ""; + interface = ""; + } static std::string getId() { return std::string("nic_allocation"); - }; + } + + static const char* getFormat() { + return "sfhsss"; + } std::string identifier; float data_rate; @@ -53,77 +60,44 @@ struct nic_allocation_struct { inline bool operator>>= (const CORBA::Any& a, nic_allocation_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("nic_allocation::identifier", props[idx].id)) { - if (!(props[idx].value >>= s.identifier)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation::data_rate", props[idx].id)) { - if (!(props[idx].value >>= s.data_rate)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation::data_size", props[idx].id)) { - if (!(props[idx].value >>= s.data_size)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation::multicast_support", props[idx].id)) { - if (!(props[idx].value >>= s.multicast_support)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation::ip_addressable", props[idx].id)) { - if (!(props[idx].value >>= s.ip_addressable)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation::interface", 
props[idx].id)) { - if (!(props[idx].value >>= s.interface)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("nic_allocation::identifier")) { + if (!(props["nic_allocation::identifier"] >>= s.identifier)) return false; + } + if (props.contains("nic_allocation::data_rate")) { + if (!(props["nic_allocation::data_rate"] >>= s.data_rate)) return false; + } + if (props.contains("nic_allocation::data_size")) { + if (!(props["nic_allocation::data_size"] >>= s.data_size)) return false; + } + if (props.contains("nic_allocation::multicast_support")) { + if (!(props["nic_allocation::multicast_support"] >>= s.multicast_support)) return false; + } + if (props.contains("nic_allocation::ip_addressable")) { + if (!(props["nic_allocation::ip_addressable"] >>= s.ip_addressable)) return false; + } + if (props.contains("nic_allocation::interface")) { + if (!(props["nic_allocation::interface"] >>= s.interface)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const nic_allocation_struct& s) { - CF::Properties props; - props.length(6); - props[0].id = CORBA::string_dup("nic_allocation::identifier"); - props[0].value <<= s.identifier; - props[1].id = CORBA::string_dup("nic_allocation::data_rate"); - props[1].value <<= s.data_rate; - props[2].id = CORBA::string_dup("nic_allocation::data_size"); - props[2].value <<= s.data_size; - props[3].id = CORBA::string_dup("nic_allocation::multicast_support"); - props[3].value <<= s.multicast_support; - props[4].id = CORBA::string_dup("nic_allocation::ip_addressable"); - props[4].value <<= s.ip_addressable; - props[5].id = CORBA::string_dup("nic_allocation::interface"); - props[5].value <<= s.interface; + redhawk::PropertyMap props; + + props["nic_allocation::identifier"] = s.identifier; + + props["nic_allocation::data_rate"] = s.data_rate; + + 
props["nic_allocation::data_size"] = s.data_size; + + props["nic_allocation::multicast_support"] = s.multicast_support; + + props["nic_allocation::ip_addressable"] = s.ip_addressable; + + props["nic_allocation::interface"] = s.interface; a <<= props; -}; +} inline bool operator== (const nic_allocation_struct& s1, const nic_allocation_struct& s2) { if (s1.identifier!=s2.identifier) @@ -139,21 +113,25 @@ inline bool operator== (const nic_allocation_struct& s1, const nic_allocation_st if (s1.interface!=s2.interface) return false; return true; -}; +} inline bool operator!= (const nic_allocation_struct& s1, const nic_allocation_struct& s2) { return !(s1==s2); -}; +} struct advanced_struct { advanced_struct () { maximum_throughput_percentage = 80.0; - }; + } static std::string getId() { return std::string("advanced"); - }; + } + + static const char* getFormat() { + return "d"; + } double maximum_throughput_percentage; }; @@ -161,46 +139,53 @@ struct advanced_struct { inline bool operator>>= (const CORBA::Any& a, advanced_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("maximum_throughput_percentage", props[idx].id)) { - if (!(props[idx].value >>= s.maximum_throughput_percentage)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("maximum_throughput_percentage")) { + if (!(props["maximum_throughput_percentage"] >>= s.maximum_throughput_percentage)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const advanced_struct& s) { - CF::Properties props; - props.length(1); - props[0].id = CORBA::string_dup("maximum_throughput_percentage"); - props[0].value <<= s.maximum_throughput_percentage; + redhawk::PropertyMap props; + + 
props["maximum_throughput_percentage"] = s.maximum_throughput_percentage; a <<= props; -}; +} inline bool operator== (const advanced_struct& s1, const advanced_struct& s2) { if (s1.maximum_throughput_percentage!=s2.maximum_throughput_percentage) return false; return true; -}; +} inline bool operator!= (const advanced_struct& s1, const advanced_struct& s2) { return !(s1==s2); -}; +} + +namespace enums { + // Enumerated values for threshold_event + namespace threshold_event { + // Enumerated values for threshold_event::type + namespace type { + static const std::string Threshold_Exceeded = "THRESHOLD_EXCEEDED"; + static const std::string Threshold_Not_Exceeded = "THRESHOLD_NOT_EXCEEDED"; + } + } +} struct threshold_event_struct { threshold_event_struct () { - }; + } static std::string getId() { return std::string("threshold_event"); - }; + } + + static const char* getFormat() { + return "sssssssd"; + } std::string source_id; std::string resource_id; @@ -215,97 +200,54 @@ struct threshold_event_struct { inline bool operator>>= (const CORBA::Any& a, threshold_event_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("threshold_event::source_id", props[idx].id)) { - if (!(props[idx].value >>= s.source_id)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("threshold_event::resource_id", props[idx].id)) { - if (!(props[idx].value >>= s.resource_id)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("threshold_event::threshold_class", props[idx].id)) { - if (!(props[idx].value >>= s.threshold_class)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if 
(!strcmp("threshold_event::type", props[idx].id)) { - if (!(props[idx].value >>= s.type)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("threshold_event::threshold_value", props[idx].id)) { - if (!(props[idx].value >>= s.threshold_value)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("threshold_event::measured_value", props[idx].id)) { - if (!(props[idx].value >>= s.measured_value)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("threshold_event::message", props[idx].id)) { - if (!(props[idx].value >>= s.message)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("threshold_event::timestamp", props[idx].id)) { - if (!(props[idx].value >>= s.timestamp)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("threshold_event::source_id")) { + if (!(props["threshold_event::source_id"] >>= s.source_id)) return false; + } + if (props.contains("threshold_event::resource_id")) { + if (!(props["threshold_event::resource_id"] >>= s.resource_id)) return false; + } + if (props.contains("threshold_event::threshold_class")) { + if (!(props["threshold_event::threshold_class"] >>= s.threshold_class)) return false; + } + if (props.contains("threshold_event::type")) { + if (!(props["threshold_event::type"] >>= s.type)) return false; + } + if (props.contains("threshold_event::threshold_value")) { + if (!(props["threshold_event::threshold_value"] >>= s.threshold_value)) return false; + } + if 
(props.contains("threshold_event::measured_value")) { + if (!(props["threshold_event::measured_value"] >>= s.measured_value)) return false; + } + if (props.contains("threshold_event::message")) { + if (!(props["threshold_event::message"] >>= s.message)) return false; + } + if (props.contains("threshold_event::timestamp")) { + if (!(props["threshold_event::timestamp"] >>= s.timestamp)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const threshold_event_struct& s) { - CF::Properties props; - props.length(8); - props[0].id = CORBA::string_dup("threshold_event::source_id"); - props[0].value <<= s.source_id; - props[1].id = CORBA::string_dup("threshold_event::resource_id"); - props[1].value <<= s.resource_id; - props[2].id = CORBA::string_dup("threshold_event::threshold_class"); - props[2].value <<= s.threshold_class; - props[3].id = CORBA::string_dup("threshold_event::type"); - props[3].value <<= s.type; - props[4].id = CORBA::string_dup("threshold_event::threshold_value"); - props[4].value <<= s.threshold_value; - props[5].id = CORBA::string_dup("threshold_event::measured_value"); - props[5].value <<= s.measured_value; - props[6].id = CORBA::string_dup("threshold_event::message"); - props[6].value <<= s.message; - props[7].id = CORBA::string_dup("threshold_event::timestamp"); - props[7].value <<= s.timestamp; + redhawk::PropertyMap props; + + props["threshold_event::source_id"] = s.source_id; + + props["threshold_event::resource_id"] = s.resource_id; + + props["threshold_event::threshold_class"] = s.threshold_class; + + props["threshold_event::type"] = s.type; + + props["threshold_event::threshold_value"] = s.threshold_value; + + props["threshold_event::measured_value"] = s.measured_value; + + props["threshold_event::message"] = s.message; + + props["threshold_event::timestamp"] = s.timestamp; a <<= props; -}; +} inline bool operator== (const threshold_event_struct& s1, const threshold_event_struct& s2) { if (s1.source_id!=s2.source_id) 
@@ -325,40 +267,50 @@ inline bool operator== (const threshold_event_struct& s1, const threshold_event_ if (s1.timestamp!=s2.timestamp) return false; return true; -}; +} inline bool operator!= (const threshold_event_struct& s1, const threshold_event_struct& s2) { return !(s1==s2); -}; - +} struct thresholds_struct { thresholds_struct () { + ignore = false; cpu_idle = 10; load_avg = 80; - mem_free = 100LL; + mem_free = 10LL; nic_usage = 900; files_available = 3; threads = 3; - }; + shm_free = 10LL; + } static std::string getId() { return std::string("thresholds"); - }; + } + static const char* getFormat() { + return "bffliffl"; + } + + bool ignore; float cpu_idle; float load_avg; CORBA::LongLong mem_free; CORBA::Long nic_usage; float files_available; float threads; + CORBA::LongLong shm_free; }; inline bool operator>>= (const CORBA::Any& a, thresholds_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("ignore")) { + if (!(props["ignore"] >>= s.ignore)) return false; + } if (props.contains("cpu_idle")) { if (!(props["cpu_idle"] >>= s.cpu_idle)) return false; } @@ -377,12 +329,17 @@ inline bool operator>>= (const CORBA::Any& a, thresholds_struct& s) { if (props.contains("threads")) { if (!(props["threads"] >>= s.threads)) return false; } + if (props.contains("shm_free")) { + if (!(props["shm_free"] >>= s.shm_free)) return false; + } return true; } inline void operator<<= (CORBA::Any& a, const thresholds_struct& s) { redhawk::PropertyMap props; + props["ignore"] = s.ignore; + props["cpu_idle"] = s.cpu_idle; props["load_avg"] = s.load_avg; @@ -394,10 +351,14 @@ inline void operator<<= (CORBA::Any& a, const thresholds_struct& s) { props["files_available"] = s.files_available; props["threads"] = s.threads; + + props["shm_free"] = s.shm_free; a <<= props; } inline bool operator== (const thresholds_struct& s1, const thresholds_struct& s2) { + if (s1.ignore!=s2.ignore) 
+ return false; if (s1.cpu_idle!=s2.cpu_idle) return false; if (s1.load_avg!=s2.load_avg) @@ -410,6 +371,8 @@ inline bool operator== (const thresholds_struct& s1, const thresholds_struct& s2 return false; if (s1.threads!=s2.threads) return false; + if (s1.shm_free!=s2.shm_free) + return false; return true; } @@ -417,14 +380,372 @@ inline bool operator!= (const thresholds_struct& s1, const thresholds_struct& s2 return !(s1==s2); } +struct loadAverage_struct { + loadAverage_struct () + { + } + + static std::string getId() { + return std::string("DCE:9da85ebc-6503-48e7-af36-b77c7ad0c2b4"); + } + + static const char* getFormat() { + return "ddd"; + } + + double onemin; + double fivemin; + double fifteenmin; +}; + +inline bool operator>>= (const CORBA::Any& a, loadAverage_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("onemin")) { + if (!(props["onemin"] >>= s.onemin)) return false; + } + if (props.contains("fivemin")) { + if (!(props["fivemin"] >>= s.fivemin)) return false; + } + if (props.contains("fifteenmin")) { + if (!(props["fifteenmin"] >>= s.fifteenmin)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const loadAverage_struct& s) { + redhawk::PropertyMap props; + + props["onemin"] = s.onemin; + + props["fivemin"] = s.fivemin; + + props["fifteenmin"] = s.fifteenmin; + a <<= props; +} + +inline bool operator== (const loadAverage_struct& s1, const loadAverage_struct& s2) { + if (s1.onemin!=s2.onemin) + return false; + if (s1.fivemin!=s2.fivemin) + return false; + if (s1.fifteenmin!=s2.fifteenmin) + return false; + return true; +} + +inline bool operator!= (const loadAverage_struct& s1, const loadAverage_struct& s2) { + return !(s1==s2); +} + +struct gpp_limits_struct { + gpp_limits_struct () + { + } + + static std::string getId() { + return std::string("gpp_limits"); + } + + static const char* getFormat() { + return 
"iiii"; + } + + CORBA::Long current_threads; + CORBA::Long max_threads; + CORBA::Long current_open_files; + CORBA::Long max_open_files; +}; + +inline bool operator>>= (const CORBA::Any& a, gpp_limits_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("current_threads")) { + if (!(props["current_threads"] >>= s.current_threads)) return false; + } + if (props.contains("max_threads")) { + if (!(props["max_threads"] >>= s.max_threads)) return false; + } + if (props.contains("current_open_files")) { + if (!(props["current_open_files"] >>= s.current_open_files)) return false; + } + if (props.contains("max_open_files")) { + if (!(props["max_open_files"] >>= s.max_open_files)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const gpp_limits_struct& s) { + redhawk::PropertyMap props; + + props["current_threads"] = s.current_threads; + + props["max_threads"] = s.max_threads; + + props["current_open_files"] = s.current_open_files; + + props["max_open_files"] = s.max_open_files; + a <<= props; +} + +inline bool operator== (const gpp_limits_struct& s1, const gpp_limits_struct& s2) { + if (s1.current_threads!=s2.current_threads) + return false; + if (s1.max_threads!=s2.max_threads) + return false; + if (s1.current_open_files!=s2.current_open_files) + return false; + if (s1.max_open_files!=s2.max_open_files) + return false; + return true; +} + +inline bool operator!= (const gpp_limits_struct& s1, const gpp_limits_struct& s2) { + return !(s1==s2); +} + +struct sys_limits_struct { + sys_limits_struct () + { + } + + static std::string getId() { + return std::string("sys_limits"); + } + + static const char* getFormat() { + return "iiii"; + } + + CORBA::Long current_threads; + CORBA::Long max_threads; + CORBA::Long current_open_files; + CORBA::Long max_open_files; +}; + +inline bool operator>>= (const CORBA::Any& a, sys_limits_struct& s) { + 
CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("sys_limits::current_threads")) { + if (!(props["sys_limits::current_threads"] >>= s.current_threads)) return false; + } + if (props.contains("sys_limits::max_threads")) { + if (!(props["sys_limits::max_threads"] >>= s.max_threads)) return false; + } + if (props.contains("sys_limits::current_open_files")) { + if (!(props["sys_limits::current_open_files"] >>= s.current_open_files)) return false; + } + if (props.contains("sys_limits::max_open_files")) { + if (!(props["sys_limits::max_open_files"] >>= s.max_open_files)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const sys_limits_struct& s) { + redhawk::PropertyMap props; + + props["sys_limits::current_threads"] = s.current_threads; + + props["sys_limits::max_threads"] = s.max_threads; + + props["sys_limits::current_open_files"] = s.current_open_files; + + props["sys_limits::max_open_files"] = s.max_open_files; + a <<= props; +} + +inline bool operator== (const sys_limits_struct& s1, const sys_limits_struct& s2) { + if (s1.current_threads!=s2.current_threads) + return false; + if (s1.max_threads!=s2.max_threads) + return false; + if (s1.current_open_files!=s2.current_open_files) + return false; + if (s1.max_open_files!=s2.max_open_files) + return false; + return true; +} + +inline bool operator!= (const sys_limits_struct& s1, const sys_limits_struct& s2) { + return !(s1==s2); +} + +struct redhawk__reservation_request_struct { + redhawk__reservation_request_struct () + { + } + + static std::string getId() { + return std::string("redhawk::reservation_request"); + } + + static const char* getFormat() { + return "s[s][s]"; + } + + std::string obj_id; + std::vector kinds; + std::vector values; +}; + +inline bool operator>>= (const CORBA::Any& a, redhawk__reservation_request_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return 
false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("redhawk::reservation_request::obj_id")) { + if (!(props["redhawk::reservation_request::obj_id"] >>= s.obj_id)) return false; + } + if (props.contains("redhawk::reservation_request::kinds")) { + if (!(props["redhawk::reservation_request::kinds"] >>= s.kinds)) return false; + } + if (props.contains("redhawk::reservation_request::values")) { + if (!(props["redhawk::reservation_request::values"] >>= s.values)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const redhawk__reservation_request_struct& s) { + redhawk::PropertyMap props; + + props["redhawk::reservation_request::obj_id"] = s.obj_id; + + props["redhawk::reservation_request::kinds"] = s.kinds; + + props["redhawk::reservation_request::values"] = s.values; + a <<= props; +} + +inline bool operator== (const redhawk__reservation_request_struct& s1, const redhawk__reservation_request_struct& s2) { + if (s1.obj_id!=s2.obj_id) + return false; + if (s1.kinds!=s2.kinds) + return false; + if (s1.values!=s2.values) + return false; + return true; +} + +inline bool operator!= (const redhawk__reservation_request_struct& s1, const redhawk__reservation_request_struct& s2) { + return !(s1==s2); +} + +namespace enums { + // Enumerated values for affinity + namespace affinity { + // Enumerated values for affinity::exec_directive_class + namespace exec_directive_class { + static const std::string socket = "socket"; + static const std::string nic = "nic"; + static const std::string cpu = "cpu"; + static const std::string cgroup = "cgroup"; + } + } +} + +struct affinity_struct { + affinity_struct () + { + exec_directive_value = "0"; + exec_directive_class = "socket"; + force_override = false; + blacklist_cpus = ""; + deploy_per_socket = false; + disabled = true; + } + + static std::string getId() { + return std::string("affinity"); + } + + static const char* getFormat() { + return "ssbsbb"; + } + + 
std::string exec_directive_value; + std::string exec_directive_class; + bool force_override; + std::string blacklist_cpus; + bool deploy_per_socket; + bool disabled; +}; + +inline bool operator>>= (const CORBA::Any& a, affinity_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("affinity::exec_directive_value")) { + if (!(props["affinity::exec_directive_value"] >>= s.exec_directive_value)) return false; + } + if (props.contains("affinity::exec_directive_class")) { + if (!(props["affinity::exec_directive_class"] >>= s.exec_directive_class)) return false; + } + if (props.contains("affinity::force_override")) { + if (!(props["affinity::force_override"] >>= s.force_override)) return false; + } + if (props.contains("affinity::blacklist_cpus")) { + if (!(props["affinity::blacklist_cpus"] >>= s.blacklist_cpus)) return false; + } + if (props.contains("affinity::deploy_per_socket")) { + if (!(props["affinity::deploy_per_socket"] >>= s.deploy_per_socket)) return false; + } + if (props.contains("affinity::disabled")) { + if (!(props["affinity::disabled"] >>= s.disabled)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const affinity_struct& s) { + redhawk::PropertyMap props; + + props["affinity::exec_directive_value"] = s.exec_directive_value; + + props["affinity::exec_directive_class"] = s.exec_directive_class; + + props["affinity::force_override"] = s.force_override; + + props["affinity::blacklist_cpus"] = s.blacklist_cpus; + + props["affinity::deploy_per_socket"] = s.deploy_per_socket; + + props["affinity::disabled"] = s.disabled; + a <<= props; +} + +inline bool operator== (const affinity_struct& s1, const affinity_struct& s2) { + if (s1.exec_directive_value!=s2.exec_directive_value) + return false; + if (s1.exec_directive_class!=s2.exec_directive_class) + return false; + if (s1.force_override!=s2.force_override) + return false; 
+ if (s1.blacklist_cpus!=s2.blacklist_cpus) + return false; + if (s1.deploy_per_socket!=s2.deploy_per_socket) + return false; + if (s1.disabled!=s2.disabled) + return false; + return true; +} + +inline bool operator!= (const affinity_struct& s1, const affinity_struct& s2) { + return !(s1==s2); +} + struct nic_allocation_status_struct_struct { nic_allocation_status_struct_struct () { - }; + } static std::string getId() { return std::string("nic_allocation_status_struct"); - }; + } + + static const char* getFormat() { + return "sfhsss"; + } std::string identifier; float data_rate; @@ -437,77 +758,44 @@ struct nic_allocation_status_struct_struct { inline bool operator>>= (const CORBA::Any& a, nic_allocation_status_struct_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("nic_allocation_status::identifier", props[idx].id)) { - if (!(props[idx].value >>= s.identifier)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation_status::data_rate", props[idx].id)) { - if (!(props[idx].value >>= s.data_rate)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation_status::data_size", props[idx].id)) { - if (!(props[idx].value >>= s.data_size)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation_status::multicast_support", props[idx].id)) { - if (!(props[idx].value >>= s.multicast_support)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation_status::ip_addressable", props[idx].id)) { - if (!(props[idx].value 
>>= s.ip_addressable)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_allocation_status::interface", props[idx].id)) { - if (!(props[idx].value >>= s.interface)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("nic_allocation_status::identifier")) { + if (!(props["nic_allocation_status::identifier"] >>= s.identifier)) return false; + } + if (props.contains("nic_allocation_status::data_rate")) { + if (!(props["nic_allocation_status::data_rate"] >>= s.data_rate)) return false; + } + if (props.contains("nic_allocation_status::data_size")) { + if (!(props["nic_allocation_status::data_size"] >>= s.data_size)) return false; + } + if (props.contains("nic_allocation_status::multicast_support")) { + if (!(props["nic_allocation_status::multicast_support"] >>= s.multicast_support)) return false; + } + if (props.contains("nic_allocation_status::ip_addressable")) { + if (!(props["nic_allocation_status::ip_addressable"] >>= s.ip_addressable)) return false; + } + if (props.contains("nic_allocation_status::interface")) { + if (!(props["nic_allocation_status::interface"] >>= s.interface)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const nic_allocation_status_struct_struct& s) { - CF::Properties props; - props.length(6); - props[0].id = CORBA::string_dup("nic_allocation_status::identifier"); - props[0].value <<= s.identifier; - props[1].id = CORBA::string_dup("nic_allocation_status::data_rate"); - props[1].value <<= s.data_rate; - props[2].id = CORBA::string_dup("nic_allocation_status::data_size"); - props[2].value <<= s.data_size; - props[3].id = CORBA::string_dup("nic_allocation_status::multicast_support"); - props[3].value <<= s.multicast_support; - props[4].id = 
CORBA::string_dup("nic_allocation_status::ip_addressable"); - props[4].value <<= s.ip_addressable; - props[5].id = CORBA::string_dup("nic_allocation_status::interface"); - props[5].value <<= s.interface; + redhawk::PropertyMap props; + + props["nic_allocation_status::identifier"] = s.identifier; + + props["nic_allocation_status::data_rate"] = s.data_rate; + + props["nic_allocation_status::data_size"] = s.data_size; + + props["nic_allocation_status::multicast_support"] = s.multicast_support; + + props["nic_allocation_status::ip_addressable"] = s.ip_addressable; + + props["nic_allocation_status::interface"] = s.interface; a <<= props; -}; +} inline bool operator== (const nic_allocation_status_struct_struct& s1, const nic_allocation_status_struct_struct& s2) { if (s1.identifier!=s2.identifier) @@ -523,25 +811,55 @@ inline bool operator== (const nic_allocation_status_struct_struct& s1, const nic if (s1.interface!=s2.interface) return false; return true; -}; +} inline bool operator!= (const nic_allocation_status_struct_struct& s1, const nic_allocation_status_struct_struct& s2) { return !(s1==s2); -}; +} struct nic_metrics_struct_struct { nic_metrics_struct_struct () { + interface = ""; + mac_address = ""; rate = 0.0; + ipv4_address = ""; + ipv4_netmask = ""; + ipv4_broadcast = ""; + ipv6_address = ""; + ipv6_netmask = ""; + ipv6_scope = ""; + flags = ""; + module = ""; + mtu = ""; + state = ""; + rx_bytes = ""; + rx_compressed = ""; + rx_crc_errors = ""; + rx_dropped = ""; + rx_errors = ""; + rx_packets = ""; + tx_bytes = ""; + tx_compressed = ""; + tx_dropped = ""; + tx_errors = ""; + tx_packets = ""; + tx_queue_len = ""; + vlans = ""; multicast_support = false; rate_allocated = 0; + time_string_utc = ""; time = 0; current_throughput = 0; - }; + } static std::string getId() { return std::string("nic_metrics_struct"); - }; + } + + static const char* getFormat() { + return "ssdsssssssssssssssssssssssbdsdd"; + } std::string interface; std::string mac_address; @@ -579,327 
+897,169 @@ struct nic_metrics_struct_struct { inline bool operator>>= (const CORBA::Any& a, nic_metrics_struct_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("nic_metrics::interface", props[idx].id)) { - if (!(props[idx].value >>= s.interface)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::mac_address", props[idx].id)) { - if (!(props[idx].value >>= s.mac_address)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::rate", props[idx].id)) { - if (!(props[idx].value >>= s.rate)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::ipv4_address", props[idx].id)) { - if (!(props[idx].value >>= s.ipv4_address)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::ipv4_netmask", props[idx].id)) { - if (!(props[idx].value >>= s.ipv4_netmask)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::ipv4_broadcast", props[idx].id)) { - if (!(props[idx].value >>= s.ipv4_broadcast)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::ipv6_address", props[idx].id)) { - if (!(props[idx].value >>= s.ipv6_address)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if 
(!strcmp("nic_metrics::ipv6_netmask", props[idx].id)) { - if (!(props[idx].value >>= s.ipv6_netmask)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::ipv6_scope", props[idx].id)) { - if (!(props[idx].value >>= s.ipv6_scope)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::flags", props[idx].id)) { - if (!(props[idx].value >>= s.flags)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::module", props[idx].id)) { - if (!(props[idx].value >>= s.module)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::mtu", props[idx].id)) { - if (!(props[idx].value >>= s.mtu)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::state", props[idx].id)) { - if (!(props[idx].value >>= s.state)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::rx_bytes", props[idx].id)) { - if (!(props[idx].value >>= s.rx_bytes)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::rx_compressed", props[idx].id)) { - if (!(props[idx].value >>= s.rx_compressed)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::rx_crc_errors", props[idx].id)) { - if (!(props[idx].value >>= s.rx_crc_errors)) { - 
CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::rx_dropped", props[idx].id)) { - if (!(props[idx].value >>= s.rx_dropped)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::rx_errors", props[idx].id)) { - if (!(props[idx].value >>= s.rx_errors)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::rx_packets", props[idx].id)) { - if (!(props[idx].value >>= s.rx_packets)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::tx_bytes", props[idx].id)) { - if (!(props[idx].value >>= s.tx_bytes)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::tx_compressed", props[idx].id)) { - if (!(props[idx].value >>= s.tx_compressed)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::tx_dropped", props[idx].id)) { - if (!(props[idx].value >>= s.tx_dropped)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::tx_errors", props[idx].id)) { - if (!(props[idx].value >>= s.tx_errors)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::tx_packets", props[idx].id)) { - if (!(props[idx].value >>= s.tx_packets)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != 
CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::tx_queue_len", props[idx].id)) { - if (!(props[idx].value >>= s.tx_queue_len)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::vlans", props[idx].id)) { - if (!(props[idx].value >>= s.vlans)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::multicast_support", props[idx].id)) { - if (!(props[idx].value >>= s.multicast_support)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::rate_allocated", props[idx].id)) { - if (!(props[idx].value >>= s.rate_allocated)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::time_string_utc", props[idx].id)) { - if (!(props[idx].value >>= s.time_string_utc)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::time", props[idx].id)) { - if (!(props[idx].value >>= s.time)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_metrics::current_throughput", props[idx].id)) { - if (!(props[idx].value >>= s.current_throughput)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("nic_metrics::interface")) { + if (!(props["nic_metrics::interface"] >>= s.interface)) return false; + } + if 
(props.contains("nic_metrics::mac_address")) { + if (!(props["nic_metrics::mac_address"] >>= s.mac_address)) return false; + } + if (props.contains("nic_metrics::rate")) { + if (!(props["nic_metrics::rate"] >>= s.rate)) return false; + } + if (props.contains("nic_metrics::ipv4_address")) { + if (!(props["nic_metrics::ipv4_address"] >>= s.ipv4_address)) return false; + } + if (props.contains("nic_metrics::ipv4_netmask")) { + if (!(props["nic_metrics::ipv4_netmask"] >>= s.ipv4_netmask)) return false; + } + if (props.contains("nic_metrics::ipv4_broadcast")) { + if (!(props["nic_metrics::ipv4_broadcast"] >>= s.ipv4_broadcast)) return false; + } + if (props.contains("nic_metrics::ipv6_address")) { + if (!(props["nic_metrics::ipv6_address"] >>= s.ipv6_address)) return false; + } + if (props.contains("nic_metrics::ipv6_netmask")) { + if (!(props["nic_metrics::ipv6_netmask"] >>= s.ipv6_netmask)) return false; + } + if (props.contains("nic_metrics::ipv6_scope")) { + if (!(props["nic_metrics::ipv6_scope"] >>= s.ipv6_scope)) return false; + } + if (props.contains("nic_metrics::flags")) { + if (!(props["nic_metrics::flags"] >>= s.flags)) return false; + } + if (props.contains("nic_metrics::module")) { + if (!(props["nic_metrics::module"] >>= s.module)) return false; + } + if (props.contains("nic_metrics::mtu")) { + if (!(props["nic_metrics::mtu"] >>= s.mtu)) return false; + } + if (props.contains("nic_metrics::state")) { + if (!(props["nic_metrics::state"] >>= s.state)) return false; + } + if (props.contains("nic_metrics::rx_bytes")) { + if (!(props["nic_metrics::rx_bytes"] >>= s.rx_bytes)) return false; + } + if (props.contains("nic_metrics::rx_compressed")) { + if (!(props["nic_metrics::rx_compressed"] >>= s.rx_compressed)) return false; + } + if (props.contains("nic_metrics::rx_crc_errors")) { + if (!(props["nic_metrics::rx_crc_errors"] >>= s.rx_crc_errors)) return false; + } + if (props.contains("nic_metrics::rx_dropped")) { + if (!(props["nic_metrics::rx_dropped"] >>= 
s.rx_dropped)) return false; + } + if (props.contains("nic_metrics::rx_errors")) { + if (!(props["nic_metrics::rx_errors"] >>= s.rx_errors)) return false; + } + if (props.contains("nic_metrics::rx_packets")) { + if (!(props["nic_metrics::rx_packets"] >>= s.rx_packets)) return false; + } + if (props.contains("nic_metrics::tx_bytes")) { + if (!(props["nic_metrics::tx_bytes"] >>= s.tx_bytes)) return false; + } + if (props.contains("nic_metrics::tx_compressed")) { + if (!(props["nic_metrics::tx_compressed"] >>= s.tx_compressed)) return false; + } + if (props.contains("nic_metrics::tx_dropped")) { + if (!(props["nic_metrics::tx_dropped"] >>= s.tx_dropped)) return false; + } + if (props.contains("nic_metrics::tx_errors")) { + if (!(props["nic_metrics::tx_errors"] >>= s.tx_errors)) return false; + } + if (props.contains("nic_metrics::tx_packets")) { + if (!(props["nic_metrics::tx_packets"] >>= s.tx_packets)) return false; + } + if (props.contains("nic_metrics::tx_queue_len")) { + if (!(props["nic_metrics::tx_queue_len"] >>= s.tx_queue_len)) return false; + } + if (props.contains("nic_metrics::vlans")) { + if (!(props["nic_metrics::vlans"] >>= s.vlans)) return false; + } + if (props.contains("nic_metrics::multicast_support")) { + if (!(props["nic_metrics::multicast_support"] >>= s.multicast_support)) return false; + } + if (props.contains("nic_metrics::rate_allocated")) { + if (!(props["nic_metrics::rate_allocated"] >>= s.rate_allocated)) return false; + } + if (props.contains("nic_metrics::time_string_utc")) { + if (!(props["nic_metrics::time_string_utc"] >>= s.time_string_utc)) return false; + } + if (props.contains("nic_metrics::time")) { + if (!(props["nic_metrics::time"] >>= s.time)) return false; + } + if (props.contains("nic_metrics::current_throughput")) { + if (!(props["nic_metrics::current_throughput"] >>= s.current_throughput)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const nic_metrics_struct_struct& s) { - CF::Properties 
props; - props.length(31); - props[0].id = CORBA::string_dup("nic_metrics::interface"); - props[0].value <<= s.interface; - props[1].id = CORBA::string_dup("nic_metrics::mac_address"); - props[1].value <<= s.mac_address; - props[2].id = CORBA::string_dup("nic_metrics::rate"); - props[2].value <<= s.rate; - props[3].id = CORBA::string_dup("nic_metrics::ipv4_address"); - props[3].value <<= s.ipv4_address; - props[4].id = CORBA::string_dup("nic_metrics::ipv4_netmask"); - props[4].value <<= s.ipv4_netmask; - props[5].id = CORBA::string_dup("nic_metrics::ipv4_broadcast"); - props[5].value <<= s.ipv4_broadcast; - props[6].id = CORBA::string_dup("nic_metrics::ipv6_address"); - props[6].value <<= s.ipv6_address; - props[7].id = CORBA::string_dup("nic_metrics::ipv6_netmask"); - props[7].value <<= s.ipv6_netmask; - props[8].id = CORBA::string_dup("nic_metrics::ipv6_scope"); - props[8].value <<= s.ipv6_scope; - props[9].id = CORBA::string_dup("nic_metrics::flags"); - props[9].value <<= s.flags; - props[10].id = CORBA::string_dup("nic_metrics::module"); - props[10].value <<= s.module; - props[11].id = CORBA::string_dup("nic_metrics::mtu"); - props[11].value <<= s.mtu; - props[12].id = CORBA::string_dup("nic_metrics::state"); - props[12].value <<= s.state; - props[13].id = CORBA::string_dup("nic_metrics::rx_bytes"); - props[13].value <<= s.rx_bytes; - props[14].id = CORBA::string_dup("nic_metrics::rx_compressed"); - props[14].value <<= s.rx_compressed; - props[15].id = CORBA::string_dup("nic_metrics::rx_crc_errors"); - props[15].value <<= s.rx_crc_errors; - props[16].id = CORBA::string_dup("nic_metrics::rx_dropped"); - props[16].value <<= s.rx_dropped; - props[17].id = CORBA::string_dup("nic_metrics::rx_errors"); - props[17].value <<= s.rx_errors; - props[18].id = CORBA::string_dup("nic_metrics::rx_packets"); - props[18].value <<= s.rx_packets; - props[19].id = CORBA::string_dup("nic_metrics::tx_bytes"); - props[19].value <<= s.tx_bytes; - props[20].id = 
CORBA::string_dup("nic_metrics::tx_compressed"); - props[20].value <<= s.tx_compressed; - props[21].id = CORBA::string_dup("nic_metrics::tx_dropped"); - props[21].value <<= s.tx_dropped; - props[22].id = CORBA::string_dup("nic_metrics::tx_errors"); - props[22].value <<= s.tx_errors; - props[23].id = CORBA::string_dup("nic_metrics::tx_packets"); - props[23].value <<= s.tx_packets; - props[24].id = CORBA::string_dup("nic_metrics::tx_queue_len"); - props[24].value <<= s.tx_queue_len; - props[25].id = CORBA::string_dup("nic_metrics::vlans"); - props[25].value <<= s.vlans; - props[26].id = CORBA::string_dup("nic_metrics::multicast_support"); - props[26].value <<= s.multicast_support; - props[27].id = CORBA::string_dup("nic_metrics::rate_allocated"); - props[27].value <<= s.rate_allocated; - props[28].id = CORBA::string_dup("nic_metrics::time_string_utc"); - props[28].value <<= s.time_string_utc; - props[29].id = CORBA::string_dup("nic_metrics::time"); - props[29].value <<= s.time; - props[30].id = CORBA::string_dup("nic_metrics::current_throughput"); - props[30].value <<= s.current_throughput; + redhawk::PropertyMap props; + + props["nic_metrics::interface"] = s.interface; + + props["nic_metrics::mac_address"] = s.mac_address; + + props["nic_metrics::rate"] = s.rate; + + props["nic_metrics::ipv4_address"] = s.ipv4_address; + + props["nic_metrics::ipv4_netmask"] = s.ipv4_netmask; + + props["nic_metrics::ipv4_broadcast"] = s.ipv4_broadcast; + + props["nic_metrics::ipv6_address"] = s.ipv6_address; + + props["nic_metrics::ipv6_netmask"] = s.ipv6_netmask; + + props["nic_metrics::ipv6_scope"] = s.ipv6_scope; + + props["nic_metrics::flags"] = s.flags; + + props["nic_metrics::module"] = s.module; + + props["nic_metrics::mtu"] = s.mtu; + + props["nic_metrics::state"] = s.state; + + props["nic_metrics::rx_bytes"] = s.rx_bytes; + + props["nic_metrics::rx_compressed"] = s.rx_compressed; + + props["nic_metrics::rx_crc_errors"] = s.rx_crc_errors; + + props["nic_metrics::rx_dropped"] 
= s.rx_dropped; + + props["nic_metrics::rx_errors"] = s.rx_errors; + + props["nic_metrics::rx_packets"] = s.rx_packets; + + props["nic_metrics::tx_bytes"] = s.tx_bytes; + + props["nic_metrics::tx_compressed"] = s.tx_compressed; + + props["nic_metrics::tx_dropped"] = s.tx_dropped; + + props["nic_metrics::tx_errors"] = s.tx_errors; + + props["nic_metrics::tx_packets"] = s.tx_packets; + + props["nic_metrics::tx_queue_len"] = s.tx_queue_len; + + props["nic_metrics::vlans"] = s.vlans; + + props["nic_metrics::multicast_support"] = s.multicast_support; + + props["nic_metrics::rate_allocated"] = s.rate_allocated; + + props["nic_metrics::time_string_utc"] = s.time_string_utc; + + props["nic_metrics::time"] = s.time; + + props["nic_metrics::current_throughput"] = s.current_throughput; a <<= props; -}; +} inline bool operator== (const nic_metrics_struct_struct& s1, const nic_metrics_struct_struct& s2) { if (s1.interface!=s2.interface) @@ -965,20 +1125,24 @@ inline bool operator== (const nic_metrics_struct_struct& s1, const nic_metrics_s if (s1.current_throughput!=s2.current_throughput) return false; return true; -}; +} inline bool operator!= (const nic_metrics_struct_struct& s1, const nic_metrics_struct_struct& s2) { return !(s1==s2); -}; +} struct interfaces_struct { interfaces_struct () { - }; + } static std::string getId() { return std::string("interfaces"); - }; + } + + static const char* getFormat() { + return "sfs"; + } std::string interface; float throughput; @@ -986,135 +1150,58 @@ struct interfaces_struct { }; inline bool operator>>= (const CORBA::Any& a, interfaces_struct& s) { - CF::Properties* temp; - if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("interface", props[idx].id)) { - if (!(props[idx].value >>= s.interface)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if 
(!strcmp("throughput", props[idx].id)) { - if (!(props[idx].value >>= s.throughput)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("vlans", props[idx].id)) { - if (!(props[idx].value >>= s.vlans)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - } - return true; -}; - -inline void operator<<= (CORBA::Any& a, const interfaces_struct& s) { - CF::Properties props; - props.length(3); - props[0].id = CORBA::string_dup("interface"); - props[0].value <<= s.interface; - props[1].id = CORBA::string_dup("throughput"); - props[1].value <<= s.throughput; - props[2].id = CORBA::string_dup("vlans"); - props[2].value <<= s.vlans; - a <<= props; -}; - -inline bool operator== (const interfaces_struct& s1, const interfaces_struct& s2) { - if (s1.interface!=s2.interface) - return false; - if (s1.throughput!=s2.throughput) - return false; - if (s1.vlans!=s2.vlans) - return false; - return true; -}; - -inline bool operator!= (const interfaces_struct& s1, const interfaces_struct& s2) { - return !(s1==s2); -}; - -struct ulimit_struct { - ulimit_struct () - { - }; - - static std::string getId() { - return std::string("ulimit"); - }; - - CORBA::Long current_threads; - CORBA::Long max_threads; - CORBA::Long current_open_files; - CORBA::Long max_open_files; -}; - -inline bool operator>>= (const CORBA::Any& a, ulimit_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); - if (props.contains("current_threads")) { - if (!(props["current_threads"] >>= s.current_threads)) return false; - } - if (props.contains("max_threads")) { - if (!(props["max_threads"] >>= s.max_threads)) return false; + if (props.contains("interface")) { + if (!(props["interface"] >>= s.interface)) return false; } - if 
(props.contains("current_open_files")) { - if (!(props["current_open_files"] >>= s.current_open_files)) return false; + if (props.contains("throughput")) { + if (!(props["throughput"] >>= s.throughput)) return false; } - if (props.contains("max_open_files")) { - if (!(props["max_open_files"] >>= s.max_open_files)) return false; + if (props.contains("vlans")) { + if (!(props["vlans"] >>= s.vlans)) return false; } return true; } -inline void operator<<= (CORBA::Any& a, const ulimit_struct& s) { +inline void operator<<= (CORBA::Any& a, const interfaces_struct& s) { redhawk::PropertyMap props; - props["current_threads"] = s.current_threads; - - props["max_threads"] = s.max_threads; + props["interface"] = s.interface; - props["current_open_files"] = s.current_open_files; + props["throughput"] = s.throughput; - props["max_open_files"] = s.max_open_files; + props["vlans"] = s.vlans; a <<= props; } -inline bool operator== (const ulimit_struct& s1, const ulimit_struct& s2) { - if (s1.current_threads!=s2.current_threads) - return false; - if (s1.max_threads!=s2.max_threads) +inline bool operator== (const interfaces_struct& s1, const interfaces_struct& s2) { + if (s1.interface!=s2.interface) return false; - if (s1.current_open_files!=s2.current_open_files) + if (s1.throughput!=s2.throughput) return false; - if (s1.max_open_files!=s2.max_open_files) + if (s1.vlans!=s2.vlans) return false; return true; } -inline bool operator!= (const ulimit_struct& s1, const ulimit_struct& s2) { +inline bool operator!= (const interfaces_struct& s1, const interfaces_struct& s2) { return !(s1==s2); } struct utilization_entry_struct { utilization_entry_struct () { - }; + } static std::string getId() { return std::string("utilization_entry"); - }; + } + + static const char* getFormat() { + return "sffff"; + } std::string description; float component_load; @@ -1177,69 +1264,19 @@ inline bool operator== (const utilization_entry_struct& s1, const utilization_en inline bool operator!= (const 
utilization_entry_struct& s1, const utilization_entry_struct& s2) { return !(s1==s2); } -struct loadAverage_struct { - loadAverage_struct () - { - }; - - static std::string getId() { - return std::string("DCE:9da85ebc-6503-48e7-af36-b77c7ad0c2b4"); - }; - - double onemin; - double fivemin; - double fifteenmin; -}; - -inline bool operator>>= (const CORBA::Any& a, loadAverage_struct& s) { - CF::Properties* temp; - if (!(a >>= temp)) return false; - const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); - if (props.contains("onemin")) { - if (!(props["onemin"] >>= s.onemin)) return false; - } - if (props.contains("fivemin")) { - if (!(props["fivemin"] >>= s.fivemin)) return false; - } - if (props.contains("fifteenmin")) { - if (!(props["fifteenmin"] >>= s.fifteenmin)) return false; - } - return true; -} - -inline void operator<<= (CORBA::Any& a, const loadAverage_struct& s) { - redhawk::PropertyMap props; - - props["onemin"] = s.onemin; - - props["fivemin"] = s.fivemin; - - props["fifteenmin"] = s.fifteenmin; - a <<= props; -} - -inline bool operator== (const loadAverage_struct& s1, const loadAverage_struct& s2) { - if (s1.onemin!=s2.onemin) - return false; - if (s1.fivemin!=s2.fivemin) - return false; - if (s1.fifteenmin!=s2.fifteenmin) - return false; - return true; -} - -inline bool operator!= (const loadAverage_struct& s1, const loadAverage_struct& s2) { - return !(s1==s2); -} struct component_monitor_struct { component_monitor_struct () { - }; + } static std::string getId() { return std::string("component_monitor::component_monitor"); - }; + } + + static const char* getFormat() { + return "ssHfffIII"; + } std::string component_id; std::string waveform_id; @@ -1334,67 +1371,5 @@ inline bool operator== (const component_monitor_struct& s1, const component_moni inline bool operator!= (const component_monitor_struct& s1, const component_monitor_struct& s2) { return !(s1==s2); } -struct sys_limits_struct { - sys_limits_struct () - { - }; - - static 
std::string getId() { - return std::string("sys_limits"); - }; - - CORBA::Long current_threads; - CORBA::Long max_threads; - CORBA::Long current_open_files; - CORBA::Long max_open_files; -}; - -inline bool operator>>= (const CORBA::Any& a, sys_limits_struct& s) { - CF::Properties* temp; - if (!(a >>= temp)) return false; - const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); - if (props.contains("sys_limits::current_threads")) { - if (!(props["sys_limits::current_threads"] >>= s.current_threads)) return false; - } - if (props.contains("sys_limits::max_threads")) { - if (!(props["sys_limits::max_threads"] >>= s.max_threads)) return false; - } - if (props.contains("sys_limits::current_open_files")) { - if (!(props["sys_limits::current_open_files"] >>= s.current_open_files)) return false; - } - if (props.contains("sys_limits::max_open_files")) { - if (!(props["sys_limits::max_open_files"] >>= s.max_open_files)) return false; - } - return true; -} - -inline void operator<<= (CORBA::Any& a, const sys_limits_struct& s) { - redhawk::PropertyMap props; - - props["sys_limits::current_threads"] = s.current_threads; - - props["sys_limits::max_threads"] = s.max_threads; - - props["sys_limits::current_open_files"] = s.current_open_files; - - props["sys_limits::max_open_files"] = s.max_open_files; - a <<= props; -} - -inline bool operator== (const sys_limits_struct& s1, const sys_limits_struct& s2) { - if (s1.current_threads!=s2.current_threads) - return false; - if (s1.max_threads!=s2.max_threads) - return false; - if (s1.current_open_files!=s2.current_open_files) - return false; - if (s1.max_open_files!=s2.max_open_files) - return false; - return true; -} - -inline bool operator!= (const sys_limits_struct& s1, const sys_limits_struct& s2) { - return !(s1==s2); -} #endif // STRUCTPROPS_H diff --git a/GPP/cpp/utils/EventDispatcher.h b/GPP/cpp/utils/EventDispatcher.h deleted file mode 100644 index 87eb228b2..000000000 --- a/GPP/cpp/utils/EventDispatcher.h +++ 
/dev/null @@ -1,97 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK GPP. - * - * REDHAWK GPP is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef EVENT_DISPATCHER_H_ -#define EVENT_DISPATCHER_H_ - -#include -#include - -#include - -/** - * EventDispatcher uses the Observer pattern to dispatch events to 1 or more - * attached event listeners. Listeners are registered as callback functions - * in attach_listener() and called in sequence during dispatch(). - */ -template -class EventDispatcher -{ -public: - typedef MESSAGE_TYPE MessageType; - typedef boost::function< void (const MessageType&) > EventHandler; - typedef std::vector EventHandlers; - -public: - void attach_listener( const EventHandler& handler ) - { - event_handlers_.push_back(handler); - } - - void dispatch( const MessageType& message ) const - { - typename EventHandlers::const_iterator iter = event_handlers_.begin(); - typename EventHandlers::const_iterator end = event_handlers_.end(); - for( ; iter!=end; ++iter ) - { - (*iter)(message); - } - } - -private: - EventHandlers event_handlers_; -}; - -/** - * EventDispatcherMixin adds event dispatching semantics to child classes. 
- * attach_listener() is exposed as a public method, as this is the client- - * facing operation for event dispatching classes. dispatch() is - * exposed as protected, as it is expected for the event dispatching class to - * call dispatch() itself. - * - * Use the EventDispatcher class directly for cases where attach_listener() - * and dispatch() need to be called by the same object. - */ -template -class EventDispatcherMixin -{ -public: - typedef typename EventDispatcher::EventHandler EventHandler; - typedef typename EventDispatcher::MessageType MessageType; - -protected: - virtual ~EventDispatcherMixin(){} - -public: - void attach_listener( const EventHandler& handler ) - { - event_dispatcher_.attach_listener(handler); - } - -protected: - void dispatch( const MessageType& message ) const - { - event_dispatcher_.dispatch(message); - } - -private: - EventDispatcher event_dispatcher_; -}; - -#endif diff --git a/GPP/cpp/utils/affinity.cpp b/GPP/cpp/utils/affinity.cpp index c5709bc31..f2a93f06e 100644 --- a/GPP/cpp/utils/affinity.cpp +++ b/GPP/cpp/utils/affinity.cpp @@ -305,7 +305,6 @@ namespace gpp { os << numa_node_of_cpu( cpuid ); redhawk::affinity::CpuList tlist = get_cpu_list( "socket", os.str() ); RH_INFO(get_affinity_logger(), "Promoting NIC affinity to PID:" << pid << " SOCKET:" << os.str() ); - cpulist.clear(); for( int i=0; i < (int)tlist.size();i++ ) { if ( tlist[i] == cpuid ) continue; cpulist.push_back( tlist[i] ); diff --git a/GPP/tests/.gitignore b/GPP/tests/.gitignore index 65cc76261..0ba21347d 100644 --- a/GPP/tests/.gitignore +++ b/GPP/tests/.gitignore @@ -1,4 +1,3 @@ sdr/dev/devices/GPP/ -sdr/dev/mgr/DeviceManager -sdr/dom/mgr/DomainManager - +sdr/dev/mgr +sdr/dom/mgr diff --git a/GPP/tests/.md5sums b/GPP/tests/.md5sums new file mode 100644 index 000000000..6ac7101fd --- /dev/null +++ b/GPP/tests/.md5sums @@ -0,0 +1 @@ +dbaf096c6b94ee57160c4c2e3f5abc26 test_GPP.py diff --git a/GPP/tests/busy.py b/GPP/tests/bin/busy.py similarity index 100% rename 
from GPP/tests/busy.py rename to GPP/tests/bin/busy.py diff --git a/GPP/tests/bin/echo_pid.py b/GPP/tests/bin/echo_pid.py new file mode 100755 index 000000000..8167279a1 --- /dev/null +++ b/GPP/tests/bin/echo_pid.py @@ -0,0 +1,7 @@ +#!/usr/bin/python + +import os + +print 'CWD:', os.getcwd() +with open('pid.out', 'w') as fp: + print >>fp, os.getpid() diff --git a/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.armv7l.prf.xml b/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.armv7l.prf.xml deleted file mode 100644 index 22cb8b01b..000000000 --- a/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.armv7l.prf.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - SCA required property describing the Operating System - - Linux - - - - - - SCA required property describing the CPU type - - armv7l - - - - diff --git a/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.x86.prf.xml b/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.x86.prf.xml deleted file mode 100644 index 811169cbb..000000000 --- a/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.x86.prf.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - SCA required property describing the Operating System - - Linux - - - - - - SCA required property describing the CPU type - - x86 - - - - diff --git a/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.x86_64.prf.xml b/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.x86_64.prf.xml deleted file mode 100644 index 37070a752..000000000 --- a/GPP/tests/sdr/dev/mgr/DeviceManager.Linux.x86_64.prf.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - - - - SCA required property describing the Operating System - - Linux - - - - - - SCA required property describing the CPU type - - x86_64 - - - - diff --git a/GPP/tests/sdr/dev/mgr/DeviceManager.prf.xml b/GPP/tests/sdr/dev/mgr/DeviceManager.prf.xml deleted file mode 100644 index 8ff2ea4f7..000000000 --- a/GPP/tests/sdr/dev/mgr/DeviceManager.prf.xml +++ /dev/null @@ -1,69 +0,0 @@ - - - - - - These are the properties to configure the device manager - - - - A URI that points to a log4j configuration 
file used - for the device manager and all devices spawned by this device. - - - - - - Path to the Device Configuration Descriptor (DCD) XML file.. - - - - - - - The name of the domain in which this device manager will operate. - During startup, the device manager will attempt to connect to the - domain manager for the given domain. - - - - - - - The amount of time (in seconds) that the Device Manager will wait for a Device to exit before issuing a kill signal - - 0.5 - - - - - The location where files from remote SCA filesystems will be cached. - - - - - - - Name of the host where this device manager is running - - - - diff --git a/GPP/tests/sdr/dev/mgr/DeviceManager.scd.xml b/GPP/tests/sdr/dev/mgr/DeviceManager.scd.xml deleted file mode 100644 index 4fdd8ceb1..000000000 --- a/GPP/tests/sdr/dev/mgr/DeviceManager.scd.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - 2.2 - - devicemanager - - - - - - - - - - - - diff --git a/GPP/tests/sdr/dev/mgr/DeviceManager.spd.xml b/GPP/tests/sdr/dev/mgr/DeviceManager.spd.xml deleted file mode 100644 index c5f1b76e0..000000000 --- a/GPP/tests/sdr/dev/mgr/DeviceManager.spd.xml +++ /dev/null @@ -1,57 +0,0 @@ - - - - - - - - - - - - - - - - x86 Implementation of a Device Manager - - - - - - DeviceManager - - - - - - x86_64 Implementation of a Device Manager - - - - - - DeviceManager - - - - - diff --git a/GPP/tests/sdr/dev/nodes/test_VarCache_node/DeviceManager.dcd.xml.in b/GPP/tests/sdr/dev/nodes/test_VarCache_node/DeviceManager.dcd.xml.in new file mode 100644 index 000000000..0b68eb0d9 --- /dev/null +++ b/GPP/tests/sdr/dev/nodes/test_VarCache_node/DeviceManager.dcd.xml.in @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + GPP_1 + + + + + + + + + + + + + diff --git a/GPP/tests/sdr/dom/components/check_cwd/check_cwd.prf.xml b/GPP/tests/sdr/dom/components/check_cwd/check_cwd.prf.xml new file mode 100644 index 000000000..38b330346 --- /dev/null +++ b/GPP/tests/sdr/dom/components/check_cwd/check_cwd.prf.xml @@ -0,0 +1,8 @@ + + + + + + 
+ + diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/svc_error_cpp.scd.xml b/GPP/tests/sdr/dom/components/check_cwd/check_cwd.scd.xml similarity index 100% rename from redhawk/src/testing/sdr/dom/components/svc_error_cpp/svc_error_cpp.scd.xml rename to GPP/tests/sdr/dom/components/check_cwd/check_cwd.scd.xml diff --git a/GPP/tests/sdr/dom/components/check_cwd/check_cwd.spd.xml b/GPP/tests/sdr/dom/components/check_cwd/check_cwd.spd.xml new file mode 100644 index 000000000..b349f8c49 --- /dev/null +++ b/GPP/tests/sdr/dom/components/check_cwd/check_cwd.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/check_cwd.py + + + + + + + diff --git a/GPP/tests/sdr/dom/components/check_cwd/python/check_cwd.py b/GPP/tests/sdr/dom/components/check_cwd/python/check_cwd.py new file mode 100755 index 000000000..0e29d6ee4 --- /dev/null +++ b/GPP/tests/sdr/dom/components/check_cwd/python/check_cwd.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: check_cwd.spd.xml +from ossie.resource import start_component +import os +import logging + +from check_cwd_base import * + +class check_cwd_i(check_cwd_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + self.cwd = os.getcwd() + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. 
If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", check_cwd_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = check_cwd_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(check_cwd_i) + diff --git a/GPP/tests/sdr/dom/components/check_cwd/python/check_cwd_base.py b/GPP/tests/sdr/dom/components/check_cwd/python/check_cwd_base.py new file mode 100644 index 000000000..be16cd5e6 --- /dev/null +++ b/GPP/tests/sdr/dom/components/check_cwd/python/check_cwd_base.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: check_cwd.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class check_cwd_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + cwd = simple_property(id_="cwd", + type_="string", + mode="readonly", + action="external", + kinds=("property",)) + + + + diff --git a/GPP/tests/sdr/dom/components/load_comp/python/load_comp.py b/GPP/tests/sdr/dom/components/load_comp/python/load_comp.py index abac9ae08..6fb32d8dc 100755 --- a/GPP/tests/sdr/dom/components/load_comp/python/load_comp.py +++ b/GPP/tests/sdr/dom/components/load_comp/python/load_comp.py @@ -25,7 +25,7 @@ # Source: load_comp.spd.xml from ossie.resource import start_component import logging -import subprocess +import multiprocessing from load_comp_base import * @@ -42,7 +42,11 @@ def constructor(self): """ # TODO add customization here. - sp = subprocess.Popen(['../../../../busy.py'], executable='../../../../busy.py') + def busy(): + while True: + pass + mp = multiprocessing.Process(target=busy) + mp.start() def process(self): """ diff --git a/GPP/tests/sdr/dom/mgr/DomainManager.prf.xml b/GPP/tests/sdr/dom/mgr/DomainManager.prf.xml deleted file mode 100644 index 3c48097d9..000000000 --- a/GPP/tests/sdr/dom/mgr/DomainManager.prf.xml +++ /dev/null @@ -1,87 +0,0 @@ - - - - - - These are the properties to configure the domain manager - - - - A URI on the SCA FileManager that points to a log4j configuration file used - for the domain manager and the entire domain. - - - - - - - Path to the Domain Manager Descriptor (DMD) XML file.. - - - - - - - The name of the domain to which this domain manager will be bound. - - - - - - - Enable CORBA persistence for the domain manager. - - true - - - - - - The URL to a database for domain persistence information. - - - - - - - Replace any existing name binding for the domain manager. - - false - - - - - - The amount of time, in seconds, to wait for a component to bind to the name service after being launched. 
- - 60 - seconds - - - - - - Current version of REDHAWK that this Domain Manager is running - - - - - diff --git a/GPP/tests/sdr/dom/mgr/DomainManager.scd.xml b/GPP/tests/sdr/dom/mgr/DomainManager.scd.xml deleted file mode 100644 index 4c1c5b8ff..000000000 --- a/GPP/tests/sdr/dom/mgr/DomainManager.scd.xml +++ /dev/null @@ -1,38 +0,0 @@ - - - - - - 2.2 - - domainmanager - - - - - - - - - - - - diff --git a/GPP/tests/sdr/dom/mgr/DomainManager.spd.xml b/GPP/tests/sdr/dom/mgr/DomainManager.spd.xml deleted file mode 100644 index 1e871939e..000000000 --- a/GPP/tests/sdr/dom/mgr/DomainManager.spd.xml +++ /dev/null @@ -1,43 +0,0 @@ - - - - - - - REDHAWK test author - - - - - - - - - implementation of a Domain Manager - - - /mgr/DomainManager - - - - - - diff --git a/GPP/tests/sdr/dom/waveforms/busy_w/busy_w.sad.xml b/GPP/tests/sdr/dom/waveforms/busy_w/busy_w.sad.xml index 87bc98e46..c1e4d48e8 100644 --- a/GPP/tests/sdr/dom/waveforms/busy_w/busy_w.sad.xml +++ b/GPP/tests/sdr/dom/waveforms/busy_w/busy_w.sad.xml @@ -29,7 +29,7 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- busy_comp_1 + busy_cOmp_1 diff --git a/GPP/tests/sdr/dom/waveforms/check_cwd_w/check_cwd_w.sad.xml b/GPP/tests/sdr/dom/waveforms/check_cwd_w/check_cwd_w.sad.xml new file mode 100644 index 000000000..b926c6a8f --- /dev/null +++ b/GPP/tests/sdr/dom/waveforms/check_cwd_w/check_cwd_w.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + check_cwd_1 + + + + + + + + + + diff --git a/GPP/tests/sdr/dom/waveforms/wav_floor_w/wav_floor_w.sad.xml b/GPP/tests/sdr/dom/waveforms/wav_floor_w/wav_floor_w.sad.xml new file mode 100644 index 000000000..870949d32 --- /dev/null +++ b/GPP/tests/sdr/dom/waveforms/wav_floor_w/wav_floor_w.sad.xml @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + busy_comp_1 + + + + + + + + + busy_comp_2 + + + + + + + + + + + + diff --git a/GPP/tests/sdr/dom/waveforms/wav_one_floor_w/wav_one_floor_w.sad.xml b/GPP/tests/sdr/dom/waveforms/wav_one_floor_w/wav_one_floor_w.sad.xml new file mode 100644 index 000000000..b96a91939 --- /dev/null +++ b/GPP/tests/sdr/dom/waveforms/wav_one_floor_w/wav_one_floor_w.sad.xml @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + busy_comp_1 + + + + + + + + + + + + busy_comp_2 + + + + + + + + + + + diff --git a/GPP/tests/sdr/dom/waveforms/wav_two_floor_w/wav_two_floor_w.sad.xml b/GPP/tests/sdr/dom/waveforms/wav_two_floor_w/wav_two_floor_w.sad.xml new file mode 100644 index 000000000..ae4e537fe --- /dev/null +++ b/GPP/tests/sdr/dom/waveforms/wav_two_floor_w/wav_two_floor_w.sad.xml @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + busy_comp_1 + + + + + + + + + + + + busy_comp_2 + + + + + + + + + + + + diff --git a/GPP/tests/test_GPP.py b/GPP/tests/test_GPP.py old mode 100755 new mode 100644 index ba83d798c..317d30b92 --- a/GPP/tests/test_GPP.py +++ b/GPP/tests/test_GPP.py @@ -21,99 +21,556 @@ import unittest import os +import resource import socket import time -import signal import commands import sys -import threading import Queue -from omniORB import any -from ossie.cf import ExtendedEvent -from ossie.parsers import DCDParser -from 
omniORB import CORBA -import omniORB +import shutil +import subprocess, multiprocessing + +from omniORB import any, CORBA + import CosEventChannelAdmin, CosEventChannelAdmin__POA from ossie.utils.sandbox.registrar import ApplicationRegistrarStub -import subprocess, multiprocessing +from ossie.utils.sandbox import naming from ossie.utils import sb, redhawk from ossie.cf import CF, CF__POA import ossie.utils.testing -from shutil import copyfile -import shutil -import os +import ossie.properties +from redhawk import numa -# numa layout: node 0 cpus, node 1 cpus, node 0 cpus sans cpuid=0 - -maxcpus=32 -maxnodes=2 -all_cpus='0-'+str(maxcpus-1) -all_cpus_sans0='1-'+str(maxcpus-1) -numa_match={ "all" : "0-31", - "sock0": "0-7,16-23", - "sock1": "8-15,24-31", - "sock0sans0": "1-7,16-23", - "sock1sans0": "1-7,16-23", - "5" : "5", - "8-10" : "8-10" } -numa_layout=[ "0-7,16-23", "8-15,24-31" ] - -affinity_test_src={ "all" : "0-31", - "sock0": "0", - "sock1": "1", - "sock0sans0": "0", - "5" : "5", - "8-10" : "8,9,10", - "eface" : "em1" } - -def get_match( key="all" ): - if key and key in numa_match: - return numa_match[key] - return numa_match["all"] - -def spawnNodeBooter(dmdFile=None, - dcdFile=None, - debug=0, - domainname=None, - loggingURI=None, - endpoint=None, - dbURI=None, - execparams="", - nodeBooterPath=os.getenv('OSSIEHOME')+"/bin/nodeBooter", - sdrroot = None): - args = [] - if dmdFile != None: - args.extend(["-D", dmdFile]) - if dcdFile != None: - args.extend(["-d", dcdFile]) - if domainname == None: - # Always use the --domainname argument because - # we don't want to have to read the DCD files or regnerate them - args.extend(["--domainname", 'sample_domain']) - else: - args.extend(["--domainname", domainname]) - - if endpoint == None: - args.append("--nopersist") - else: - args.extend(["-ORBendPoint", endpoint]) - - if dbURI: - args.extend(["--dburl", dbURI]) - - if sdrroot == None: - sdrroot = os.getenv('SDRROOT') +from _unitTestHelpers import scatest, 
runtestHelpers + +def hasNumaSupport(): + return runtestHelpers.haveDefine('../cpp/Makefile', 'HAVE_LIBNUMA') + +topology = numa.NumaTopology() + +skipUnless = scatest._skipUnless +def requireNuma(obj): + return skipUnless(topology.available() and hasNumaSupport(), 'Affinity control is disabled')(obj) + + +def wait_predicate(pred, timeout): + end = time.time() + timeout + while time.time() < end: + if pred(): + return + time.sleep(0.1) + +def nolaunch(obj): + """ + Decorator to disable automatic launch of the GPP from the setUp() method. + This is for use by tests that must override properties that can only be set + via the command line or initializeProperties(). + """ + obj.nolaunch = True + return obj + +# Base unit testing class for new-style GPP tests, based off of the default +# generated unit test. Adds simplified management and cleanup of programs +# launched by the GPP. +class GPPSandboxTest(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the device. 
+ SPD_FILE = '../GPP.spd.xml' + + def setUp(self): + print "\n-----------------------" + print "Running: ", self.id().split('.')[-1] + print "-----------------------\n" + + if self._shouldLaunch(): + self.launchGPP() + else: + self.comp = None + + self._pids = [] + self._testDirs = [] + self._testFiles = [] + self._busyProcs = [] + + def _shouldLaunch(self): + method = getattr(self, self._testMethodName, None) + if not method: + return True + # Check for the 'nolaunch' attribute (its value is irrelevant); unless + # it's present, launch the GPP + return not hasattr(method, 'nolaunch') + + def launchGPP(self, properties={}): + # Launch the device, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl, properties=properties) + return self.comp + + def tearDown(self): + # Clean up any leftover busy subprocesses + for proc in self._busyProcs: + if proc.poll() is None: + proc.kill() + + # Terminate all launched executables, ignoring errors + remaining_pids = [] + for pid in self._pids: + try: + self.comp.ref.terminate(pid) + except: + remaining_pids.append(pid) + + # In case the GPP really failed badly, manually kill the processes + for pid in remaining_pids: + try: + os.killpg(pid, 9) + except OSError: + pass + + # Clean up all sandbox artifacts created during test + sb.release() + + for filename in self._testFiles: + try: + os.unlink(filename) + except OSError: + pass + + for path in self._testDirs: + shutil.rmtree(path) + + def addTestDirectory(self, path): + self._testDirs.append(path) + + def addTestFile(self, path): + self._testFiles.append(path) + + def removeTestFile(self, path): + self._testFiles.remove(path) + + def addBusyTasks(self, count): + self._busyProcs += [subprocess.Popen('bin/busy.py') for _ in xrange(count)] + + def clearBusyTasks(self): + for proc in self._busyProcs: + proc.kill() + self._busyProcs = [] + + def waitUsageState(self, state, timeout): + wait_predicate(lambda: self.comp._get_usageState() == state, 
timeout) + self.assertEqual(self.comp._get_usageState(), state) + + def _execute(self, executable, options, parameters): + if isinstance(options, dict): + options = [CF.DataType(k, any.to_any(v)) for k, v in options.items()] + if isinstance(parameters, dict): + parameters = [CF.DataType(k, any.to_any(v)) for k, v in parameters.items()] + pid = self.comp.ref.execute(executable, options, parameters) + if pid != 0: + self._pids.append(pid) + return pid + + def _launchComponent(self, executable, name, profile, options={}, parameters={}): + # Using the stub from the naming module allows fetching the component + # object, which the other version does not support; these should be + # consolidated at some point + appReg = naming.ApplicationRegistrarStub() + appreg_ior = sb.orb.object_to_string(appReg._this()) + + params = {} + params.update(parameters) + params['COMPONENT_IDENTIFIER'] = name + params['NAME_BINDING'] = name + params['PROFILE_NAME'] = profile + params['NAMING_CONTEXT_IOR'] = appreg_ior + + pid = self._execute(executable, options, params) + self.assertNotEqual(pid, 0) + + wait_predicate(lambda: appReg.getObject(name) is not None, 2.0) + comp = appReg.getObject(name) + self.failIf(comp is None, "component '" + name + "' never registered") + + return (pid, comp) + + def _launchComponentStub(self, name, options={}, parameters={}): + executable = '/dat/component_stub/python/component_stub.py' + profile = '/component_stub/component_stub.spd.xml' + return self._launchComponent(executable, name, profile, options, parameters) + + +class GPPTests(GPPSandboxTest): + def testPropertyEvents(self): + event_queue = Queue.Queue() + event_channel = sb.createEventChannel('properties') + event_channel.eventReceived.addListener(event_queue.put) + + self.comp.connect(event_channel) + + self.comp.loadThreshold = 81 + + # Make sure the background status events are emitted + try: + event = event_queue.get(timeout=1.0) + except Queue.Empty: + self.fail('Property change event not 
received') + event = any.from_any(event, keep_structs=True) + event_dict = ossie.properties.props_to_dict(event.properties) + self.assertEqual(self.comp._id, event.sourceId) + self.assertEqual(self.comp.loadThreshold.id, event.properties[0].id) + self.assertEqual(81, any.from_any(event.properties[0].value)) + + def testLimits(self): + limits = resource.getrlimit(resource.RLIMIT_NPROC) + + # Check that the system limits are sane + self.assertTrue(self.comp.sys_limits.current_threads > 0) + if limits[1] == -1: + # system limit is set to unlimited, can only check that the component is reporting a positive value + self.assertTrue(self.comp.sys_limits.current_threads > 0) + else: + self.assertTrue(self.comp.sys_limits.max_threads > self.comp.sys_limits.current_threads) + self.assertTrue(self.comp.sys_limits.current_open_files > 0) + self.assertTrue(self.comp.sys_limits.max_open_files > self.comp.sys_limits.current_open_files) + + # Check that the GPP's process limits are also sane + self.assertTrue(self.comp.gpp_limits.current_threads > 0) + if limits[0] == -1: + # process limit is set to unlimited, can only check that the component is reporting a positive value + self.assertTrue(self.comp.gpp_limits.current_threads > 0) + else: + self.assertTrue(self.comp.gpp_limits.max_threads > self.comp.gpp_limits.current_threads) + self.assertTrue(self.comp.gpp_limits.current_open_files > 0) + self.assertTrue(self.comp.gpp_limits.max_open_files > self.comp.gpp_limits.current_open_files) - args.extend(["-debug", str(debug)]) - args.extend(execparams.split(" ")) - args.insert(0, nodeBooterPath) + def testReservation(self): + # Set the idle threshold to 30% (i.e., can use up to 70%) and the + # reserved capacity per component to 25%; this gives plenty of headroom + # with two components (50% utilization leaves a 20% margin), but a + # third component unambiguously crosses into the busy threshold + self.comp.thresholds.cpu_idle = 30 + self.comp.reserved_capacity_per_component = 0.25 * 
self.comp.processor_cores + self.assertEquals(self.comp._get_usageState(),CF.Device.IDLE) + + self._launchComponentStub('reservation_1') + self._launchComponentStub('reservation_2') + + # Give the GPP a couple of measurement cycles to make sure it doesn't + # go busy; the CPU utilization (always the first entry) should report + # 50% subscribed + time.sleep(2) + self.assertEquals(self.comp._get_usageState(), CF.Device.ACTIVE) + expected = 0.5 * self.comp.processor_cores + self.assertEquals(expected, self.comp.utilization[0].subscribed) + + # Launch the third component and give up to 2 seconds for the GPP to go + # busy; CPU utilization should now be 75% subscribed + self._launchComponentStub('reservation_3') + self.waitUsageState(CF.Device.BUSY, 2.0) + expected = 0.75 * self.comp.processor_cores + self.assertEquals(expected, self.comp.utilization[0].subscribed) + + # Reduce the reserved capacity such that it consumes less than the idle + # threshold (10% x 3 = 30% active = 70% idle) + self.comp.reserved_capacity_per_component = 0.1 * self.comp.processor_cores + self.waitUsageState(CF.Device.ACTIVE, 2.0) + # 30% is an inexact fraction, so allow a little tolerance + expected = 0.3 * self.comp.processor_cores + self.assertAlmostEquals(expected, self.comp.utilization[0].subscribed, 1) + + def testFloorReservation(self): + # Reserve an absurdly large amount of cores, which should drive the GPP + # to a busy state immediately + self.assertEquals(self.comp._get_usageState(),CF.Device.IDLE) + params = {"RH::GPP::MODIFIED_CPU_RESERVATION_VALUE": 1000.0} + self._launchComponentStub('floor_reservation_1', parameters=params) + + self.waitUsageState(CF.Device.BUSY, 2.0) + + def _unpackThresholdEvents(self, message): + for dt in any.from_any(message, keep_structs=True): + if dt.id != 'threshold_event': + continue + props = any.from_any(dt.value, keep_structs=True) + yield ossie.properties.props_to_dict(props) + + def _checkThresholdEvent(self, resourceId, thresholdClass, 
exceeded): + try: + event = self.queue.get(timeout=2.0) + except Queue.Empty: + self.fail('Threshold event not received') + self.assertEqual(self.comp._refid, event['threshold_event::source_id']) + self.assertEqual(thresholdClass, event['threshold_event::threshold_class']) + self.assertEqual(resourceId, event['threshold_event::resource_id']) + self._assertThresholdState(event, exceeded) + + def _assertThresholdState(self, event, exceeded): + if exceeded: + event_type = 'THRESHOLD_EXCEEDED' + else: + event_type = 'THRESHOLD_NOT_EXCEEDED' + self.assertEqual(event_type, event['threshold_event::type']) + + def _testThresholdEventType(self, name, resourceId, thresholdClass, value): + # Save the original value and set the test value to trigger an + # "exceeded" event + orig_value = self.comp.thresholds[name] + self.comp.thresholds[name] = value + self._checkThresholdEvent(resourceId, thresholdClass, True) + + # Turning off the threshold should trigger a "not exceeded" event + self.comp.thresholds.ignore = True + self._checkThresholdEvent(resourceId, thresholdClass, False) + + # Turning it on again should trigger another "exceeded" event + self.comp.thresholds.ignore = False + self._checkThresholdEvent(resourceId, thresholdClass, True) + + # Restore the original value, trigger "not exceeded" event + self.comp.thresholds[name] = orig_value + self._checkThresholdEvent(resourceId, thresholdClass, False) + + def _checkNicEvents(self, nics, exceeded): + expected = set(nics) + end = time.time() + 2.0 + while expected and (time.time() < end): + try: + event = self.queue.get_nowait() + except Queue.Empty: + time.sleep(0.1) + continue + + # Only 1 device connected to the event channel, the source ID had + # better be correct + self.assertEqual(self.comp._refid, event['threshold_event::source_id']) + + # Should only be receiving one NIC message from each configured + # interface + nic_name = event['threshold_event::resource_id'] + self.failUnless(nic_name in nics, 'Received message 
from unexpected NIC %s' % nic_name) + self.failUnless(nic_name in expected, 'Received too many messages from NIC %s' % nic_name) + threshold_class = event['threshold_event::threshold_class'] + self.assertEqual('NIC_THROUGHPUT', threshold_class, 'Received unexpected threshold class %s' % threshold_class) + self._assertThresholdState(event, exceeded) + expected.remove(nic_name) + + self.assertEqual(set(), expected, 'Did not receive message from NIC(s): ' + ' '.join(expected)) + + def testThresholdEvents(self): + # Cut down the threshold cycle time to trigger events faster (we're not + # worried about the extra processing time here) + self.comp.threshold_cycle_time = 0.1 + + # Create a virtual event channel to queue the GPP's messages + event_channel = sb.createEventChannel('thresholds') + self.queue = Queue.Queue() + def queue_message(message): + # Unpack and queue up threshold event messages + for event in self._unpackThresholdEvents(message): + self.queue.put(event) + + event_channel.eventReceived.addListener(queue_message) + self.comp.connect(event_channel, usesPortName="MessageEvent_out") + + # Test all thresholds except NIC, which is a little more complex + self._testThresholdEventType('cpu_idle', 'cpu', 'CPU_IDLE', 100) + self._testThresholdEventType('mem_free', 'physical_ram', 'MEMORY_FREE', self.comp.memFree + 100) + self._testThresholdEventType('load_avg', 'cpu', 'LOAD_AVG', 0) + self._testThresholdEventType('shm_free', 'shm', 'SHM_FREE', self.comp.shmCapacity) + self._testThresholdEventType('files_available', 'ulimit', 'OPEN_FILES', 100.0) + self._testThresholdEventType('threads', 'ulimit', 'THREADS', 100.0) + + # If there is more than one NIC (real or virtual), each one will emit + # an event; we only really care about the "available" NICs + nics = list(self.comp.available_nic_interfaces) + nic_usage = int(self.comp.thresholds.nic_usage) + self.comp.thresholds.nic_usage = 0 + self._checkNicEvents(nics, True) + + # Turning off the threshold should trigger 
"not exceeded" event(s) + self.comp.thresholds.ignore = True + self._checkNicEvents(nics, False) + + # Turning it on again should trigger "exceeded" event(s) + self.comp.thresholds.ignore = False + self._checkNicEvents(nics, True) + + # Restore the original value, trigger "not exceeded" event(s) + self.comp.thresholds.nic_usage = nic_usage + self._checkNicEvents(nics, False) + + def testDefaultDirectories(self): + # Test that when cache and working directory are not given, the + # properties still have meaningful values + cwd = os.getcwd() + self.assertEquals(cwd, self.comp.cacheDirectory) + self.assertEquals(cwd, self.comp.workingDirectory) + + @nolaunch + def testCacheDirectory(self): + # Create an alternate directory for the cache + cache_dir = os.path.join(os.getcwd(), 'testCacheDirectory') + os.mkdir(cache_dir) + self.addTestDirectory(cache_dir) + self.launchGPP({'cacheDirectory':cache_dir}) + + # Make sure the property is correct + self.assertEqual(cache_dir, self.comp.cacheDirectory) + + # Load a file and check that it was copied to the right place + expected = os.path.join(cache_dir, 'bin/echo_pid.py') + self.failIf(os.path.exists(expected)) + fs_stub = ComponentTests.FileSystemStub() + self.comp.ref.load(fs_stub._this(), "/bin/echo_pid.py", CF.LoadableDevice.EXECUTABLE) + self.failUnless(os.path.isfile(expected)) + + @nolaunch + def testWorkingDirectory(self): + # Create an alternate directory for the working directory + working_dir = os.path.join(os.getcwd(), 'testWorkingDirectory') + os.mkdir(working_dir) + self.addTestDirectory(working_dir) + self.launchGPP({'workingDirectory':working_dir}) + + # Make sure the property is correct + self.assertEqual(working_dir, self.comp.workingDirectory) + + # Run a test executable that writes to its current directory + expected = os.path.join(working_dir, 'pid.out') + self.failIf(os.path.exists(expected)) + pid = self._execute("/bin/echo_pid.py", {}, {}) + wait_predicate(lambda: os.path.exists(expected), 1.0) + 
self.failUnless(os.path.exists(expected)) + + # Read the output file and make sure that the right PID was written + with open(expected, 'r') as fp: + echo_pid = int(fp.read().strip()) + self.assertEqual(pid, echo_pid) + + @nolaunch + def testCacheAndWorkingDirectory(self): + # Test the interaction of the cache and working directories; create an + # alternate directory for both + base_dir = os.path.join(os.getcwd(), 'testCacheAndWorkingDirectory') + os.mkdir(base_dir) + self.addTestDirectory(base_dir) + cache_dir = os.path.join(base_dir, 'cache') + os.mkdir(cache_dir) + working_dir = os.path.join(base_dir, 'cwd') + os.mkdir(working_dir) + self.launchGPP({'cacheDirectory':cache_dir, 'workingDirectory':working_dir}) + + # Make sure the properties are correct + self.assertEqual(cache_dir, self.comp.cacheDirectory) + self.assertEqual(working_dir, self.comp.workingDirectory) + + # Load a file and check that it was copied to the right place + expected = os.path.join(cache_dir, 'bin/echo_pid.py') + self.failIf(os.path.exists(expected)) + fs_stub = ComponentTests.FileSystemStub() + self.comp.ref.load(fs_stub._this(), "/bin/echo_pid.py", CF.LoadableDevice.EXECUTABLE) + self.failUnless(os.path.isfile(expected)) + + # Run a test executable that writes to its current directory + expected = os.path.join(working_dir, 'pid.out') + self.failIf(os.path.exists(expected)) + pid = self._execute("/bin/echo_pid.py", {}, {}) + wait_predicate(lambda: os.path.exists(expected), 1.0) + self.failUnless(os.path.exists(expected)) + + # Read the output file and make sure that the right PID was written + with open(expected, 'r') as fp: + echo_pid = int(fp.read().strip()) + self.assertEqual(pid, echo_pid) + + def testSharedMemoryProperties(self): + status = os.statvfs('/dev/shm') + + # The total shouldn't change in normal operation, so using the same + # expected integer math should give the same value + total = (status.f_blocks * status.f_frsize) / 1024 / 1024 + self.assertEqual(total, 
self.comp.shmCapacity) + + # Free could vary slightly if something else is happening on the + # system, so give it a little bit of slack (1 MB) + free = (status.f_bfree * status.f_frsize) / 1024 / 1024 + self.failIf(abs(free - self.comp.shmFree) > 1) + + def testBusyCpuIdle(self): + # Disable load average threshold + self.comp.thresholds.load_avg = -1 + + self.assertEqual(self.comp._get_usageState(), CF.Device.IDLE) + + # Task all of the CPUs to be busy (more or less) and wait for the idle + # threshold to be exceeded + self.addBusyTasks(self.comp.processor_cores) + self.waitUsageState(CF.Device.BUSY, 5.0) + self.failUnless("CPU IDLE" in self.comp.busy_reason.upper()) + + # Clear all busy tasks and wait for the device to go back to idle + self.clearBusyTasks() + self.waitUsageState(CF.Device.IDLE, 5.0) + self.assertEqual(self.comp._get_usageState(), CF.Device.IDLE) + self.assertEqual(self.comp.busy_reason, "") + + def testBusyLoadAvg(self): + # Disable CPU idle threshold and lower the load average threshold so + # that it's easier to exceed + self.comp.thresholds.cpu_idle = -1 + self.comp.thresholds.load_avg = 25 + + # The load average may exceed the threshold to begin with, depending on + # what the system was doing before this test + print 'Waiting for load average to fall below threshold, may take a while' + self.waitUsageState(CF.Device.IDLE, 30.0) + + # Occupy all of the CPUs with busy tasks and wait for the load average + # to exceed the threshold; this may take a while, since it's based on a + # 1 minute window + self.addBusyTasks(self.comp.processor_cores) + print 'Waiting for load average to exceed threshold, may take a while' + self.waitUsageState(CF.Device.BUSY, 30.0) + self.failUnless("LOAD AVG" in self.comp.busy_reason.upper()) + + # Clear all of the busy tasks; again, due to the 1 minute window, it + # may take a little while for the load average to drop back below the + # threshold + self.clearBusyTasks() + print 'Waiting for load average to fall 
below threshold, may take a while' + self.waitUsageState(CF.Device.IDLE, 30.0) + self.assertEqual(self.comp.busy_reason, "") + + def testBusySharedMemory(self): + # Cut down the update time for testing + self.comp.threshold_cycle_time = 0.1 - print '\n-------------------------------------------------------------------' - print 'Launching nodeBooter', " ".join(args) - print '-------------------------------------------------------------------' - nb = ossie.utils.Popen(args, cwd=sdrroot, shell=False, preexec_fn=os.setpgrp) + self.assertEqual(self.comp._get_usageState(), CF.Device.IDLE) + + # Set the shared memory threshold a little below the current free, so + # that a relative small uptick in usage will cross the threshold + current_shm = int(self.comp.shmFree) + self.comp.thresholds.shm_free = current_shm - 2 + + # Create a temporary file that consumes a few MB, enough to cross the + # threshold and a little further just to be sure + shm_file = '/dev/shm/test-%d' % os.getpid() + fill_size = 4*1024*1024 + self.addTestFile(shm_file) + with open(shm_file, 'w') as fp: + # Resize the file and write one byte every page to ensure that + # shared memory is consumed + fp.truncate(fill_size) + for pos in xrange(0, fill_size, 4096): + fp.seek(pos) + fp.write('\x00') + + self.waitUsageState(CF.Device.BUSY, 1.0) + + # Remove the file, which should push the free shared memory back over + # the threshold + os.unlink(shm_file) + self.waitUsageState(CF.Device.IDLE, 1.0) - return nb class ComponentTests(ossie.utils.testing.ScaComponentTestCase): """Test for all component implementations in test""" @@ -131,6 +588,7 @@ def tearDown(self): os.remove(sproc) except: pass + try: # kill all busy.py just in case os.system('pkill -9 -f busy.py') @@ -158,27 +616,6 @@ def promptUserInput(self, question, default): else: return default - def check_affinity(self, pname, affinity_match="0-31", use_pidof=True, pid_in=None): - try: - if pid_in: - pid=pid_in - o2=os.popen('cat 
/proc/'+str(pid)+'/status | grep Cpus_allowed_list') - else: - if use_pidof == True: - o1=os.popen('pidof -x '+pname ) - else: - o1=os.popen('pgrep -f '+pname ) - pid=o1.read() - o2=os.popen('cat /proc/'+pid.split('\n')[0]+'/status | grep Cpus_allowed_list') - cpus_allowed=o2.read().split() - except: - cpus_allowed=[] - - #print pname, cpus_allowed - self.assertEqual(cpus_allowed[1],affinity_match) - return - - def runGPP(self, execparam_overrides={}, initialize=True, configure={}): ####################################################################### # Launch the component with the default execparams @@ -254,11 +691,12 @@ def testScaBasicBehavior(self): # Create a test file system class FileStub(CF__POA.File): - def __init__(self): - self.fobj = open("dat/component_stub.py") + def __init__(self, path): + self.path = path + self.fobj = open(self.path) def sizeOf(self): - return os.path.getsize("dat/component_stub.py") + return os.path.getsize(self.path) def read(self, bytes): return self.fobj.read(bytes) @@ -267,15 +705,20 @@ def close(self): return self.fobj.close() class FileSystemStub(CF__POA.FileSystem): + def __init__(self, path='.'): + self.path = os.path.abspath(path) + def list(self, path): - return [CF.FileSystem.FileInformationType(path[1:], CF.FileSystem.PLAIN, 100, [])] + path = os.path.basename(path) + return [CF.FileSystem.FileInformationType(path, CF.FileSystem.PLAIN, 100, [])] def exists(self, fileName): - tmp_fileName = './dat/'+fileName + tmp_fileName = self.path + fileName return os.access(tmp_fileName, os.F_OK) def open(self, path, readonly): - file = ComponentTests.FileStub() + tmp_fileName = self.path + path + file = ComponentTests.FileStub(tmp_fileName) return file._this() def testExecute(self): @@ -284,7 +727,7 @@ def testExecute(self): configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False) self.comp_obj.configure(configureProps) - fs_stub = ComponentTests.FileSystemStub() + fs_stub = 
ComponentTests.FileSystemStub('./dat') fs_stub_var = fs_stub._this() self.comp_obj.load(fs_stub_var, "/component_stub.py", CF.LoadableDevice.EXECUTABLE) @@ -322,156 +765,6 @@ def testExecute(self): pass else: self.fail("Process failed to terminate") - - def testBusy(self): - self.runGPP() - - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) - cores = multiprocessing.cpu_count() - sleep_time = 3+cores/10.0 - if sleep_time < 7: - sleep_time = 7 - procs = [] - for core in range(cores*2): - procs.append(subprocess.Popen('./busy.py')) - time.sleep(sleep_time) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.BUSY) - br=self.comp.busy_reason.queryValue() - br_cpu="CPU IDLE" in br.upper() or "LOAD AVG" in br.upper() - self.assertEqual(br_cpu, True) - for proc in procs: - proc.kill() - time.sleep(sleep_time) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) - self.assertEqual(self.comp.busy_reason, "") - - fs_stub = ComponentTests.FileSystemStub() - fs_stub_var = fs_stub._this() - - self.comp_obj.load(fs_stub_var, "/component_stub.py", CF.LoadableDevice.EXECUTABLE) - self.assertEqual(os.path.isfile("component_stub.py"), True) # Technically this is an internal implementation detail that the file is loaded into the CWD of the device - - comp_id = "DCE:00000000-0000-0000-0000-000000000000:waveform_1" - app_id = "waveform_1" - appReg = ApplicationRegistrarStub(comp_id, app_id) - appreg_ior = sb.orb.object_to_string(appReg._this()) - pid = self.comp_obj.execute("/component_stub.py", [], [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id)), - CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub")),CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), - CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))]) - self.assertNotEqual(pid, 0) - time.sleep(1) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.ACTIVE) - cores = multiprocessing.cpu_count() 
- procs = [] - for core in range(cores*2): - procs.append(subprocess.Popen('./busy.py')) - time.sleep(sleep_time) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.BUSY) - br_cpu="CPU IDLE" in br.upper() or "LOAD AVG" in br.upper() - for proc in procs: - proc.kill() - time.sleep(sleep_time) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.ACTIVE) - self.assertEqual(self.comp.busy_reason.queryValue(), "") - - try: - os.kill(pid, 0) - except OSError: - self.fail("Process failed to execute") - time.sleep(1) - self.comp_obj.terminate(pid) - try: - # kill all busy.py just in case - os.system('pkill -9 -f busy.py') - os.kill(pid, 0) - except OSError: - pass - else: - self.fail("Process failed to terminate") - - - - def test_busy_allow(self): - self.runGPP(execparam_overrides={'DEBUG_LEVEL': 3 }) - - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) - cores = multiprocessing.cpu_count() - sleep_time = 3+cores/10.0 - if sleep_time < 7: - sleep_time = 7 - procs = [] - for core in range(cores*2+5): - procs.append(subprocess.Popen('./busy.py')) - time.sleep(sleep_time) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.BUSY) - br=self.comp.busy_reason.queryValue() - br_cpu="CPU IDLE" in br.upper() or "LOAD AVG" in br.upper() - self.assertEqual(br_cpu, True) - - # turn off check for idle - self.comp.thresholds.cpu_idle = 0.0 - # wait for busy to be reported... should just be load avg .. takes approx 1 minute - for i in range(42): - br=self.comp.busy_reason.queryValue() - br_cpu="LOAD AVG" in br.upper() - if br_cpu == True: - break - time.sleep(1.5) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.BUSY) - self.assertEqual(br_cpu, True) - - # turn off check for load_avg - self.comp.thresholds.load_avg = 100.0 - for i in range(5): - br=self.comp.busy_reason.queryValue() - if br == "": - break - time.sleep(1.5) - # wait for busy to be reported... 
should just be load avg now - br=self.comp.busy_reason.queryValue() - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) - self.assertEqual(br, "") - - # turn on check for cpu_idle check - self.comp.thresholds.cpu_idle = 10.0 - # wait for busy to be reported... should just be load avg now - for i in range(5): - br=self.comp.busy_reason.queryValue() - br_cpu="CPU IDLE" in br.upper() - if br_cpu == True: - break - time.sleep(1.5) - br_cpu="CPU IDLE" in br.upper() - self.assertEqual(br_cpu, True) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.BUSY) - - # turn on check for load_avg check - self.comp.thresholds.load_avg = 80.0 - # wait for busy to be reported... should just be load avg now - for i in range(5): - br=self.comp.busy_reason.queryValue() - br_cpu="CPU IDLE" in br.upper() or "LOAD AVG" in br.upper() - if br_cpu == True: - break - time.sleep(1.5) - - self.assertEqual(br_cpu, True) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.BUSY) - for proc in procs: - proc.kill() - for i in range(40): - br=self.comp.busy_reason.queryValue() - if br == "": - break - time.sleep(1.5) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) - self.assertEqual(self.comp.busy_reason.queryValue(), "") - time.sleep(1) - try: - # kill all busy.py just in case - os.system('pkill -9 -f busy.py') - except OSError: - pass def visual_testBusy(self): @@ -484,7 +777,7 @@ def visual_testBusy(self): sleep_time = 7 procs = [] for core in range(cores*2): - procs.append(subprocess.Popen('./busy.py')) + procs.append(subprocess.Popen('bin/busy.py')) end_time = time.time() + sleep_time while end_time > time.time(): print str(time.time()) + " busy reason: " + str(self.comp.busy_reason) @@ -499,7 +792,7 @@ def visual_testBusy(self): self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) self.assertEqual(self.comp.busy_reason, "") - fs_stub = ComponentTests.FileSystemStub() + fs_stub = ComponentTests.FileSystemStub('./dat') 
fs_stub_var = fs_stub._this() self.comp_obj.load(fs_stub_var, "/component_stub.py", CF.LoadableDevice.EXECUTABLE) @@ -518,7 +811,7 @@ def visual_testBusy(self): cores = multiprocessing.cpu_count() procs = [] for core in range(cores*2): - procs.append(subprocess.Popen('./busy.py')) + procs.append(subprocess.Popen('bin/busy.py')) end_time = time.time() + sleep_time while end_time > time.time(): print str(time.time()) + " busy reason: " + str(self.comp.busy_reason) @@ -558,7 +851,7 @@ def No_testScreenExecute(self): useScreen = qr[0].value.value() self.assertEqual(useScreen, True) - fs_stub = ComponentTests.FileSystemStub() + fs_stub = ComponentTests.FileSystemStub('./dat') fs_stub_var = fs_stub._this() self.comp_obj.load(fs_stub_var, "/component_stub.py", CF.LoadableDevice.EXECUTABLE) @@ -625,7 +918,7 @@ def No_testScreenExecute(self): def test_scraping_proc(self): # start up subprocess with spaces in the name... - proc="./busy.py" + proc="bin/busy.py" sproc="./spacely sprockets" shutil.copy(proc,sproc) procs = subprocess.Popen(sproc) @@ -644,84 +937,7 @@ def test_scraping_proc(self): except: pass - - def testPropertyEvents(self): - class Consumer_i(CosEventChannelAdmin__POA.ProxyPushConsumer): - def __init__(self, parent, instance_id): - self.supplier = None - self.parent = parent - self.instance_id = instance_id - self.existence_lock = threading.Lock() - - def push(self, data): - self.parent.actionQueue.put(data) - - def connect_push_supplier(self, supplier): - self.supplier = supplier - - def disconnect_push_consumer(self): - self.existence_lock.acquire() - try: - self.supplier.disconnect_push_supplier() - except: - pass - self.existence_lock.release() - - class SupplierAdmin_i(CosEventChannelAdmin__POA.SupplierAdmin): - def __init__(self, parent): - self.parent = parent - self.instance_counter = 0 - - def obtain_push_consumer(self): - self.instance_counter += 1 - self.parent.consumer_lock.acquire() - self.parent.consumers[self.instance_counter] = 
Consumer_i(self.parent,self.instance_counter) - objref = self.parent.consumers[self.instance_counter]._this() - self.parent.consumer_lock.release() - return objref - - class EventChannelStub(CosEventChannelAdmin__POA.EventChannel): - def __init__(self): - self.consumer_lock = threading.RLock() - self.consumers = {} - self.actionQueue = Queue.Queue() - self.supplier_admin = SupplierAdmin_i(self) - - def for_suppliers(self): - return self.supplier_admin._this() - - ####################################################################### - # Launch the device - self.runGPP({"propertyEventRate": 5}) - - orb = CORBA.ORB_init() - obj_poa = orb.resolve_initial_references("RootPOA") - poaManager = obj_poa._get_the_POAManager() - poaManager.activate() - - eventChannel = EventChannelStub() - eventChannelId = obj_poa.activate_object(eventChannel) - eventPort = self.comp_obj.getPort("propEvent") - eventPort = eventPort._narrow(CF.Port) - eventPort.connectPort(eventChannel._this(), "eventChannel") - - #configureProps = self.getPropertySet(kinds=("configure",), modes=("readwrite", "writeonly"), includeNil=False) - configureProps = [CF.DataType(id='DCE:22a60339-b66e-4309-91ae-e9bfed6f0490',value=any.to_any(81))] - self.comp_obj.configure(configureProps) - - # Make sure the background status events are emitted - time.sleep(0.5) - - self.assert_(eventChannel.actionQueue.qsize() > 0) - - event = eventChannel.actionQueue.get() - event = any.from_any(event, keep_structs=True) - event_dict = ossie.properties.props_to_dict(event.properties) - self.assert_(self.comp_obj._get_identifier() == event.sourceId) - self.assert_('DCE:22a60339-b66e-4309-91ae-e9bfed6f0490' == event.properties[0].id) - self.assert_(81 == any.from_any(event.properties[0].value)) - - + def test_mcastNicThreshold(self): # set mcast exec param values for the test @@ -824,19 +1040,6 @@ def test_loadCapacity(self): allocProps = [CF.DataType(id='DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056',value=any.to_any(capacity*2))] 
self.assertRaises( CF.Device.InsufficientCapacity, self.comp_obj.allocateCapacity, allocProps) - def test_sys_limits(self): - - self.runGPP() - p=CF.DataType(id='sys_limits',value=any.to_any(None)) - retval = self.comp.query([p])[0].value._v - ids = [] - for item in retval: - ids.append(item.id) - self.assertTrue('sys_limits::current_threads') - self.assertTrue('sys_limits::max_threads') - self.assertTrue('sys_limits::current_open_files') - self.assertTrue('sys_limits::max_open_files') - def get_single_nic_interface(self): import commands (exitstatus, ifconfig_info) = commands.getstatusoutput('/sbin/ifconfig -a') @@ -993,458 +1196,287 @@ def test_threshold_usagestate(self): self.assertEquals(ustate, CF.Device.IDLE) + def test_threshold_usagestate_ignored(self): - def DeployWithAffinityOptions(self, options_list, numa_layout_test, bl_cpus ): - self.runGPP() - - # enable affinity processing.. - props=[ossie.cf.CF.DataType(id='affinity', value=CORBA.Any(CORBA.TypeCode("IDL:CF/Properties:1.0"), - [ ossie.cf.CF.DataType(id='affinity::exec_directive_value', value=CORBA.Any(CORBA.TC_string, '')), - ossie.cf.CF.DataType(id='affinity::exec_directive_class', value=CORBA.Any(CORBA.TC_string, 'socket')), - ossie.cf.CF.DataType(id='affinity::force_override', value=CORBA.Any(CORBA.TC_boolean, False)), - ossie.cf.CF.DataType(id='affinity::blacklist_cpus', value=CORBA.Any(CORBA.TC_string, bl_cpus)), - ossie.cf.CF.DataType(id='affinity::deploy_per_socket', value=CORBA.Any(CORBA.TC_boolean, False)), - ossie.cf.CF.DataType(id='affinity::disabled', value=CORBA.Any(CORBA.TC_boolean, False)) ## enable affinity - ] ))] + self.get_single_nic_interface() + if len(self.nic_list)> 1: + self.runGPP(configure={'nic_interfaces' : [ self.nic_list[0] ]}) + else: + self.runGPP() - self.comp_obj.configure(props) + self.comp.thresholds.ignore = True - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) - - fs_stub = ComponentTests.FileSystemStub() - fs_stub_var = fs_stub._this() + # 
set cpu to be 100.00 ... the check busy state.. + orig_thres = self.comp.thresholds.cpu_idle.queryValue() + self.comp.thresholds.cpu_idle = 100.00 + ustate=None + for i in xrange(6): + ustate= self.comp._get_usageState() + if ustate == CF.Device.BUSY: break + time.sleep(.5) - ## Run a component with NIC based affinity - self.comp_obj.load(fs_stub_var, "/component_stub.py", CF.LoadableDevice.EXECUTABLE) - self.assertEqual(os.path.isfile("component_stub.py"), True) # Technically this is an internal implementation detail that the file is loaded into the CWD of the device - - comp_id = "DCE:00000000-0000-0000-0000-000000000000:waveform_1" - app_id = "waveform_1" - appReg = ApplicationRegistrarStub(comp_id, app_id) - appreg_ior = sb.orb.object_to_string(appReg._this()) - pid = self.comp_obj.execute("/component_stub.py", [ - CF.DataType(id="AFFINITY", value=any.to_any( options_list ) ) ], - [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id)), - CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub")),CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), - CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))]) - self.assertNotEqual(pid, 0) - - self.check_affinity( 'component_stub.py', get_match(numa_layout_test), False) - - try: - os.kill(pid, 0) - except OSError: - self.fail("Process failed to execute") - time.sleep(1) - self.comp_obj.terminate(pid) - try: - os.kill(pid, 0) - except OSError: - pass - else: - self.fail("Process failed to terminate") + self.assertNotEquals(ustate, CF.Device.BUSY) + # set cpu idle back + self.comp.thresholds.cpu_idle = orig_thres + ustate=None + for i in xrange(6): + ustate= self.comp._get_usageState() + if ustate == CF.Device.IDLE: break + time.sleep(.5) - def testNicAffinity(self): - self.DeployWithAffinityOptions( [ CF.DataType(id='nic',value=any.to_any(affinity_test_src['eface'])) ], "sock0", '' ) + self.assertEquals(ustate, CF.Device.IDLE) - def 
testNicAffinityWithBlackList(self): - self.DeployWithAffinityOptions( [ CF.DataType(id='nic',value=any.to_any(affinity_test_src['eface'])) ], "sock0sans0", '0' ) + # set mem_free + orig_thres = self.comp.thresholds.mem_free.queryValue() + mem_free = self.comp.memFree.queryValue() + self.comp.thresholds.mem_free = mem_free+2000 + ustate=None + for i in xrange(6): + ustate= self.comp._get_usageState() + if ustate == CF.Device.BUSY: break + time.sleep(.5) - def testCpuAffinity(self): - if maxcpus > 6: - self.DeployWithAffinityOptions( [ CF.DataType(id='affinity::exec_directive_class',value=any.to_any('cpu')), - CF.DataType(id='affinity::exec_directive_value',value=any.to_any(affinity_test_src['5'])) ], "5", '' ) + self.assertNotEquals(ustate, CF.Device.BUSY) - def testSocketAffinity(self): - self.DeployWithAffinityOptions( [ CF.DataType(id='affinity::exec_directive_class',value=any.to_any('socket')), - CF.DataType(id='affinity::exec_directive_value',value=any.to_any(affinity_test_src['sock1'])) ], - "sock1sans0", '0' ) + # set mem_free back + self.comp.thresholds.mem_free = orig_thres + ustate=None + for i in xrange(6): + ustate= self.comp._get_usageState() + if ustate == CF.Device.IDLE: break + time.sleep(.5) - def testDeployOnSocket(self): - self.runGPP() + self.assertEquals(ustate, CF.Device.IDLE) - # enable affinity processing.. 
- props=[ossie.cf.CF.DataType(id='affinity', value=CORBA.Any(CORBA.TypeCode("IDL:CF/Properties:1.0"), - [ ossie.cf.CF.DataType(id='affinity::exec_directive_value', value=CORBA.Any(CORBA.TC_string, '')), - ossie.cf.CF.DataType(id='affinity::exec_directive_class', value=CORBA.Any(CORBA.TC_string, 'socket')), - ossie.cf.CF.DataType(id='affinity::force_override', value=CORBA.Any(CORBA.TC_boolean, False)), - ossie.cf.CF.DataType(id='affinity::blacklist_cpus', value=CORBA.Any(CORBA.TC_string, '')), - ossie.cf.CF.DataType(id='affinity::deploy_per_socket', value=CORBA.Any(CORBA.TC_boolean, True)), ## enable deploy_on - ossie.cf.CF.DataType(id='affinity::disabled', value=CORBA.Any(CORBA.TC_boolean, False)) ## enable affinity - ] ))] - self.comp_obj.configure(props) + # set load_avg + orig_thres = self.comp.thresholds.load_avg.queryValue() + self.comp.thresholds.load_avg=0.0 + ustate=None + for i in xrange(6): + ustate= self.comp._get_usageState() + if ustate == CF.Device.BUSY: break + time.sleep(.5) - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) - - fs_stub = ComponentTests.FileSystemStub() - fs_stub_var = fs_stub._this() + self.assertNotEquals(ustate, CF.Device.BUSY) - ## Run a component with NIC based affinity - self.comp_obj.load(fs_stub_var, "/component_stub.py", CF.LoadableDevice.EXECUTABLE) - self.assertEqual(os.path.isfile("component_stub.py"), True) # Technically this is an internal implementation detail that the file is loaded into the CWD of the device - - comp_id = "DCE:00000000-0000-0000-0000-000000000000:waveform_1" - app_id = "waveform_1" - appReg = ApplicationRegistrarStub(comp_id, app_id) - appreg_ior = sb.orb.object_to_string(appReg._this()) - pid0 = self.comp_obj.execute("/component_stub.py", [], - [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id)), - CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub")),CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), - 
CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))]) - self.assertNotEqual(pid0, 0) + # set load_avg back + self.comp.thresholds.load_avg = orig_thres + ustate=None + for i in xrange(6): + ustate= self.comp._get_usageState() + if ustate == CF.Device.IDLE: break + time.sleep(.5) - comp_id = "DCE:00000000-0000-0000-0000-000000000001:waveform_1" - app_id = "waveform_1" - appReg = ApplicationRegistrarStub(comp_id, app_id) - appreg_ior = sb.orb.object_to_string(appReg._this()) - pid1 = self.comp_obj.execute("/component_stub.py", [], - [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id)), - CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub")),CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), - CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))]) + self.assertEquals(ustate, CF.Device.IDLE) - self.assertNotEqual(pid1, 0) - self.check_affinity( 'component_stub.py', get_match("sock0"), False, pid0) - self.check_affinity( 'component_stub.py', get_match("sock0"), False, pid1) - - for pid in [ pid0, pid1 ]: - try: - os.kill(pid, 0) - except OSError: - self.fail("Process failed to execute") - time.sleep(1) - self.comp_obj.terminate(pid) - try: - os.kill(pid, 0) - except OSError: - pass +@requireNuma +class AffinityTests(GPPSandboxTest): + def setUp(self): + super(AffinityTests,self).setUp() + + # Always enable affinity handling for these tests + self.comp.affinity.disabled = False + + def _getAllowedCpuList(self, pid): + filename = '/proc/%d/status' % pid + with open(filename, 'r') as fp: + for line in fp: + if not 'Cpus_allowed_list' in line: + continue + cpu_list = line.split()[1] + return numa.parseValues(cpu_list, ",") + return [] + + def _getNumCpus(self): + output=2 + try: + status,output=commands.getstatusoutput("ls -d /sys/devices/system/cpu/cpu[0-9]* | wc -l") + if status == 0: + return int(output) else: - self.fail("Process failed to terminate") - - def 
testForceOverride(self): - self.runGPP() + status,output=commands.getstatusoutput("lscpu | egrep '^CPU\(' | awk '{ print $2 }'") + return int(output) + except: + pass + return output + + def _deployWithAffinityOptions(self, name, affinity={}): + options = {} + if affinity: + options['AFFINITY'] = [CF.DataType(k, any.to_any(v)) for k,v in affinity.items()] + pid, comp = self._launchComponentStub(name, options=options) + return pid + + def _getNicAffinity(self, nic): + ncpus=self._getNumCpus() + cpu_list = [] + with open('/proc/interrupts', 'r') as fp: + for line in fp: + # Remove final newline and make sure the line ends with the NIC + # name (in the unlikely event a machine goes up to "em11") + line = line.rstrip() + if not line.endswith(nic): + continue + # Discard the first entry (the IRQ number) and the last two + # (type and name) to get the CPU IRQ service totals + ncpus+=1 + cpu_irqs = line.split()[1:ncpus] + for cpu, count in enumerate(cpu_irqs): + if int(count) > 0: + cpu_list.append(cpu) + break + return cpu_list - # enable affinity processing.. 
- props=[ossie.cf.CF.DataType(id='affinity', value=CORBA.Any(CORBA.TypeCode("IDL:CF/Properties:1.0"), - [ ossie.cf.CF.DataType(id='affinity::exec_directive_value', value=CORBA.Any(CORBA.TC_string, '1')), - ossie.cf.CF.DataType(id='affinity::exec_directive_class', value=CORBA.Any(CORBA.TC_string, 'socket')), - ossie.cf.CF.DataType(id='affinity::force_override', value=CORBA.Any(CORBA.TC_boolean, True)), - ossie.cf.CF.DataType(id='affinity::blacklist_cpus', value=CORBA.Any(CORBA.TC_string, '')), - ossie.cf.CF.DataType(id='affinity::deploy_per_socket', value=CORBA.Any(CORBA.TC_boolean, True)), - ossie.cf.CF.DataType(id='affinity::disabled', value=CORBA.Any(CORBA.TC_boolean, False)) ## enable affinity - ] ))] + @skipUnless(len(topology.nodes) > 1, 'At least two NUMA nodes required') + def testNicAffinity(self): + # Pick the first NIC and figure out which CPU(s) service its IRQ, then + # build the list of all CPUs on that node + self.assertNotEqual(0, len(self.comp.available_nic_interfaces), 'no available NIC interfaces') + nic = self.comp.available_nic_interfaces[0] + nodes = set(topology.getNodeForCpu(cpu) for cpu in self._getNicAffinity(nic)) + # Join the CPU lists together + nic_cpus = sum((node.cpus for node in nodes), []) + + # Launch the component stub with affinity based on the selected NIC; + # with no CPU blacklist, GPP will assign the component to all of the + # CPUs on the same socket(s) + pid = self._deployWithAffinityOptions('nic_affinity_1', {'nic':nic}) + allowed_cpus = self._getAllowedCpuList(pid) + self.assertEqual(nic_cpus, allowed_cpus) - self.comp_obj.configure(props) + def testNicAffinityWithBlackList(self): + # Pick the first NIC + self.assertNotEqual(0, len(self.comp.available_nic_interfaces), 'no available NIC interfaces') + nic = self.comp.available_nic_interfaces[0] + nic_cpus = self._getNicAffinity(nic) + # find all the cpus for each node + nodes = set(topology.getNodeForCpu(cpu) for cpu in self._getNicAffinity(nic)) + cpulist = sum((node.cpus 
for node in nodes), []) + if len(nic_cpus) > 1: + # There's more than one CPU assigned to service NIC interrupts, + # just blacklist the first one + blacklist_cpu = nic_cpus.pop(0) + else: + # Only one CPU for NIC interrupts, figure out its node and + # blacklist one of the other CPUs + cpu = nic_cpus[0] + node = topology.getNodeForCpu(cpu) + cpulist = node.cpus[:] + # Find the CPU in the list and select the next one (wrapping around + # as necessary) to ensure that we don't blacklist the wrong CPU + index = node.cpus.index(cpu) + index = (index + 1) % len(node.cpus) + blacklist_cpu = node.cpus[index] - self.assertEqual(self.comp_obj._get_usageState(), CF.Device.IDLE) - fs_stub = ComponentTests.FileSystemStub() - fs_stub_var = fs_stub._this() + # default nic deployment is per socket, use cpulist per + # node to check results + nic_cpus=[ x for x in cpulist if x not in [blacklist_cpu]] + + # With a CPU blacklist, only the CPUs that are expliclitly allowed to + # handle the NIC are in the allowed list, as opposed to all CPUs in the + # same socket(s) + self.comp.affinity.blacklist_cpus = str(blacklist_cpu) + pid = self._deployWithAffinityOptions('nic_affinity_bl_1', {'nic':nic}) + allowed_cpus = self._getAllowedCpuList(pid) + self.assertEqual(nic_cpus, allowed_cpus) - ## Run a component with NIC based affinity - self.comp_obj.load(fs_stub_var, "/component_stub.py", CF.LoadableDevice.EXECUTABLE) - self.assertEqual(os.path.isfile("component_stub.py"), True) # Technically this is an internal implementation detail that the file is loaded into the CWD of the device - - comp_id = "DCE:00000000-0000-0000-0000-000000000000:waveform_1" - app_id = "waveform_1" - appReg = ApplicationRegistrarStub(comp_id, app_id) - appreg_ior = sb.orb.object_to_string(appReg._this()) - pid0 = self.comp_obj.execute("/component_stub.py", [], - [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id)), - CF.DataType(id="NAME_BINDING", 
value=any.to_any("component_stub")),CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), - CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))]) - self.assertNotEqual(pid0, 0) + def testCpuAffinity(self): + # Pick the last CPU in the last node; the component should only be + # allowed to run on that CPU + cpu = topology.nodes[-1].cpus[-1] + pid = self._deployWithAffinityOptions('cpu_affinity_1', {'affinity::exec_directive_class': 'cpu', + 'affinity::exec_directive_value': cpu}) + allowed_cpus = self._getAllowedCpuList(pid) + self.assertEqual([cpu], allowed_cpus) + + @skipUnless(len(topology.nodes) > 1, 'At least two NUMA nodes are required') + def testSocketAffinity(self): + # Pick the last node and deploy to it; the process should be allowed to + # run on all CPUs from that node + node = topology.nodes[-1] + pid = self._deployWithAffinityOptions('socket_affinity_1', {'affinity::exec_directive_class': 'socket', + 'affinity::exec_directive_value': node.node}) + allowed_cpus = self._getAllowedCpuList(pid) + self.assertEqual(node.cpus, allowed_cpus) + + @skipUnless(len(topology.nodes) > 1, 'At least two NUMA nodes are required') + def testSocketAffinityWithBlackList(self): + # Pick the last node for deployment, but blacklist the first half of + # its CPUs + node = topology.nodes[-1] + cpu_count = len(node.cpus) + blacklist_cpus = node.cpus[:cpu_count/2] + cpu_list = node.cpus[cpu_count/2:] + + self.comp.affinity.blacklist_cpus = ','.join(str(cpu) for cpu in blacklist_cpus) + pid = self._deployWithAffinityOptions('socket_affinity_bl_1', {'affinity::exec_directive_class': 'socket', + 'affinity::exec_directive_value': node.node}) + allowed_cpus = self._getAllowedCpuList(pid) + self.assertEqual(cpu_list, allowed_cpus) - comp_id = "DCE:00000000-0000-0000-0000-000000000001:waveform_1" - app_id = "waveform_1" - appReg = ApplicationRegistrarStub(comp_id, app_id) - appreg_ior = sb.orb.object_to_string(appReg._this()) - pid1 
= self.comp_obj.execute("/component_stub.py", [], - [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id)), - CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub")),CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), - CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))]) + def testForceOverride(self): + # Configure the GPP to always deploy to CPU 0 + self.comp.affinity.exec_directive_value = '0' + self.comp.affinity.exec_directive_class = 'cpu' + self.comp.affinity.force_override = True - self.assertNotEqual(pid1, 0) + # Set a runtime affinity directive for CPU1; this should be ignored + pid = self._deployWithAffinityOptions('force_override_1', {'affinity::exec_directive_class': 'cpu', + 'affinity::exec_directive_value': '1'}) + allowed_cpus = self._getAllowedCpuList(pid) + self.assertEqual([0], allowed_cpus) - self.check_affinity( 'component_stub.py',get_match("sock1"), False, pid0) - self.check_affinity( 'component_stub.py',get_match("sock1"), False, pid1) - - for pid in [ pid0, pid1 ]: - try: - os.kill(pid, 0) - except OSError: - self.fail("Process failed to execute") - time.sleep(1) - self.comp_obj.terminate(pid) - try: - os.kill(pid, 0) - except OSError: - pass - else: - self.fail("Process failed to terminate") + def testDeployOnSocket(self): + self.comp.affinity.deploy_per_socket = True - def testReservation(self): - self.runGPP() - self.comp.thresholds.cpu_idle = 50 - self.comp.reserved_capacity_per_component = 0.5 - number_reservations = (self.comp.processor_cores / self.comp.reserved_capacity_per_component) * ((100-self.comp.thresholds.cpu_idle)/100.0) - comp_id = "DCE:00000000-0000-0000-0000-000000000000:waveform_1" - app_id = "waveform_1" - appReg = ApplicationRegistrarStub(comp_id, app_id) - appreg_ior = sb.orb.object_to_string(appReg._this()) - self.assertEquals(self.comp._get_usageState(),CF.Device.IDLE) - for i in range(int(number_reservations-1)): - 
self.child_pids.append(self.comp.ref.execute("/component_stub.py", [], [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id+str(i))), CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub_"+str(i))), CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))])) - time.sleep(0.1) - time.sleep(2) - self.assertEquals(self.comp._get_usageState(),CF.Device.ACTIVE) - self.child_pids.append(self.comp_obj.execute("/component_stub.py", [], [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id)), - CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub")),CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), - CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))])) - time.sleep(2) - self.assertEquals(self.comp._get_usageState(),CF.Device.BUSY) + pid0 = self._deployWithAffinityOptions('deploy_on_socket_1') + allowed_cpus = self._getAllowedCpuList(pid0) + self.assertEqual(topology.nodes[0].cpus, allowed_cpus) - def testFloorReservation(self): - self.runGPP() - self.comp.thresholds.cpu_idle = 10 - self.comp.reserved_capacity_per_component = 0.5 - number_reservations = (self.comp.processor_cores / self.comp.reserved_capacity_per_component) * ((100-self.comp.thresholds.cpu_idle)/100.0) - comp_id = "DCE:00000000-0000-0000-0000-000000000000:waveform_1" - app_id = "waveform_1" - appReg = ApplicationRegistrarStub(comp_id, app_id) - appreg_ior = sb.orb.object_to_string(appReg._this()) - self.assertEquals(self.comp._get_usageState(),CF.Device.IDLE) - self.child_pids.append(self.comp_obj.execute("/component_stub.py", [], [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id+'_1')), CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub_1")), CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), CF.DataType(id="NAMING_CONTEXT_IOR", 
value=any.to_any(appreg_ior))])) - time.sleep(2.1) - self.assertEquals(self.comp._get_usageState(),CF.Device.ACTIVE) - self.child_pids.append(self.comp_obj.execute("/component_stub.py", [], [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id+'_1')), CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub_1")), CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior))])) - time.sleep(2.1) - self.assertEquals(self.comp._get_usageState(),CF.Device.ACTIVE) - pid = self.child_pids.pop() - self.comp_obj.terminate(pid) - time.sleep(2.1) - reservation = CF.DataType(id="RH::GPP::MODIFIED_CPU_RESERVATION_VALUE", value=any.to_any(1000.0)) - self.child_pids.append(self.comp_obj.execute("/component_stub.py", [], [CF.DataType(id="COMPONENT_IDENTIFIER", value=any.to_any(comp_id)), - CF.DataType(id="NAME_BINDING", value=any.to_any("component_stub")),CF.DataType(id="PROFILE_NAME", value=any.to_any("/component_stub/component_stub.spd.xml")), - CF.DataType(id="NAMING_CONTEXT_IOR", value=any.to_any(appreg_ior)), reservation])) - time.sleep(2) - self.assertEquals(self.comp._get_usageState(),CF.Device.BUSY) + pid1 = self._deployWithAffinityOptions('deploy_on_socket_2') + allowed_cpus = self._getAllowedCpuList(pid1) + self.assertEqual(topology.nodes[0].cpus, allowed_cpus) -class ComponentTests_SystemReservations(ossie.utils.testing.ScaComponentTestCase): +class DomainSupport(scatest.CorbaTestCase): """Test for all component implementations in test""" - child_pids = [] - dom = None - _domainBooter = None - _domainManager = None - _deviceBooter = None - _deviceLock = threading.Lock() - _deviceBooters = [] - _deviceManagers = [] - - def _getDeviceManager(self, domMgr, id): - for devMgr in domMgr._get_deviceManagers(): - try: - if id == devMgr._get_identifier(): - return devMgr - except CORBA.Exception: - # The DeviceManager being checked is unreachable. 
- pass - return None - def waitTermination(self, child, timeout=5.0, pause=0.1): - while child.poll() is None and timeout > 0.0: - timeout -= pause - time.sleep(pause) - return child.poll() != None - - def terminateChild(self, child, signals=(signal.SIGINT, signal.SIGTERM)): - if child.poll() != None: - return - try: - for sig in signals: - os.kill(child.pid, sig) - if self.waitTermination(child): - break - child.wait() - except OSError, e: - pass - finally: - pass - - def launchDomainManager(self, dmdFile="", domain_name = '', *args, **kwargs): - # Only allow one DomainManager, although this isn't a hard requirement. - # If it has exited, allow a relaunch. - if self._domainBooter and self._domainBooter.poll() == None: - return (self._domainBooter, self._domainManager) - - # Launch the nodebooter. - self._domainBooter = spawnNodeBooter(dmdFile=dmdFile, domainname = domain_name, *args, **kwargs) - number_attempts = 0 - while self._domainBooter.poll() == None: - try: - self.dom = redhawk.attach(domain_name) - except: - number_attempts += 1 - if number_attempts >= 20: - raise - time.sleep(0.1) - continue - self._domainManager = self.dom.ref - if self._domainManager: - try: - self._domainManager._get_identifier() - break - except: - pass - return (self._domainBooter, self._domainManager) - - def _addDeviceBooter(self, devBooter): - self._deviceLock.acquire() - try: - self._deviceBooters.append(devBooter) - finally: - self._deviceLock.release() - - def _addDeviceManager(self, devMgr): - self._deviceLock.acquire() - try: - self._deviceManagers.append(devMgr) - finally: - self._deviceLock.release() - - def launchDeviceManager(self, dcdFile, domainManager=None, wait=True, *args, **kwargs): - if not os.path.isfile(os.getcwd()+'/'+dcdFile): - print "ERROR: Invalid DCD path provided to launchDeviceManager ", dcdFile - return (None, None) - - # Launch the nodebooter. 
- if domainManager == None: - name = None - else: - name = domainManager._get_name() - devBooter = spawnNodeBooter(dcdFile=os.getcwd()+'/'+dcdFile, domainname=name, *args, **kwargs) - self._addDeviceBooter(devBooter) - - if wait: - devMgr = self.waitDeviceManager(devBooter, dcdFile, domainManager) + def _makeLink(self, src, dest): + if os.path.exists(dest): + os.unlink(dest) + os.symlink(src, dest) + + def launchDomainManager(self, *args, **kwargs): + domBooter, domMgr = super(DomainSupport,self).launchDomainManager(*args, loggingURI='', nodeBooterPath='nodeBooter', **kwargs) + if domMgr is None: + self.dom = None else: - devMgr = None - - return (devBooter, devMgr) + self.dom = redhawk.attach(domMgr._get_name()) + return domBooter, domMgr - def waitDeviceManager(self, devBooter, dcdFile, domainManager=None): - try: - dcdPath = os.getcwd()+'/'+dcdFile - except IOError: - print "ERROR: Invalid DCD path provided to waitDeviceManager", dcdFile - return None - - # Parse the DCD file to get the identifier and number of devices, which can be - # determined from the number of componentplacement elements. - dcd = DCDParser.parse(dcdPath) - if dcd.get_partitioning(): - numDevices = len(dcd.get_partitioning().get_componentplacement()) - else: - numDevices = 0 - - # Allow the caller to override the DomainManager (assuming they have a good reason). - if not domainManager: - domainManager = self._domainManager - - # As long as the nodebooter process is still alive, keep checking for the - # DeviceManager. 
- devMgr = None - while devBooter.poll() == None: - devMgrs = self.dom.devMgrs - for dM in devMgrs: - if dcd.get_id() == dM._get_identifier(): - devMgr = dM.ref - #devMgr = self._getDeviceManager(domainManager, dcd.get_id()) - if devMgr: - break - time.sleep(0.1) - - if devMgr: - self._waitRegisteredDevices(devMgr, numDevices) - self._addDeviceManager(devMgr) - return devMgr - - def _waitRegisteredDevices(self, devMgr, numDevices, timeout=5.0, pause=0.1): - while timeout > 0.0: - if (len(devMgr._get_registeredDevices())+len(devMgr._get_registeredServices())) == numDevices: - return True - else: - timeout -= pause - time.sleep(pause) - return False + def launchDeviceManager(self, *args, **kwargs): + return super(DomainSupport,self).launchDeviceManager(*args, loggingURI='', nodeBooterPath='nodeBooter', **kwargs) def setUp(self): - super(ComponentTests_SystemReservations,self).setUp() - self.child_pids=[] - self._domainBooter = None - self._domainManager = None - self._deviceBooter = None - self.orig_sdrroot=os.getenv('SDRROOT') - os.putenv('SDRROOT', os.getcwd()+'/sdr') + super(DomainSupport,self).setUp() + self.orig_sdrroot=os.environ['SDRROOT'] + os.environ['SDRROOT'] = os.getcwd()+'/sdr' print "\n-----------------------" print "Running: ", self.id().split('.')[-1] print "-----------------------\n" - copyfile(self.orig_sdrroot+'/dom/mgr/DomainManager', 'sdr/dom/mgr/DomainManager') - os.chmod('sdr/dom/mgr/DomainManager',0777) - copyfile(self.orig_sdrroot+'/dev/mgr/DeviceManager', 'sdr/dev/mgr/DeviceManager') - os.chmod('sdr/dev/mgr/DeviceManager',0777) - if not os.path.exists('sdr/dev/devices/GPP/cpp'): - os.makedirs('sdr/dev/devices/GPP/cpp') - copyfile('../cpp/GPP', 'sdr/dev/devices/GPP/cpp/GPP') - os.chmod('sdr/dev/devices/GPP/cpp/GPP',0777) - + self._makeLink(self.orig_sdrroot+'/dom/mgr', 'sdr/dom/mgr') + self._makeLink(self.orig_sdrroot+'/dev/mgr', 'sdr/dev/mgr') + print 'done staging DomainManager' def tearDown(self): - super(ComponentTests_SystemReservations, 
self).tearDown() - try: - # kill all busy.py just in case - os.system('pkill -9 -f busy.py') - except OSError: - pass - for child_p in self.child_pids: - try: - print "teardown (2)", child_p - os.system('kill -9 '+str(child_p)) - except OSError: - pass - if self.dom != None: - time.sleep(1) - self.dom.terminate() - self.dom = None - self.terminateChild(self._domainBooter) - if self._domainBooter: - self.terminateChild(self._domainBooter) - if self._deviceBooter: - self.terminateChild(self._deviceBooter) - os.putenv('SDRROOT', self.orig_sdrroot) + super(DomainSupport, self).tearDown() + os.environ['SDRROOT'] = self.orig_sdrroot - - def runGPP(self, execparam_overrides={}, initialize=True): - ####################################################################### - # Launch the component with the default execparams - execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False) - execparams = dict([(x.id, any.from_any(x.value)) for x in execparams]) - execparams.update(execparam_overrides) - #execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False) - #execparams = dict([(x.id, any.from_any(x.value)) for x in execparams]) - #self.launch(execparams, debugger='valgrind') - self.launch(execparams, initialize=initialize ) - - ####################################################################### - # Verify the basic state of the component - self.assertNotEqual(self.comp_obj, None) - self.assertEqual(self.comp_obj._non_existent(), False) - self.assertEqual(self.comp_obj._is_a("IDL:CF/ExecutableDevice:1.0"), True) - #self.assertEqual(self.spd.get_id(), self.comp_obj._get_identifier()) - +class ComponentTests_SystemReservations(DomainSupport): def close(self, value_1, value_2, margin = 0.01): if (value_2 * (1-margin)) < value_1 and (value_2 * (1+margin)) > value_1: return True @@ -1457,9 +1489,9 @@ def float_eq(self, a,b,eps=0.0000001): def testMonitorComponents(self): 
self.assertEquals(os.path.isfile('sdr/dom/mgr/DomainManager'),True) self.assertEquals(os.path.isfile('sdr/dev/mgr/DeviceManager'),True) - self._domainBooter, domMgr = self.launchDomainManager(domain_name='REDHAWK_TEST_'+str(os.getpid())) + self._domainBooter, domMgr = self.launchDomainManager() self.assertNotEquals(domMgr,None) - self._deviceBooter, devMgr = self.launchDeviceManager("sdr/dev/nodes/DevMgr_sample/DeviceManager.dcd.xml", domainManager=self.dom.ref) + self._deviceBooter, devMgr = self.launchDeviceManager("/nodes/DevMgr_sample/DeviceManager.dcd.xml") self.assertNotEquals(devMgr,None) app_1=self.dom.createApplication('/waveforms/load_comp_w/load_comp_w.sad.xml','load_comp_w',[]) wait_amount = (self.dom.devMgrs[0].devs[0].threshold_cycle_time / 1000.0) * 6 @@ -1484,9 +1516,9 @@ def testMonitorComponents(self): def testDeadlock(self): self.assertEquals(os.path.isfile('sdr/dom/mgr/DomainManager'),True) self.assertEquals(os.path.isfile('sdr/dev/mgr/DeviceManager'),True) - self._domainBooter, domMgr = self.launchDomainManager(domain_name='REDHAWK_TEST_'+str(os.getpid())) + self._domainBooter, domMgr = self.launchDomainManager() self.assertNotEquals(domMgr,None) - self._deviceBooter, devMgr = self.launchDeviceManager("sdr/dev/nodes/DevMgr_sample/DeviceManager.dcd.xml", domainManager=self.dom.ref) + self._deviceBooter, devMgr = self.launchDeviceManager("/nodes/DevMgr_sample/DeviceManager.dcd.xml") self.assertNotEquals(devMgr,None) self.dom.devMgrs[0].devs[0].threshold_cycle_time = 50 count = 0 @@ -1502,9 +1534,9 @@ def testDeadlock(self): def testSystemReservation(self): self.assertEquals(os.path.isfile('sdr/dom/mgr/DomainManager'),True) self.assertEquals(os.path.isfile('sdr/dev/mgr/DeviceManager'),True) - self._domainBooter, domMgr = self.launchDomainManager(domain_name='REDHAWK_TEST_'+str(os.getpid())) + self._domainBooter, domMgr = self.launchDomainManager() self.assertNotEquals(domMgr,None) - self._deviceBooter, devMgr = 
self.launchDeviceManager("sdr/dev/nodes/DevMgr_sample/DeviceManager.dcd.xml", domainManager=self.dom.ref) + self._deviceBooter, devMgr = self.launchDeviceManager("/nodes/DevMgr_sample/DeviceManager.dcd.xml") self.assertNotEquals(devMgr,None) self.comp= self.dom.devMgrs[0].devs[0] cpus = self.dom.devMgrs[0].devs[0].processor_cores @@ -1516,6 +1548,8 @@ def testSystemReservation(self): time.sleep(wait_amount) self.assertEquals(self.close(upper_capacity, self.dom.devMgrs[0].devs[0].utilization[0]['maximum']), True) + time.sleep(1) + base_util = self.dom.devMgrs[0].devs[0].utilization[0] subscribed = base_util['subscribed'] system_load_base = base_util['system_load'] @@ -1523,7 +1557,7 @@ def testSystemReservation(self): extra_reservation = 1 _value=any.to_any(extra_reservation) _value._t=CORBA.TC_double - app_1=self.dom.createApplication('/waveforms/busy_w/busy_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='busy_comp_1',value=any.to_any(_value))]))]) + app_1=self.dom.createApplication('/waveforms/busy_w/busy_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='busy_comp_1',value=_value)]))]) time.sleep(wait_amount) base_util = self.dom.devMgrs[0].devs[0].utilization[0] @@ -1532,7 +1566,7 @@ def testSystemReservation(self): comp_load = base_util['component_load'] #print "After App1 Create subnow(sub) " , sub_now, " sys_load", system_load_now, " sys_load_base ", system_load_base, " comp_load ", comp_load, " subscribed(base) ", subscribed, " extra ", extra_reservation, " res per", res_per_comp, " idle cap mod ", idle_cap_mod self.assertEquals(self.close(sub_now, extra_reservation), True) - + app_2=self.dom.createApplication('/waveforms/busy_w/busy_w.sad.xml','busy_w',[]) time.sleep(wait_amount) base_util = self.dom.devMgrs[0].devs[0].utilization[0] @@ -1557,8 +1591,6 @@ def testSystemReservation(self): else: self.assertEqual(self.close(sub_now, 
extra_reservation+res_per_comp), True) - - app_2.start() time.sleep(wait_amount) base_util = self.dom.devMgrs[0].devs[0].utilization[0] @@ -1586,167 +1618,257 @@ def testSystemReservation(self): self.assertEquals(self.close(sub_now, extra_reservation+res_per_comp ), True) self.assertEquals(self.float_eq(sub_now_pre, sub_now, eps=.01), True) + def _verifyReservations(self, extra, application, wait_amount): + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + system_load_now = base_util['system_load'] + sub_now = base_util['subscribed'] + comp_load = base_util['component_load'] + self.assertEquals(self.close(sub_now, extra), True) + self.assertEquals(comp_load, 0) + application.start() + time.sleep(wait_amount) + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + system_load_now = base_util['system_load'] + sub_now = base_util['subscribed'] + comp_load = base_util['component_load'] + self.assertEquals(self.close(sub_now, extra), True) + self.assertEquals(self.close(comp_load, 2, margin=0.1), True) + + application.stop() + time.sleep(wait_amount) + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + system_load_now = base_util['system_load'] + sub_now = base_util['subscribed'] + comp_load = base_util['component_load'] + self.assertEquals(self.close(sub_now, extra), True) + self.assertEquals(comp_load, 0) + + def testAppReservation(self): + self.assertEquals(os.path.isfile('sdr/dom/mgr/DomainManager'),True) + self.assertEquals(os.path.isfile('sdr/dev/mgr/DeviceManager'),True) + self._domainBooter, domMgr = self.launchDomainManager() + self.assertNotEquals(domMgr,None) + self._deviceBooter, devMgr = self.launchDeviceManager("/nodes/DevMgr_sample/DeviceManager.dcd.xml") + self.assertNotEquals(devMgr,None) + self.comp= self.dom.devMgrs[0].devs[0] + cpus = self.dom.devMgrs[0].devs[0].processor_cores + cpu_thresh = self.dom.devMgrs[0].devs[0].thresholds.cpu_idle + res_per_comp = self.dom.devMgrs[0].devs[0].reserved_capacity_per_component + idle_cap_mod = 
100.0 * res_per_comp / (cpus*1.0) + upper_capacity = cpus - (cpus * (cpu_thresh/100)) + wait_amount = (self.dom.devMgrs[0].devs[0].threshold_cycle_time / 1000.0) * 4 + time.sleep(wait_amount) + self.assertEquals(self.close(upper_capacity, self.dom.devMgrs[0].devs[0].utilization[0]['maximum']), True) + + time.sleep(1) + + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + subscribed = base_util['subscribed'] + system_load_base = base_util['system_load'] + + extra_reservation = 3 + _value=any.to_any(extra_reservation) + _value._t=CORBA.TC_double + self.assertRaises(CF.ApplicationFactory.CreateApplicationError, self.dom.createApplication, '/waveforms/wav_floor_w/wav_floor_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='busy_comp_1',value=_value)]))]) + app_1=self.dom.createApplication('/waveforms/wav_floor_w/wav_floor_w.sad.xml','busy_w',[]) + time.sleep(wait_amount) + self._verifyReservations(extra_reservation, app_1, wait_amount) + + app_1.releaseObject() + time.sleep(wait_amount) + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + sub_now = base_util['subscribed'] + comp_load = base_util['component_load'] + self.assertEquals(sub_now, 0) + + def testAppOverloadGenericReservation(self): + self.assertEquals(os.path.isfile('sdr/dom/mgr/DomainManager'),True) + self.assertEquals(os.path.isfile('sdr/dev/mgr/DeviceManager'),True) + self._domainBooter, domMgr = self.launchDomainManager() + self.assertNotEquals(domMgr,None) + self._deviceBooter, devMgr = self.launchDeviceManager("/nodes/DevMgr_sample/DeviceManager.dcd.xml") + self.assertNotEquals(devMgr,None) + self.comp= self.dom.devMgrs[0].devs[0] + cpus = self.dom.devMgrs[0].devs[0].processor_cores + cpu_thresh = self.dom.devMgrs[0].devs[0].thresholds.cpu_idle + res_per_comp = self.dom.devMgrs[0].devs[0].reserved_capacity_per_component + idle_cap_mod = 100.0 * res_per_comp / (cpus*1.0) + upper_capacity = cpus - (cpus * (cpu_thresh/100)) + wait_amount = 
(self.dom.devMgrs[0].devs[0].threshold_cycle_time / 1000.0) * 4 + time.sleep(wait_amount) + self.assertEquals(self.close(upper_capacity, self.dom.devMgrs[0].devs[0].utilization[0]['maximum']), True) + + time.sleep(1) + + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + subscribed = base_util['subscribed'] + system_load_base = base_util['system_load'] + + extra_reservation = 4 + _value=any.to_any(extra_reservation) + _value._t=CORBA.TC_double + app_1=self.dom.createApplication('/waveforms/wav_floor_w/wav_floor_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='',value=_value)]))]) + time.sleep(wait_amount) + self._verifyReservations(extra_reservation, app_1, wait_amount) + + def testAppOverloadSpecificReservation(self): + self.assertEquals(os.path.isfile('sdr/dom/mgr/DomainManager'),True) + self.assertEquals(os.path.isfile('sdr/dev/mgr/DeviceManager'),True) + self._domainBooter, domMgr = self.launchDomainManager() + self.assertNotEquals(domMgr,None) + self._deviceBooter, devMgr = self.launchDeviceManager("/nodes/DevMgr_sample/DeviceManager.dcd.xml") + self.assertNotEquals(devMgr,None) + self.comp= self.dom.devMgrs[0].devs[0] + cpus = self.dom.devMgrs[0].devs[0].processor_cores + cpu_thresh = self.dom.devMgrs[0].devs[0].thresholds.cpu_idle + res_per_comp = self.dom.devMgrs[0].devs[0].reserved_capacity_per_component + idle_cap_mod = 100.0 * res_per_comp / (cpus*1.0) + upper_capacity = cpus - (cpus * (cpu_thresh/100)) + wait_amount = (self.dom.devMgrs[0].devs[0].threshold_cycle_time / 1000.0) * 4 + time.sleep(wait_amount) + self.assertEquals(self.close(upper_capacity, self.dom.devMgrs[0].devs[0].utilization[0]['maximum']), True) + + time.sleep(1) + + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + subscribed = base_util['subscribed'] + system_load_base = base_util['system_load'] + + extra_reservation = 4 + _value=any.to_any(extra_reservation) + _value._t=CORBA.TC_double + 
self.assertRaises(CF.ApplicationFactory.CreateApplicationError, self.dom.createApplication, '/waveforms/wav_floor_w/wav_floor_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='COLLOC_SET1',value=_value)]))]) + app_1=self.dom.createApplication('/waveforms/wav_floor_w/wav_floor_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='ID_TEST_SET1',value=_value)]))]) + time.sleep(wait_amount) + self._verifyReservations(extra_reservation, app_1, wait_amount) + + def testAppOverloadTwoSpecificReservation(self): + self.assertEquals(os.path.isfile('sdr/dom/mgr/DomainManager'),True) + self.assertEquals(os.path.isfile('sdr/dev/mgr/DeviceManager'),True) + self._domainBooter, domMgr = self.launchDomainManager() + self.assertNotEquals(domMgr,None) + self._deviceBooter, devMgr = self.launchDeviceManager("/nodes/DevMgr_sample/DeviceManager.dcd.xml") + self.assertNotEquals(devMgr,None) + self.comp= self.dom.devMgrs[0].devs[0] + cpus = self.dom.devMgrs[0].devs[0].processor_cores + cpu_thresh = self.dom.devMgrs[0].devs[0].thresholds.cpu_idle + res_per_comp = self.dom.devMgrs[0].devs[0].reserved_capacity_per_component + idle_cap_mod = 100.0 * res_per_comp / (cpus*1.0) + upper_capacity = cpus - (cpus * (cpu_thresh/100)) + wait_amount = (self.dom.devMgrs[0].devs[0].threshold_cycle_time / 1000.0) * 4 + time.sleep(wait_amount) + self.assertEquals(self.close(upper_capacity, self.dom.devMgrs[0].devs[0].utilization[0]['maximum']), True) + + time.sleep(1) + + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + subscribed = base_util['subscribed'] + system_load_base = base_util['system_load'] + + extra_reservation = 4 + _value=any.to_any(extra_reservation/2) + _value._t=CORBA.TC_double + self.assertRaises(CF.ApplicationFactory.CreateApplicationError, self.dom.createApplication, 
'/waveforms/wav_floor_w/wav_floor_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='COLLOC_SET1',value=any.to_any(_value)),CF.DataType(id='ID_TEST_SET2',value=_value)]))]) + app_1=self.dom.createApplication('/waveforms/wav_two_floor_w/wav_two_floor_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='ID_TEST_SET1',value=any.to_any(_value)),CF.DataType(id='ID_TEST_SET2',value=_value)]))]) + time.sleep(wait_amount) + self._verifyReservations(extra_reservation, app_1, wait_amount) + + def testAppOverloadOneSpecificReservation(self): + self.assertEquals(os.path.isfile('sdr/dom/mgr/DomainManager'),True) + self.assertEquals(os.path.isfile('sdr/dev/mgr/DeviceManager'),True) + self._domainBooter, domMgr = self.launchDomainManager() + self.assertNotEquals(domMgr,None) + self._deviceBooter, devMgr = self.launchDeviceManager("/nodes/DevMgr_sample/DeviceManager.dcd.xml") + self.assertNotEquals(devMgr,None) + self.comp= self.dom.devMgrs[0].devs[0] + cpus = self.dom.devMgrs[0].devs[0].processor_cores + cpu_thresh = self.dom.devMgrs[0].devs[0].thresholds.cpu_idle + res_per_comp = self.dom.devMgrs[0].devs[0].reserved_capacity_per_component + idle_cap_mod = 100.0 * res_per_comp / (cpus*1.0) + upper_capacity = cpus - (cpus * (cpu_thresh/100)) + wait_amount = (self.dom.devMgrs[0].devs[0].threshold_cycle_time / 1000.0) * 4 + time.sleep(wait_amount) + self.assertEquals(self.close(upper_capacity, self.dom.devMgrs[0].devs[0].utilization[0]['maximum']), True) + + time.sleep(1) + + base_util = self.dom.devMgrs[0].devs[0].utilization[0] + subscribed = base_util['subscribed'] + system_load_base = base_util['system_load'] + + extra_reservation = 4 + _value=any.to_any(extra_reservation/2) + _value._t=CORBA.TC_double + 
app_1=self.dom.createApplication('/waveforms/wav_one_floor_w/wav_one_floor_w.sad.xml','busy_w',[CF.DataType(id='SPECIALIZED_CPU_RESERVATION',value=any.to_any([CF.DataType(id='ID_TEST_SET1',value=any.to_any(_value)),CF.DataType(id='ID_TEST_SET2',value=_value)]))]) + time.sleep(wait_amount) + self._verifyReservations(extra_reservation, app_1, wait_amount) + + +class LoadableDeviceVariableDirectoriesTest(DomainSupport): + def setUp(self): + super(LoadableDeviceVariableDirectoriesTest,self).setUp() + self.launchDomainManager() + self._testFiles = [] + + dcd_file = 'sdr/dev/nodes/test_VarCache_node/DeviceManager.dcd.xml' + with open(dcd_file + '.in', 'r') as fp: + original = fp.read() + + cwd = os.getcwd() + self.base_dir = cwd + '/LoadableDeviceVariableDirectoriesTest' + self.cache_dir = self.base_dir+'/cache' + self.cwd_dir = self.base_dir+'/cwd' + modified = original.replace('@@@CACHE_DIRECTORY@@@', self.cache_dir) + modified = modified.replace('@@@CURRENT_WORKING_DIRECTORY@@@', self.cwd_dir) + + with open(dcd_file, 'w') as fp: + fp.write(modified) + self._testFiles.append(dcd_file) + + def tearDown(self): + super(LoadableDeviceVariableDirectoriesTest, self).tearDown() + for file in self._testFiles: + os.unlink(file) + + shutil.rmtree(self.base_dir) + + def test_CheckDEPLOYMENTROOT(self): + self.assertNotEqual(self.dom, None) + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_VarCache_node/DeviceManager.dcd.xml") + self.assertNotEqual(devMgr, None) + app = self.dom.createApplication('/waveforms/check_cwd_w/check_cwd_w.sad.xml') + self.assertNotEqual(app, None) + self.assertEquals(app.comps[0].cwd, self.cwd_dir) + pid = str(app._get_componentProcessIds()[0].processId) + fp=open('/proc/'+pid+'/cmdline','r') + cmdline = fp.read() + fp.close() + _args = cmdline.split('\x00') + idx = _args.index('RH::DEPLOYMENT_ROOT') + deployment_root=_args[idx+1] + self.assertEquals(deployment_root, self.dom.devices[0].cacheDirectory) + + def test_CompConfigCacheCWD(self): + 
self.assertNotEqual(self.dom, None) + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_VarCache_node/DeviceManager.dcd.xml") + self.assertNotEqual(devMgr, None) + app = self.dom.createApplication('/waveforms/check_cwd_w/check_cwd_w.sad.xml') + self.assertNotEqual(app, None) + self.assertEquals(app.comps[0].cwd, self.cwd_dir) + found_dir = False + for root, dirs, files in os.walk(self.base_dir): + if 'check_cwd.py' in files: + if 'cache/components/check_cwd/python' in root: + found_dir = True + break + self.assertTrue(found_dir) - # TODO Add additional tests here - # - # See: - # ossie.utils.testing.bulkio_helpers, - # ossie.utils.testing.bluefile_helpers - # for modules that will assist with testing components with BULKIO ports - -def get_nonnuma_affinity_ctx( affinity_ctx ): - # test should run but affinity will be ignored - import multiprocessing - maxcpus=multiprocessing.cpu_count() - maxnodes=1 - all_cpus='0-'+str(maxcpus-1) - all_cpus_sans0='0-'+str(maxcpus-1) - if maxcpus == 2: - all_cpus_sans0='0-1' - elif maxcpus == 1 : - all_cpus='0' - all_cpus_sans0='' - - numa_layout=[ all_cpus ] - affinity_match={ "all" : all_cpus, - "sock0": all_cpus, - "sock1": all_cpus, - "sock0sans0": all_cpus_sans0, - "sock1sans0": all_cpus_sans0, - "5" : all_cpus, - "8-10" : all_cpus } - - affinity_ctx['maxcpus']=maxcpus - affinity_ctx['maxnodes']=maxnodes - affinity_ctx['all_cpus']=all_cpus - affinity_ctx['all_cpus_sans0']=all_cpus_sans0 - affinity_ctx['numa_layout']=numa_layout - affinity_ctx['affinity_match']=affinity_match - -def get_numa_affinity_ctx( affinity_ctx ): - # test numaclt --show .. 
look for cpu bind of 0,1 and cpu id atleast 31 - maxnode=0 - maxcpu=0 - lines = [line.rstrip() for line in os.popen('numactl --show')] - for l in lines: - if l.startswith('nodebind'): - maxnode=int(l.split()[-1]) - if l.startswith('physcpubind'): - maxcpu=int(l.split()[-1]) - - maxcpus=maxcpu+1 - maxnodes=maxnode+1 - numa_layout=[] - try: - for i in range(maxnodes): - xx = [line.rstrip() for line in open('/sys/devices/system/node/node'+str(i)+'/cpulist')] - numa_layout.append(xx[0]) - except: - pass - - all_cpus='0-'+str(maxcpus-1) - all_cpus_sans0='1-'+str(maxcpus-1) - if maxcpus == 2: - all_cpus_sans0='1' - elif maxcpus == 1 : - all_cpus="0" - all_cpus_sans0='' - - affinity_match = { "all":all_cpus, - "sock0": all_cpus, - "sock1": all_cpus, - "sock0sans0": all_cpus_sans0, - "sock1sans0": all_cpus_sans0, - "5" : all_cpus, - "8-10" : all_cpus } - - if len(numa_layout) > 0: - affinity_match["sock0"]=numa_layout[0] - aa=numa_layout[0] - if maxcpus > 2: - affinity_match["sock0sans0"] = str(int(aa[0])+1)+aa[1:] - - if len(numa_layout) > 1: - affinity_match["sock1"]=numa_layout[1] - affinity_match["sock1sans0"]=numa_layout[1] - - if maxcpus > 5: - affinity_match["5"]="5" - - if maxcpus > 11: - affinity_match["8-10"]="8-10" - - if maxcpus == 2: - affinity_match["5"] = all_cpus_sans0 - affinity_match["8-10"]= all_cpus_sans0 - - affinity_ctx['maxcpus']=maxcpus - affinity_ctx['maxnodes']=maxnodes - affinity_ctx['all_cpus']=all_cpus - affinity_ctx['all_cpus_sans0']=all_cpus_sans0 - affinity_ctx['numa_layout']=numa_layout - affinity_ctx['affinity_match']=affinity_match - if __name__ == "__main__": - # figure out numa layout, test numaclt --show .. 
- all_cpus="0" - maxnode=1 - maxcpu=1 - eface="em1" - # - # Figure out ethernet interface to use - # - lines = [line.rstrip() for line in os.popen('cat /proc/net/dev')] - import re - for l in lines[2:]: - t1=l.split(':')[0].lstrip() - if re.match('e.*', t1 ) : - eface=t1 - break - - affinity_test_src['eface']=eface - - nonnuma_affinity_ctx={} - get_nonnuma_affinity_ctx(nonnuma_affinity_ctx) - numa_affinity_ctx={} - get_numa_affinity_ctx(numa_affinity_ctx) - - # figure out if GPP has numa library dependency - lines = [ line.rstrip() for line in os.popen('ldd ../cpp/GPP') ] - numa=False - for l in lines: - if "libnuma" in l: - numa=True - - if numa: - print "NumaSupport ", numa_affinity_ctx - maxcpus = numa_affinity_ctx['maxcpus'] - maxnodes = numa_affinity_ctx['maxnodes'] - all_cpus = numa_affinity_ctx['all_cpus'] - all_cpus_sans0 = numa_affinity_ctx['all_cpus_sans0'] - numa_layout=numa_affinity_ctx['numa_layout'] - numa_match=numa_affinity_ctx['affinity_match'] - else: - print "NonNumaSupport ", nonnuma_affinity_ctx - maxcpus = nonnuma_affinity_ctx['maxcpus'] - maxnodes = nonnuma_affinity_ctx['maxnodes'] - all_cpus = nonnuma_affinity_ctx['all_cpus'] - all_cpus_sans0 = nonnuma_affinity_ctx['all_cpus_sans0'] - numa_layout=nonnuma_affinity_ctx['numa_layout'] - numa_match=nonnuma_affinity_ctx['affinity_match'] - - if maxnodes < 2 : - affinity_test_src["sock1"] = "0" - - if maxcpus == 2: - affinity_test_src["8-10"] = all_cpus_sans0 - affinity_test_src["5"] = all_cpus_sans0 - else: - if maxcpus < 9 or maxcpus < 11 : - affinity_test_src["8-10"] = all_cpus - affinity_test_src["5"] = all_cpus - - print "numa findings maxnodes:", maxnodes, " maxcpus:", maxcpus, " numa_match:", numa_match, " numa_layout", numa_layout, " map:", affinity_test_src + if False: + # Debugging support: enable this conditional to dump NUMA topology + print "NumaSupport %d nodes %d CPUs" % (len(topology.nodes), len(topology.cpus)) + for node in topology.nodes: + print 'Node', node.node, 'CPUs:', 
node.cpus ossie.utils.testing.main("../GPP.spd.xml") # By default tests all implementations diff --git a/README.md b/README.md index 0d7132531..cced87ee2 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,8 @@ ## Description REDHAWK is a software-defined radio (SDR) framework designed to support the development, deployment, and management of real-time software radio applications. To support the design and development of software applications, REDHAWK provides tools that allow development and testing of software modules called "Components" and composition of Components into "Waveform Applications" that can be seamlessly deployed on a single computer or multiple network-enabled computers. +REDHAWK 2.1.0 added support for new C++ Components to be created as shared libraries, which allow multiple Components to be deployed into the same process and enable faster, lower-cost I/O. For documentation on this beta feature, refer to docs/shared-address. + ## Subdirectories * redhawk - Contains the REDHAWK Core Framework base libraries and system software. diff --git a/bulkioInterfaces/.gitignore b/bulkioInterfaces/.gitignore index 9d2191a1b..fb5be4be1 100644 --- a/bulkioInterfaces/.gitignore +++ b/bulkioInterfaces/.gitignore @@ -1,4 +1,5 @@ .deps +.dirstamp .idlj/ .idljni/ .jars/ @@ -28,7 +29,6 @@ build /setup.py /jni/ossie/ /libsrc/java/bin/ -/libsrc/cpp/.dirstamp /libsrc/java/META-INF/MANIFEST.MF /libsrc/java/META-INF/MANIFEST.MF.src /src/ diff --git a/bulkioInterfaces/Makefile.am b/bulkioInterfaces/Makefile.am index 77f55ae8a..fb7f19efc 100644 --- a/bulkioInterfaces/Makefile.am +++ b/bulkioInterfaces/Makefile.am @@ -18,6 +18,8 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# +ACLOCAL_AMFLAGS = -I m4 -I ${OSSIEHOME}/share/aclocal/ossie + ############################################################################### # CONFIGURE YOUR INTERFACES LIBRARY HERE # @@ -53,19 +55,21 @@ bio_dataUshort.idl \ bio_dataLong.idl \ bio_dataLongLong.idl \ bio_dataUlongLong.idl \ +bio_dataBit.idl \ bio_dataSDDS.idl \ bio_dataVITA49.idl +# IDL files that are marked as "internal" are installed to a separate directory +# to discourage external use +IDL_INTERNAL_FILES := internal/bio_dataExt.idl + # External IDL namespaces that your IDLs reference, comment this line # out if your IDL has no external references IDL_EXTERNS := CF # In some cases, you may need to define python externs # f:p Assume Python stub file for file f is in package p. -PYTHON_EXTERNS := PortTypes:ossie.cf DataType:ossie.cf Port:ossie.cf QueryablePort:ossie.cf bulkioDataTypes:bulkio.bulkioInterfaces - -# Relative path to the location of the IDL files -IDL_SOURCE_PATH := . +PYTHON_EXTERNS := PortTypes:ossie.cf DataType:ossie.cf Port:ossie.cf QueryablePort:ossie.cf NegotiablePort:ossie.cf bulkioDataTypes:bulkio.bulkioInterfaces # A Qualifier to append to the version, by default this is a timestamp of the build BUNDLE_QUALIFIER := v$(shell date +%Y%m%d%H%M%S) @@ -95,25 +99,73 @@ lib_LTLIBRARIES = libbulkioInterfaces.la libbulkioInterfaces_la_LDFLAGS = -version-info $(LIBBULKIOINTERFACES_VERSION_INFO) libbulkioInterfaces_la_LIBADD = $(OSSIE_LIBS) -# Again, we don't use the auto variable because order is important to us libbulkioInterfaces_la_SOURCES = \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bulkioDataTypesSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bulkioDataTypesDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bulkioDataTypes.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_runtimeStatsSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_runtimeStatsDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_runtimeStats.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_updateSRISK.cpp 
src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_updateSRIDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_updateSRI.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataFloatSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataFloatDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataFloat.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataFileSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataFileDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataFile.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataXMLSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataXMLDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataXML.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataShortSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataShortDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataShort.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataDoubleSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataDoubleDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataDouble.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataCharSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataCharDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataChar.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataOctetSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataOctetDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataOctet.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUlongSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUlongDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUlong.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUshortSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUshortDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUshort.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataLongSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataLongDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataLong.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataLongLongSK.cpp 
src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataLongLongDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataLongLong.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUlongLongSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUlongLongDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataUlongLong.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataSDDSSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataSDDSDynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataSDDS.h \ - src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataVITA49SK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataVITA49DynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/bio_dataVITA49.h + src/cpp/ossie/BULKIO/bulkioDataTypesSK.cpp \ + src/cpp/ossie/BULKIO/bulkioDataTypesDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_runtimeStatsSK.cpp \ + src/cpp/ossie/BULKIO/bio_runtimeStatsDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_updateSRISK.cpp \ + src/cpp/ossie/BULKIO/bio_updateSRIDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataFloatSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataFloatDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataFileSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataFileDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataXMLSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataXMLDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataShortSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataShortDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataDoubleSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataDoubleDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataCharSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataCharDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataOctetSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataOctetDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataUlongSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataUlongDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataUshortSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataUshortDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataLongSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataLongDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataLongLongSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataLongLongDynSK.cpp \ + 
src/cpp/ossie/BULKIO/bio_dataUlongLongSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataUlongLongDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataBitSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataBitDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataSDDSSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataSDDSDynSK.cpp \ + src/cpp/ossie/BULKIO/bio_dataVITA49SK.cpp \ + src/cpp/ossie/BULKIO/bio_dataVITA49DynSK.cpp \ + src/cpp/ossie/BULKIO/internal/bio_dataExtSK.cpp \ + src/cpp/ossie/BULKIO/internal/bio_dataExtDynSK.cpp + +pkgincludedir = $(includedir)/$(LIBRARY_NAME)/$(IDL_MODULE) +pkginclude_HEADERS = \ + src/cpp/ossie/BULKIO/bulkioDataTypes.h \ + src/cpp/ossie/BULKIO/bio_runtimeStats.h \ + src/cpp/ossie/BULKIO/bio_updateSRI.h \ + src/cpp/ossie/BULKIO/bio_dataFloat.h \ + src/cpp/ossie/BULKIO/bio_dataFile.h \ + src/cpp/ossie/BULKIO/bio_dataXML.h \ + src/cpp/ossie/BULKIO/bio_dataShort.h \ + src/cpp/ossie/BULKIO/bio_dataDouble.h \ + src/cpp/ossie/BULKIO/bio_dataChar.h \ + src/cpp/ossie/BULKIO/bio_dataOctet.h \ + src/cpp/ossie/BULKIO/bio_dataUlong.h \ + src/cpp/ossie/BULKIO/bio_dataUshort.h \ + src/cpp/ossie/BULKIO/bio_dataLong.h \ + src/cpp/ossie/BULKIO/bio_dataLongLong.h \ + src/cpp/ossie/BULKIO/bio_dataUlongLong.h \ + src/cpp/ossie/BULKIO/bio_dataBit.h \ + src/cpp/ossie/BULKIO/bio_dataSDDS.h \ + src/cpp/ossie/BULKIO/bio_dataVITA49.h + +pkginternalincludedir = $(pkgincludedir)/internal +pkginternalinclude_HEADERS = \ + src/cpp/ossie/BULKIO/internal/bio_dataExt.h + +BUILT_SOURCES = $(libbulkioInterfaces_la_SOURCES) $(pkginclude_HEADERS) +CLEANFILES = $(BUILT_SOURCES) ############################################################################### # DO NOT MODIFY ANY LINES BELOW HERE @@ -124,38 +176,36 @@ LOWER_CASE_IDL_MODULE :=$(shell echo $(IDL_MODULE) | tr A-Z a-z) idldir = $(datadir)/idl/$(LIBRARY_NAME)/$(IDL_MODULE) dist_idl_DATA = $(addprefix idl/$(LIBRARY_NAME)/$(IDL_MODULE)/, $(IDL_FILES)) +idlinternaldir = $(idldir)/internal +dist_idlinternal_DATA = $(addprefix idl/$(LIBRARY_NAME)/$(IDL_MODULE)/, 
$(IDL_INTERNAL_FILES)) + all-local: all-python install-exec-hook: install-python -clean-local: clean-python clean-java clean-cpp +clean-local: clean-python clean-java rm -rf build +distclean-local: + rm -rf src + # Always build the current directory first (this is hack-ish, but the # alternative is to combine the Makefile.am's) -SUBDIRS = . - -if BUILD_BASE_CLASSES -SUBDIRS += libsrc -endif +SUBDIRS = . libsrc ############################################################################### # C++ (via automake and libtool) pkgconfigdir = $(libdir)/pkgconfig dist_pkgconfig_DATA = $(PACKAGE_NAME).pc -pkgincludedir = $(includedir)/$(LIBRARY_NAME)/$(IDL_MODULE) -pkginclude_HEADERS = $(filter %.h, $(lib$(LOWER_CASE_IDL_MODULE)Interfaces_la_SOURCES)) - -AM_CXXFLAGS = -Wall -I src/cpp -g $(OMNIORB_CFLAGS) $(OSSIE_CFLAGS) -AM_LIBS = $(OSSIE_LIBS) +bulkio_builddir = src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE) -src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/%DynSK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/%SK.cpp src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)/%.h: idl/$(LIBRARY_NAME)/$(IDL_MODULE)/%.idl - $(AM_V_at)mkdir -p "src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE)" - $(AM_V_GEN)$(IDL) -I idl $(OSSIE_CFLAGS) $(OSSIE_IDLFLAGS) -I$(OMNICOS_IDLDIR) -I$(OMNIORB_IDLDIR) -C src/cpp/$(LIBRARY_NAME)/$(IDL_MODULE) -bcxx -Wba -Wbd=DynSK.cpp -Wbh=.h -Wbs=SK.cpp -Wbkeep_inc_path $< +libbulkioInterfaces_la_CXXFLAGS = -Wall -I src/cpp -g $(OMNIORB_CFLAGS) $(OSSIE_CFLAGS) +libbulkioInterfaces_la_LIBS = $(OSSIE_LIBS) -clean-cpp: - rm -rf src/cpp +$(bulkio_builddir)/%DynSK.cpp $(bulkio_builddir)/%SK.cpp $(bulkio_builddir)/%.h: idl/ossie/BULKIO/%.idl + $(AM_V_at)mkdir -p $(dir $@) + $(AM_V_GEN)$(IDL) -I idl $(OSSIE_CFLAGS) $(OSSIE_IDLFLAGS) -I$(OMNICOS_IDLDIR) -I$(OMNIORB_IDLDIR) -C $(dir $@) -bcxx -Wba -Wbd=DynSK.cpp -Wbh=.h -Wbs=SK.cpp -Wbkeep_inc_path $< ############################################################################### # Python @@ -169,9 +219,7 @@ PYTHON_MODULE_NAME := $(LOWER_CASE_IDL_MODULE)Interfaces 
PYTHON_PACKAGE := $(LOWER_CASE_IDL_MODULE).$(PYTHON_MODULE_NAME) PYTHON_BASE := $(subst .,/,$(PYTHON_PACKAGE)) PY_SRC_OUT_DIR := $(addprefix src/python/, $(subst .,/, $(PYTHON_PACKAGE))) -PY_IDL_SRCS := $(addprefix idl/$(LIBRARY_NAME)/$(IDL_MODULE)/, $(IDL_FILES)) -PY_BUILD_OUT_DIR := $(addprefix build/lib/, $(subst .,/, $(PYTHON_PACKAGE))) -PY_BUILT_SRCS := $(addprefix $(PY_BUILD_OUT_DIR)/, $(subst .idl,_idl.py, $(IDL_FILES))) +PY_IDL_SRCS := $(addprefix idl/$(LIBRARY_NAME)/$(IDL_MODULE)/, $(IDL_FILES) $(IDL_INTERNAL_FILES)) PY_IDL_EXTERNS := $(addprefix -Wbextern=, $(subst ,, $(PYTHON_EXTERNS))) PY_IDL_INCLUDES := -I idl @@ -179,6 +227,7 @@ PY_IDL_INCLUDES := -I idl $(PY_SRC_OUT_DIR)/__init__.py: $(PY_IDL_SRCS) $(AM_V_at)mkdir -p $(PY_SRC_OUT_DIR) $(AM_V_GEN)$(IDL) $(PY_IDL_INCLUDES) $(OSSIE_CFLAGS) $(OSSIE_IDLFLAGS) -I$(OMNICOS_IDLDIR) -I$(OMNIORB_IDLDIR) -C src/python -bpython -Wbpackage=$(PYTHON_PACKAGE) $(PY_IDL_EXTERNS) $^ + $(AM_V_at)touch $@ # Mimic automake silent rules OSSIE_V_pysetup = $(ossie__v_pysetup_$(V)) @@ -186,13 +235,9 @@ ossie__v_pysetup_ = $(ossie__v_pysetup__$(AM_DEFAULT_VERBOSITY)) ossie__v_pysetup_0 = --quiet ossie__v_pysetup__0 = $(ossie__v_pysetup_0) -$(PY_BUILD_OUT_DIR): $(PY_SRC_OUT_DIR)/__init__.py - python setup.py $(OSSIE_V_pysetup) build --build-lib build/lib - $(AM_V_at)touch $(PY_BUILD_OUT_DIR) - -all-python: $(PY_BUILD_OUT_DIR) +all-python: $(PY_SRC_OUT_DIR)/__init__.py -install-python: $(PY_BUILT_SRCS) +install-python: $(PY_SRC_OUT_DIR)/__init__.py setup.py python setup.py install -f --$(PYTHON_INSTALL_SCHEME)=$(DESTDIR)$(prefix) clean-python: @@ -240,7 +285,6 @@ build/java/META-INF/MANIFEST.MF: Makefile.am BULKIOInterfaces.filelist @find $(IDLJ_SRC_DEST) -mindepth 1 -type d | sed 's/src\/java\///' | sed 's/\//./g' | sed 's/^/ /' | sed -e '$$ ! 
s/$$/,/' >> $@ java_JARFILES = BULKIOInterfaces.jar -java_DATA = BULKIOInterfaces.src.jar IDLJFLAGS = -i idl -i $(OSSIE_IDLDIR) -I $(OMNICOS_IDLDIR) -I $(OMNIORB_IDLDIR) IDLJNIFLAGS = -I idl -I $(OSSIE_IDLDIR) -I $(OMNICOS_IDLDIR) -I $(OMNIORB_IDLDIR) -Wblibname=bulkiojni @@ -250,27 +294,18 @@ IDLJNI_BUILDDIR = $(IDLJ_BUILDDIR) # For IDLJ/IDLJNI rules, set VPATH for .idl files so that pattern rules can # find them. vpath %.idl idl/ossie/BULKIO -idlj_IDLSRC = $(IDL_FILES) +# Only include internal interfaces in pure Java; do not build them for Java- +# side JNI, because we will not be generating the C++-side +idlj_IDLSRC = $(IDL_FILES) $(IDL_INTERNAL_FILES) idljni_IDLSRC = $(IDL_FILES) BULKIOInterfaces_jar_SOURCE = $(idlj_SOURCE) $(idljni_SOURCE) BULKIOInterfaces_jar_CLASSPATH = $(OSSIE_CLASSPATH) BULKIOInterfaces_jar_MANIFEST = build/java/META-INF/MANIFEST.MF +BULKIOInterfaces_jar_JAVACFLAGS = -g +BULKIOInterfaces_jar_JARADD = -C $(IDLJ_SRC_DEST) . -src/java/META-INF/MANIFEST.MF: Makefile - @mkdir -p src/java/META-INF - @rm -f $@ - @echo "Bundle-ManifestVersion: 2" >> $@ - @echo "Bundle-Name: $(BUNDLE_NAME) Source" >> $@ - @echo "Bundle-SymbolicName: $(BUNDLE_SYMBOLIC_NAME).src" >> $@ - @echo "Bundle-Version: $(BUNDLE_VERSION).$(BUNDLE_QUALIFIER)" >> $@ - @echo "Bundle-Vendor: $(BUNDLE_VENDOR)" >> $@ - @echo "Eclipse-SourceBundle: $(BUNDLE_SYMBOLIC_NAME);version=$(BUNDLE_VERSION).$(BUNDLE_QUALIFIER)" >> $@ - -CLEANFILES = BULKIOInterfaces.src.jar src/java/META-INF/MANIFEST.MF $(BULKIOInterfaces_jar_MANIFEST) - -BULKIOInterfaces.src.jar: src/java/META-INF/MANIFEST.MF $(BULKIOInterfaces_jar_SOURCE) - $(RH_V_JAR)$(JAR) cMf $@ -C $(IDLJ_SRC_DEST) . 
+CLEANFILES += $(BULKIOInterfaces_jar_MANIFEST) # JNI library must be built after the current directory (see SUBDIRS above) SUBDIRS += jni diff --git a/bulkioInterfaces/build.sh b/bulkioInterfaces/build.sh index a3f0a89d1..bda27c026 100755 --- a/bulkioInterfaces/build.sh +++ b/bulkioInterfaces/build.sh @@ -25,9 +25,9 @@ elif [ "$1" = "rpm" ]; then # A very simplistic RPM build scenario mydir=`dirname $0` tmpdir=`mktemp -d` - cp -r ${mydir} ${tmpdir}/bulkioInterfaces-2.0.9 - tar czf ${tmpdir}/bulkioInterfaces-2.0.9.tar.gz --exclude=".git" -C ${tmpdir} bulkioInterfaces-2.0.9 - rpmbuild -ta ${tmpdir}/bulkioInterfaces-2.0.9.tar.gz + cp -r ${mydir} ${tmpdir}/bulkioInterfaces-2.2.1 + tar czf ${tmpdir}/bulkioInterfaces-2.2.1.tar.gz --exclude=".git" -C ${tmpdir} bulkioInterfaces-2.2.1 + rpmbuild -ta ${tmpdir}/bulkioInterfaces-2.2.1.tar.gz rm -rf $tmpdir else # Checks if build is newer than makefile (based on modification time) diff --git a/bulkioInterfaces/bulkio-1.0.0 b/bulkioInterfaces/bulkio-1.0.0 deleted file mode 120000 index e09e0bba5..000000000 --- a/bulkioInterfaces/bulkio-1.0.0 +++ /dev/null @@ -1 +0,0 @@ -libsrc \ No newline at end of file diff --git a/bulkioInterfaces/bulkioInterfaces.spec b/bulkioInterfaces/bulkioInterfaces.spec index e78a30d74..ae2b4af04 100644 --- a/bulkioInterfaces/bulkioInterfaces.spec +++ b/bulkioInterfaces/bulkioInterfaces.spec @@ -28,8 +28,8 @@ Prefix: %{_prefix} %bcond_without java Name: bulkioInterfaces -Version: 2.0.9 -Release: 1%{?dist} +Version: 2.2.1 +Release: 2%{?dist} Summary: The bulkio library for REDHAWK Group: Applications/Engineering @@ -38,10 +38,9 @@ URL: http://redhawksdr.org/ Source: %{name}-%{version}.tar.gz Vendor: REDHAWK -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot - -Requires: redhawk >= 2.0 -BuildRequires: redhawk-devel >= 2.0 +Requires: redhawk = %{version} +BuildRequires: redhawk-devel = %{version} +BuildRequires: cppunit-devel %description Libraries and interface definitions for bulkio 
interfaces. @@ -83,9 +82,7 @@ rm -rf --preserve-root $RPM_BUILD_ROOT %endif %if %{with java} %{_prefix}/lib/BULKIOInterfaces.jar -%{_prefix}/lib/BULKIOInterfaces.src.jar %{_prefix}/lib/bulkio.jar -%{_prefix}/lib/bulkio.src.jar %{_prefix}/%{_lib}/libbulkiojni.* %endif @@ -98,6 +95,12 @@ rm -rf --preserve-root $RPM_BUILD_ROOT %changelog +* Wed Jun 28 2017 Ryan Bauman - 2.1.2-1 +- Bump for 2.1.2-rc2 + +* Wed Jun 28 2017 Ryan Bauman - 2.1.1-2 +- Bump for 2.1.1-rc2 + * Fri Mar 21 2014 1.10.0-1 - Improve OS version detection for RHEL/CentOS/Fedora diff --git a/bulkioInterfaces/configure.ac b/bulkioInterfaces/configure.ac index 3c971e3d1..c4fec2fe3 100644 --- a/bulkioInterfaces/configure.ac +++ b/bulkioInterfaces/configure.ac @@ -18,11 +18,11 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # -AC_INIT(bulkioInterfaces, 2.0.9) +AC_INIT(bulkioInterfaces, 2.2.1) +AC_CONFIG_MACRO_DIR([m4]) AC_SUBST([LIBBULKIOINTERFACES_VERSION_INFO], [3:0:1]) -#AM_INIT_AUTOMAKE([nostdinc subdir-objects]) -AM_INIT_AUTOMAKE([nostdinc]) +AM_INIT_AUTOMAKE([foreign nostdinc subdir-objects]) AC_PROG_CC AC_PROG_CXX AC_PROG_INSTALL @@ -44,29 +44,22 @@ fi PKG_CHECK_MODULES([OMNIORB], [omniORB4 >= 4.1.0]) RH_PKG_IDLDIR([OMNIORB], [omniORB4]) -PKG_CHECK_MODULES([OSSIE], [ossie >= 2.0.9]) +PKG_CHECK_MODULES([OSSIE], [ossie >= 2.2.1]) RH_PKG_IDLDIR([OSSIE], [ossie]) PKG_CHECK_MODULES([OMNICOS], [omniCOS4 >= 4.0.0]) RH_PKG_IDLDIR([OMNICOS], [omniCOS4]) -# Optionally include BULKIO base class libraries -AC_ARG_ENABLE([base-classes], AS_HELP_STRING([--disable-base-classes], [Disable BULKIO base class libraries])) -AM_CONDITIONAL([BUILD_BASE_CLASSES], [test "$enable_base_classes" != "no"]) +AC_SUBST([BULKIO_SO_VERSION], [0:0:0]) +AC_SUBST([BULKIO_API_VERSION], [2.2]) -if test "$enable_base_classes" != "no"; then - AC_SUBST([BULKIO_SO_VERSION], [0:0:0]) - AC_SUBST([BULKIO_API_VERSION], [2.0]) +AX_BOOST_BASE([1.41]) +AX_BOOST_THREAD +AX_BOOST_SYSTEM +OSSIE_ENABLE_LOG4CXX 
+CHECK_VECTOR_IMPL - AX_BOOST_BASE([1.41]) - AX_BOOST_THREAD - AX_BOOST_SYSTEM - OSSIE_ENABLE_LOG4CXX - CHECK_VECTOR_IMPL - - AC_SUBST(BULKIO_INF_INCLUDES, "-I../src/cpp -I../src/cpp/ossie") - AC_SUBST(BULKIO_INF_CFLAGS, ) - AC_SUBST(BULKIO_INF_LIBS, ) -fi +AC_SUBST(BULKIOINTERFACES_CFLAGS, "-I \$(top_srcdir)/src/cpp -I \$(top_srcdir)/src/cpp/ossie") +AC_SUBST(BULKIOINTERFACES_LIBS, "-L\$(top_srcdir) -lbulkioInterfaces") # Optionally include java support AC_ARG_ENABLE([java], AS_HELP_STRING([--disable-java], [Disable framework java support])) @@ -75,7 +68,7 @@ HAVE_JAVASUPPORT=no if test "x$enable_java" != "xno"; then # configure was run with java enabled - java_source_version=1.6 + java_source_version=1.8 RH_JAVA_HOME RH_PROG_JAVAC([$java_source_version]) @@ -136,12 +129,49 @@ AC_MSG_RESULT($HAVE_JAVASUPPORT) AM_CONDITIONAL(HAVE_JAVASUPPORT, test $HAVE_JAVASUPPORT = yes) # End optional java support +# C++ unit testing support. May want to conditionally enable/disable this. +AM_PATH_CPPUNIT(1.12.1) +AS_IF([test "x$HAVE_JAVASUPPORT" == "xyes"], [ + dnl Use RPM location hard-coded for now + AC_SUBST([JUNIT_CLASSPATH], "/usr/share/java/junit4.jar") +]) + +# For C++ test components, provide BULKIO_CFLAGS and BULKIO_LIBS so they build +# without modifying their Makefile.am to change the paths. In order to pick up +# the uninstalled headers, we need to provide the path to the BULKIO headers, +# the top-level include directory ("bulkio/bulkio.h") and the bulkio +# directory inside of include ("bulkio.h"). 
+bulkio_includedir="\$(top_srcdir)/libsrc/cpp/include" +AC_SUBST(BULKIO_CFLAGS, "-I ${bulkio_includedir} -I ${bulkio_includedir}/bulkio ${BULKIOINTERFACES_CFLAGS}") +AC_SUBST(BULKIO_LIBS, "-L\$(top_srcdir)/libsrc -lbulkio-${BULKIO_API_VERSION} ${BULKIOINTERFACES_LIBS}") + AC_CONFIG_FILES([bulkioInterfaces.pc setup.py Makefile jni/Makefile]) -if test "$enable_base_classes" != "no"; then - if test "$HAVE_JAVASUPPORT = yes"; then - AC_CONFIG_FILES([libsrc/java/META-INF/MANIFEST.MF libsrc/java/META-INF/MANIFEST.MF.src]) - fi - AC_CONFIG_FILES([libsrc/Makefile libsrc/bulkio.pc]) +if test "$HAVE_JAVASUPPORT = yes"; then + AC_CONFIG_FILES([libsrc/java/META-INF/MANIFEST.MF]) fi +AC_SUBST(PROJECTDEPS_CFLAGS, "\$(OSSIE_CFLAGS)") +AC_SUBST(PROJECTDEPS_LIBS, "\$(OSSIE_LIBS)") +AC_SUBST(INTERFACEDEPS_CFLAGS, "\$(BULKIO_CFLAGS)") +AC_SUBST(INTERFACEDEPS_LIBS, "\$(BULKIO_LIBS)") +AC_SUBST(REDHAWK_CLASSPATH, "\$(OSSIE_CLASSPATH)") +AC_SUBST(BULKIO_CLASSPATH, "\$(top_srcdir)/libsrc/bulkio.jar:\$(top_srcdir)/BULKIOInterfaces.jar") +AC_CONFIG_FILES([libsrc/Makefile \ + libsrc/bulkio.pc \ + libsrc/testing/Makefile \ + libsrc/testing/tests/cpp/Makefile \ + libsrc/testing/tests/java/Makefile \ + libsrc/testing/components/CPP_Ports/cpp/Makefile \ + libsrc/testing/components/Java_Ports/java/Makefile \ + libsrc/testing/components/Oversized_framedata/cpp/Makefile \ + libsrc/testing/components/src/cpp/Makefile \ + libsrc/testing/components/snk_slow/cpp/Makefile \ + libsrc/testing/components/Oversized_framedata/java/Makefile \ + libsrc/testing/components/TestLargePush/cpp/Makefile \ + libsrc/testing/components/TestLargePush/java/Makefile \ + libsrc/testing/components/multiout_attachable/cpp/Makefile \ + libsrc/testing/components/multiout_attachable/java/Makefile \ + libsrc/testing/components/sri_changed_cpp/cpp/Makefile \ + libsrc/testing/devices/dev_snk/java/Makefile \ + libsrc/testing/devices/dev_src/java/Makefile]) AC_OUTPUT diff --git a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataBit.idl 
b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataBit.idl new file mode 100644 index 000000000..ddbe7f901 --- /dev/null +++ b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataBit.idl @@ -0,0 +1,39 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef _DATABIT_IDL_ +#define _DATABIT_IDL_ + +#include "ossie/BULKIO/bio_runtimeStats.idl" +#include "ossie/BULKIO/bio_updateSRI.idl" + +module BULKIO { + + struct BitSequence { + CF::OctetSequence data; + unsigned long bits; + }; + + interface dataBit : ProvidesPortStatisticsProvider, updateSRI { + void pushPacket(in BitSequence data, in PrecisionUTCTime T, in boolean EOS, in string streamID); + }; +}; + +#endif diff --git a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataLong.idl b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataLong.idl index a5d663590..352932b68 100644 --- a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataLong.idl +++ b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataLong.idl @@ -18,8 +18,8 @@ * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ -#ifndef _DATASHORT_IDL_ -#define _DATASHORT_IDL_ +#ifndef _DATALONG_IDL_ +#define _DATALONG_IDL_ #include "ossie/BULKIO/bio_runtimeStats.idl" #include "ossie/BULKIO/bio_updateSRI.idl" diff --git a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataLongLong.idl b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataLongLong.idl index 585676101..f340da416 100644 --- a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataLongLong.idl +++ b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataLongLong.idl @@ -18,8 +18,8 @@ * along with this program. If not, see http://www.gnu.org/licenses/. */ -#ifndef _DATASHORT_IDL_ -#define _DATASHORT_IDL_ +#ifndef _DATALONGLONG_IDL_ +#define _DATALONGLONG_IDL_ #include "ossie/BULKIO/bio_runtimeStats.idl" #include "ossie/BULKIO/bio_updateSRI.idl" diff --git a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataUlongLong.idl b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataUlongLong.idl index f45444180..12fb81d14 100644 --- a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataUlongLong.idl +++ b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataUlongLong.idl @@ -18,8 +18,8 @@ * along with this program. If not, see http://www.gnu.org/licenses/. */ -#ifndef _DATASHORT_IDL_ -#define _DATASHORT_IDL_ +#ifndef _DATAULONGLONG_IDL_ +#define _DATAULONGLONG_IDL_ #include "ossie/BULKIO/bio_runtimeStats.idl" #include "ossie/BULKIO/bio_updateSRI.idl" diff --git a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataUshort.idl b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataUshort.idl index a41cea5f3..2ea115b5e 100644 --- a/bulkioInterfaces/idl/ossie/BULKIO/bio_dataUshort.idl +++ b/bulkioInterfaces/idl/ossie/BULKIO/bio_dataUshort.idl @@ -18,8 +18,8 @@ * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ -#ifndef _DATASHORT_IDL_ -#define _DATASHORT_IDL_ +#ifndef _DATAUSHORT_IDL_ +#define _DATAUSHORT_IDL_ #include "ossie/BULKIO/bio_runtimeStats.idl" #include "ossie/BULKIO/bio_updateSRI.idl" diff --git a/bulkioInterfaces/idl/ossie/BULKIO/internal/bio_dataExt.idl b/bulkioInterfaces/idl/ossie/BULKIO/internal/bio_dataExt.idl new file mode 100644 index 000000000..f1d9f2d59 --- /dev/null +++ b/bulkioInterfaces/idl/ossie/BULKIO/internal/bio_dataExt.idl @@ -0,0 +1,88 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef _DATAEXT_IDL_ +#define _DATAEXT_IDL_ + +#include "ossie/CF/NegotiablePort.idl" +#include "ossie/BULKIO/bio_dataChar.idl" +#include "ossie/BULKIO/bio_dataDouble.idl" +#include "ossie/BULKIO/bio_dataFloat.idl" +#include "ossie/BULKIO/bio_dataLong.idl" +#include "ossie/BULKIO/bio_dataLongLong.idl" +#include "ossie/BULKIO/bio_dataOctet.idl" +#include "ossie/BULKIO/bio_dataShort.idl" +#include "ossie/BULKIO/bio_dataUlong.idl" +#include "ossie/BULKIO/bio_dataUlongLong.idl" +#include "ossie/BULKIO/bio_dataUshort.idl" +#include "ossie/BULKIO/bio_dataBit.idl" +#include "ossie/BULKIO/bio_dataXML.idl" +#include "ossie/BULKIO/bio_dataFile.idl" + +module BULKIO { + + module internal { + + interface UsesPortStatisticsProviderExt : UsesPortStatisticsProvider, ExtendedCF::NegotiableUsesPort { + }; + + interface dataCharExt : dataChar, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataDoubleExt : dataDouble, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataFloatExt : dataFloat, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataLongExt : dataLong, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataLongLongExt : dataLongLong, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataOctetExt : dataOctet, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataShortExt : dataShort, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataUlongExt : dataUlong, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataUlongLongExt : dataUlongLong, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataUshortExt : dataUshort, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataBitExt : dataBit, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataXMLExt : dataXML, ExtendedCF::NegotiableProvidesPort { + }; + + interface dataFileExt : dataFile, ExtendedCF::NegotiableProvidesPort { + }; + + }; +}; + +#endif diff --git a/bulkioInterfaces/jni/Makefile.am b/bulkioInterfaces/jni/Makefile.am index 
480381d84..7d9a537f7 100644 --- a/bulkioInterfaces/jni/Makefile.am +++ b/bulkioInterfaces/jni/Makefile.am @@ -43,6 +43,7 @@ nodist_libbulkiojni_la_SOURCES = \ ossie/BULKIO/jni_bio_dataXML.cpp \ ossie/BULKIO/jni_bio_dataShort.cpp \ ossie/BULKIO/jni_bio_dataDouble.cpp \ + ossie/BULKIO/jni_bio_dataBit.cpp \ ossie/BULKIO/jni_bio_dataChar.cpp \ ossie/BULKIO/jni_bio_dataOctet.cpp \ ossie/BULKIO/jni_bio_dataUlong.cpp \ @@ -61,6 +62,7 @@ nobase_nodist_include_HEADERS = \ ossie/BULKIO/jni_bio_dataXML.h \ ossie/BULKIO/jni_bio_dataShort.h \ ossie/BULKIO/jni_bio_dataDouble.h \ + ossie/BULKIO/jni_bio_dataBit.h \ ossie/BULKIO/jni_bio_dataChar.h \ ossie/BULKIO/jni_bio_dataOctet.h \ ossie/BULKIO/jni_bio_dataUlong.h \ diff --git a/bulkioInterfaces/libsrc/.gitignore b/bulkioInterfaces/libsrc/.gitignore index a90129f13..4b652d27c 100644 --- a/bulkioInterfaces/libsrc/.gitignore +++ b/bulkioInterfaces/libsrc/.gitignore @@ -1 +1,2 @@ .jars +python/bulkio.egg-info/ diff --git a/bulkioInterfaces/libsrc/Makefile.am b/bulkioInterfaces/libsrc/Makefile.am index d0c0876f9..87de784cd 100644 --- a/bulkioInterfaces/libsrc/Makefile.am +++ b/bulkioInterfaces/libsrc/Makefile.am @@ -40,28 +40,46 @@ libbulkio_@BULKIO_API_VERSION@_la_SOURCES = \ cpp/bulkio_in_stream.cpp \ cpp/bulkio_out_port.cpp \ cpp/bulkio_out_stream.cpp \ - cpp/bulkio_attachable_port.cpp \ + cpp/bulkio_attachable_port.cpp \ cpp/bulkio_sri_helpers.cpp \ + cpp/bulkio_stream.cpp \ cpp/bulkio_time_helpers.cpp \ cpp/bulkio_time_operators.cpp \ cpp/bulkio_datablock.cpp \ - cpp/bulkio_p.h + cpp/bulkio_p.h \ + cpp/BulkioTransport.cpp \ + cpp/CorbaTransport.h \ + cpp/CorbaTransport.cpp \ + cpp/LocalTransport.h \ + cpp/LocalTransport.cpp \ + cpp/shm/FifoIPC.h \ + cpp/shm/FifoIPC.cpp \ + cpp/shm/MessageBuffer.h \ + cpp/shm/ShmInputTransport.h \ + cpp/shm/ShmInputTransport.cpp \ + cpp/shm/ShmOutputTransport.h \ + cpp/shm/ShmOutputTransport.cpp \ + cpp/shm/ShmTransportFactory.cpp ## Define the list of public header files and their install 
location. library_includedir = $(includedir)/bulkio -library_include_HEADERS = cpp/bulkio.h \ - cpp/BULKIO_Interfaces.h \ - cpp/bulkio_base.h \ - cpp/bulkio_callbacks.h \ - cpp/bulkio_traits.h \ - cpp/bulkio_in_port.h \ - cpp/bulkio_in_stream.h \ - cpp/bulkio_out_port.h \ - cpp/bulkio_out_stream.h \ - cpp/bulkio_attachable_base.h \ - cpp/bulkio_time_operators.h \ - cpp/bulkio_datablock.h \ - cpp/bulkio_compat.h +library_include_HEADERS = cpp/include/bulkio/bulkio.h \ + cpp/include/bulkio/BULKIO_Interfaces.h \ + cpp/include/bulkio/BulkioTransport.h \ + cpp/include/bulkio/bulkio_base.h \ + cpp/include/bulkio/bulkio_callbacks.h \ + cpp/include/bulkio/bulkio_traits.h \ + cpp/include/bulkio/bulkio_in_port.h \ + cpp/include/bulkio/bulkio_in_stream.h \ + cpp/include/bulkio/bulkio_out_port.h \ + cpp/include/bulkio/bulkio_out_stream.h \ + cpp/include/bulkio/bulkio_attachable_base.h \ + cpp/include/bulkio/bulkio_stream.h \ + cpp/include/bulkio/bulkio_time_operators.h \ + cpp/include/bulkio/bulkio_datablock.h \ + cpp/include/bulkio/bulkio_datatransfer.h \ + cpp/include/bulkio/bulkio_typetraits.h \ + cpp/include/bulkio/bulkio_compat.h ## The generated configuration header is installed in its own subdirectory of ## $(libdir). The reason for this is that the configuration information put @@ -76,7 +94,9 @@ library_include_HEADERS = cpp/bulkio.h \ ## shipped with the source tarball. 
#bulkio_libincludedir = $(libdir)/bulkio-$(BULKIO_API_VERSION)/include -libbulkio_@BULKIO_API_VERSION@_la_CXXFLAGS = -Wall -I./cpp -DLOGGING $(BULKIO_INF_INCLUDES) $(BOOST_CPPFLAGS) $(OMNIORB_CFLAGS) $(OSSIE_CFLAGS) +libbulkio_@BULKIO_API_VERSION@_la_CXXFLAGS = -Wall -I $(srcdir)/cpp -I $(srcdir)/cpp/include/bulkio -DLOGGING $(BULKIOINTERFACES_CFLAGS) $(BOOST_CPPFLAGS) $(OMNIORB_CFLAGS) $(OSSIE_CFLAGS) + +libbulkio_@BULKIO_API_VERSION@_la_LIBADD = $(top_builddir)/libbulkioInterfaces.la ############################################################################### # Python @@ -87,14 +107,9 @@ ossie__v_pysetup_ = $(ossie__v_pysetup__$(AM_DEFAULT_VERBOSITY)) ossie__v_pysetup_0 = --quiet ossie__v_pysetup__0 = $(ossie__v_pysetup_0) -all-python: build-python - -build-python: - python setup.py $(OSSIE_V_pysetup) build - +rootflag = $(if $(DESTDIR),--root=$(DESTDIR)) install-python: - test -n "$(DESTDIR)" && buildroot="--root=$(DESTDIR)"; \ - python setup.py install $$buildroot --prefix=$(prefix) --install-purelib=$(prefix)/lib/python --force + $(PYTHON) setup.py install -f $(rootflag) --home=$(prefix) --old-and-unmanageable clean-python: python setup.py clean --all @@ -110,62 +125,34 @@ JAVA_DIR := java JAVA_SRCDIR := $(JAVA_DIR)/src JAVA_SRCS := Const.java \ +DataHelper.java \ DataTransfer.java \ -InCharPort.java \ -InDoublePort.java \ +ChunkingOutPort.java \ +InBitPort.java \ InFilePort.java \ -InFloatPort.java \ -InInt16Port.java \ -InInt32Port.java \ -InInt64Port.java \ -InInt8Port.java \ -InLongLongPort.java \ -InLongPort.java \ -InOctetPort.java \ InSDDSPort.java \ InVITA49Port.java \ -InShortPort.java \ -InUInt16Port.java \ -InUInt32Port.java \ -InUInt64Port.java \ -InUInt8Port.java \ -InULongLongPort.java \ -InULongPort.java \ -InUShortPort.java \ InXMLPort.java \ +InDataPort.java \ +InPortImpl.java \ linkStatistics.java \ -OutCharPort.java \ +OutBitPort.java \ OutDataPort.java \ -OutDoublePort.java \ OutFilePort.java \ -OutFloatPort.java \ -OutInt16Port.java \ 
-OutInt32Port.java \ -OutInt64Port.java \ -OutInt8Port.java \ -OutLongLongPort.java \ -OutLongPort.java \ -OutOctetPort.java \ OutPortBase.java \ OutSDDSPort.java \ OutVITA49Port.java \ -OutShortPort.java \ -OutUInt16Port.java \ -OutUInt32Port.java \ -OutUInt64Port.java \ -OutUInt8Port.java \ -OutULongLongPort.java \ -OutULongPort.java \ -OutUShortPort.java \ OutXMLPort.java \ connection_descriptor_struct.java \ SriMapStruct.java \ -queueSemaphore.java \ SizeOf.java \ SriListener.java \ ConnectionEventListener.java \ sriState.java \ utils.java \ +BitDataHelper.java \ +FileDataHelper.java \ +XMLDataHelper.java \ sdds/SDDSStream.java \ sdds/SDDSStreamAttachment.java \ sdds/SDDSStreamContainer.java \ @@ -179,19 +166,83 @@ time/Comparator.java \ time/DefaultComparator.java \ time/utils.java +# Numeric ports are generated via sed for ease of maintenance +$(JAVA_SRCDIR)/bulkio/In%Port.java : $(JAVA_DIR)/sed/%.sed $(JAVA_SRCDIR)/bulkio/InPort.java.template + $(AM_V_GEN)sed -f $< $(JAVA_SRCDIR)/bulkio/InPort.java.template > $@ + +$(JAVA_SRCDIR)/bulkio/%DataHelper.java : $(JAVA_DIR)/sed/%.sed $(JAVA_SRCDIR)/bulkio/NumericDataHelper.java.template + $(AM_V_GEN)sed -f $< $(JAVA_SRCDIR)/bulkio/NumericDataHelper.java.template > $@ + +$(JAVA_SRCDIR)/bulkio/Out%Port.java : $(JAVA_DIR)/sed/%.sed $(JAVA_SRCDIR)/bulkio/OutPort.java.template + $(AM_V_GEN)sed -f $< $(JAVA_SRCDIR)/bulkio/OutPort.java.template > $@ + +JAVA_BUILT_SOURCE = $(JAVA_SRCDIR)/bulkio/InDoublePort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InFloatPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InShortPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InLongPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InLongLongPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InCharPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InUShortPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InULongPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InULongLongPort.java +JAVA_BUILT_SOURCE += 
$(JAVA_SRCDIR)/bulkio/InOctetPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/DoubleDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/FloatDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/ShortDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/LongDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/LongLongDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/CharDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/UShortDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/ULongDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/ULongLongDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OctetDataHelper.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutDoublePort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutFloatPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutShortPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutLongPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutLongLongPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutCharPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutUShortPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutULongPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutULongLongPort.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutOctetPort.java + +# Deprecated port alias classes (e.g., InInt16Port aliases InShortPort) +$(JAVA_SRCDIR)/bulkio/In%Port.java : $(JAVA_DIR)/sed/deprecated/%.sed $(JAVA_SRCDIR)/bulkio/DeprecatedInPort.java.template + $(AM_V_GEN)sed -f $< $(JAVA_SRCDIR)/bulkio/DeprecatedInPort.java.template > $@ + +$(JAVA_SRCDIR)/bulkio/Out%Port.java : $(JAVA_DIR)/sed/deprecated/%.sed $(JAVA_SRCDIR)/bulkio/DeprecatedOutPort.java.template + $(AM_V_GEN)sed -f $< $(JAVA_SRCDIR)/bulkio/DeprecatedOutPort.java.template > $@ + +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InInt16Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InInt32Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InInt64Port.java 
+JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InInt8Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InUInt16Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InUInt32Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InUInt64Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/InUInt8Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutInt16Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutInt32Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutInt64Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutInt8Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutUInt16Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutUInt32Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutUInt64Port.java +JAVA_BUILT_SOURCE += $(JAVA_SRCDIR)/bulkio/OutUInt8Port.java + java_JARFILES = bulkio.jar -java_DATA = bulkio.src.jar -bulkio_jar_SOURCE = $(addprefix $(JAVA_SRCDIR)/bulkio/,$(JAVA_SRCS)) +bulkio_jar_SOURCE = $(addprefix $(JAVA_SRCDIR)/bulkio/,$(JAVA_SRCS)) $(JAVA_BUILT_SOURCE) bulkio_jar_CLASSPATH = $(OSSIE_CLASSPATH):$(top_builddir)/BULKIOInterfaces.jar bulkio_jar_MANIFEST = $(JAVA_DIR)/META-INF/MANIFEST.MF +bulkio_jar_JARADD = -C $(JAVA_SRCDIR) bulkio +bulkio_jar_JAVACFLAGS = -g -Xlint bulkio.jar: $(top_builddir)/BULKIOInterfaces.jar -bulkio.src.jar: $(JAVA_DIR)/META-INF/MANIFEST.MF.src $(bulkio_jar_SOURCE) - $(RH_V_JAR)$(JAR) cmf $< $@ -C $(JAVA_SRCDIR) . 
- -CLEANFILES = bulkio.src.jar +BUILT_SOURCES = $(JAVA_BUILT_SOURCE) +CLEANFILES = $(BUILT_SOURCES) endif @@ -199,9 +250,7 @@ endif # General # -.PHONY: all-local all-python build-python install-python clean-python reallyclean - -all-local: all-python +.PHONY: install-python clean-python reallyclean install-exec-hook: install-python diff --git a/bulkioInterfaces/libsrc/cpp/BulkioTransport.cpp b/bulkioInterfaces/libsrc/cpp/BulkioTransport.cpp new file mode 100644 index 000000000..2aada3e86 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/BulkioTransport.cpp @@ -0,0 +1,232 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include +#include "bulkio_typetraits.h" +#include "bulkio_in_port.h" +#include "bulkio_out_port.h" +#include "bulkio_p.h" + +namespace bulkio { + + template + OutputTransport::OutputTransport(OutPortType* port, PtrType objref) : + redhawk::UsesTransport(port), + _port(port), + _objref(PortType::_duplicate(objref)), + _stats(port->getName()) + { + // Manually set the bit size because the statistics ctor only takes a + // byte count + _stats.setBitSize(NativeTraits::bits); + } + + template + OutputTransport::~OutputTransport() + { + } + + template + void OutputTransport::disconnect() + { + // Send an end-of-stream for all active streams + for (VersionMap::iterator stream = _sriVersions.begin(); stream != _sriVersions.end(); ++stream) { + try { + this->_pushPacket(BufferType(), bulkio::time::utils::notSet(), true, stream->first); + } catch (redhawk::TransportTimeoutError& e) { + // Ignore the timeout. The destination is in a bad state + } + } + _sriVersions.clear(); + } + + template + void OutputTransport::pushSRI(const std::string& streamID, const BULKIO::StreamSRI& sri, int version) + { + VersionMap::iterator existing = _sriVersions.find(streamID); + if (existing != _sriVersions.end()) { + if (version == existing->second) { + return; + } + existing->second = version; + } else { + _sriVersions[streamID] = version; + } + this->_pushSRI(sri); + } + + template + void OutputTransport::pushPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID, + const BULKIO::StreamSRI& sri) + { + this->_sendPacket(data, T, EOS, streamID, sri); + if (EOS) { + _sriVersions.erase(streamID); + } + } + + template + BULKIO::PortStatistics OutputTransport::getStatistics() + { + BULKIO::PortStatistics statistics = _stats.retrieve(); + + // Use our own stream tracking to fill in the statistics stream IDs + statistics.streamIDs.length(0); + for (VersionMap::iterator stream = _sriVersions.begin(); stream != _sriVersions.end(); 
++stream) { + ossie::corba::push_back(statistics.streamIDs, stream->first.c_str()); + } + + // Add extended statistics from subclasses to the keywords + ossie::corba::extend(statistics.keywords, _getExtendedStatistics()); + + return statistics; + } + + template + void OutputTransport::_sendPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID, + const BULKIO::StreamSRI& sri) + { + this->_pushPacket(data, T, EOS, streamID); + this->_recordPush(streamID, this->_dataLength(data), EOS); + } + + template + void OutputTransport::_recordPush(const std::string& streamID, size_t elements, bool endOfStream) + { + _stats.update(elements, 0.0, endOfStream, streamID); + } + + template + redhawk::PropertyMap OutputTransport::_getExtendedStatistics() + { + return redhawk::PropertyMap(); + } + + template + size_t OutputTransport::_dataLength(const BufferType& data) + { + return data.size(); + } + + template <> + size_t OutputTransport::_dataLength(const std::string& /*unused*/) + { + return 1; + } + + // + // InputTransport + // + template + InputTransport::InputTransport(InPortType* port, const std::string& transportId) : + redhawk::ProvidesTransport(port, transportId), + _port(port) + { + } + + // + // OutputManager + // + template + OutputManager::OutputManager(OutPortType* port) : + _port(port) + { + } + + template + redhawk::UsesTransport* + OutputManager::createUsesTransport(CORBA::Object_ptr object, + const std::string& connectionId, + const redhawk::PropertyMap& properties) + { + typename PortType::_var_type port; + try { + port = PortType::_narrow(object); + } catch (const CORBA::Exception&) { + // If this narrow fails something has gone horribly wrong, but just + // let the negotiation layer handle it + return 0; + } + return createOutputTransport(port, connectionId, properties); + } + + // + // InputManager + // + template + InputManager::InputManager(InPortType* port) : + _port(port) + { + } + + template + 
redhawk::ProvidesTransport* + InputManager::createProvidesTransport(const std::string& transportId, + const redhawk::PropertyMap& properties) + { + return createInputTransport(transportId, properties); + } + + // + // TransportFactory + // + template + std::string BulkioTransportFactory::repoId() + { + return PortType::_PD_repoId; + } + + template + redhawk::ProvidesTransportManager* + BulkioTransportFactory::createProvidesManager(redhawk::NegotiableProvidesPortBase* port) + { + InPortType* bulkio_port = dynamic_cast(port); + if (!bulkio_port) { + throw std::logic_error("incorrect input port type for BulkIO transport factory " + repoId()); + } + return this->createInputManager(bulkio_port); + } + + template + redhawk::UsesTransportManager* + BulkioTransportFactory::createUsesManager(redhawk::NegotiableUsesPort* port) + { + OutPortType* bulkio_port = dynamic_cast(port); + if (!bulkio_port) { + throw std::logic_error("incorrect output port type for BulkIO transport factory " + repoId()); + } + return this->createOutputManager(bulkio_port); + } + +#define INSTANTIATE_TEMPLATE(x) \ + template class OutputTransport; \ + template class OutputManager; \ + template class InputTransport; \ + template class InputManager; \ + template class BulkioTransportFactory; + + FOREACH_PORT_TYPE(INSTANTIATE_TEMPLATE); +} diff --git a/bulkioInterfaces/libsrc/cpp/CorbaTransport.cpp b/bulkioInterfaces/libsrc/cpp/CorbaTransport.cpp new file mode 100644 index 000000000..d771215ab --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/CorbaTransport.cpp @@ -0,0 +1,261 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "CorbaTransport.h" + +#include + +#include "bulkio_time_operators.h" +#include "bulkio_p.h" + +namespace bulkio { + + template + class CorbaTransport : public OutputTransport + { + public: + typedef typename PortType::_ptr_type PtrType; + typedef typename PortType::_var_type VarType; + typedef typename OutputTransport::BufferType BufferType; + typedef typename CorbaTraits::SequenceType SequenceType; + typedef typename CorbaTraits::TransportType TransportType; + + CorbaTransport(OutPort* parent, PtrType port) : + OutputTransport(parent, port) + { + } + + virtual std::string transportType() const + { + return "CORBA"; + } + + virtual CF::Properties transportInfo() const + { + return CF::Properties(); + } + + void disconnect() + { + // Set a timeout for the duration of this call, in case the remote + // side is in a bad state. 
+ omniORB::setClientCallTimeout(this->_objref, 1000); + OutputTransport::disconnect(); + omniORB::setClientCallTimeout(this->_objref, 0); + } + + protected: + virtual void _pushSRI(const BULKIO::StreamSRI& sri) + { + try { + this->_objref->pushSRI(sri); + } catch (const CORBA::SystemException& exc) { + throw redhawk::FatalTransportError(ossie::corba::describeException(exc)); + } + } + + virtual void _pushPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID) + { + try { + _pushPacketImpl(data, T, EOS, streamID.c_str()); + } catch (const CORBA::TIMEOUT& exc) { + throw redhawk::TransportTimeoutError("Push timed out"); + } catch (const CORBA::SystemException& exc) { + throw redhawk::FatalTransportError(ossie::corba::describeException(exc)); + } + } + + private: + void _pushPacketImpl(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const char* streamID) + { + const TransportType* ptr = reinterpret_cast(data.data()); + const SequenceType buffer(data.size(), data.size(), const_cast(ptr), false); + this->_objref->pushPacket(buffer, T, EOS, streamID); + } + }; + + template <> + void CorbaTransport::_pushPacketImpl(const redhawk::shared_bitbuffer& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const char* streamID) + { + BULKIO::BitSequence buffer; + size_t bytes = (data.size() + 7) / 8; + const CORBA::Octet* ptr; + redhawk::bitbuffer temp; + if (data.offset() == 0) { + // Bit buffer is byte-aligned, so it can be directly wrapped with a + // non-owning CORBA sequence + ptr = reinterpret_cast(data.data()); + } else { + // Not byte-aligned, copy bits into a temporary buffer and use that + // as the data for the CORBA sequence + temp = data.copy(); + ptr = reinterpret_cast(temp.data()); + } + buffer.data.replace(bytes, bytes, const_cast(ptr), false); + buffer.bits = data.size(); + _objref->pushPacket(buffer, T, EOS, streamID); + } + + template <> + void 
CorbaTransport::_pushPacketImpl(const std::string& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const char* streamID) + { + _objref->pushPacket(data.c_str(), T, EOS, streamID); + } + + template <> + void CorbaTransport::_pushPacketImpl(const std::string& data, + const BULKIO::PrecisionUTCTime& /* unused */, + bool EOS, + const char* streamID) + { + _objref->pushPacket(data.c_str(), EOS, streamID); + } + + template + class ChunkingTransport : public CorbaTransport + { + public: + typedef typename PortType::_ptr_type PtrType; + typedef typename OutputTransport::BufferType BufferType; + typedef typename CorbaTraits::TransportType TransportType; + + ChunkingTransport(OutPort* parent, PtrType port) : + CorbaTransport(parent, port), + maxSamplesPerPush(_getMaxSamplesPerPush()) + { + } + + /* + * Push a packet whose payload may not fit within the CORBA limit. The + * packet is broken down into sub-packets and sent via multiple pushPacket + * calls. The EOS is set to false for all of the sub-packets, except for + * the last sub-packet, which uses the input EOS argument. 
+ */ + virtual void _sendPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID, + const BULKIO::StreamSRI& sri) + { + double xdelta = sri.xdelta; + size_t itemSize = sri.mode?2:1; + size_t frameSize = itemSize; + if (sri.subsize > 0) { + frameSize *= sri.subsize; + } + // Quantize the push size (in terms of scalars) to the nearest frame, + // which takes both the complex mode and subsize into account + const size_t maxPushSize = (maxSamplesPerPush/frameSize) * frameSize; + + // Always do at least one push (may be empty), ensuring that all samples + // are pushed + size_t first = 0; + size_t samplesRemaining = data.size(); + + // Initialize time of first subpacket + BULKIO::PrecisionUTCTime packetTime = T; + + do { + // Don't send more samples than are remaining + const size_t pushSize = std::min(samplesRemaining, maxPushSize); + samplesRemaining -= pushSize; + + // Send end-of-stream as false for all sub-packets except for the + // last one (when there are no samples remaining after this push), + // which gets the input EOS. + bool packetEOS = false; + if (samplesRemaining == 0) { + packetEOS = EOS; + } + + // Take the next slice of the input buffer. + BufferType subPacket = data.slice(first, first + pushSize); + CorbaTransport::_sendPacket(subPacket, packetTime, packetEOS, streamID, sri); + + // Synthesize the next packet timestamp + if (packetTime.tcstatus == BULKIO::TCS_VALID) { + packetTime += (pushSize/itemSize)* xdelta; + } + + // Advance buffer to next sub-packet boundary + first += pushSize; + } while (samplesRemaining > 0); + } + + private: + static inline size_t _getMaxSamplesPerPush() + { + // Take the maximum transfer size in bytes, multiply by some number + // < 1 to leave some margin for the CORBA header, then determine + // the maximum number of elements via bits, to support numeric data + // data types (e.g., float) and packed bits. 
+ const size_t max_bits = (size_t) (bulkio::Const::MaxTransferBytes() * .9) * 8; + return max_bits / NativeTraits::bits; + } + + const size_t maxSamplesPerPush; + }; + + template + OutputTransport* CorbaTransportFactory::Create(OutPort* parent, + PtrType port) + { + return new ChunkingTransport(parent, port); + } + + template <> + OutputTransport* + CorbaTransportFactory::Create(OutPort* parent, + PtrType port) + { + return new CorbaTransport(parent, port); + } + + template <> + OutputTransport* + CorbaTransportFactory::Create(OutPort* parent, + PtrType port) + { + return new CorbaTransport(parent, port); + } + +#define INSTANTIATE_TEMPLATE(x) \ + template class CorbaTransport; \ + template class CorbaTransportFactory; + +#define INSTANTIATE_NUMERIC_TEMPLATE(x) \ + template class ChunkingTransport; + + FOREACH_PORT_TYPE(INSTANTIATE_TEMPLATE); + FOREACH_NUMERIC_PORT_TYPE(INSTANTIATE_NUMERIC_TEMPLATE); + INSTANTIATE_NUMERIC_TEMPLATE(BULKIO::dataBit); +} diff --git a/bulkioInterfaces/libsrc/cpp/CorbaTransport.h b/bulkioInterfaces/libsrc/cpp/CorbaTransport.h new file mode 100644 index 000000000..2f62dce66 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/CorbaTransport.h @@ -0,0 +1,37 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef __bulkio_corbatransport_h +#define __bulkio_corbatransport_h + +#include + +namespace bulkio { + + template + class CorbaTransportFactory { + public: + typedef typename PortType::_ptr_type PtrType; + + static OutputTransport* Create(OutPort* parent, PtrType port); + }; + +} + +#endif // __bulkio_corbatransport_h diff --git a/bulkioInterfaces/libsrc/cpp/LocalTransport.cpp b/bulkioInterfaces/libsrc/cpp/LocalTransport.cpp new file mode 100644 index 000000000..c1e7b07ee --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/LocalTransport.cpp @@ -0,0 +1,86 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "LocalTransport.h" + +#include + +#include "bulkio_p.h" + +namespace bulkio { + + template + LocalTransport* LocalTransport::Factory(OutPort* parent, + PortBase* port) + { + LocalPortType* local_port = dynamic_cast(port); + if (local_port) { + typename PortType::_var_type corba_port = local_port->_this(); + return new LocalTransport(parent, local_port, corba_port); + } + return 0; + } + + template + LocalTransport::LocalTransport(OutPort* parent, LocalPortType* localPort, PtrType port) : + OutputTransport(parent, port), + _localPort(localPort) + { + _localPort->_add_ref(); + } + + template + LocalTransport::~LocalTransport() + { + _localPort->_remove_ref(); + } + + template + std::string LocalTransport::transportType() const + { + return "local"; + } + + template + CF::Properties LocalTransport::transportInfo() const + { + return CF::Properties(); + } + + template + void LocalTransport::_pushSRI(const BULKIO::StreamSRI& sri) + { + _localPort->pushSRI(sri); + } + + template + void LocalTransport::_pushPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID) + { + _localPort->queuePacket(data, T, EOS, streamID); + } + +#define INSTANTIATE_TEMPLATE(x) template class LocalTransport; + + FOREACH_PORT_TYPE(INSTANTIATE_TEMPLATE); + +} diff --git a/bulkioInterfaces/libsrc/cpp/LocalTransport.h b/bulkioInterfaces/libsrc/cpp/LocalTransport.h new file mode 100644 index 000000000..b4063e411 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/LocalTransport.h @@ -0,0 +1,54 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef __bulkio_localtransport_h +#define __bulkio_localtransport_h + +#include + +namespace bulkio { + + template + class LocalTransport : public OutputTransport + { + public: + typedef typename PortType::_ptr_type PtrType; + typedef typename OutputTransport::BufferType BufferType; + typedef InPort LocalPortType; + + static LocalTransport* Factory(OutPort* parent, PortBase* port); + + LocalTransport(OutPort* parent, LocalPortType* localPort, PtrType port); + ~LocalTransport(); + + virtual std::string transportType() const; + virtual CF::Properties transportInfo() const; + + protected: + virtual void _pushSRI(const BULKIO::StreamSRI& sri); + virtual void _pushPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID); + + LocalPortType* _localPort; + }; +} + +#endif // __bulkio_localtransport_h diff --git a/bulkioInterfaces/libsrc/cpp/bulkio.cpp b/bulkioInterfaces/libsrc/cpp/bulkio.cpp index ce6e96f18..8fde2a128 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio.cpp +++ b/bulkioInterfaces/libsrc/cpp/bulkio.cpp @@ -101,7 +101,7 @@ namespace bulkio { } - linkStatistics::linkStatistics( std::string &portName , const int nbytes ): + linkStatistics::linkStatistics(const std::string &portName, const int nbytes): 
portName(portName), nbytes(nbytes) { @@ -246,61 +246,4 @@ namespace bulkio { return runningStats; } - - template < typename DataTransferTraits > - DataTransfer< DataTransferTraits >::DataTransfer(const PortSequenceType & data, const BULKIO::PrecisionUTCTime &_T, bool _EOS, const char* _streamID, BULKIO::StreamSRI &_H, bool _sriChanged, bool _inputQueueFlushed) - { - int dataLength = data.length(); - - typedef typename std::_Vector_base< TransportType, typename DataTransferTraits::DataBufferType::allocator_type >::_Vector_impl *VectorPtr; - - VectorPtr vectorPtr = (VectorPtr)(&dataBuffer); - vectorPtr->_M_start = const_cast< PortSequenceType *>(&data)->get_buffer(1); - vectorPtr->_M_finish = vectorPtr->_M_start + dataLength; - vectorPtr->_M_end_of_storage = vectorPtr->_M_finish; - - // - // removed... - // -#if 0 - dataBuffer.resize(dataLength); - if (dataLength > 0) { - memcpy(&dataBuffer[0], &data[0], dataLength * sizeof(data[0])); - } - -#endif - - T = _T; - EOS = _EOS; - streamID = _streamID; - SRI = _H; - sriChanged = _sriChanged; - inputQueueFlushed = _inputQueueFlushed; - } - - - - - - // - // Required for template instantion for the compilation unit. - // Note: we only define those valid types for which Bulkio IDL is defined. Users wanting to - // inherit this functionality will be unable to since they cannot instantiate and - // link against the template. 
- // - - - template class DataTransfer< CharDataTransferTraits >; - template class DataTransfer< OctetDataTransferTraits >; - template class DataTransfer< ShortDataTransferTraits >; - template class DataTransfer< UShortDataTransferTraits >; - template class DataTransfer< LongDataTransferTraits >; - template class DataTransfer< ULongDataTransferTraits >; - template class DataTransfer< LongLongDataTransferTraits >; - template class DataTransfer< ULongLongDataTransferTraits >; - template class DataTransfer< FloatDataTransferTraits >; - template class DataTransfer< DoubleDataTransferTraits >; - template class DataTransfer< StringDataTransferTraits >; - - } // end of bulkio namespace diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_attachable_port.cpp b/bulkioInterfaces/libsrc/cpp/bulkio_attachable_port.cpp index 147328df8..6798283a4 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_attachable_port.cpp +++ b/bulkioInterfaces/libsrc/cpp/bulkio_attachable_port.cpp @@ -46,10 +46,6 @@ namespace bulkio { if ( sriChangeCB ) { sriChangeCallback = *sriChangeCB; } - - std::string pname("redhawk.bulkio.inport."); - pname = pname + port_name; - logger = rh_logger::Logger::getLogger(pname); } @@ -66,19 +62,11 @@ namespace bulkio { sri_cmp(sriCmp), time_cmp(timeCmp), attach_detach_callback(attach_detach_cb), - logger(logger), newSRICallback(), sriChangeCallback() { stats = new linkStatistics(port_name); - if ( !logger ) { - std::string pname("redhawk.bulkio.inport."); - pname = pname + port_name; - logger = rh_logger::Logger::getLogger(pname); - } - LOG_DEBUG( logger, "bulkio::InAttachablePort CTOR port:" << port_name ); - if ( newSriCB ) { newSRICallback = *newSriCB; } @@ -94,6 +82,11 @@ namespace bulkio { } + template + void InAttachablePort::setLogger(LOGGER_PTR newLogger) { + _portLog = newLogger; + } + template bool InAttachablePort::hasSriChanged () { return sriChanged; @@ -119,12 +112,6 @@ namespace bulkio { newSRICallback = SRICallback(newCallback); } - template - void 
InAttachablePort::setLogger( LOGGER_PTR newLogger ) - { - logger = newLogger; - } - template void InAttachablePort::setNewAttachDetachCallback( Callback *newCallback) { attach_detach_callback = newCallback; @@ -210,7 +197,7 @@ namespace bulkio { template void InAttachablePort::pushSRI(const BULKIO::StreamSRI& H, const BULKIO::PrecisionUTCTime& T) { - TRACE_ENTER(logger, "InAttachablePort::pushSRI" ); + TRACE_ENTER(_portLog, "InAttachablePort::pushSRI" ); // Shared mutex allows concurrent reads // Upgrades to unique lock while modifying values @@ -230,9 +217,9 @@ namespace bulkio { } if (!foundSRI) { if ( newSRICallback ) { - LOG_DEBUG(logger, "pushSRI: About to call user-defined 'newSRICallback' method") + LOG_DEBUG(_portLog, "pushSRI: About to call user-defined 'newSRICallback' method") newSRICallback(tmpH); - LOG_DEBUG(logger, "pushSRI: Returned from user-defined 'newSRICallback' method") + LOG_DEBUG(_portLog, "pushSRI: Returned from user-defined 'newSRICallback' method") } { @@ -254,9 +241,9 @@ namespace bulkio { sriChanged = !schanged || !tchanged; if ( sriChanged && sriChangeCallback ) { - LOG_DEBUG(logger, "pushSRI: About to call user-defined 'sriChangeCallback' method") + LOG_DEBUG(_portLog, "pushSRI: About to call user-defined 'sriChangeCallback' method") sriChangeCallback(tmpH); - LOG_DEBUG(logger, "pushSRI: Returned from user-defined 'sriChangeCallback' method") + LOG_DEBUG(_portLog, "pushSRI: Returned from user-defined 'sriChangeCallback' method") } { @@ -266,7 +253,7 @@ namespace bulkio { } } - TRACE_EXIT(logger, "InAttachablePort::pushSRI" ); + TRACE_EXIT(_portLog, "InAttachablePort::pushSRI" ); } // @@ -285,18 +272,18 @@ namespace bulkio { char* InAttachablePort::attach(const StreamDefinition& stream, const char* userid) throw (typename PortType::AttachError, typename PortType::StreamInputError) { - TRACE_ENTER(logger, "InAttachablePort::attach" ); - LOG_DEBUG( logger, "ATTACHABLE PORT: ATTACH REQUEST, STREAM/USER: " << stream.id << "/" << userid ); + 
TRACE_ENTER(_portLog, "InAttachablePort::attach" ); + LOG_DEBUG( _portLog, "ATTACHABLE PORT: ATTACH REQUEST, STREAM/USER: " << stream.id << "/" << userid ); std::string attachId(""); if ( attach_detach_callback ) { try { - LOG_DEBUG( logger, "ATTACHABLE PORT: CALLING ATTACH CALLBACK, STREAM/USER: " << stream.id << "/" << userid ); + LOG_DEBUG( _portLog, "ATTACHABLE PORT: CALLING ATTACH CALLBACK, STREAM/USER: " << stream.id << "/" << userid ); attachId = attach_detach_callback->attach(stream, userid); } catch(...) { - LOG_ERROR( logger, "ATTACHABLE PORT: ATTACH CALLBACK EXCEPTION, STREAM/USER: " << stream.id << "/" << userid ); + LOG_ERROR( _portLog, "ATTACHABLE PORT: ATTACH CALLBACK EXCEPTION, STREAM/USER: " << stream.id << "/" << userid ); throw typename PortType::AttachError("Callback Failed."); } } @@ -310,10 +297,10 @@ namespace bulkio { attachedStreamMap.insert(std::make_pair(attachId, new StreamDefinition(stream))); attachedUsers.insert(std::make_pair(attachId, std::string(userid))); - LOG_DEBUG( logger, "ATTACHABLE PORT, ATTACH COMPLETED, ID:" << attachId << + LOG_DEBUG( _portLog, "ATTACHABLE PORT, ATTACH COMPLETED, ID:" << attachId << " STREAM/USER" << stream.id << "/" << userid ); - TRACE_EXIT(logger, "InAttachablePort::attach" ); + TRACE_EXIT(_portLog, "InAttachablePort::attach" ); return CORBA::string_dup(attachId.c_str()); } @@ -326,20 +313,20 @@ namespace bulkio { template void InAttachablePort::detach(const char* attachId) { - TRACE_ENTER(logger, "InAttachablePort::detach" ); - LOG_DEBUG( logger, "ATTACHABLE PORT: DETACH REQUESTED, ID:" << attachId ); + TRACE_ENTER(_portLog, "InAttachablePort::detach" ); + LOG_DEBUG( _portLog, "ATTACHABLE PORT: DETACH REQUESTED, ID:" << attachId ); if ( attach_detach_callback ) { try { - LOG_DEBUG( logger, "ATTACHABLE PORT: CALLING DETACH CALLBACK, ID:" << attachId ); + LOG_DEBUG( _portLog, "ATTACHABLE PORT: CALLING DETACH CALLBACK, ID:" << attachId ); attach_detach_callback->detach(attachId); - LOG_DEBUG( logger, 
"ATTACHABLE PORT: RETURNED FROM DETACH CALLBACK, ID:" << attachId ); + LOG_DEBUG( _portLog, "ATTACHABLE PORT: RETURNED FROM DETACH CALLBACK, ID:" << attachId ); } catch(typename PortType::DetachError &ex) { throw ex; } catch(...) { - LOG_ERROR( logger, "ATTACHABLE PORT: DETACH CALLBACK EXCEPTION ID:" << attachId ); + LOG_ERROR( _portLog, "ATTACHABLE PORT: DETACH CALLBACK EXCEPTION ID:" << attachId ); throw typename PortType::DetachError("Unknown issue occured in the detach callback!"); } } @@ -362,9 +349,9 @@ namespace bulkio { throw typename PortType::DetachError("Unknown Attach ID"); } - LOG_DEBUG( logger, "ATTACHABLE PORT: DETACH SUCCESS, ID:" << attachId ); + LOG_DEBUG( _portLog, "ATTACHABLE PORT: DETACH SUCCESS, ID:" << attachId ); - TRACE_EXIT(logger, "InAttachablePort::detach" ); + TRACE_EXIT(_portLog, "InAttachablePort::detach" ); } @@ -862,7 +849,7 @@ namespace bulkio { boost::mutex::scoped_lock lock(attachmentsUpdateLock); try { LOG_TRACE( logger, "Creating ATTACHMENT FOR CONNECTION: " << connectionId); - char* attachId = inPort->attach(_streamDefinition, _name.c_str()); + CORBA::String_var attachId = inPort->attach(_streamDefinition, _name.c_str()); LOG_TRACE( logger, "Created ATTACHMENT, CONNECTION " << connectionId << " ATTACH ID" << attachId); StreamAttachment attachment(connectionId, std::string(attachId), inPort, port); attachment.setLogger(logger); @@ -1187,7 +1174,6 @@ namespace bulkio { for (iter = _streams.begin(); iter != _streams.end(); iter++) { try { iter->detachByAttachIdConnectionId(attachId, connectionId); - //LOG_DEBUG(logger, "ATTACHABLE PORT: DETACH COMPLETD ID:" << attachId ); } catch(...) 
{ //LOG_WARN(logger, "UNABLE TO DETACH ATTACHID/CONNECTIONID: " << attachId << "/" << connectionId); @@ -1295,21 +1281,15 @@ namespace bulkio { { recConnectionsRefresh = false; recConnections.length(0); - if ( !logger ) { - std::string pname("redhawk.bulkio.outport."); - pname = pname + port_name; - logger = rh_logger::Logger::getLogger(pname); - } } template OutAttachablePort::OutAttachablePort(std::string port_name, - LOGGER_PTR logger, + LOGGER_PTR newLogger, ConnectionEventListener *connectCB, ConnectionEventListener *disconnectCB ): Port_Uses_base_impl(port_name), streamContainer(*this), - logger(logger), _connectCB(), _disconnectCB() { @@ -1323,14 +1303,9 @@ namespace bulkio { recConnectionsRefresh = false; recConnections.length(0); - if ( !logger ) { - std::string pname("redhawk.bulkio.outport."); - pname = pname + port_name; - logger = rh_logger::Logger::getLogger(pname); - } - LOG_DEBUG( logger, "bulkio::OutAttachablePort::CTOR port:" << this->name ); - this->streamContainer.setLogger(logger); + LOG_DEBUG( _portLog, "bulkio::OutAttachablePort::CTOR port:" << this->name ); + this->streamContainer.setLogger(_portLog); } template @@ -1342,8 +1317,8 @@ namespace bulkio { template void OutAttachablePort::setLogger( LOGGER_PTR newLogger ) { - logger = newLogger; - this->streamContainer.setLogger(logger); + _portLog = newLogger; + this->streamContainer.setLogger(newLogger); } // @@ -1404,7 +1379,7 @@ namespace bulkio { { try { stats[cid].resetConnectionErrors(); - LOG_TRACE(logger, "Reset connection error stats for: " << cid ); + LOG_TRACE(_portLog, "Reset connection error stats for: " << cid ); } catch(...){ } @@ -1427,7 +1402,7 @@ namespace bulkio { template void OutAttachablePort::connectPort(CORBA::Object_ptr connection, const char* connectionId) { - TRACE_ENTER(logger, "OutAttachablePort::connectPort" ); + TRACE_ENTER(_portLog, "OutAttachablePort::connectPort" ); { boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command 
information is coming in @@ -1438,7 +1413,7 @@ namespace bulkio { port = PortType::_narrow(connection); } catch(...) { - LOG_ERROR( logger, "CONNECT FAILED: UNABLE TO NARROW ENDPOINT, USES PORT:" << this->name ); + LOG_ERROR( _portLog, "CONNECT FAILED: UNABLE TO NARROW ENDPOINT, USES PORT:" << this->name ); throw CF::Port::InvalidPort(1, "Unable to narrow"); } @@ -1476,13 +1451,13 @@ namespace bulkio { this->recConnectionsRefresh = true; this->refreshSRI = true; updateSRIForAllConnections(); - LOG_DEBUG( logger, "CONNECTION ESTABLISHED, PORT/CONNECTION_ID:" << this->name << "/" << connectionId ); + LOG_DEBUG( _portLog, "CONNECTION ESTABLISHED, PORT/CONNECTION_ID:" << this->name << "/" << connectionId ); } if ( _connectCB ) (*_connectCB)(connectionId); this->streamContainer.printState("After connectPort"); - TRACE_EXIT(logger, "OutAttachablePort::connectPort" ); + TRACE_EXIT(_portLog, "OutAttachablePort::connectPort" ); } template @@ -1490,20 +1465,20 @@ namespace bulkio { { { boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in - LOG_DEBUG( logger, "Disconnect Port PORT/CONNECTION_ID:" << this->name << "/" << connectionId ); + LOG_DEBUG( _portLog, "Disconnect Port PORT/CONNECTION_ID:" << this->name << "/" << connectionId ); try { // Detach everything that's associated to connectionId std::string connId(connectionId); - LOG_DEBUG( logger, "DETACH By Connection Id , PORT/CONNECTION_ID:" << this->name << "/" << connectionId ); + LOG_DEBUG( _portLog, "DETACH By Connection Id , PORT/CONNECTION_ID:" << this->name << "/" << connectionId ); this->streamContainer.detachByConnectionId(connId); } catch(...) 
{ - LOG_ERROR(logger," Unable to detach for CONNECTION: " << connectionId ); + LOG_ERROR(_portLog," Unable to detach for CONNECTION: " << connectionId ); } for (unsigned int i = 0; i < outConnections.size(); i++) { if (outConnections[i].second == connectionId) { - LOG_DEBUG( logger, "DISCONNECT, PORT/CONNECTION: " << this->name << "/" << connectionId ); + LOG_DEBUG( _portLog, "DISCONNECT, PORT/CONNECTION: " << this->name << "/" << connectionId ); stats.erase(connectionId); outConnections.erase(outConnections.begin() + i); break; @@ -1579,7 +1554,7 @@ namespace bulkio { hasPortEntry = true; typename PortType::_ptr_type connectedPort = this->getConnectedPort(ftPtr->connection_id); if (connectedPort->_is_nil()) { - LOG_DEBUG( logger, "Unable to find connected port with connectionId: " << ftPtr->connection_id); + LOG_DEBUG( _portLog, "Unable to find connected port with connectionId: " << ftPtr->connection_id); continue; } @@ -1603,7 +1578,7 @@ namespace bulkio { if (foundStream) { foundStream->updateAttachments(streamAttachmentsIter->second); } else { - LOG_WARN( logger, "Unable to locate stream definition for streamId: " << streamId); + LOG_WARN( _portLog, "Unable to locate stream definition for streamId: " << streamId); } } @@ -1663,7 +1638,7 @@ namespace bulkio { std::string connId = *connIdIter; typename PortType::_ptr_type connectedPort = this->getConnectedPort(*connIdIter); if (connectedPort->_is_nil()) { - LOG_DEBUG( logger, "Unable to find connected port with connectionId: " << (*connIdIter)); + LOG_DEBUG( _portLog, "Unable to find connected port with connectionId: " << (*connIdIter)); continue; } @@ -1673,19 +1648,19 @@ namespace bulkio { sriMapIter->second.connections.insert(*connIdIter); } catch( CORBA::TRANSIENT &ex ) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (Transient), PORT/CONNECTION: " << name << "/" << connId); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (Transient), PORT/CONNECTION: " << name << "/" << connId); } } 
catch( CORBA::COMM_FAILURE &ex) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (CommFailure), PORT/CONNECTION: " << name << "/" << connId); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (CommFailure), PORT/CONNECTION: " << name << "/" << connId); } } catch( CORBA::SystemException &ex) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (SystemException), PORT/CONNECTION: " << name << "/" << connId ); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (SystemException), PORT/CONNECTION: " << name << "/" << connId ); } } catch(...) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED, (UnknownException) PORT/CONNECTION: " << name << "/" << connId ); + LOG_ERROR( _portLog, "PUSH-SRI FAILED, (UnknownException) PORT/CONNECTION: " << name << "/" << connId ); } } @@ -1725,7 +1700,7 @@ namespace bulkio { template void OutAttachablePort::pushSRI(const BULKIO::StreamSRI& H, const BULKIO::PrecisionUTCTime& T) { - TRACE_ENTER(logger, "OutAttachablePort::pushSRI" ); + TRACE_ENTER(_portLog, "OutAttachablePort::pushSRI" ); boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in @@ -1756,7 +1731,7 @@ namespace bulkio { if ( (ftPtr->port_name == this->name) and (ftPtr->connection_id == i->second) and (strcmp(ftPtr->stream_id.c_str(),H.streamID) == 0 ) ){ - LOG_DEBUG(logger,"pushSRI - PORT:" << this->name << " CONNECTION:" << ftPtr->connection_id << " SRI streamID:" << H.streamID << " Mode:" << H.mode << " XDELTA:" << 1.0/H.xdelta ); + LOG_DEBUG(_portLog,"pushSRI - PORT:" << this->name << " CONNECTION:" << ftPtr->connection_id << " SRI streamID:" << H.streamID << " Mode:" << H.mode << " XDELTA:" << 1.0/H.xdelta ); std::string connId = i->second; try { @@ -1764,19 +1739,19 @@ namespace bulkio { sri_iter->second.connections.insert( i->second ); } catch( CORBA::TRANSIENT &ex ) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED 
(Transient), PORT/CONNECTION: " << name << "/" << connId); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (Transient), PORT/CONNECTION: " << name << "/" << connId); } } catch( CORBA::COMM_FAILURE &ex) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (CommFailure), PORT/CONNECTION: " << name << "/" << connId); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (CommFailure), PORT/CONNECTION: " << name << "/" << connId); } } catch( CORBA::SystemException &ex) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (SystemException), PORT/CONNECTION: " << name << "/" << connId ); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (SystemException), PORT/CONNECTION: " << name << "/" << connId ); } } catch(...) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED, (UnknownException) PORT/CONNECTION: " << name << "/" << connId ); + LOG_ERROR( _portLog, "PUSH-SRI FAILED, (UnknownException) PORT/CONNECTION: " << name << "/" << connId ); } } } @@ -1785,33 +1760,33 @@ namespace bulkio { if (!portListed) { for (ConnectionsIter i = outConnections.begin(); i != outConnections.end(); ++i) { - LOG_DEBUG(logger,"pushSRI -2- PORT:" << this->name << " CONNECTION:" << i->second << " SRI streamID:" << H.streamID << " Mode:" << H.mode << " XDELTA:" << 1.0/H.xdelta ); + LOG_DEBUG(_portLog,"pushSRI -2- PORT:" << this->name << " CONNECTION:" << i->second << " SRI streamID:" << H.streamID << " Mode:" << H.mode << " XDELTA:" << 1.0/H.xdelta ); std::string connId = i->second; try { i->first->pushSRI(H, T); sri_iter->second.connections.insert( i->second ); } catch( CORBA::TRANSIENT &ex ) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (Transient), PORT/CONNECTION: " << name << "/" << connId); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (Transient), PORT/CONNECTION: " << name << "/" << connId); } } catch( CORBA::COMM_FAILURE &ex) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED 
(CommFailure), PORT/CONNECTION: " << name << "/" << connId); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (CommFailure), PORT/CONNECTION: " << name << "/" << connId); } } catch( CORBA::SystemException &ex) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (SystemException), PORT/CONNECTION: " << name << "/" << connId ); + LOG_ERROR( _portLog, "PUSH-SRI FAILED (SystemException), PORT/CONNECTION: " << name << "/" << connId ); } } catch(...) { if ( reportConnectionErrors(connId) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED, (UnknownException) PORT/CONNECTION: " << name << "/" << connId ); + LOG_ERROR( _portLog, "PUSH-SRI FAILED, (UnknownException) PORT/CONNECTION: " << name << "/" << connId ); } } } } } - TRACE_EXIT(logger, "OutAttachablePort::pushSRI" ); + TRACE_EXIT(_portLog, "OutAttachablePort::pushSRI" ); return; } @@ -1981,7 +1956,7 @@ namespace bulkio { template bool OutAttachablePort::addStream(const StreamDefinition& stream) { - TRACE_ENTER(logger, "OutAttachablePort::addStream" ); + TRACE_ENTER(_portLog, "OutAttachablePort::addStream" ); boost::mutex::scoped_lock lock(updatingPortsLock); //this->streamContainer.printState("Before stream added"); @@ -1995,7 +1970,7 @@ namespace bulkio { // Create a new stream for attachment Stream* newStream = new Stream(stream, std::string(user_id), std::string(stream.id) ); newStream->setPort(this); - newStream->setLogger(logger); + newStream->setLogger(_portLog); // Iterate through connections and apply filterTable bool portListed = false; @@ -2008,7 +1983,7 @@ namespace bulkio { if ( (ftPtr->port_name == this->name) and (ftPtr->connection_id == i->second) and (strcmp(ftPtr->stream_id.c_str(),stream.id) == 0 ) ){ - LOG_DEBUG(logger,"attach - PORT:" << this->name << " CONNECTION:" << ftPtr->connection_id << " SRI streamID:" << stream.id); + LOG_DEBUG(_portLog,"attach - PORT:" << this->name << " CONNECTION:" << ftPtr->connection_id << " SRI streamID:" << stream.id); try { // Create a new attachment for 
valid filterTable entry @@ -2021,15 +1996,15 @@ namespace bulkio { try { newStream->createNewAttachment(i->second, i->first); } catch (typename PortType::AttachError& ex) { - LOG_ERROR( logger, __FUNCTION__ << ": AttachError occurred: " << ex.msg); + LOG_ERROR( _portLog, __FUNCTION__ << ": AttachError occurred: " << ex.msg); } catch (typename PortType::StreamInputError& ex) { - LOG_ERROR( logger, __FUNCTION__ << ": StreamInputError occurred: " << ex.msg); + LOG_ERROR( _portLog, __FUNCTION__ << ": StreamInputError occurred: " << ex.msg); } catch(...) { - LOG_ERROR( logger, __FUNCTION__ << ": Unknown attachment error occured: " << this->name << "/" << i->second ); + LOG_ERROR( _portLog, __FUNCTION__ << ": Unknown attachment error occured: " << this->name << "/" << i->second ); } } catch(...) { - LOG_ERROR( logger, "UNABLE TO CREATE ATTACHMENT, PORT/CONNECTION: " << this->name << "/" << i->second ); + LOG_ERROR( _portLog, "UNABLE TO CREATE ATTACHMENT, PORT/CONNECTION: " << this->name << "/" << i->second ); } } } @@ -2046,36 +2021,36 @@ namespace bulkio { try { newStream->createNewAttachment(i->second, i->first); } catch (typename PortType::AttachError& ex) { - LOG_ERROR( logger, __FUNCTION__ << ": AttachError occurred: " << ex.msg); + LOG_ERROR( _portLog, __FUNCTION__ << ": AttachError occurred: " << ex.msg); } catch (typename PortType::StreamInputError& ex) { - LOG_ERROR( logger, __FUNCTION__ << ": StreamInputError occurred: " << ex.msg); + LOG_ERROR( _portLog, __FUNCTION__ << ": StreamInputError occurred: " << ex.msg); } catch(...) 
{ - LOG_ERROR( logger, __FUNCTION__ << ": Unknown attachment error occured: " << this->name << "/" << i->second ); + LOG_ERROR( _portLog, __FUNCTION__ << ": Unknown attachment error occured: " << this->name << "/" << i->second ); } } } - LOG_DEBUG(logger, "ATTACHABLE PORT: CREATED NEW STREAM :" << stream.id ); + LOG_DEBUG(_portLog, "ATTACHABLE PORT: CREATED NEW STREAM :" << stream.id ); this->streamContainer.addStream(*newStream); delete newStream; this->streamContainer.printState("End of Attach"); - TRACE_EXIT(logger, "OutAttachablePort::attach" ); + TRACE_EXIT(_portLog, "OutAttachablePort::attach" ); return true; } template void OutAttachablePort::removeStream(const std::string& streamId) { - TRACE_ENTER(logger, "OutAttachablePort::removeStream" ); + TRACE_ENTER(_portLog, "OutAttachablePort::removeStream" ); boost::mutex::scoped_lock lock(updatingPortsLock); //this->streamContainer.printState("Beginning of RemoveStream"); this->streamContainer.removeStreamByStreamId(streamId); this->streamContainer.printState("End of RemoveStream"); - TRACE_EXIT(logger, "OutAttachablePort::removeStream" ); + TRACE_EXIT(_portLog, "OutAttachablePort::removeStream" ); } // @@ -2086,7 +2061,7 @@ namespace bulkio { template void OutAttachablePort::detach(const char* attach_id ) { - TRACE_ENTER(logger, "OutAttachablePort::detach" ); + TRACE_ENTER(_portLog, "OutAttachablePort::detach" ); boost::mutex::scoped_lock lock(updatingPortsLock); std::string attachId(attach_id); @@ -2094,13 +2069,13 @@ namespace bulkio { this->streamContainer.detachByAttachId(std::string(attachId)); this->streamContainer.printState("End of Detach"); - TRACE_EXIT(logger, "OutAttachablePort::detach" ); + TRACE_EXIT(_portLog, "OutAttachablePort::detach" ); } template void OutAttachablePort::detach(const char* attach_id, const char *connection_id ) { - TRACE_ENTER(logger, "OutAttachablePort::detach" ); + TRACE_ENTER(_portLog, "OutAttachablePort::detach" ); boost::mutex::scoped_lock lock(updatingPortsLock); 
//this->streamContainer.printState("Beginning of Detach"); @@ -2108,13 +2083,13 @@ namespace bulkio { std::string connectionId(connection_id); if (connectionId.empty()) { - LOG_WARN(logger, "UNABLE TO DETACH SPECIFIC CONNECTION ID: CONNECTION ID IS "); + LOG_WARN(_portLog, "UNABLE TO DETACH SPECIFIC CONNECTION ID: CONNECTION ID IS "); } else { this->streamContainer.detachByAttachIdConnectionId(attachId, connectionId); - LOG_DEBUG(logger, "ATTACHABLE PORT: DETACH COMPLETED ID:" << attachId ); + LOG_DEBUG(_portLog, "ATTACHABLE PORT: DETACH COMPLETED ID:" << attachId ); } this->streamContainer.printState("End of Detach"); - TRACE_EXIT(logger, "OutAttachablePort::detach" ); + TRACE_EXIT(_portLog, "OutAttachablePort::detach" ); } // Grab port by connectionId diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_datablock.cpp b/bulkioInterfaces/libsrc/cpp/bulkio_datablock.cpp index 61d0a4fda..98bf04da9 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_datablock.cpp +++ b/bulkioInterfaces/libsrc/cpp/bulkio_datablock.cpp @@ -25,214 +25,345 @@ #include "bulkio_base.h" #include "bulkio_datablock.h" +#include "bulkio_stream.h" #include "bulkio_time_operators.h" +namespace bulkio { + namespace { + double get_drift(const SampleTimestamp& begin, const bulkio::SampleTimestamp& end, double xdelta) + { + double real = end.time - begin.time; + double expected = (end.offset - begin.offset)*xdelta; + return real-expected; + } + + void validate_timestamps(const std::list& timestamps) + { + // Validity checks + if (timestamps.empty()) { + throw std::logic_error("block contains no timestamps"); + } else if (timestamps.front().offset != 0) { + throw std::logic_error("no timestamp at offset 0"); + } + } + } +} + using bulkio::SampleTimestamp; using bulkio::DataBlock; template -struct DataBlock::Impl +struct DataBlock::Impl : public StreamDescriptor { - std::vector data; - BULKIO::StreamSRI sri; - std::list timestamps; - int sriChangeFlags; - bool inputQueueFlushed; + Impl(const 
bulkio::StreamDescriptor& sri, const T& data) : + StreamDescriptor(sri), + data(data), + sriChangeFlags(bulkio::sri::NONE), + inputQueueFlushed(false), + dataOwned(false) + { + } + + void copy() + { + // Default implementation assumes a shared-ownership class for data + // (shared_buffer, shared_bitbuffer); make a copy and tag the data as + // owned + data = data.copy(); + dataOwned = true; + } + + T data; + std::list timestamps; + int sriChangeFlags; + bool inputQueueFlushed; + + // Flag to track whether a shared_buffer might share references with other + // consumers (e.g., other input ports in the same process space). Note that + // this does *not* track whether there is another reference to the same + // data block. + bool dataOwned; }; +namespace bulkio { + template <> + void DataBlock::Impl::copy() + { + // Strings do not have shared ownership, making this a no-op + } +} + template DataBlock::DataBlock() : - _impl() + _impl() { } template -DataBlock::DataBlock(const BULKIO::StreamSRI& sri, size_t size) : - _impl(new Impl()) +DataBlock::DataBlock(const bulkio::StreamDescriptor& sri, const T& data) : + _impl(boost::make_shared(sri, data)) { - _impl->data.resize(size); - _impl->sri = sri; } template DataBlock DataBlock::copy() const { - DataBlock result; - if (_impl) { - result._impl = boost::make_shared(*_impl); - } - return result; + DataBlock result; + if (_impl) { + result._impl = boost::make_shared(*_impl); + result._impl->copy(); + } + return result; } template const BULKIO::StreamSRI& DataBlock::sri() const { - return _impl->sri; + return _impl->sri(); } template double DataBlock::xdelta() const { - return _impl->sri.xdelta; + return sri().xdelta; } template -T* DataBlock::data() +const T& DataBlock::buffer() const { - return &(_impl->data[0]); + return _impl->data; } template -const T* DataBlock::data() const +void DataBlock::buffer(const T& data) { - return &(_impl->data[0]); + _impl->data = data; } template -size_t DataBlock::size() const +void 
DataBlock::addTimestamp(const bulkio::SampleTimestamp& timestamp) { - return _impl->data.size(); + std::list::iterator pos = _impl->timestamps.begin(); + const std::list::iterator end = _impl->timestamps.end(); + while ((pos != end) && (timestamp.offset >= pos->offset)) { + // TODO: Replace existing + ++pos; + } + _impl->timestamps.insert(pos, timestamp); } template -void DataBlock::resize(size_t count) +std::list DataBlock::getTimestamps() const { - _impl->data.resize(count); + return _impl->timestamps; } template -bool DataBlock::complex() const +const BULKIO::PrecisionUTCTime& DataBlock::getStartTime() const { - return (_impl->sri.mode != 0); + const std::list& timestamps = _impl->timestamps; + validate_timestamps(timestamps); + + return _impl->timestamps.front().time; } template -std::complex* DataBlock::cxdata() +double DataBlock::getNetTimeDrift() const { - return reinterpret_cast(data()); + const std::list& timestamps = _impl->timestamps; + validate_timestamps(timestamps); + + return get_drift(timestamps.front(), timestamps.back(), xdelta()); } template -const std::complex* DataBlock::cxdata() const +double DataBlock::getMaxTimeDrift() const { - return reinterpret_cast(data()); + const std::list& timestamps = _impl->timestamps; + validate_timestamps(timestamps); + + double max = 0.0; + std::list::const_iterator current = timestamps.begin(); + std::list::const_iterator next = current; + ++next; + for (; next != timestamps.end(); ++current, ++next) { + double drift = get_drift(*current, *next, xdelta()); + if (std::abs(drift) > std::abs(max)) { + max = drift; + } + } + return max; } template -size_t DataBlock::cxsize() const +bool DataBlock::sriChanged() const { - return size() / 2; + return sriChangeFlags() != bulkio::sri::NONE; } template -void DataBlock::addTimestamp(const bulkio::SampleTimestamp& timestamp) +int DataBlock::sriChangeFlags() const { - std::list::iterator pos = _impl->timestamps.begin(); - const std::list::iterator end = 
_impl->timestamps.end(); - while ((pos != end) && (timestamp.offset >= pos->offset)) { - // TODO: Replace existing - ++pos; - } - _impl->timestamps.insert(pos, timestamp); + return _impl->sriChangeFlags; } template -std::list DataBlock::getTimestamps() const +void DataBlock::sriChangeFlags(int flags) { - return _impl->timestamps; + _impl->sriChangeFlags = flags; } -namespace { - double get_drift(const bulkio::SampleTimestamp& begin, const bulkio::SampleTimestamp& end, double xdelta) - { - double real = end.time - begin.time; - double expected = (end.offset - begin.offset)*xdelta; - return real-expected; - } +template +bool DataBlock::inputQueueFlushed() const +{ + return _impl->inputQueueFlushed; } template -double DataBlock::getNetTimeDrift() const +void DataBlock::inputQueueFlushed(bool flushed) { - const std::list& timestamps = _impl->timestamps; - // Validity checks - if (timestamps.empty()) { - throw std::logic_error("block contains no timestamps"); - } else if (timestamps.front().offset != 0) { - throw std::logic_error("no timestamp at offset 0"); - } + _impl->inputQueueFlushed = flushed; +} - return get_drift(timestamps.front(), timestamps.back(), _impl->sri.xdelta); +template +DataBlock::operator unspecified_bool_type() const +{ + return _impl?&DataBlock::_impl:0; } + +// +// SampleDataBlock +// +using bulkio::SampleDataBlock; + template -double DataBlock::getMaxTimeDrift() const +SampleDataBlock::SampleDataBlock() : + Base() +{ +} + +template +SampleDataBlock::SampleDataBlock(const bulkio::StreamDescriptor& sri, + const ScalarBuffer& buffer) : + Base(sri, buffer) +{ +} + +template +SampleDataBlock::SampleDataBlock(const BULKIO::StreamSRI& sri, size_t size) : + Base(bulkio::StreamDescriptor(sri), redhawk::buffer(size)) { - const std::list& timestamps = _impl->timestamps; - // Validity checks - if (timestamps.empty()) { - throw std::logic_error("block contains no timestamps"); - } else if (timestamps.front().offset != 0) { - throw std::logic_error("no 
timestamp at offset 0"); - } - - double max = 0.0; - std::list::const_iterator current = timestamps.begin(); - std::list::const_iterator next = current; - ++next; - for (; next != timestamps.end(); ++current, ++next) { - double drift = get_drift(*current, *next, _impl->sri.xdelta); - if (std::abs(drift) > std::abs(max)) { - max = drift; +} + +template +T* SampleDataBlock::data() +{ + // To preserve data integrity of shared buffers received from a port, make + // a one-time copy of the buffer and assume ownership of it + if (!_impl->dataOwned) { + _impl->copy(); } - } - return max; + return const_cast(_impl->data.data()); } template -bool DataBlock::sriChanged() const +const T* SampleDataBlock::data() const { - return sriChangeFlags() != bulkio::sri::NONE; + return _impl->data.data(); } template -int DataBlock::sriChangeFlags() const +size_t SampleDataBlock::size() const { - return _impl->sriChangeFlags; + return _impl->data.size(); } template -void DataBlock::sriChangeFlags(int flags) +void SampleDataBlock::resize(size_t count) { - _impl->sriChangeFlags = flags; + // We have to create a writeable temporary buffer, and although there is a + // resize() operation, it would potentially require two allocations and two + // memory copies: once to make a copy of the current buffer, and again on + // the resize. Instead, we allocate in one step and copy in another. 
+ redhawk::buffer temp(count); + temp.replace(0, std::min(_impl->data.size(), count), _impl->data); + _impl->data = temp; + // Creating a new buffer also alleviates concern about buffer sharing + _impl->dataOwned = true; } template -bool DataBlock::inputQueueFlushed() const +bool SampleDataBlock::complex() const { - return _impl->inputQueueFlushed; + return _impl->complex(); } template -void DataBlock::inputQueueFlushed(bool flushed) +std::complex* SampleDataBlock::cxdata() +{ + // Defer to the data() method to ensure that the data gets copied if it's a + // potentially shared reference + return reinterpret_cast(data()); +} + +template +const std::complex* SampleDataBlock::cxdata() const +{ + return reinterpret_cast(data()); +} + +template +size_t SampleDataBlock::cxsize() const +{ + return size() / 2; +} + +template +void SampleDataBlock::swap(std::vector& other) +{ + // Copy the vector data into a new shared buffer + ScalarBuffer data = ScalarBuffer::make_transient(&other[0], other.size()).copy(); + // Swap the block's data with the new shared buffer + _impl->data.swap(data); + // Assign the old data to the vector + other.assign(data.begin(), data.end()); +} + +template +const typename SampleDataBlock::ScalarBuffer& SampleDataBlock::buffer() const { - _impl->inputQueueFlushed = flushed; + // This method is overridden to extend the documentation to cover real vs. 
+ // complex data, but does not modify the behavior + return Base::buffer(); } template -void DataBlock::swap(std::vector& other) +typename SampleDataBlock::ComplexBuffer SampleDataBlock::cxbuffer() const { - _impl->data.swap(other); + return ComplexBuffer::recast(buffer()); } // Instantiate templates for supported types -template class DataBlock; -template class DataBlock; -template class DataBlock; -template class DataBlock; -template class DataBlock; -template class DataBlock; -template class DataBlock; -template class DataBlock; -template class DataBlock; -template class DataBlock; +#define INSTANTIATE_TEMPLATE(x) \ + template class DataBlock< x >; + +#define INSTANTIATE_NUMERIC_TEMPLATE(x) \ + INSTANTIATE_TEMPLATE(redhawk::shared_buffer); \ + template class SampleDataBlock; + +// String (XML, file) and bit blocks use the basic DataBlock class +INSTANTIATE_TEMPLATE(std::string); +INSTANTIATE_TEMPLATE(redhawk::shared_bitbuffer); + +// Numeric types support the full SampleDataBlock interface +INSTANTIATE_NUMERIC_TEMPLATE(int8_t); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::Octet); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::Short); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::UShort); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::Long); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::ULong); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::LongLong); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::ULongLong); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::Float); +INSTANTIATE_NUMERIC_TEMPLATE(CORBA::Double); diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_datablock.h b/bulkioInterfaces/libsrc/cpp/bulkio_datablock.h deleted file mode 100644 index 61ea9c131..000000000 --- a/bulkioInterfaces/libsrc/cpp/bulkio_datablock.h +++ /dev/null @@ -1,496 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -#ifndef __bulkio_datablock_h -#define __bulkio_datablock_h - -#include -#include - -#include - -#include - -namespace bulkio { - - /** - * @brief Extended time stamp container. - * - * SampleTimestamp adds additional context to a BULKIO::PrecisionUTCTime - * time stamp. When data is read from an InputStream, it may span more than - * one packet, or its start may not be on a packet boundary. In these cases, - * the @a offset and @a synthetic fields allow more sophisticated handling - * of time information. - * - * The @a offset indicates at which sample @a time applies. If the sample - * data is complex, @a offset should be interpreted in terms of complex - * samples (i.e., two real values per index). - * - * A %SampleTimestamp is considered synthetic if it was generated by an - * %InputStream because there was no received time stamp available at that - * sample offset. This occurs when the prior read did not end on a packet - * boundary; only the first time stamp in a DataBlock can be synthetic. - */ - struct SampleTimestamp - { - /** - * @brief Constructor. - * @param time Time stamp. - * @param offset Sample offset. - * @param synthetic False if @p time was received, true if interpolated. 
- */ - SampleTimestamp(const BULKIO::PrecisionUTCTime& time, size_t offset=0, bool synthetic=false) : - time(time), - offset(offset), - synthetic(synthetic) - { - } - - /// @brief The time at which the referenced sample was created. - BULKIO::PrecisionUTCTime time; - - /// @brief The 0-based index of the sample at which @a time applies. - size_t offset; - - /// @brief Indicates whether @a time was interpolated. - bool synthetic; - }; - - /** - * @brief Container for sample data and stream metadata read from an - * InputStream. - * @headerfile bulkio_datablock.h - * - * %DataBlock is a smart pointer-based class that encapsulates the result of - * a single-read operation on an InputStream. It includes both sample data, - * which may be real or complex, and metadata, which includes signal-related - * information (SRI). - * - * @warning Do not declare instances of this template class directly in user - * code; the template parameter and class name are not considered - * API. Use the type-specific @c typedef instead, such as - * bulkio::FloatDataBlock, or the nested @c typedef BlockType from - * an %InputStream. - * - * DataBlocks have reference semantics; in other words, assigning one block - * to another does not make a copy but rather shares the same sample data - * and metadata. When the last reference expires, the memory is released to - * the system to prevent memory leaks. Additionally, blocks are inexpensive - * to return by value, reassign, or store in nested data types. - * - * The default constructor creates an invalid or "null" block. Likewise, - * %InputStream read operations may return an invalid block if the operation - * cannot be completed. 
When receiving a data block, you must always check - * for validity before accessing the sample data or metadata: - * @code - * if (!block) { - * // handle failure - * } else { - * // access data and metadata - * } - * @endcode - * - * While it is possible to generate DataBlocks in user code, they are usually - * obtained by reading from an %InputStream. - * @see InputStream::read - * @see InputStream::tryread - * - * @par Real vs. Complex Samples - * Because BulkIO streams support both real and complex sample data, blocks - * store data internally as an array of real samples, thus providing methods that - * allow the user to interpret the data as either real or complex. When the - * complex mode changes, this is typically indicated with the corresponding - * SRI change flag (see sriChangeFlags()). On a per-block basis, the complex() - * method indicates whether the sample data is intended to be handled as real - * or complex: - * @code - * if (block.complex()) { - * const float* cxdata = block.cxdata(); - * for (size_t index = 0; index < block.cxsize(); ++index) { - * // do something with cxdata[index] - * } - * } else { - * const float* data = block.data(); - * for (size_t index = 0; index < block.size(); ++index) { - * // do something with data[index] - * } - * } - * @endcode - */ - template - class DataBlock - { - public: - /// @brief The native type of a real sample. - typedef T ScalarType; - - /// @brief The native type of a complex sample. - typedef std::complex ComplexType; - - /** - * @brief Default constructor. - * @see InputStream::read - * @see InputStream::tryread - * - * Create a null block. This block has no data nor metadata associated with - * it. No methods may be called on a null %DataBlock except for operator!, - * which will always return true, and operator==, which returns true if the - * other %DataBlock is also null, or false otherwise. - * - * DataBlocks are typically obtained by reading from an InputStream. 
- */ - DataBlock(); - - /** - * @brief Construct a %DataBlock, optionally allocating buffer space. - * @param sri The SRI that describes the data. - * @param size Number of samples to allocate. - * - * Creates a new, valid block providing enough allocated space to hold @a size real - * samples in the internal buffer. If the data is complex, @a size must be - * twice the desired number of complex samples. - * - * @note This method is typically called by InputStream. - */ - DataBlock(const BULKIO::StreamSRI& sri, size_t size=0); - - /** - * @brief Copies this block's data and metadata. - * @returns A new block. - * - * Makes a complete copy of this block, which returns a unique block that does - * not share this block's data or metadata. - * - * If this block is invalid, returns a new null block. - */ - DataBlock copy() const; - - /** - * @brief Gets the stream metadata. - * @returns Read-only reference to stream SRI. - * @pre Block is valid. - * - * The SRI represents the stream metadata at the time the block was read. - */ - const BULKIO::StreamSRI& sri() const; - - /** - * @brief Gets the X-axis delta. - * @returns The distance between two adjacent samples in the X direction. - * @pre Block is valid. - * - * Because the X-axis is commonly in terms of time (that is, @c sri.xunits is - * @c BULKIO::UNITS_TIME), this is typically the reciprocal of the sample - * rate. - */ - double xdelta() const; - - /** - * @brief Read/write access to real sample data. - * @returns Pointer to first element as a real sample. - * @pre Block is valid. - * @see size() - * @see cxdata() - * - * Inteprets the internal buffer as real samples. Up to size() samples may - * be accessed via the returned pointer. - * - * To interpret the data as complex samples, use cxdata(). - */ - ScalarType* data(); - - /** - * @brief Read-only access to real sample data. - * @returns Const pointer to first element as a real sample. - * @pre Block is valid. 
- * @see size() - * @see cxdata() const - * - * Inteprets the internal buffer as real samples. Up to size() samples may - * be accessed via the returned pointer. - * - * To interpret the data as complex samples, use cxdata() const. - */ - const ScalarType* data() const; - - /** - * @brief Gets the size of the data in terms of real samples. - * @returns Number of real samples. - * @see cxsize() - */ - size_t size() const; - - /** - * @brief Resizes the sample data buffer. - * @param count Number of scalar samples. - * @pre Block is valid. - * - * Adjusts the size of the internal buffer to hold @a count scalar samples. - * If the data is complex, @a count should be twice the number of complex - * samples. - * - * @warning Use of this method is discouraged. - */ - void resize(size_t count); - - /** - * @brief Checks whether data should be interpreted as complex samples. - * @returns True if data is complex. False if data is real. - * @pre Block is valid. - * - * The sample data is considered complex if @c sri.mode is non-zero. - * - * If the data is complex, the offsets for the time stamps returned by - * getTimestamps() are in terms of complex samples. - */ - bool complex() const; - - /** - * @brief Read/write access to complex sample data. - * @returns Pointer to first element as a complex sample. - * @pre Block is valid. - * @see cxsize() - * @see data() - * - * Inteprets the internal buffer as complex samples. Up to cxsize() samples - * may be accessed via the returned pointer. - * - * To interpret the data as real samples, use data(). - */ - ComplexType* cxdata(); - - /** - * @brief Read-only access to complex sample data. - * @returns Const pointer to first element as a complex sample. - * @pre Block is valid. - * @see cxsize() - * @see data() const - * - * Inteprets the internal buffer as complex samples. Up to cxsize() samples - * may be accessed via the returned pointer. - * - * To interpret the data as real samples, use data() const. 
- */ - const ComplexType* cxdata() const; - - /** - * @brief Gets the size of the data in terms of complex samples. - * @returns Number of complex samples. - * @pre Block is valid. - * @see size() - */ - size_t cxsize() const; - - /** - * @brief Checks whether the SRI has changed since the last read from the - * same stream. - * @returns True if the SRI has changed. False is SRI is unchanged. - * @pre Block is valid. - * @see sriChangeFlags() - */ - bool sriChanged() const; - - /** - * @brief Checks which SRI fields have changed since the last read from - * the same stream. - * @returns Bit mask representing changed fields. - * @pre Block is valid. - * @see sriChanged() - * - * If no SRI change has occurred since the last read, the returned value is - * @c bulkio::sri::NONE (equal to 0). Otherwise, the returned value is the - * bitwise OR of one or more of the following flags: - * @li @c bulkio::sri::HVERSION - * @li @c bulkio::sri::XSTART - * @li @c bulkio::sri::XDELTA - * @li @c bulkio::sri::XUNITS - * @li @c bulkio::sri::SUBSIZE - * @li @c bulkio::sri::YSTART - * @li @c bulkio::sri::YDELTA - * @li @c bulkio::sri::YUNITS - * @li @c bulkio::sri::MODE - * @li @c bulkio::sri::STREAMID - * @li @c bulkio::sri::BLOCKING - * @li @c bulkio::sri::KEYWORDS - * - * The @c HVERSION and @c STREAMID flags are not set in normal operation. - */ - int sriChangeFlags() const; - - /** - * @brief Sets the flags for which SRI fields have changed since the - * last read from the same stream. - * @param flags Bit mask representing changed fields. - * @pre Block is valid. - * @see sriChangeFlags() - * - * @note This method is typically called by InputStream. - */ - void sriChangeFlags(int flags); - - /** - * @brief Checks whether the input queue has flushed since the last read. - * @returns True if an input queue flush occurred. False if no flush has occurred. - * @pre Block is valid. 
- * - * An input queue flush indicates that the InPort was unable to keep up - * with incoming packets for non-blocking streams and emptied the queue - * to catch up. - * - * The %InPort reports a flush once, on the next queued packet. This is - * typically reflected in the next %DataBlock read from any InputStream - * associated with the port; however, this does not necessarily mean that - * any packets for that %InputStream were discarded. - */ - bool inputQueueFlushed() const; - - /** - * @brief Set the input queue flush flag. - * @param flush True if an input queue flush occurred. False if no flush has occurred. - * @pre Block is valid. - * @see inputQueueFlushed() - * - * @note This method is typically called by InputStream. - */ - void inputQueueFlushed(bool flush); - - /** - * @brief Add a time stamp in sorted order. - * @param timestamp The new time stamp. - * @pre Block is valid. - * - * Inserts @a timestamp into the list of timestamps, sorted in sample - * offset order. - * - * If complex() is true, @a timestamp.offset is interpreted in terms of - * complex samples. - * - * @note No validity checks are performed on @a timestamp. - */ - void addTimestamp(const SampleTimestamp& timestamp); - - /** - * @brief Returns the time stamps for the sample data. - * - * If complex() is true, the offsets of the returned time stamps should be - * interpreted in terms of complex samples. - * - * Valid %DataBlocks obtained by reading from an InputStream are guaranteed - * to have at least one time stamp, at offset 0. If the read spanned more - * than one packet, each packet's time stamp is included with the packet's - * respective offsets from the first sample. - * - * When the %DataBlock is read from an %InputStream, only the first time - * stamp may be synthetic. This occurs when the prior read did not consume - * a full packet worth of data. In this case, the %InputStream linearly - * interpolates the time stamp based on the stream's xdelta value. 
- * - * @note The list is returned as a temporary value. If you plan to iterate - * through the returned list, it must be stored in a local variable. - */ - std::list getTimestamps() const; - - /** - * @brief Calculates the difference between the expected and actual value - * of the last time stamp - * @returns Difference, in seconds, between expected and actual value. - * @pre Block is valid. - * @see getMaxTimeDrift() - * @see xdelta() - * - * If this %DataBlock contains more than one time stamp, this method - * compares the last time stamp to a linearly interpolated value based on - * the initial time stamp, the StreamSRI xdelta, and the sample offset. - * This difference gives a rough estimate of the deviation between the - * nominal and actual sample rates over the sample period. - * - * @note If the SRI X-axis is not in units of time, this value has no - * meaning. - */ - double getNetTimeDrift() const; - - /** - * @brief Calculates the largest difference between expected and actual - * time stamps in the block. - * @returns Greatest difference, in seconds, between expected and actual - * time stamps. - * @pre Block is valid. - * @see getNetTimeDrift() - * @see xdelta() - * - * If this %DataBlock contains more than one time stamp, this method - * compares each time stamp to its linearly interpolated equivalent time - * stamp, based on the initial time stamp, the StreamSRI xdelta, and - * the sample offset. The greatest deviation is reported; this difference - * gives a rough indication of how severely the actual sample rate deviates - * from the nominal sample rate on a packet-to-packet basis. - * - * @note If the SRI X-axis is not in units of time, this value has no - * meaning. - */ - double getMaxTimeDrift() const; - - /** - * @brief Checks block validity. - * @returns True if this block is invalid. False if the block is valid. - * - * Invalid (null) blocks do not contain any sample data or metadata. 
An - * InputStream read operation may return a null block if there is no data - * available or the operation is interrupted. - * - * If this method returns true, no other methods except comparison or - * assignment may be called. - */ - bool operator! () const - { - return !_impl; - } - - /** - * @brief Swaps the internal buffer with a vector. - * @param other Vector of real samples with which to swap. - * @pre Block is valid. - * - * Exchanges the sample data in the internal buffer with that of @a other. - * - * @warning Use of this method is discouraged. - */ - void swap(std::vector& other); - private: - /// @cond IMPL - struct Impl; - boost::shared_ptr _impl; - /// @endcond IMPL - }; - - typedef DataBlock CharDataBlock; - typedef DataBlock OctetDataBlock; - typedef DataBlock ShortDataBlock; - typedef DataBlock UShortDataBlock; - typedef DataBlock LongDataBlock; - typedef DataBlock ULongDataBlock; - typedef DataBlock LongLongDataBlock; - typedef DataBlock ULongLongDataBlock; - typedef DataBlock FloatDataBlock; - typedef DataBlock DoubleDataBlock; - -} // end of bulkio namespace - -#endif diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_in_port.cpp b/bulkioInterfaces/libsrc/cpp/bulkio_in_port.cpp index 8ddc7ede6..b5947b659 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_in_port.cpp +++ b/bulkioInterfaces/libsrc/cpp/bulkio_in_port.cpp @@ -17,64 +17,51 @@ * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ -#include - -#include - -namespace bulkio { - - /* - * Wrap Callback functions as SriListerer objects - */ - class StaticSriCallback : public SriListener - { - public: - virtual void operator() ( BULKIO::StreamSRI& sri) - { - (*func_)(sri); - } - - StaticSriCallback ( SriListenerCallbackFn func) : - func_(func) - { - } - private: +#include +#include +#include - SriListenerCallbackFn func_; - }; +#include +#include +namespace bulkio { // ---------------------------------------------------------------------------------------- // Source/Input Port Definitions // ---------------------------------------------------------------------------------------- - template < typename PortTraits > - InPortBase< PortTraits >::InPortBase(std::string port_name, - LOGGER_PTR logger, - bulkio::sri::Compare sriCmp, - SriListener *newStreamCB): - Port_Provides_base_impl(port_name), + template + InPort::InPort(std::string port_name, + LOGGER_PTR logger, + bulkio::sri::Compare sriCmp, + SriListener *newStreamCB): + redhawk::NegotiableProvidesPortBase(port_name), sri_cmp(sriCmp), newStreamCallback(), + maxQueue(100), breakBlock(false), blocking(false), - queueSem(new queueSemaphore(100)), - stats(new linkStatistics(port_name, sizeof(TransportType))), - logger(logger) + stats(new linkStatistics(port_name)) { + // Manually set the bit size because the statistics ctor only takes a byte + // count + stats->setBitSize(NativeTraits::bits); + std::string _cmpMsg("USER_DEFINED"); std::string _sriMsg("EMPTY"); - if ( !logger ) { + if (!logger) { std::string pname("redhawk.bulkio.inport."); pname = pname + port_name; - logger = rh_logger::Logger::getLogger(pname); + setLogger(rh_logger::Logger::getLogger(pname)); + } else { + setLogger(logger); } if (newStreamCB) { - newStreamCallback = boost::shared_ptr< SriListener >( newStreamCB, null_deleter()); + newStreamCallback = boost::ref(*newStreamCB); _sriMsg = "USER_DEFINED"; } @@ -83,42 +70,38 @@ namespace bulkio { sri_cmp = 
bulkio::sri::DefaultComparator; } - LOG_DEBUG( logger, "bulkio::InPort CTOR port:" << name << - " Blocking/MaxInputQueueSize " << blocking << "/" << queueSem->getMaxValue() << + LOG_DEBUG( _portLog, "bulkio::InPort CTOR port:" << name << + " Blocking/MaxInputQueueSize " << blocking << "/" << maxQueue << " SriCompare/NewStreamCallback " << _cmpMsg << "/" << _sriMsg ); } - template < typename PortTraits > - InPortBase< PortTraits >::~InPortBase() + template + InPort::~InPort() { - TRACE_ENTER( logger, "InPort::DTOR" ); + TRACE_ENTER( _portLog, "InPort::DTOR" ); // block any data coming out of getPacket.. block(); - LOG_TRACE( logger, "PORT:" << name << " DUMP PKTS:" << workQueue.size() ); + LOG_TRACE( _portLog, "PORT:" << name << " DUMP PKTS:" << packetQueue.size() ); // purge the queue... - while (workQueue.size() != 0) { - DataTransferType *tmp = workQueue.front(); - workQueue.pop_front(); - delete tmp; + while (packetQueue.size() != 0) { + delete packetQueue.front(); + packetQueue.pop_front(); } // clean up allocated containers - if ( queueSem ) delete queueSem; - if ( stats ) delete stats; - TRACE_EXIT( logger, "InPort::DTOR" ); + TRACE_EXIT( _portLog, "InPort::DTOR" ); } - - template < typename PortTraits > - BULKIO::PortStatistics * InPortBase< PortTraits >::statistics() + template + BULKIO::PortStatistics * InPort::statistics() { SCOPED_LOCK lock(dataBufferLock); BULKIO::PortStatistics_var recStat = new BULKIO::PortStatistics(stats->retrieve()); @@ -127,13 +110,13 @@ namespace bulkio { } - template < typename PortTraits > - BULKIO::PortUsageType InPortBase< PortTraits >::state() + template + BULKIO::PortUsageType InPort::state() { SCOPED_LOCK lock(dataBufferLock); - if (workQueue.size() == queueSem->getMaxValue()) { + if (packetQueue.size() == maxQueue) { return BULKIO::BUSY; - } else if (workQueue.size() == 0) { + } else if (packetQueue.empty()) { return BULKIO::IDLE; } else { return BULKIO::ACTIVE; @@ -143,179 +126,294 @@ namespace bulkio { } - template < 
typename PortTraits > - BULKIO::StreamSRISequence * InPortBase< PortTraits >::activeSRIs() + template + BULKIO::StreamSRISequence * InPort::activeSRIs() { SCOPED_LOCK lock(sriUpdateLock); - BULKIO::StreamSRISequence seq_rtn; - SriMap::iterator currH; - int i = 0; - for (currH = currentHs.begin(); currH != currentHs.end(); currH++) { - i++; - seq_rtn.length(i); - seq_rtn[i-1] = currH->second.first; + BULKIO::StreamSRISequence_var retSRI = new BULKIO::StreamSRISequence(); + for (SriTable::iterator currH = currentHs.begin(); currH != currentHs.end(); ++currH) { + ossie::corba::push_back(retSRI, currH->second.first.sri()); } - BULKIO::StreamSRISequence_var retSRI = new BULKIO::StreamSRISequence(seq_rtn); // NOTE: You must delete the object that this function returns! return retSRI._retn(); } - template < typename PortTraits > - int InPortBase< PortTraits >::getMaxQueueDepth() + template + int InPort::getMaxQueueDepth() { SCOPED_LOCK lock(dataBufferLock); - return queueSem->getMaxValue(); + return maxQueue; } - template < typename PortTraits > - int InPortBase< PortTraits >::getCurrentQueueDepth() + template + int InPort::getCurrentQueueDepth() { SCOPED_LOCK lock(dataBufferLock); - return workQueue.size(); + return packetQueue.size(); } - template < typename PortTraits > - void InPortBase< PortTraits >::setMaxQueueDepth(int newDepth) + template + void InPort::setMaxQueueDepth(int newDepth) { SCOPED_LOCK lock(dataBufferLock); - queueSem->setMaxValue(newDepth); + maxQueue = newDepth; } - template < typename PortTraits > - void InPortBase< PortTraits >::pushSRI(const BULKIO::StreamSRI& H) + template + void InPort::setNewStreamListener(SriListener* newListener) { + if (newListener) { + newStreamCallback = boost::ref(*newListener); + } else { + newStreamCallback.clear(); + } + } + + template + void InPort::pushSRI(const BULKIO::StreamSRI& H) { - TRACE_ENTER( logger, "InPort::pushSRI" ); + TRACE_ENTER( _portLog, "InPort::pushSRI" ); if (H.blocking) { SCOPED_LOCK 
lock(dataBufferLock); blocking = true; - queueSem->setCurrValue(workQueue.size()); } const std::string streamID(H.streamID); - BULKIO::StreamSRI tmpH = H; // mutable copy for callbacks - LOG_TRACE(logger,"pushSRI - FIND- PORT:" << name << " NEW SRI:" << streamID << " Mode:" << H.mode << " XDELTA:" << 1.0/H.xdelta ); + LOG_TRACE(_portLog,"pushSRI - FIND- PORT:" << name << " NEW SRI:" << streamID << " Mode:" << H.mode << " XDELTA:" << 1.0/H.xdelta ); SCOPED_LOCK lock(sriUpdateLock); - SriMap::iterator currH = currentHs.find(streamID); + SriTable::iterator currH = currentHs.find(streamID); + StreamDescriptor sri(H); if (currH == currentHs.end()) { - LOG_DEBUG(logger,"pushSRI PORT:" << name << " NEW SRI:" << streamID << " Mode:" << H.mode ); - if ( newStreamCallback ) (*newStreamCallback)(tmpH); - currentHs[streamID] = std::make_pair(tmpH, true); + LOG_DEBUG(_portLog,"pushSRI PORT:" << name << " NEW SRI:" << streamID << " Mode:" << H.mode ); + if (newStreamCallback) { + // The callback takes a non-const SRI, so allow access via const_cast + newStreamCallback(const_cast(sri.sri())); + } + currentHs[streamID] = std::make_pair(sri, true); lock.unlock(); - createStream(streamID, tmpH); + createStream(streamID, sri); } else { - if ( sri_cmp && !sri_cmp(tmpH, currH->second.first)) { - LOG_DEBUG(logger,"pushSRI PORT:" << name << " SAME SRI:" << streamID << " Mode:" << H.mode ); - currentHs[streamID] = std::make_pair(tmpH, true); + int eos_count = 0; + for (typename PacketQueue::iterator ii = this->packetQueue.begin(); ii != this->packetQueue.end(); ++ii) { + if (((*ii)->streamID == streamID) and ((*ii)->EOS)) { + eos_count++; + } + } + int additional_streams = 1+pendingStreams.count(streamID); // count current and pending streams + if (additional_streams == eos_count) { // current and pending streams are all eos + createStream(streamID, sri); + } else { + if (sri_cmp && !sri_cmp(H, currH->second.first.sri())) { + LOG_DEBUG(_portLog,"pushSRI PORT:" << name << " SAME SRI:" << 
streamID << " Mode:" << H.mode ); + currH->second.first = StreamDescriptor(H); + currH->second.second = true; + } } } - TRACE_EXIT( logger, "InPort::pushSRI" ); + TRACE_EXIT( _portLog, "InPort::pushSRI" ); } + namespace { + template + inline bool is_copy_required(const redhawk::shared_buffer& data) + { + // If the data comes from a non-shared source (a vector or raw + // pointer), we need to make a copy. + return (data.transient() && !data.empty()); + } - template < typename PortTraits > - void InPortBase< PortTraits >::queuePacket(const PushArgumentType data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) - { + template + inline redhawk::shared_buffer copy_data(const redhawk::shared_buffer& data) + { + return data.copy(); + } + + inline bool is_copy_required(const redhawk::shared_bitbuffer& data) + { + // If the data comes from a non-shared source (a raw pointer), we + // need to make a copy. + return (data.transient() && !data.empty()); + } + + inline const redhawk::shared_bitbuffer copy_data(const redhawk::shared_bitbuffer& data) + { + return data.copy(); + } + + inline bool is_copy_required(const std::string&) + { + // Strings don't have sharing semantics, so no copy is required. 
+ return false; + } + + inline const std::string& copy_data(const std::string& data) + { + // Pass through the string by reference (no copies made) + return data; + } - TRACE_ENTER( logger, "InPort::pushPacket" ); - if (queueSem->getMaxValue() == 0) { - TRACE_EXIT( logger, "InPort::pushPacket" ); - return; } + template + void InPort::queuePacket(const BufferType& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const std::string& streamID) + { + TRACE_ENTER( _portLog, "InPort::pushPacket" ); + + // Discard packets for disabled streams if (!_acceptPacket(streamID, EOS)) { + if (EOS) { + // If this was the only blocking stream, turn off blocking + bool turnOffBlocking = _handleEOS(streamID); + if (turnOffBlocking) { + SCOPED_LOCK lock(dataBufferLock); + blocking = false; + } + } + return; + } + + if (maxQueue == 0) { + TRACE_EXIT( _portLog, "InPort::pushPacket" ); return; } - BULKIO::StreamSRI tmpH = {1, 0.0, 1.0, 1, 0, 0.0, 0.0, 0, 0, streamID, false, 0}; + // Discard empty packets if EOS is not set, as there is no useful data or + // metadata to be had--since T applies to the 1st sample (which does not + // exist), all we have is a stream ID + if (data.empty() && !EOS) { + return; + } + + StreamDescriptor sri; bool sriChanged = false; - bool portBlocking = false; - SriMap::iterator currH; { SCOPED_LOCK lock(sriUpdateLock); - currH = currentHs.find(std::string(streamID)); + SriTable::iterator currH = currentHs.find(streamID); if (currH != currentHs.end()) { - tmpH = currH->second.first; + sri = currH->second.first; sriChanged = currH->second.second; - currentHs[streamID] = std::make_pair(currH->second.first, false); + currH->second.second = false; } else { // Unknown stream ID, register a new default SRI following the logic in pushSRI, // and set the SRI changed flag - LOG_WARN(logger, "InPort::pushPacket received data for stream '" << streamID << "' with no SRI"); + LOG_WARN(_portLog, "InPort::pushPacket received data for stream '" << streamID << "' 
with no SRI"); sriChanged = true; + sri = StreamDescriptor(bulkio::sri::create(streamID)); if (newStreamCallback) { - (*newStreamCallback)(tmpH); + // The callback takes a non-const SRI, so allow access via const_cast + newStreamCallback(const_cast(sri.sri())); } - currentHs[streamID] = std::make_pair(tmpH, false); + currentHs[streamID] = std::make_pair(sri, false); lock.unlock(); - createStream(streamID, tmpH); + createStream(streamID, sri); } - portBlocking = blocking; } const size_t length = _getElementLength(data); - LOG_DEBUG( logger, "bulkio::InPort port blocking:" << portBlocking ); - bool flushToReport = false; - if(portBlocking) { - queueSem->incr(); - SCOPED_LOCK lock(dataBufferLock); - LOG_TRACE( logger, "bulkio::InPort pushPacket NEW PACKET (QUEUE" << workQueue.size()+1 << ")" ); - stats->update(length, (float)(workQueue.size()+1)/(float)queueSem->getMaxValue(), EOS, streamID, false); - DataTransferType *tmpIn = new DataTransferType(data, T, EOS, streamID, tmpH, sriChanged, false); - workQueue.push_back(tmpIn); - dataAvailable.notify_all(); - } else { + { + bool flushToReport = false; SCOPED_LOCK lock(dataBufferLock); - bool sriChangedHappened = false; - bool flagEOS = false; - if (workQueue.size() == queueSem->getMaxValue()) { // reached maximum queue depth - flush the queue - LOG_DEBUG( logger, "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" << workQueue.size() << ")" ); - flushToReport = true; - DataTransferType *tmp; - while (workQueue.size() != 0) { - tmp = workQueue.front(); - if (tmp->sriChanged == true) { - sriChangedHappened = true; - } - if (tmp->EOS == true) { - flagEOS = true; - } - workQueue.pop_front(); - delete tmp; + LOG_DEBUG(_portLog, "bulkio::InPort port blocking:" << blocking); + if (blocking) { + while (packetQueue.size() >= maxQueue) { + queueAvailable.wait(lock); + } + } else { + if (packetQueue.size() >= maxQueue) { // reached maximum queue depth - flush the queue + LOG_DEBUG( _portLog, "bulkio::InPort pushPacket PURGE INPUT 
QUEUE (SIZE" << packetQueue.size() << ")" ); + flushToReport = true; + + // Need to hold the SRI mutex while flushing the queue because it may + // update SRI change state + SCOPED_LOCK lock(sriUpdateLock); + _flushQueue(); + + // Update the SRI change flag for this stream, which may have been + // modified during the queue flush + sriChanged = currentHs[streamID].second; + currentHs[streamID].second = false; } } - if (sriChangedHappened) - sriChanged = true; - if (flagEOS) - EOS = true; - LOG_DEBUG( logger, "bulkio::InPort pushPacket NEW Packet (QUEUE=" << workQueue.size()+1 << ")"); - stats->update(length, (float)(workQueue.size()+1)/(float)queueSem->getMaxValue(), EOS, streamID, flushToReport); - DataTransferType *tmpIn = new DataTransferType(data, T, EOS, streamID, tmpH, sriChanged, flushToReport); - workQueue.push_back(tmpIn); + LOG_TRACE(_portLog, "bulkio::InPort pushPacket NEW PACKET (QUEUE" << packetQueue.size()+1 << ")"); + stats->update(length, (float)(packetQueue.size()+1)/(float)maxQueue, EOS, streamID, flushToReport); + Packet *tmpIn; + if (is_copy_required(data)) { + tmpIn = new Packet(copy_data(data), T, EOS, sri, sriChanged, false); + } else { + tmpIn = new Packet(data, T, EOS, sri, sriChanged, false); + } + packetQueue.push_back(tmpIn); + // If a flush occurred, always set the flag on the first packet; this may + // not be the packet that was just inserted if there were any EOS packets + // on the queue + if (flushToReport) { + packetQueue.front()->inputQueueFlushed = true; + } dataAvailable.notify_all(); } - packetReceived(streamID); + packetWaiters.notify(streamID); - TRACE_EXIT( logger, "InPort::pushPacket" ); + TRACE_EXIT( _portLog, "InPort::pushPacket" ); } - template < typename PortTraits > - typename InPortBase< PortTraits >::DataTransferType* InPortBase< PortTraits >::peekPacket(float timeout) + template + void InPort::_flushQueue() + { + std::set sri_changed; + PacketQueue saved_packets; + for (typename PacketQueue::iterator iter = 
packetQueue.begin(); iter != packetQueue.end(); ++iter) { + Packet* packet = *iter; + if (packet->EOS) { + // Remove the SRI change flag for this stream, as further SRI changes + // apply to a different stream; set the SRI change flag for the EOS + // packet if there was one for this stream earlier in the queue + if (sri_changed.erase(packet->streamID)) { + packet->sriChanged = true; + } + + // Discard data and preserve the EOS packet + packet->buffer = BufferType(); + saved_packets.push_back(packet); + } else { + if (packet->sriChanged) { + sri_changed.insert(packet->streamID); + } + delete packet; + } + } + packetQueue.swap(saved_packets); + + // Save any SRI change flags that were collected and not applied to an EOS + // packet + for (std::set::iterator stream_id = sri_changed.begin(); + stream_id != sri_changed.end(); ++stream_id) { + // It should be safe to assume that an entry exists for the stream ID, + // but just in case, use find instead of operator[] + SriTable::iterator currH = currentHs.find(*stream_id); + if (currH != currentHs.end()) { + currH->second.second = true; + } + } + } + + + template + typename InPort::Packet* InPort::peekPacket(float timeout, + boost::unique_lock& lock) { uint64_t secs = (unsigned long)(trunc(timeout)); uint64_t msecs = (unsigned long)((timeout - secs) * 1e6); boost::system_time to_time = boost::get_system_time() + boost::posix_time::seconds(secs) + boost::posix_time::microseconds(msecs); - boost::mutex::scoped_lock lock(this->dataBufferLock); - while (!breakBlock && workQueue.empty()) { + while (!breakBlock && packetQueue.empty()) { if (timeout == 0) { break; } else if (timeout > 0) { @@ -327,60 +425,15 @@ namespace bulkio { } } - if (breakBlock || workQueue.empty()) { + if (breakBlock || packetQueue.empty()) { return 0; } else { - return workQueue.front(); - } - } - - template < typename PortTraits > - bool InPortBase< PortTraits >::isStreamActive(const std::string& streamID) - { - return true; - } - - template < typename 
PortTraits > - bool InPortBase< PortTraits >::_acceptPacket(const std::string& streamID, bool EOS) - { - // Acquire dataBufferLock for the duration of this call to ensure that end- - // of-stream is handled atomically for disabled streams - SCOPED_LOCK(dataBufferLock); - if (isStreamEnabled(streamID)) { - return true; - } - if (EOS) { - // Acknowledge the end-of-stream by removing the disabled stream before - // discarding the packet - removeStream(streamID); - - // If this was the only blocking stream, turn off blocking - bool turnOffBlocking = _handleEOS(streamID); - if (turnOffBlocking) { - queueSem->setCurrValue(0); - blocking = false; - } - } - return false; - } - - template < typename PortTraits > - bool InPortBase< PortTraits >::isStreamEnabled(const std::string& streamID) - { - return true; - } - - template < typename PortTraits > - void InPortBase< PortTraits >::packetReceived(const std::string& streamID) - { - if (isStreamActive(streamID)) { - packetWaiters.notify(streamID); + return packetQueue.front(); } } - - template < typename PortTraits > - void InPortBase< PortTraits >::enableStats( bool enable ) + template + void InPort::enableStats( bool enable ) { if (stats ) { stats->setEnabled(enable); @@ -388,41 +441,87 @@ namespace bulkio { } - template < typename PortTraits > - void InPortBase< PortTraits >::block() + template + void InPort::block() { - TRACE_ENTER( logger, "InPort::block" ); + TRACE_ENTER( _portLog, "InPort::block" ); breakBlock = true; - queueSem->release(); dataAvailable.notify_all(); packetWaiters.interrupt(); - TRACE_EXIT( logger, "InPort::block" ); + TRACE_EXIT( _portLog, "InPort::block" ); } - template < typename PortTraits > - void InPortBase< PortTraits >::unblock() + template + void InPort::unblock() { breakBlock = false; } - template < typename PortTraits > - void InPortBase< PortTraits >::stopPort() + template + void InPort::stopPort() { block(); } - template < typename PortTraits > - void InPortBase< PortTraits >::startPort() + 
template + void InPort::startPort() { unblock(); } - template < typename PortTraits > - bool InPortBase< PortTraits >::blocked() + template + bool InPort::blocked() { return breakBlock; } + template + typename InPort::StreamType InPort::getCurrentStream(float timeout) + { + // Prefer a stream that already has buffered data + { + boost::mutex::scoped_lock lock(streamsMutex); + for (typename StreamMap::iterator stream = streams.begin(); stream != streams.end(); ++stream) { + if (stream->second.hasBufferedData()) { + return stream->second; + } + } + } + + // Otherwise, return the stream that owns the next packet on the queue, + // potentially waiting for one to be received + boost::mutex::scoped_lock lock(this->dataBufferLock); + Packet* packet = this->peekPacket(timeout, lock); + if (packet) { + return getStream(packet->streamID); + } + + return StreamType(); + } + + template + typename InPort::StreamType InPort::getStream(const std::string& streamID) + { + boost::mutex::scoped_lock lock(streamsMutex); + typename StreamMap::iterator stream = streams.find(streamID); + if (stream != streams.end()) { + return stream->second; + } else { + return StreamType(); + } + } + + template + typename InPort::StreamList InPort::getStreams() + { + StreamList result; + boost::mutex::scoped_lock lock(streamsMutex); + for (typename StreamMap::const_iterator stream = streams.begin(); stream != streams.end(); ++stream) { + result.push_back(stream->second); + } + return result; + } + /* * getPacket * description: retrieve data from the provides (input) port @@ -430,36 +529,53 @@ namespace bulkio { * timeout: the amount of time to wait for data before a NULL is returned. * Use 0.0 for non-blocking and -1 for blocking. 
*/ - template < typename PortTraits > - typename InPortBase< PortTraits >::DataTransferType * InPortBase< PortTraits >::getPacket(float timeout) + template + typename InPort::DataTransferType * InPort::getPacket(float timeout) { return getPacket(timeout, ""); } + template + typename InPort::DataTransferType * InPort::getPacket(float timeout, const std::string& streamID) + { + DataTransferType* transfer = 0; + boost::scoped_ptr packet(nextPacket(timeout, streamID)); + if (packet) { + transfer = new DataTransferType(packet->buffer, packet->T, packet->EOS, packet->streamID.c_str(), packet->SRI.sri(), packet->sriChanged, packet->inputQueueFlushed); + if (packet->EOS) { + // When user code is calling getPacket(), it is safe to assume they are + // not using using the stream API, so remove the associated stream here + // to avoid leaking memory + removeStream(packet->streamID); + } + } + return transfer; + } + - template < typename PortTraits > - typename InPortBase< PortTraits >::DataTransferType * InPortBase< PortTraits >::getPacket(float timeout, const std::string& streamID) + template + typename InPort::Packet* InPort::nextPacket(float timeout, const std::string& streamID) { - TRACE_ENTER( logger, "InPort::getPacket" ); + TRACE_ENTER(_portLog, "InPort::nextPacket"); if (breakBlock) { - TRACE_EXIT( logger, "InPort::getPacket" ); + TRACE_EXIT(_portLog, "InPort::nextPacket"); return NULL; } - DataTransferType *tmp=NULL; + Packet* packet = 0; { SCOPED_LOCK lock(dataBufferLock); - tmp = fetchPacket(streamID); + packet = fetchPacket(streamID); uint64_t secs = (unsigned long)(trunc(timeout)); uint64_t msecs = (unsigned long)((timeout - secs) * 1e6); boost::system_time to_time = boost::get_system_time() + boost::posix_time::seconds(secs) + boost::posix_time::microseconds(msecs); - while (!tmp) { + while (!packet) { if (timeout == 0.0) { - TRACE_EXIT( logger, "InPort::getPacket" ); + TRACE_EXIT(_portLog, "InPort::nextPacket"); return NULL; } else if (timeout > 0){ if 
(!dataAvailable.timed_wait(lock, to_time)) { - TRACE_EXIT( logger, "InPort::getPacket" ); + TRACE_EXIT(_portLog, "InPort::nextPacket"); return NULL; } } else { @@ -469,42 +585,38 @@ namespace bulkio { } } if (breakBlock) { - TRACE_EXIT( logger, "InPort::getPacket" ); + TRACE_EXIT(_portLog, "InPort::nextPacket"); return NULL; } - tmp = fetchPacket(streamID); + packet = fetchPacket(streamID); } - LOG_TRACE( logger, "bulkio.InPort getPacket PORT:" << name << " (QUEUE="<< workQueue.size() << ")" ); + if (!packet) { + TRACE_EXIT(_portLog, "InPort::nextPacket"); + return NULL; + } - } - - if (!tmp) { - TRACE_EXIT( logger, "InPort::getPacket" ); - return NULL; + LOG_TRACE(_portLog, "InPort::nextPacket PORT:" << name << " (QUEUE="<< packetQueue.size() << ")"); + queueAvailable.notify_all(); } bool turnOffBlocking = false; - if (tmp->EOS) { - turnOffBlocking = _handleEOS(tmp->streamID); + if (packet->EOS) { + turnOffBlocking = _handleEOS(packet->streamID); } { SCOPED_LOCK lock(dataBufferLock); if (turnOffBlocking) { - queueSem->setCurrValue(0); blocking = false; } - - if (blocking) { - queueSem->decr(); - } } - TRACE_EXIT( logger, "InPort::getPacket" ); - return tmp; + TRACE_EXIT( _portLog, "InPort::nextPacket" ); + return packet; } + namespace { template inline typename std::deque::iterator do_erase(std::deque& container, typename std::deque::iterator pos) @@ -527,22 +639,43 @@ namespace bulkio { } } - template < typename PortTraits > - typename InPortBase< PortTraits >::DataTransferType * InPortBase< PortTraits >::fetchPacket(const std::string &streamID) + template + void InPort::createStream(const std::string& streamID, + const bulkio::StreamDescriptor& sri) + { + StreamType stream(sri, this); + boost::mutex::scoped_lock lock(streamsMutex); + if (streams.count(streamID) == 0) { + // New stream + LOG_DEBUG(_portLog, "Creating new stream " << streamID); + streams.insert(std::make_pair(streamID, stream)); + lock.unlock(); + + streamAdded(stream); + } else { + // An active 
stream has the same stream ID; add this new stream to the + // pending list + LOG_DEBUG(_portLog, "Creating pending stream " << streamID); + pendingStreams.insert(std::make_pair(streamID, stream)); + } + } + + template + typename InPort::Packet * InPort::fetchPacket(const std::string &streamID) { if (streamID.empty()) { - if (workQueue.empty()) { + if (packetQueue.empty()) { return 0; } - DataTransferType* packet = workQueue.front(); - workQueue.pop_front(); + Packet* packet = packetQueue.front(); + packetQueue.pop_front(); return packet; } - for (typename WorkQueue::iterator ii = workQueue.begin(); ii != workQueue.end(); ++ii) { + for (typename PacketQueue::iterator ii = packetQueue.begin(); ii != packetQueue.end(); ++ii) { if ((*ii)->streamID == streamID) { - DataTransferType* packet = *ii; - bulkio::do_erase(workQueue, ii); + Packet* packet = *ii; + bulkio::do_erase(packetQueue, ii); return packet; } } @@ -550,17 +683,15 @@ namespace bulkio { } template - void InPortBase::discardPacketsForStream(const std::string& streamID) + void InPort::discardPacketsForStream(const std::string& streamID) { - // Caller must hold dataBufferLock - for (typename WorkQueue::iterator ii = workQueue.begin(); ii != workQueue.end();) { + SCOPED_LOCK lock(dataBufferLock); + for (typename PacketQueue::iterator ii = packetQueue.begin(); ii != packetQueue.end();) { if ((*ii)->streamID == streamID) { bool eos = (*ii)->EOS; delete *ii; - ii = bulkio::do_erase(workQueue, ii); - if (blocking) { - queueSem->decr(); - } + ii = bulkio::do_erase(packetQueue, ii); + queueAvailable.notify_one(); if (eos) { break; } @@ -570,74 +701,48 @@ namespace bulkio { } } - template < typename PortTraits > - bool InPortBase< PortTraits >::_handleEOS(const std::string& streamID) + template + bool InPort::_handleEOS(const std::string& streamID) { - bool turnOffBlocking = false; - SCOPED_LOCK lock2(sriUpdateLock); - SriMap::iterator target = currentHs.find(streamID); - if (target != currentHs.end()) { - bool 
sriBlocking = target->second.first.blocking; - currentHs.erase(target); - if (sriBlocking) { - turnOffBlocking = true; - SriMap::iterator currH; - for (currH = currentHs.begin(); currH != currentHs.end(); currH++) { - if (currH->second.first.blocking) { - turnOffBlocking = false; - break; + bool turnOffBlocking = false; + SCOPED_LOCK lock(sriUpdateLock); + SriTable::iterator target = currentHs.find(streamID); + if (target != currentHs.end()) { + bool sriBlocking = target->second.first.blocking(); + currentHs.erase(target); + if (sriBlocking) { + turnOffBlocking = true; + SriTable::iterator currH; + for (currH = currentHs.begin(); currH != currentHs.end(); currH++) { + if (currH->second.first.blocking()) { + turnOffBlocking = false; + break; + } + } } - } } - } - return turnOffBlocking; - } - - template < typename PortTraits > - void InPortBase< PortTraits >::setNewStreamListener( SriListener *newListener ) { - newStreamCallback = boost::shared_ptr< SriListener >(newListener, null_deleter()); + return turnOffBlocking; } - template < typename PortTraits > - void InPortBase< PortTraits >::setNewStreamListener( SriListenerCallbackFn newListener ) { - newStreamCallback = boost::make_shared< StaticSriCallback >( newListener ); - - } - - template < typename PortTraits > - void InPortBase< PortTraits >::createStream(const std::string& streamID, const BULKIO::StreamSRI& sri) - { - } - - template < typename PortTraits > - void InPortBase< PortTraits >::removeStream(const std::string& streamID) - { - } - - template < typename PortTraits > - void InPortBase< PortTraits >::setLogger( LOGGER_PTR newLogger ) { - logger = newLogger; - } - - template < typename PortTraits > - std::string InPortBase< PortTraits >::getRepid() const { - return PortType::_PD_repoId; + template + std::string InPort::getRepid() const { + return PortType::_PD_repoId; } - template < typename PortTraits > - int InPortBase< PortTraits >::_getElementLength(const PushArgumentType data) + template + int 
InPort::_getElementLength(const BufferType& data) { - return data.length(); + return data.size(); } - template < typename PortTraits > - size_t InPortBase< PortTraits >::samplesAvailable (const std::string& streamID, bool firstPacket) + template + size_t InPort::samplesAvailable (const std::string& streamID, bool firstPacket) { size_t samples = 0; size_t item_size = 1; SCOPED_LOCK lock(dataBufferLock); - for (typename WorkQueue::iterator iter = workQueue.begin(); iter != workQueue.end(); ++iter) { - DataTransferType* packet = *iter; + for (typename PacketQueue::iterator iter = packetQueue.begin(); iter != packetQueue.end(); ++iter) { + Packet* packet = *iter; if (packet->streamID != streamID) { continue; } @@ -645,14 +750,111 @@ namespace bulkio { if (!firstPacket) break; } firstPacket = false; - if (packet->SRI.mode) { + if (packet->SRI.complex()) { item_size = 2; } - samples += packet->dataBuffer.size(); + samples += packet->buffer.size(); } return samples / item_size; } + template + void InPort::removeStream(const std::string& streamID) + { + LOG_DEBUG(_portLog, "Removing stream " << streamID); + boost::mutex::scoped_lock lock(streamsMutex); + + // Remove the current stream, and if there's a pending stream with the same + // stream ID, move it to the active list + typename StreamMap::iterator current = streams.find(streamID); + if (current != streams.end()) { + // There should always be a stream with the expected streamID when this + // method is called, but just to be safe, only close and remove when we + // know it's a valid stream + current->second.close(); + streams.erase(current); + } + typename std::multimap::iterator next = pendingStreams.find(streamID); + if (next != pendingStreams.end()) { + LOG_DEBUG(_portLog, "Moving pending stream " << streamID << " to active"); + StreamType stream = next->second; + streams.insert(*next); + pendingStreams.erase(next); + lock.unlock(); + + streamAdded(stream); + } + } + + template + bool InPort::isStreamActive(const 
std::string& streamID) + { + SCOPED_LOCK lock(streamsMutex); + if (pendingStreams.count(streamID) > 0) { + // The current stream has received an EOS + return false; + } else if (streams.count(streamID) == 0) { + // Unknown stream, presumably no SRI was received + return false; + } + return true; + } + + template + bool InPort::_acceptPacket(const std::string& streamID, bool EOS) + { + // Acquire streamsMutex for the duration of this call to ensure that + // end-of-stream is handled atomically for disabled streams + boost::mutex::scoped_lock lock(streamsMutex); + + // Find the current stream for the stream ID and check whether it's + // enabled + typename StreamMap::iterator stream = streams.find(streamID); + if (stream == streams.end() || stream->second.enabled()) { + return true; + } + + // If there's a pending stream, the packet is designated for that + if (pendingStreams.find(streamID) != pendingStreams.end()) { + return true; + } + + if (EOS) { + // Acknowledge the end-of-stream by removing the disabled stream + // before discarding the packet + LOG_DEBUG(_portLog, "Removing stream " << streamID); + stream->second.close(); + streams.erase(stream); + + typename std::multimap::iterator next = pendingStreams.find(streamID); + if (next != pendingStreams.end()) { + LOG_DEBUG(_portLog, "Moving pending stream " << streamID << " to active"); + StreamType stream = next->second; + streams.insert(*next); + pendingStreams.erase(next); + lock.unlock(); + + streamAdded(stream); + } + } + return false; + } + + template + bool InPort::isStreamEnabled(const std::string& streamID) + { + SCOPED_LOCK lock(streamsMutex); + if (pendingStreams.count(streamID) == 0) { + typename StreamMap::iterator stream = streams.find(streamID); + if (stream != streams.end()) { + if (!stream->second.enabled()) { + return false; + } + } + } + return true; + } + namespace { template inline bool is_ready(StreamType& stream, size_t size) @@ -677,120 +879,62 @@ namespace bulkio { } } - /* - * 
Specializations of base class methods for dataXML ports - */ - - template <> - int InPortBase< FilePortTraits >::_getElementLength(const char* /*unused*/) - { - return 1; - } - /* * Specializations of base class methods for dataFile ports */ template <> - int InPortBase< XMLPortTraits >::_getElementLength(const char* data) + int InPort::_getElementLength(const std::string& /*unused*/) { - if (!data) { - return 0; - } - return strlen(data); + return 1; } // - template < typename PortTraits > - InPort< PortTraits >::InPort(std::string port_name, - LOGGER_PTR logger, - bulkio::sri::Compare compareSri, - SriListener *newStreamCB ) : - InPortBase(port_name, logger, compareSri, newStreamCB) - { - } - - template < typename PortTraits > - InPort< PortTraits >::InPort(std::string port_name, - bulkio::sri::Compare compareSri, - SriListener *newStreamCB ) : - InPortBase(port_name, LOGGER_PTR(), compareSri, newStreamCB) - { - } - - template < typename PortTraits > - InPort< PortTraits >::InPort(std::string port_name, void* /*unused*/) : - InPortBase(port_name, LOGGER_PTR()) + template + InNumericPort::InNumericPort(std::string port_name, + LOGGER_PTR logger, + bulkio::sri::Compare compareSri, + SriListener *newStreamCB) : + InPort(port_name, logger, compareSri, newStreamCB) { } - template < typename PortTraits > - void InPort< PortTraits >::pushPacket(const PortSequenceType& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) + template + InNumericPort::InNumericPort(std::string port_name, + bulkio::sri::Compare compareSri, + SriListener *newStreamCB) : + InPort(port_name, LOGGER_PTR(), compareSri, newStreamCB) { - this->queuePacket(data, T, EOS, streamID); } - template < typename PortTraits > - typename InPort< PortTraits >::StreamType InPort< PortTraits >::getCurrentStream(float timeout) - { - // Prefer a stream that already has buffered data - { - boost::mutex::scoped_lock lock(streamsMutex); - for (typename StreamMap::iterator stream = 
streams.begin(); stream != streams.end(); ++stream) { - if (stream->second.hasBufferedData()) { - return stream->second; - } - } - } - - // Otherwise, return the stream that owns the next packet on the queue, - // potentially waiting for one to be received - DataTransferType* packet = this->peekPacket(timeout); - if (packet) { - const std::string& streamID = packet->streamID; - return getStream(streamID); - } - - return StreamType(); - } - - template < typename PortTraits > - typename InPort< PortTraits >::StreamType InPort< PortTraits >::getStream(const std::string& streamID) + template + InNumericPort::InNumericPort(std::string port_name, void* /*unused*/) : + InPort(port_name, LOGGER_PTR()) { - boost::mutex::scoped_lock lock(streamsMutex); - typename StreamMap::iterator stream = streams.find(streamID); - if (stream != streams.end()) { - return stream->second; - } else { - return StreamType(); - } } - template < typename PortTraits > - typename InPort< PortTraits >::StreamList InPort< PortTraits >::getStreams() + template + void InNumericPort::pushPacket(const PortSequenceType& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) { - StreamList result; - boost::mutex::scoped_lock lock(streamsMutex); - for (typename StreamMap::const_iterator stream = streams.begin(); stream != streams.end(); ++stream) { - result.push_back(stream->second); - } - return result; + size_t size = data.length(); + TransportType* ptr = const_cast(data).get_buffer(1); + this->queuePacket(BufferType(reinterpret_cast(ptr), size), T, EOS, streamID); } - template < typename PortTraits > - typename InPort< PortTraits >::StreamList InPort< PortTraits >::pollStreams(float timeout) + template + typename InNumericPort::StreamList InNumericPort::pollStreams(float timeout) { return pollStreams(0, timeout); } - template < typename PortTraits > - typename InPort< PortTraits >::StreamList InPort< PortTraits >::pollStreams(StreamList& pollset, float timeout) + template + 
typename InNumericPort::StreamList InNumericPort::pollStreams(StreamList& pollset, float timeout) { return pollStreams(pollset, 0, timeout); } - template < typename PortTraits > - typename InPort< PortTraits >::StreamList InPort< PortTraits >::pollStreams(size_t samples, float timeout) + template + typename InNumericPort::StreamList InNumericPort::pollStreams(size_t samples, float timeout) { redhawk::signal::waiter waiter(&packetWaiters, timeout); @@ -808,8 +952,8 @@ namespace bulkio { return result; } - template < typename PortTraits > - typename InPort< PortTraits >::StreamList InPort< PortTraits >::pollStreams(StreamList& pollset, size_t samples, float timeout) + template + typename InNumericPort::StreamList InNumericPort::pollStreams(StreamList& pollset, size_t samples, float timeout) { redhawk::signal::waiter waiter(&packetWaiters, timeout); @@ -833,132 +977,107 @@ namespace bulkio { return result; } - template < typename PortTraits > - void InPort< PortTraits >::createStream(const std::string& streamID, const BULKIO::StreamSRI& sri) + template + typename InNumericPort::StreamList InNumericPort::getReadyStreams(size_t samples) { - StreamType stream(sri, this); + StreamList result; boost::mutex::scoped_lock lock(streamsMutex); - if (streams.count(streamID) == 0) { - // New stream - LOG_DEBUG(logger, "Creating new stream " << streamID); - streams.insert(std::make_pair(streamID, stream)); - lock.unlock(); - - streamAdded(stream); - } else { - // An active stream has the same stream ID; add this new stream to the - // pending list - LOG_DEBUG(logger, "Creating pending stream " << streamID); - pendingStreams.insert(std::make_pair(streamID, stream)); + for (typename StreamMap::iterator stream = streams.begin(); stream != streams.end(); ++stream) { + if (bulkio::is_ready(stream->second, samples)) { + result.push_back(stream->second); + } } + return result; } - template < typename PortTraits > - void InPort< PortTraits >::removeStream(const std::string& streamID) - { 
- LOG_DEBUG(logger, "Removing stream " << streamID); - boost::mutex::scoped_lock lock(streamsMutex); - typename StreamMap::iterator current = streams.find(streamID); - current->second.close(); - streams.erase(current); + InBitPort::InBitPort(const std::string& name, LOGGER_PTR logger) : + InPort(name, logger) + { + } - // If there's a pending stream waiting, move it to the active list - typename std::multimap::iterator next = pendingStreams.find(streamID); - if (next != pendingStreams.end()) { - LOG_DEBUG(logger, "Moving pending stream " << streamID << " to active"); - StreamType stream = next->second; - streams.insert(*next); - pendingStreams.erase(next); - lock.unlock(); + void InBitPort::pushPacket(const BULKIO::BitSequence& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) + { + redhawk::shared_bitbuffer::data_type* ptr = const_cast(data).data.get_buffer(1); + redhawk::shared_bitbuffer buffer(ptr, data.bits); + queuePacket(buffer, T, EOS, streamID); + } - streamAdded(stream); - } + // ---------------------------------------------------------------------------------------- + // Source Input Port String Definitions + // ---------------------------------------------------------------------------------------- + InFilePort::InFilePort(std::string port_name, + LOGGER_PTR logger, + bulkio::sri::Compare compareSri, + SriListener *newStreamCB) : + InPort(port_name, logger, compareSri, newStreamCB) + { } - template < typename PortTraits > - bool InPort< PortTraits >::isStreamActive(const std::string& streamID) + + InFilePort::InFilePort(std::string port_name, + bulkio::sri::Compare compareSri, + SriListener *newStreamCB) : + InPort(port_name, LOGGER_PTR(), compareSri, newStreamCB) { - SCOPED_LOCK lock(streamsMutex); - if (pendingStreams.count(streamID) > 0) { - // The current stream has received an EOS - return false; - } else if (streams.count(streamID) == 0) { - // Unknown stream, presumably no SRI was received - return false; - } - 
return true; } - template < typename PortTraits > - bool InPort< PortTraits >::isStreamEnabled(const std::string& streamID) + InFilePort::InFilePort(std::string port_name, void* /*unused*/) : + InPort(port_name, LOGGER_PTR()) { - SCOPED_LOCK lock(streamsMutex); - if (pendingStreams.count(streamID) == 0) { - typename StreamMap::iterator stream = streams.find(streamID); - if (stream != streams.end()) { - if (!stream->second.enabled()) { - return false; - } - } - } - return true; } - template < typename PortTraits > - typename InPort< PortTraits >::StreamList InPort< PortTraits >::getReadyStreams(size_t samples) + void InFilePort::pushPacket(const char *data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) { - StreamList result; - boost::mutex::scoped_lock lock(streamsMutex); - for (typename StreamMap::iterator stream = streams.begin(); stream != streams.end(); ++stream) { - if (bulkio::is_ready(stream->second, samples)) { - result.push_back(stream->second); - } + if (!data) { + this->queuePacket(std::string(), T, EOS, streamID); + } else { + this->queuePacket(data, T, EOS, streamID); } - return result; } - // ---------------------------------------------------------------------------------------- - // Source Input Port String Definitions - // ---------------------------------------------------------------------------------------- - template < typename PortTraits > - InStringPort< PortTraits >::InStringPort(std::string port_name, - LOGGER_PTR logger, - bulkio::sri::Compare compareSri, - SriListener *newStreamCB ) : - InPortBase(port_name, logger, compareSri, newStreamCB) + InXMLPort::InXMLPort(std::string name, + LOGGER_PTR logger, + bulkio::sri::Compare compareSri, + SriListener* newStreamCB) : + InPort(name, logger, compareSri, newStreamCB) { } - template < typename PortTraits > - InStringPort< PortTraits >::InStringPort(std::string port_name, - bulkio::sri::Compare compareSri, - SriListener *newStreamCB ) : - InPortBase(port_name, 
LOGGER_PTR(), compareSri, newStreamCB) + InXMLPort::InXMLPort(std::string name, + bulkio::sri::Compare compareSri, + SriListener* newStreamCB) : + InPort(name, LOGGER_PTR(), compareSri, newStreamCB) { } - template < typename PortTraits > - InStringPort< PortTraits >::InStringPort(std::string port_name, void* /*unused*/) : - InPortBase(port_name, LOGGER_PTR()) + InXMLPort::InXMLPort(std::string name, void* /*unused*/) : + InPort(name, LOGGER_PTR()) { } - template < typename PortTraits > - void InStringPort< PortTraits >::pushPacket(const char *data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) + void InXMLPort::pushPacket(const char* data, CORBA::Boolean EOS, const char* streamID) { - this->queuePacket(data, T, EOS, streamID); + std::string buffer; + if (data) { + buffer = data; + } + // Use a default timestamp of "not set" for XML + this->queuePacket(buffer, bulkio::time::utils::notSet(), EOS, streamID); } - - template < typename PortTraits > - void InStringPort< PortTraits >::pushPacket(const char *data, CORBA::Boolean EOS, const char* streamID) + void InXMLPort::pushPacket(const char* data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) { - this->queuePacket(data, BULKIO::PrecisionUTCTime(), EOS, streamID); + std::string buffer; + if (data) { + buffer = data; + } + this->queuePacket(buffer, T, EOS, streamID); } + // // Required for Template Instantion for the compilation unit. // Note: we only define those valid types for which Bulkio IDL is defined. Users wanting to @@ -966,27 +1085,13 @@ namespace bulkio { // link against the template. 
// -#define INSTANTIATE_BASE_TEMPLATE(x) \ - template class InPortBase; - #define INSTANTIATE_TEMPLATE(x) \ - INSTANTIATE_BASE_TEMPLATE(x); template class InPort; - - INSTANTIATE_TEMPLATE(CharPortTraits); - INSTANTIATE_TEMPLATE(OctetPortTraits); - INSTANTIATE_TEMPLATE(ShortPortTraits); - INSTANTIATE_TEMPLATE(UShortPortTraits); - INSTANTIATE_TEMPLATE(LongPortTraits); - INSTANTIATE_TEMPLATE(ULongPortTraits); - INSTANTIATE_TEMPLATE(LongLongPortTraits); - INSTANTIATE_TEMPLATE(ULongLongPortTraits); - INSTANTIATE_TEMPLATE(FloatPortTraits); - INSTANTIATE_TEMPLATE(DoublePortTraits); - - INSTANTIATE_BASE_TEMPLATE(FilePortTraits); - INSTANTIATE_BASE_TEMPLATE(XMLPortTraits); - template class InStringPort< FilePortTraits >; - template class InStringPort< XMLPortTraits >; + template class InPort; + +#define INSTANTIATE_NUMERIC_TEMPLATE(x) \ + template class InNumericPort; + FOREACH_PORT_TYPE(INSTANTIATE_TEMPLATE); + FOREACH_NUMERIC_PORT_TYPE(INSTANTIATE_NUMERIC_TEMPLATE); } // end of bulkio namespace diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_in_port.h b/bulkioInterfaces/libsrc/cpp/bulkio_in_port.h deleted file mode 100644 index 2e39b8e9f..000000000 --- a/bulkioInterfaces/libsrc/cpp/bulkio_in_port.h +++ /dev/null @@ -1,678 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. 
- * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef __bulkio_in_port_h -#define __bulkio_in_port_h - -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "bulkio_base.h" -#include "bulkio_traits.h" -#include "bulkio_in_stream.h" -#include "bulkio_callbacks.h" - -namespace bulkio { - - // - // InPortBase - // Base template for data transfers between BULKIO ports. This class is defined by 2 trait classes - // DataTransferTraits: This template trait defines the DataTranfer object that is returned by the getPacket method - // PortTraits - This template provides the context for the port's middleware transport classes and they base data types - // passed between port objects - // - template < typename PortTraits > - class InPortBase : public PortTraits::POAPortType, public Port_Provides_base_impl - { - - public: - - typedef PortTraits Traits; - - // Transport Sequence Type use to during push packet - typedef typename Traits::SequenceType PortSequenceType; - - // - // Transport type used by this port - // - typedef typename Traits::TransportType TransportType; - - // - // True type of argument to pushPacket, typically "const PortSequenceType&" - // except for dataXML and dataFile (which use "const char*") - // - typedef typename Traits::PushType PushArgumentType; - - typedef typename Traits::PortType PortType; - - // - // Declaration of DataTransfer class from TransportType trait and DataBuffer type trait - // - typedef DataTransfer< typename Traits::DataTransferTraits > DataTransferType; - - // Queue of data transfer objects maintained by the port - typedef std::deque< DataTransferType * > WorkQueue; - - // - // ~InPortBase - call the virtual destructor to remove all allocated memebers - // - virtual ~InPortBase(); - - /* - * getPacket - interface used by components to grab data from the port's internal queue 
object for processing. The timeout parameter allows - * the calling component to perform blocking and non-blocking retrievals. - * - * @param timeout - timeout == bulkio::Const::NON_BLOCKING (0.0) non-blocking io - * timeout == bulkio::Const::BLOCKING (-1) block until data arrives or lock is broken on exit - * timeout > 0.0 wait until time expires. - * @return dataTranfer * pointer to a data transfer object from the port's work queue - * @return NULL - no data available - */ - virtual DataTransferType *getPacket(float timeout); - - /* - * getPacket - interface used by components to grab data from the port's internal queue object for a specified streamID - * - * @param timeout - timeout == bulkio::Const::NON_BLOCKING (0.0) non-blocking io - * timeout == bulkio::Const::BLOCKING (-1) block until data arrives or lock is broken on exit - * timeout > 0.0 wait until time expires. - * @param streamID stream id to match on for when pulling data from the port's work queue - * @return dataTranfer * pointer to a data transfer object from the port's work queue - * @return NULL - no data available - */ - virtual DataTransferType *getPacket(float timeout, const std::string &streamID); - - // - // BULKIO IDL interface for pushing Floating Point vectors between components - // - - /* - * pushSRI - called by the source component when SRI data about the stream changes, the data flow policy is this activity - * will occurr first before any data flows to the component. 
- * - * @param H - Incoming StreamSRI object that defines the state of the data flow portion of the stream (pushPacket) - */ - virtual void pushSRI(const BULKIO::StreamSRI& H); - - // - // Port Statistics Interface - // - - /* - * turn on/off the port monitoring capability - */ - virtual void enableStats(bool enable); - - // - // state - returns the current state of the port as follows: - // BULKIO::BUSY - internal queue has reached FULL state - // BULKIO::IDLE - there are no items on the internal queue - // BULKIO::ACTIVE - there are items on the queue - // - // @return BULKIO::PortUsageType - current state of port - // - virtual BULKIO::PortUsageType state(); - - // - // statisics - returns a PortStatistics object for this provides port - // PortStatistics: - // portname - name of port - // elementsPerSecond - number of elements per second (element is based on size of port type ) - // bitsPerSecond - number of bits per second (based on element storage size in bits) - // callsPerSecond - history window -1 / time between calls to this method - // streamIds - list of active stream id values - // averageQueueDepth - the average depth of the queue for this port - // timeSinceLastCall - time since this method as invoked and the last pushPacket happened - // Keyword Sequence - deprecated - // - // @return BULKIO::PortStatistics - current data flow metrics collected for the port. 
- // the caller of the method is responsible for freeing this object - // - virtual BULKIO::PortStatistics* statistics(); - - // - // activeSRIs - returns a sequence of BULKIO::StreamSRI objectsPort - // - // @return BULKIO::StreamSRISequence - list of activte SRI objects for this port - // the caller of the method is responsible for freeing this object - // - virtual BULKIO::StreamSRISequence* activeSRIs(); - - /* - * getCurrentQueueDepth - returns the current number of elements in the queue - * - * @return int - number of items in the queue - */ - virtual int getCurrentQueueDepth(); - - /* - * getMaxQueueDepth - returns the maximum size of the queue , if this water mark is reached the queue will be purged, and the - * component of the port will be notified in getPacket method - * @return int - maximum size the queue can reach before purging occurs - */ - virtual int getMaxQueueDepth(); - - /* - * setMaxQueueDepth - allow users of this port to modify the maximum number of allowable vectors on the queue. - */ - virtual void setMaxQueueDepth(int newDepth); - - // - // Allow the component to control the flow of data from the port to the component. Block will restrict the flow of data back into the - // component. Call in component's stop method - // - virtual void block(); - - // - // Allow the component to control the flow of data from the port to the component. Unblock will release the flow of data back into the - // component. Called in component's start method. - // - virtual void unblock(); - - // - // Support function for automatic component-managed start. Calls unblock. - // - virtual void startPort(); - - // - // Support function for automatic component-managed stop. Calls block. 
- // - virtual void stopPort(); - - /* - * blocked - * - * @return bool returns state of breakBlock variable used to release any upstream blocking pushPacket calls - */ - virtual bool blocked(); - - /* - * Assign a callback for notification when a new SRI StreamId is received - */ - template< typename T > inline - void setNewStreamListener(T &target, void (T::*func)( BULKIO::StreamSRI &) ) { - newStreamCallback = boost::make_shared< MemberSriListener< T > >( boost::ref(target), func ); - }; - - /* - * Assign a callback for notification when a new SRI StreamId is received - */ - template< typename T > inline - void setNewStreamListener(T *target, void (T::*func)( BULKIO::StreamSRI &) ) { - newStreamCallback = boost::make_shared< MemberSriListener< T > >( boost::ref(*target), func ); - - }; - - void setNewStreamListener( SriListener *newListener ); - - void setNewStreamListener( SriListenerCallbackFn newListener ); - - void setLogger( LOGGER_PTR logger ); - - // Return the interface that this Port supports - std::string getRepid () const; - - protected: - // - // InPortBase - creates a provides port that can accept data vectors from a source - // - // @param port_name name of the port taken from .scd.xml file - // @param sriCmp comparator function that accepts to StreamSRI objects and compares their contents, - // if all members match then return true, otherwise false. 
This is used during the pushSRI method - // @param newStreamCB interface that is called when new SRI.streamID is received - InPortBase(std::string port_name, - LOGGER_PTR logger, - bulkio::sri::Compare sriCmp = bulkio::sri::DefaultComparator, - SriListener *newStreamCB = NULL ); - - // - // FIFO of data vectors and time stamps waiting to be processed by a component - // - WorkQueue workQueue; - - // - // Track size of work queue between getPacket calls when using streamID for extraction - // - uint32_t lastQueueSize; - - // - // SRI compare method used by pushSRI method to determine how to match incoming SRI objects and streamsID - // - bulkio::sri::Compare sri_cmp; - - // - // Callback for notifications when new SRI streamID's are received - // - boost::shared_ptr< SriListener > newStreamCallback; - - // - // List of SRI objects managed by StreamID - // - SriMap currentHs; - - // - // synchronizes access to the workQueue member - // - MUTEX dataBufferLock; - - // - // synchronizes access to the currentHs member - // - MUTEX sriUpdateLock; - - // - // mutex for use with condition variable to signify when data is available for consumption - // RESOVLE: combine deque and condition into template for pushing and poping items onto the queue... 
- // refer to ConditionList.h example - // - MUTEX dataAvailableMutex; - - CONDITION dataAvailable; - - // - // used to control data flow from getPacket call - // - bool breakBlock; - - // - // Transfers blocking request from data provider to this port that will block pushPacket calls if queue has reached a maximum value - // - bool blocking; - - // - // An abstraction of a counting semaphore to control access to the workQueue member - // - queueSemaphore *queueSem; - - // - // Statistics provider object used by the port monitoring interface - // - linkStatistics *stats; - - LOGGER_PTR logger; - - // - // Synchronized waiter list for use in poll() - // - redhawk::signal packetWaiters; - - // - // Queues a packet received via pushPacket; in most cases, this method maps - // exactly to pushPacket, except for dataFile - // - void queuePacket(PushArgumentType data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID); - - // - // Returns a pointer to the first packet in the queue, blocking for up to - // timeout seconds for one to be available - // - DataTransferType* peekPacket(float timeout); - - virtual void createStream(const std::string& streamID, const BULKIO::StreamSRI& sri); - virtual void removeStream(const std::string& streamID); - - virtual bool isStreamActive(const std::string& streamID); - virtual bool isStreamEnabled(const std::string& streamID); - - DataTransferType* fetchPacket(const std::string& streamID); - void packetReceived(const std::string& streamID); - - // Discard currently queued packets for the given stream ID, up to the - // first end-of-stream; requires caller to hold dataBufferLock - void discardPacketsForStream(const std::string& streamID); - - friend class InputStream; - size_t samplesAvailable(const std::string& streamID, bool firstPacket); - - // Checks whether the packet should be queued or discarded; also handles - // end-of-stream if the packet is being discarded - bool _acceptPacket(const std::string& 
streamID, bool EOS); - - // Stops tracking the SRI for streamID, returning true if the stream was - // the last blocking stream, indicating that blocking can be turned off - // for the work queue - bool _handleEOS(const std::string& streamID); - - // - // Returns the total number of elements of data in a pushPacket call, for - // statistical tracking; enables XML and File specialization, which have - // different notions of size - // - int _getElementLength(PushArgumentType data); - }; - - template < typename PortTraits > - class InPort : public InPortBase - { - public: - typedef PortTraits Traits; - - // Port Variable Type - typedef typename Traits::POAPortType PortVarType; - - // Interface Type - typedef typename Traits::PortType PortType; - - // Interface Type - typedef typename Traits::PortType ProvidesPortType; - - // Transport Sequence Type use to during push packet - typedef typename Traits::SequenceType PortSequenceType; - - // - // Transport type used by this port - // - typedef typename Traits::TransportType TransportType; - - // - // Native type mapping of TransportType - // - typedef typename Traits::NativeType NativeType; - - // - // Declaration of DataTransfer class from TransportType trait and DataBuffer type trait - // - typedef DataTransfer< typename Traits::DataTransferTraits > DataTransferType; - - // backwards compatible definition - typedef DataTransferType dataTransfer; - - // queue of dataTranfer objects maintained by the port - typedef std::deque< DataTransferType * > WorkQueue; - - // Input stream interface used by this port - typedef InputStream StreamType; - - // List type for input streams provided by this port - typedef std::list StreamList; - - // - // InPort - creates a provides port that can accept data vectors from a source - // - // @param port_name name of the port taken from .scd.xml file - // @param sriCmp comparator function that accepts to StreamSRI objects and compares their contents, - // if all members match then return 
true, otherwise false. This is used during the pushSRI method - // @param newStreamCB interface that is called when new SRI.streamID is received - InPort(std::string port_name, - LOGGER_PTR logger, - bulkio::sri::Compare sriCmp = bulkio::sri::DefaultComparator, - SriListener *newStreamCB = NULL ); - - InPort(std::string port_name, - bulkio::sri::Compare sriCmp = bulkio::sri::DefaultComparator, - SriListener *newStreamCB = NULL ); - - InPort(std::string port_name, void *); - - // - // pushPacket called by the source component when pushing a vector of data into a component. This method will save off the data - // vector, timestamp, EOS and streamID onto a queue for consumption by the component via the getPacket method - // - // @param data - the vector of data to be consumed - // @param T - a time stamp for the data, the time represents the associated time value for the first entry of the data vector - // @param EOS - indicator that the stream has ended, (stream is identified by streamID) - // @param streamID - name of the stream the vector and stream context data are associated with - virtual void pushPacket(const PortSequenceType& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID); - - // - // Stream-based input API - // - - /** - * @brief Gets the stream that should be used for the next basic read. - * @param timeout Seconds to wait for a stream; a negative value waits - * indefinitely. - * @returns Input stream ready for reading on success. - * @returns Null input stream if timeout expires or port is stopped. - */ - StreamType getCurrentStream(float timeout=bulkio::Const::BLOCKING); - - /** - * @brief Get the active stream with the given stream ID. - * @param streamID Stream identifier. - * @returns Input stream for @p streamID if it exists. - * @returns Null input stream if no such stream ID exits. - */ - StreamType getStream(const std::string& streamID); - - /** - * @brief Gets the current set of active streams. 
- * @returns List of streams. - */ - StreamList getStreams(); - - StreamList pollStreams(float timeout); - StreamList pollStreams(StreamList& pollset, float timeout); - - StreamList pollStreams(size_t samples, float timeout); - StreamList pollStreams(StreamList& pollset, size_t samples, float timeout); - - /** - * @brief Registers a callback for new streams. - * @param target Class instance. - * @param func Member function pointer. - */ - template - void addStreamListener(Target target, Func func) { - streamAdded.add(target, func); - } - - /** - * @brief Unregisters a callback for new streams. - * @param target Class instance. - * @param func Member function pointer. - */ - template - void removeStreamListener(Target target, Func func) { - streamAdded.remove(target, func); - } - - protected: - typedef InPortBase super; - using super::packetWaiters; - using super::logger; - - // Allow the input stream type friend access so it can call removeStream() - // when it acknowledges an end-of-stream - friend class InputStream; - - // - // Notification for new stream creation - // - ossie::notification streamAdded; - - // - // Streams that are currently active - // - typedef std::map StreamMap; - StreamMap streams; - boost::mutex streamsMutex; - - // Streams that have the same stream ID as an active stream, when an - // end-of-stream has been queued but not yet read - std::multimap pendingStreams; - - virtual void createStream(const std::string& streamID, const BULKIO::StreamSRI& sri); - virtual void removeStream(const std::string& streamID); - - virtual bool isStreamActive(const std::string& streamID); - virtual bool isStreamEnabled(const std::string& streamID); - - StreamList getReadyStreams(size_t samples); - }; - - // - // InStringPort - // Base template for simple data transfers between Input/Output ports. 
This class is defined by 2 trait classes - // DataTransferTraits: This template trait defines the DataTranfer object that is returned by the getPacket method - // PortTraits - This template provides the context for the port's middleware transport classes and they base data types - // passed between port objects - // - // Both classes have a simlar types of TransportType and SequenceType and the DataTransferTraits defines the the type for the - // data buffer used to store incoming streams of data. These 2 class should be combined to described InputPortTraits. - // - - - template < typename PortTraits > - class InStringPort : public InPortBase - { - - public: - - typedef PortTraits Traits; - - // Port Variable Type - typedef typename Traits::POAPortType PortVarType; - - // Interface Type - typedef typename Traits::PortType PortType; - - // Interface Type - typedef typename Traits::PortType ProvidesPortType; - - // Transport Sequence Type use to during push packet - typedef char * PortSequenceType; - - // - // Transport type used by this port - // - typedef typename Traits::TransportType TransportType; - - // - // Native type mapping of TransportType - // - typedef typename Traits::NativeType NativeType; - - // - // Data transfer object from ports to components - // - typedef DataTransfer< typename Traits::DataTransferTraits > DataTransferType; - - - // backwards compatible defintion - typedef DataTransfer< typename Traits::DataTransferTraits > dataTransfer; - - - // queue of dataTranfer objects maintained by the port - typedef std::deque< DataTransferType * > WorkQueue; - - - // - // InStringPort - creates a provides port that can accept floating point vectors from a source - // - // @param port_name name of the port taken from .scd.xml file - // @param SriCompareFunc comparator function that accepts to StreamSRI objects and compares their contents, - // if all members match then return true, otherwise false. 
This is used during the pushSRI method - // @param newStreamCB interface that is called when new SRI.streamID is received - - InStringPort(std::string port_name, - LOGGER_PTR logger, - bulkio::sri::Compare = bulkio::sri::DefaultComparator, - SriListener *newStreamCB = NULL ); - - InStringPort(std::string port_name, - bulkio::sri::Compare = bulkio::sri::DefaultComparator, - SriListener *newStreamCB = NULL ); - - InStringPort(std::string port_name, void * ); - - // - // pushPacket called by the source component when pushing a vector of data into a component. This method will save off the data - // vector, timestamp, EOS and streamID onto a queue for consumption by the component via the getPacket method - // - // @param data - the vector of data to be consumed - // @param T - a time stamp for the data, the time represents the associated time value for the first entry of the data vector - // @param EOS - indicator that the stream has ended, (stream is identified by streamID) - // @param streamID - name of the stream the vector and stream context data are associated with - virtual void pushPacket(const char *data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID); - - - // - // pushPacket called by the source component when pushing a vector of data into a component. 
This method will save off the data - // vector, timestamp, EOS and streamID onto a queue for consumption by the component via the getPacket method - // - // @param data - the vector of data to be consumed - // @param EOS - indicator that the stream has ended, (stream is identified by streamID) - // @param streamID - name of the stream the vector and stream context data are associated with - virtual void pushPacket( const char *data, CORBA::Boolean EOS, const char* streamID); - }; - - - /* - Provides Port Definitions for All Bulk IO pushPacket Port definitions - * - */ - // Bulkio char (Int8) input - typedef InPort< CharPortTraits > InCharPort; - // Bulkio octet (UInt8) input - typedef InPort< OctetPortTraits > InOctetPort; - // Bulkio Int8 input - typedef InCharPort InInt8Port; - // Bulkio UInt8 input - typedef InOctetPort InUInt8Port; - // Bulkio short (Int16) input - typedef InPort< ShortPortTraits > InShortPort; - // Bulkio unsigned short (UInt16) input - typedef InPort< UShortPortTraits > InUShortPort; - // Bulkio Int16 input - typedef InShortPort InInt16Port; - // Bulkio UInt16 input - typedef InUShortPort InUInt16Port; - // Bulkio long (Int32) input - typedef InPort< LongPortTraits > InLongPort; - // Bulkio unsigned long (UInt32) input - typedef InPort< ULongPortTraits > InULongPort; - // Bulkio Int32 input - typedef InLongPort InInt32Port; - // Bulkio UInt32 input - typedef InULongPort InUInt32Port; - // Bulkio long long (Int64) input - typedef InPort< LongLongPortTraits > InLongLongPort; - // Bulkio unsigned long long (UInt64) input - typedef InPort< ULongLongPortTraits > InULongLongPort; - // Bulkio Int64 input - typedef InLongLongPort InInt64Port; - // Bulkio UInt64 input - typedef InULongLongPort InUInt64Port; - // Bulkio float input - typedef InPort< FloatPortTraits > InFloatPort; - // Bulkio double input - typedef InPort< DoublePortTraits > InDoublePort; - // Bulkio URL input - typedef InStringPort< URLPortTraits > InURLPort; - // Bulkio File (URL) 
input - typedef InStringPort< FilePortTraits > InFilePort; - // Bulkio XML input - typedef InStringPort< XMLPortTraits > InXMLPort; - - - - -} // end of bulkio namespace - - -#endif diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_in_stream.cpp b/bulkioInterfaces/libsrc/cpp/bulkio_in_stream.cpp index 2d3f7835e..d67fe5c25 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_in_stream.cpp +++ b/bulkioInterfaces/libsrc/cpp/bulkio_in_stream.cpp @@ -21,685 +21,770 @@ #include "bulkio_in_stream.h" #include "bulkio_time_operators.h" #include "bulkio_in_port.h" +#include "bulkio_p.h" -namespace { - template - class stealable_vector : public std::vector { - public: - stealable_vector() +#include +#include +#include + +#include + +using bulkio::InputStream; + +template +class InputStream::Impl : public StreamBase::Impl { +public: + typedef StreamBase::Impl ImplBase; + + typedef typename InPortType::Packet PacketType; + + enum EosState { + EOS_NONE, + EOS_RECEIVED, + EOS_REACHED, + EOS_REPORTED + }; + + Impl(const bulkio::StreamDescriptor& sri, InPortType* port) : + ImplBase(sri), + _port(port), + _eosState(EOS_NONE), + _enabled(true), + _newstream(true) { } - T* steal() + DataBlockType readPacket(bool blocking) + { + boost::scoped_ptr packet(_fetchPacket(blocking)); + if (_eosState == EOS_RECEIVED) { + _eosState = EOS_REACHED; + } + if (!packet || (packet->EOS && packet->buffer.empty())) { + _reportIfEosReached(); + return DataBlockType(); + } + DataBlockType block(packet->SRI, packet->buffer); + // Add timestamp via a templatized method so that dataXML can omit it + _addTimestamp(block, packet->T); + _setBlockFlags(block, *packet); + // Update local SRI from packet + StreamDescriptor::operator=(packet->SRI); + return block; + } + + bool enabled() const { - T* out = this->_M_impl._M_start; - this->_M_impl._M_start = 0; - this->_M_impl._M_finish = 0; - this->_M_impl._M_end_of_storage = 0; - return out; + return _enabled; } - }; - template - T* steal_buffer(std::vector& vec) - { - 
stealable_vector other; - std::swap(vec, other); - return other.steal(); - } -} + void enable() + { + // Changing the enabled flag requires holding the port's streamsMutex + // (that controls access to the stream map) to ensure that the change + // is atomic with respect to handling end-of-stream packets. Otherwise, + // there is a race condition between the port's IO thread and the + // thread that enables the stream--it could be re-enabled and start + // reading in between the port checking whether to discard the packet + // and closing the stream. Because it is assumed that the same thread + // that calls enable is the one doing the reading, it is not necessary + // to apply mutual exclusion across the entire public stream API, just + // enable/disable. + boost::mutex::scoped_lock lock(_port->streamsMutex); + _enabled = true; + } + + virtual void disable() + { + // See above re: locking + boost::mutex::scoped_lock lock(_port->streamsMutex); + _enabled = false; -using bulkio::InputStream; + // Unless end-of-stream has been received by the port (meaning any further + // packets with this stream ID are for a different instance), purge any + // packets for this stream from the port's queue + if (_eosState == EOS_NONE) { + _port->discardPacketsForStream(_streamID); + } + } -template -class InputStream::Impl { -public: - typedef PortTraits TraitsType; - typedef DataTransfer DataTransferType; - typedef typename DataTransferType::NativeDataType NativeType; - typedef std::vector VectorType; - typedef DataBlock DataBlockType; - - enum EosState { - EOS_NONE, - EOS_RECEIVED, - EOS_REACHED, - EOS_REPORTED - }; - - Impl(const BULKIO::StreamSRI& sri, bulkio::InPort* port) : - _streamID(sri.streamID), - _sri(sri), - _eosState(EOS_NONE), - _port(port), - _queue(), - _pending(0), - _samplesQueued(0), - _sampleOffset(0), - _enabled(true), - _newstream(true) - { - } - - ~Impl() - { - delete _pending; - } - - const std::string& streamID() const - { - return _streamID; - } - - const 
BULKIO::StreamSRI& sri() const - { - return _sri; - } - - bool eos() - { - if (_queue.empty()) { - // Try a non-blocking fetch to see if there's an empty end-of-stream - // packet waiting; this helps with the case where the last read consumes - // exactly the remaining data, and the stream will never report a ready - // state again - _fetchPacket(false); - } - // At this point, if end-of-stream has been reached, make sure it's been - // reported - _reportIfEosReached(); - return (_eosState == EOS_REPORTED); - } - - size_t samplesAvailable() - { - // Start with the samples already in the queue - size_t queued = _samplesQueued; - if (queued > 0) { - // Adjust number of samples to account for complex data, if necessary - const BULKIO::StreamSRI& sri = _queue.front()->SRI; - if (sri.mode) { - queued /= 2; - } - } - - // Only search the port's queue if there is no SRI change or input queue - // flush pending, and an end-of-stream has not been received - if (!_pending && (_eosState == EOS_NONE)) { - // If the queue is empty, this is the first read of a segment (i.e., - // search can go past the first packet if the SRI change or queue flush - // flag is set) - bool first = _queue.empty(); - queued += _port->samplesAvailable(_streamID, first); - } - - return queued; - } - - DataBlockType readPacket(bool blocking) - { - if (_samplesQueued == 0) { - _fetchPacket(blocking); - } - - if (_samplesQueued == 0) { - // It's possible that there are no samples queued because of an end-of- - // stream; if so, report it so that this stream can be dissociated from - // the port - _reportIfEosReached(); - return DataBlockType(); - } - const size_t samples = _queue.front()->dataBuffer.size() - _sampleOffset; - return _readData(samples, samples); - } - - DataBlockType read(size_t count, size_t consume, bool blocking) - { - // Try to get the SRI for the upcoming block of data, fetching it from the - // port's input queue if necessary - const BULKIO::StreamSRI* sri = _nextSRI(blocking); - if 
(!sri) { - // No SRI retreived implies no data will be retrieved, either due to end- - // of-stream or because it would block - _reportIfEosReached(); - return DataBlockType(); - } - - // If the next block of data is complex, double the read and consume size - // (which the lower-level I/O handles in terms of scalars) so that the - // returned block has the right number of samples - if (sri->mode == 1) { - count *= 2; - consume *= 2; - } - - // Queue up packets from the port until we have enough data to satisfy the - // requested read amount - while (_samplesQueued < count) { - if (!_fetchPacket(blocking)) { - break; - } - } - - if (_samplesQueued == 0) { - // As above, it's possible that there are no samples due to an end-of- - // stream - _reportIfEosReached(); - return DataBlockType(); - } - - // Only read as many samples as are available (e.g., if a new SRI is coming - // or the stream reached the end) - const size_t samples = std::min(count, _samplesQueued); - - // Handle a partial read, which could mean that there's not enough data at - // present (non-blocking), or that the read pointer has reached the end of - // a segment (new SRI, queue flush, end-of-stream) - if (samples < count) { - // Non-blocking: return a null block if there's not currently a break in - // the data, under the assumption that a future read might return the - // full amount - if (!blocking && !_pending && (_eosState == EOS_NONE)) { - return DataBlockType(); - } - // Otherwise, consume all remaining data (when not requested as 0) - if (consume != 0) - consume = samples; - } - - return _readData(samples, consume); - } - - size_t skip(size_t count) - { - // If the next block of data is complex, double the skip size (which the - // lower-level I/O handles in terms of scalars) so that the right number of - // samples is skipped - const BULKIO::StreamSRI* sri = _nextSRI(true); - if (!sri) { - return 0; - } - - size_t item_size = sri->mode?2:1; - count *= item_size; - - // Queue up packets 
from the port until we have enough data to satisfy the - // requested read amount - while (_samplesQueued < count) { - if (!_fetchPacket(true)) { - break; - } - } - - count = std::min(count, _samplesQueued); - _consumeData(count); - - // Convert scalars back to samples - return count / item_size; - } - - bool ready() - { - if (_samplesQueued) { - return true; - } else { - return samplesAvailable() > 0; - } - } - - bool enabled() const - { - return _enabled; - } - - void enable() - { - // Changing the enabled flag requires holding the port's dataBufferLock - // (that controls access to its queue) to ensure that the change is atomic - // with respect to handling end-of-stream packets. Otherwise, there is a - // race condition between the port's IO thread and the thread that enables - // the stream--it could be re-enabled and start reading in between the - // port checking whether to discard the packet and closing the stream. - // Because it is assumed that the same thread that calls enable is the one - // doing the reading, it is not necessary to apply mutual exclusion across - // the entire public stream API, just enable/disable. - boost::mutex::scoped_lock lock(_port->dataBufferLock); - _enabled = true; - } - - void disable() - { - { - // See above re: locking - boost::mutex::scoped_lock lock(_port->dataBufferLock); - _enabled = false; - - // Unless end-of-stream has been received by the port (meaning any further - // packets with this stream ID are for a different instance), purge any - // packets for this stream from the port's queue - if (_eosState == EOS_NONE) { - _port->discardPacketsForStream(_streamID); - } - } - // NB: The lock is not required to modify the internal stream queue state, - // because it should only be accessed by the thread that is reading from - // the stream - - // Purge the packet queue... 
- for (typename QueueType::iterator packet = _queue.begin(); packet != _queue.end(); ++packet) { - _deletePacket(*packet); - } - _queue.clear(); - _sampleOffset = 0; - _samplesQueued = 0; - - // ...and the pending packet - if (_pending) { - _deletePacket(_pending); - } - } - - void close() - { - // NB: This method is always called by the port with dataBufferLock held - - // If this stream is enabled, close() is in response to the stream calling - // removeStream() on the port, so there's nothing left to do - if (_enabled) { - return; - } - - // Consider end-of-stream reported, since the stream has already been - // removed from the port; otherwise, there's nothing to do - _eosState = EOS_REPORTED; - } - - bool hasBufferedData() const - { - // To nudge the caller to check end-of-stream, return true if it has been - // reached but not reported - if (_eosState == EOS_REACHED) { - return true; - } - return !_queue.empty() || _pending; - } + void close() + { + // NB: This method is always called by the port with streamsMutex held -private: - void _reportIfEosReached() - { - if (_eosState == EOS_REACHED) { - // This is the first time end-of-stream has been checked since it - // was reached; remove the stream from the port now, since the - // caller knows that the stream ended - _port->removeStream(_streamID); - _eosState = EOS_REPORTED; - } - } - - void _consumeData(size_t count) - { - while (count > 0) { - const VectorType& data = _queue.front()->dataBuffer; - - const size_t available = data.size() - _sampleOffset; - const size_t pass = std::min(available, count); - - _sampleOffset += pass; - _samplesQueued -= pass; - count -= pass; - - if (_sampleOffset >= data.size()) { - // Read pointer has passed the end of the packet data - _consumePacket(); - _sampleOffset = 0; - } - } - } - - void _deletePacket(DataTransferType* packet) - { - // The packet buffer was allocated with new[] by the CORBA layer, while - // vector will use non-array delete, so explicitly delete the 
buffer - delete[] steal_buffer(packet->dataBuffer); - delete packet; - } - - void _consumePacket() - { - // Acknowledge any end-of-stream flag and delete the packet - DataTransferType* packet = _queue.front(); - if (packet->EOS) { - _eosState = EOS_REACHED; - } - - // The packet buffer was allocated with new[] by the CORBA layer, while - // vector will use non-array delete, so explicitly delete the buffer - _deletePacket(packet); - _queue.erase(_queue.begin()); - - // If the queue is empty, move the pending packet onto the queue - if (_queue.empty() && _pending) { - _queuePacket(_pending); - _pending = 0; - } - } - - DataBlockType _readData(size_t count, size_t consume) - { - // Acknowledge pending SRI change - DataTransferType* front = _queue.front(); - int sriChangeFlags = bulkio::sri::NONE; - if (front->sriChanged) { - sriChangeFlags = bulkio::sri::compareFields(_sri, front->SRI); - front->sriChanged = false; - _sri = front->SRI; - } - - if ( _newstream ) { - // seed sri change flags for new stream - sriChangeFlags |= bulkio::sri::STREAMID |bulkio::sri::XDELTA | bulkio::sri::YDELTA | bulkio::sri::KEYWORDS | bulkio::sri::MODE; - _newstream=false; - } - - // Allocate empty data block and propagate the SRI change and input queue - // flush flags - DataBlockType data(_sri); - data.sriChangeFlags(sriChangeFlags); - if (front->inputQueueFlushed) { - data.inputQueueFlushed(true); - front->inputQueueFlushed = false; - } - - if ((count <= consume) && (_sampleOffset == 0) && (front->dataBuffer.size() == count)) { - // Optimization: when the read aligns perfectly with the front packet's - // data buffer, and the entire packet is being consumed, swap the vector - // data - data.addTimestamp(bulkio::SampleTimestamp(front->T, 0)); - data.swap(front->dataBuffer); - _samplesQueued -= count; - _consumePacket(); - return data; - } - - data.resize(count); - NativeType* data_buffer = data.data(); - size_t data_offset = 0; - - // Assemble data that may span several input packets 
into the output buffer - size_t packet_index = 0; - size_t packet_offset = _sampleOffset; - while (count > 0) { - DataTransferType* packet = _queue[packet_index]; - const VectorType& input_data = packet->dataBuffer; - - // Determine the timestamp of this chunk of data; if this is the - // first chunk, the packet offset (number of samples already read) - // must be accounted for, so adjust the timestamp based on the SRI. - // Otherwise, the adjustment is a noop. - BULKIO::PrecisionUTCTime time = packet->T; - double time_offset = packet_offset * packet->SRI.xdelta; - size_t sample_offset = data_offset; - if (packet->SRI.mode) { - // Complex data; each sample is two values - time_offset /= 2.0; - sample_offset /= 2; - } - - // If there is a time offset, apply the adjustment and mark the timestamp - // so that the caller knows it was calculated rather than received - bool synthetic = false; - if (time_offset > 0.0) { - time += time_offset; - synthetic = true; - } - - data.addTimestamp(bulkio::SampleTimestamp(time, sample_offset, synthetic)); - - // The number of samples copied on this pass may be less than the total - // remaining - const size_t available = input_data.size() - packet_offset; - const size_t pass = std::min(available, count); - - std::copy(&input_data[packet_offset], &input_data[packet_offset+pass], &data_buffer[data_offset]); - data_offset += pass; - packet_offset += pass; - count -= pass; - - // If all the data from the current packet has been read, move on to - // the next - if (packet_offset >= input_data.size()) { - packet_offset = 0; - ++packet_index; - } - } - - // Advance the read pointers - _consumeData(consume); - - return data; - } - - const BULKIO::StreamSRI* _nextSRI(bool blocking) - { - if (_queue.empty()) { - if (!_fetchPacket(blocking)) { - return 0; - } - } - - return &(_queue.front()->SRI); - } - - bool _fetchPacket(bool blocking) - { - // Don't fetch a packet from the port if stream is disabled - if (!_enabled) { - return false; - } - 
- if (_pending) { - // Cannot read another packet until non-bridging packet is acknowledged - return false; - } - - // Any future packets with this stream ID belong to another InputStream - if (_eosState != EOS_NONE) { - return false; - } - - float timeout = blocking?bulkio::Const::BLOCKING:bulkio::Const::NON_BLOCKING; - DataTransferType* packet = _port->getPacket(timeout, _streamID); - if (!packet) { - return false; - } - - if (packet->EOS) { - _eosState = EOS_RECEIVED; - } - if (_queue.empty() || _canBridge(packet)) { - return _queuePacket(packet); - } else { - _pending = packet; - return false; - } - } - - bool _queuePacket(DataTransferType* packet) - { - if (packet->EOS && packet->dataBuffer.empty()) { - // Handle end-of-stream packet with no data (assuming that timestamps, - // SRI changes, and queue flushes are irrelevant at this point) - if (_queue.empty()) { - // No queued packets, read pointer has reached end-of-stream - _eosState = EOS_REACHED; - } else { - // Assign the end-of-stream flag to the last packet in the queue so - // that it is handled on read - _queue.back()->EOS = true; - } - delete packet; - // Return false to let the caller know that no more sample data is - // forthcoming - return false; - } else { - _samplesQueued += packet->dataBuffer.size(); - _queue.push_back(packet); - return true; - } - } - - bool _canBridge(DataTransferType* packet) const - { - return !(packet->sriChanged || packet->inputQueueFlushed); - } - - const std::string _streamID; - BULKIO::StreamSRI _sri; - EosState _eosState; - InPort* _port; - typedef std::vector QueueType; - QueueType _queue; - DataTransferType* _pending; - size_t _samplesQueued; - size_t _sampleOffset; - bool _enabled; - bool _newstream; + // Consider end-of-stream reported, since the stream has already been + // removed from the port; otherwise, there's nothing to do + _eosState = EOS_REPORTED; + } + + virtual bool eos() + { + _reportIfEosReached(); + // At this point, if end-of-stream has been 
reached, the state is + // reported (it gets set above), so the checking for the latter is + // sufficient + return (_eosState == EOS_REPORTED); + } + + virtual bool hasBufferedData() const + { + // For the base class, there is no data to report; however, to nudge + // the check end-of-stream, return true if it has been reached but not + // reported + return (_eosState == EOS_REACHED); + } + +protected: + PacketType* _fetchPacket(bool blocking) + { + // Don't fetch a packet from the port if stream is disabled + if (!_enabled) { + return 0; + } + + // Any future packets with this stream ID belong to another InputStream + if (_eosState != EOS_NONE) { + return 0; + } + + float timeout = blocking?bulkio::Const::BLOCKING:bulkio::Const::NON_BLOCKING; + PacketType* packet = _port->nextPacket(timeout, _streamID); + if (packet && packet->EOS) { + _eosState = EOS_RECEIVED; + } + return packet; + } + + void _reportIfEosReached() + { + if (_eosState == EOS_REACHED) { + // This is the first time end-of-stream has been checked since it + // was reached; remove the stream from the port now, since the + // caller knows that the stream ended + _port->removeStream(_streamID); + _eosState = EOS_REPORTED; + } + } + + size_t _samplesAvailable(bool first) + { + if (_eosState == EOS_NONE) { + return _port->samplesAvailable(_streamID, first); + } else { + return 0; + } + } + + void _setBlockFlags(DataBlockType& block, PacketType& packet) + { + // Allocate empty data block and propagate the SRI change and input + // queue flush flags + int flags=0; + if (packet.sriChanged) { + flags = bulkio::sri::compareFields(this->sri(), packet.SRI.sri()); + block.sriChangeFlags(flags); + } + if (_newstream) { + _newstream=false; + flags |= bulkio::sri::STREAMID |bulkio::sri::XDELTA | bulkio::sri::YDELTA | bulkio::sri::KEYWORDS | bulkio::sri::MODE; + block.sriChangeFlags(flags); + } + if (packet.inputQueueFlushed) { + block.inputQueueFlushed(true); + } + } + + void _addTimestamp(DataBlockType& block, 
const BULKIO::PrecisionUTCTime& time) + { + block.addTimestamp(time); + } + + InPortType* _port; + EosState _eosState; + bool _enabled; + bool _newstream; }; +namespace bulkio { + template <> + void InputStream::Impl::_addTimestamp(bulkio::StringDataBlock& block, + const BULKIO::PrecisionUTCTime&) + { + // Discard the time stamp, which was created by the input port to adapt to + // the common template implementation + } +} + +template +InputStream::InputStream() : + StreamBase() +{ +} + +template +InputStream::InputStream(const StreamDescriptor& sri, InPortType* port) : + StreamBase(boost::make_shared(sri, port)) +{ +} -template -InputStream::InputStream() : - _impl() +template +InputStream::InputStream(const boost::shared_ptr& impl) : + StreamBase(impl) { } -template -InputStream::InputStream(const BULKIO::StreamSRI& sri, bulkio::InPort* port) : - _impl(new Impl(sri, port)) +template +typename InputStream::DataBlockType InputStream::read() { + return impl().readPacket(true); } -template -const std::string& InputStream::streamID() const +template +typename InputStream::DataBlockType InputStream::tryread() { - return _impl->streamID(); + return impl().readPacket(false); } -template -const BULKIO::StreamSRI& InputStream::sri() const +template +bool InputStream::enabled() const { - return _impl->sri(); + return impl().enabled(); } -template -bool InputStream::eos() +template +void InputStream::enable() { - return _impl->eos(); + impl().enable(); } -template -typename InputStream::DataBlockType InputStream::read() +template +void InputStream::disable() { - return _impl->readPacket(true); + impl().disable(); } -template -typename InputStream::DataBlockType InputStream::read(size_t count) +template +bool InputStream::eos() { - return _impl->read(count, count, true); + return impl().eos(); } -template -typename InputStream::DataBlockType InputStream::read(size_t count, size_t consume) +template +InputStream::operator unspecified_bool_type() const +{ + return 
_impl?static_cast(&InputStream::impl):0; +} + +template +typename InputStream::Impl& InputStream::impl() +{ + return static_cast(*this->_impl); +} + +template +const typename InputStream::Impl& InputStream::impl() const +{ + return static_cast(*this->_impl); +} + +template +bool InputStream::hasBufferedData() +{ + return impl().hasBufferedData(); +} + +template +void InputStream::close() +{ + impl().close(); +} + + +using bulkio::BufferedInputStream; + +template +class BufferedInputStream::Impl : public Base::Impl { +public: + typedef typename Base::Impl ImplBase; + + typedef typename ImplBase::PacketType PacketType; + typedef typename NativeTraits::NativeType NativeType; + typedef typename BufferTraits::BufferType BufferType; + typedef typename BufferTraits::MutableBufferType MutableBufferType; + + Impl(const bulkio::StreamDescriptor& sri, InPortType* port) : + ImplBase(sri, port), + _queue(), + _pending(0), + _samplesQueued(0), + _sampleOffset(0) + { + } + + ~Impl() + { + delete _pending; + } + + virtual bool eos() + { + if (_queue.empty()) { + // Try a non-blocking fetch to see if there's an empty end-of-stream + // packet waiting; this helps with the case where the last read consumes + // exactly the remaining data, and the stream will never report a ready + // state again + _fetchPacket(false); + } + return ImplBase::eos(); + } + + size_t samplesAvailable() + { + // Start with the samples already in the queue + size_t queued = _samplesQueued; + if (queued > 0) { + // Adjust number of samples to account for complex data, if necessary + if (_queue.front().SRI.complex()) { + queued /= 2; + } + } + + // Only search the port's queue if there is no SRI change or input queue + // flush pending + if (!_pending) { + // If the queue is empty, this is the first read of a segment (i.e., + // search can go past the first packet if the SRI change or queue flush + // flag is set) + bool first = _queue.empty(); + queued += ImplBase::_samplesAvailable(first); + } + + return 
queued; + } + + DataBlockType readPacket(bool blocking) + { + if (_samplesQueued == 0) { + _fetchPacket(blocking); + } + + if (_samplesQueued == 0) { + // It's possible that there are no samples queued because of an + // end-of-stream; if so, report it so that this stream can be + // dissociated from the port + this->_reportIfEosReached(); + return DataBlockType(); + } + // Only read up to the end of the first packet in the queue + const size_t samples = _queue.front().buffer.size() - _sampleOffset; + return _readData(samples, samples); + } + + DataBlockType read(size_t count, size_t consume, bool blocking) + { + // Try to get the SRI for the upcoming block of data, fetching it from the + // port's input queue if necessary + const StreamDescriptor* sri = _nextSRI(blocking); + if (!sri) { + // No SRI retreived implies no data will be retrieved, either due + // to end-of-stream or because it would block + this->_reportIfEosReached(); + return DataBlockType(); + } + + // If the next block of data is complex, double the read and consume size + // (which the lower-level I/O handles in terms of scalars) so that the + // returned block has the right number of samples + if (sri->complex()) { + count *= 2; + consume *= 2; + } + + // Queue up packets from the port until we have enough data to satisfy the + // requested read amount + while (_samplesQueued < count) { + if (!_fetchPacket(blocking)) { + break; + } + } + + if (_samplesQueued == 0) { + // As above, it's possible that there are no samples due to an end- + // of-stream + this->_reportIfEosReached(); + return DataBlockType(); + } + + // Only read as many samples as are available (e.g., if a new SRI is coming + // or the stream reached the end) + const size_t samples = std::min(count, _samplesQueued); + + // Handle a partial read, which could mean that there's not enough data at + // present (non-blocking), or that the read pointer has reached the end of + // a segment (new SRI, queue flush, end-of-stream) + if 
(samples < count) { + // Non-blocking: return a null block if there's not currently a break in + // the data, under the assumption that a future read might return the + // full amount + if (!blocking && !_pending && (this->_eosState == ImplBase::EOS_NONE)) { + return DataBlockType(); + } + // Otherwise, consume all remaining data (when not requested as 0) + if (consume != 0) + consume = samples; + } + + return _readData(samples, consume); + } + + size_t skip(size_t count) + { + // If the next block of data is complex, double the skip size (which the + // lower-level I/O handles in terms of scalars) so that the right number of + // samples is skipped + const StreamDescriptor* sri = _nextSRI(true); + if (!sri) { + return 0; + } + + size_t item_size = sri->complex()?2:1; + count *= item_size; + + // Queue up packets from the port until we have enough data to satisfy the + // requested read amount + while (_samplesQueued < count) { + if (!_fetchPacket(true)) { + break; + } + } + + count = std::min(count, _samplesQueued); + _consumeData(count); + + // Convert scalars back to samples + return count / item_size; + } + + bool ready() + { + if (_samplesQueued) { + return true; + } else { + return samplesAvailable() > 0; + } + } + + virtual void disable() + { + ImplBase::disable(); + // NB: The lock is not required to modify the internal stream queue + // state, because it should only be accessed by the thread that is + // reading from the stream + + // Clear queued packets, which implicitly deletes them + _queue.clear(); + _sampleOffset = 0; + _samplesQueued = 0; + + // Delete pending packet (it's safe to delete null pointers) + delete _pending; + } + + bool hasBufferedData() const + { + if (!_queue.empty() || _pending) { + // Return true if either there are queued or pending packets + return true; + } + return ImplBase::hasBufferedData(); + } + +private: + void _consumeData(size_t count) + { + while (count > 0) { + const BufferType& data = _queue.front().buffer; + + const 
size_t available = data.size() - _sampleOffset; + const size_t pass = std::min(available, count); + + _sampleOffset += pass; + _samplesQueued -= pass; + count -= pass; + + if (_sampleOffset >= data.size()) { + // Read pointer has passed the end of the packet data + _consumePacket(); + _sampleOffset = 0; + } + } + } + + void _consumePacket() + { + // Acknowledge any end-of-stream flag and delete the packet (the queue will + // automatically delete it when it's removed) + if (_queue.front().EOS) { + this->_eosState = ImplBase::EOS_REACHED; + } + _queue.pop_front(); + + // If the queue is empty, move the pending packet onto the queue + if (_queue.empty() && _pending) { + _queuePacket(_pending); + _pending = 0; + } + } + + DataBlockType _readData(size_t count, size_t consume) + { + // Acknowledge pending SRI change + PacketType& front = _queue.front(); + + // Allocate empty data block and propagate the SRI change and input queue + // flush flags + DataBlockType data(front.SRI); + this->_setBlockFlags(data, front); + if (front.sriChanged) { + // Update the stream metadata + StreamDescriptor::operator=(front.SRI); + } + + // Clear flags from packet, since they've been reported + front.sriChanged = false; + front.inputQueueFlushed = false; + + size_t last_offset = _sampleOffset + count; + if (last_offset <= front.buffer.size()) { + // The requsted sample count can be satisfied from the first packet + _addTimestamp(data, _sampleOffset, 0, front.T); + data.buffer(front.buffer.slice(_sampleOffset, last_offset)); + } else { + // We have to span multiple packets to get the data + MutableBufferType buffer(count); + data.buffer(buffer); + size_t data_offset = 0; + + // Assemble data spanning several input packets into the output buffer + size_t packet_index = 0; + size_t packet_offset = _sampleOffset; + while (count > 0) { + PacketType& packet = _queue[packet_index]; + const BufferType& input_data = packet.buffer; + + // Add the timestamp for this pass + _addTimestamp(data, 
packet_offset, data_offset, packet.T); + + // The number of samples copied on this pass may be less than the total + // remaining + const size_t available = input_data.size() - packet_offset; + const size_t pass = std::min(available, count); + + buffer.replace(data_offset, pass, input_data, packet_offset); + data_offset += pass; + packet_offset += pass; + count -= pass; + + // If all the data from the current packet has been read, move on to + // the next + if (packet_offset >= input_data.size()) { + packet_offset = 0; + ++packet_index; + } + } + } + + // Advance the read pointers + _consumeData(consume); + + return data; + } + + void _addTimestamp(DataBlockType& data, size_t inputOffset, size_t outputOffset, BULKIO::PrecisionUTCTime time) + { + // Determine the timestamp of this chunk of data; if this is the + // first chunk, the packet offset (number of samples already read) + // must be accounted for, so adjust the timestamp based on the SRI. + // Otherwise, the adjustment is a noop. + double time_offset = inputOffset * data.xdelta(); + // Check the SRI directly for the complex mode because bit data blocks + // intentionally do not have a complex() method. 
+ if (data.sri().mode != 0) { + // Complex data; each sample is two values + time_offset /= 2.0; + outputOffset /= 2; + } + + // If there is a time offset, apply the adjustment and mark the timestamp + // so that the caller knows it was calculated rather than received + bool synthetic = false; + if (time_offset > 0.0) { + time += time_offset; + synthetic = true; + } + + data.addTimestamp(bulkio::SampleTimestamp(time, outputOffset, synthetic)); + } + + const StreamDescriptor* _nextSRI(bool blocking) + { + if (_queue.empty()) { + if (!_fetchPacket(blocking)) { + return 0; + } + } + + return &(_queue.front().SRI); + } + + bool _fetchPacket(bool blocking) + { + if (_pending) { + // Cannot read another packet until non-bridging packet is acknowledged + return false; + } + + PacketType* packet = ImplBase::_fetchPacket(blocking); + if (!packet) { + return false; + } + + if (_queue.empty() || _canBridge(packet)) { + return _queuePacket(packet); + } else { + _pending = packet; + return false; + } + } + + bool _queuePacket(PacketType* packet) + { + if (packet->EOS && packet->buffer.empty()) { + // Handle end-of-stream packet with no data (assuming that timestamps, + // SRI changes, and queue flushes are irrelevant at this point) + if (_queue.empty()) { + // No queued packets, read pointer has reached end-of-stream + this->_eosState = ImplBase::EOS_REACHED; + } else { + // Assign the end-of-stream flag to the last packet in the queue so + // that it is handled on read + _queue.back().EOS = true; + } + // Explicitly delete the packet, since it isn't being queued, and + // return false to let the caller know that no more sample data is + // forthcoming + delete packet; + return false; + } else { + // Add the packet to the queue, taking ownership; it will be deleted when + // it's consumed + _samplesQueued += packet->buffer.size(); + _queue.push_back(packet); + return true; + } + } + + bool _canBridge(PacketType* packet) const + { + return !(packet->sriChanged || 
packet->inputQueueFlushed); + } + + boost::ptr_deque _queue; + PacketType* _pending; + size_t _samplesQueued; + size_t _sampleOffset; +}; + +template +BufferedInputStream::BufferedInputStream() : + Base() { - return _impl->read(count, consume, true); } -template -typename InputStream::DataBlockType InputStream::tryread() +template +BufferedInputStream::BufferedInputStream(const bulkio::StreamDescriptor& sri, InPortType* port) : + Base(boost::make_shared(sri, port)) { - return _impl->readPacket(false); } -template -typename InputStream::DataBlockType InputStream::tryread(size_t count) +template +typename BufferedInputStream::DataBlockType BufferedInputStream::read() { - return _impl->read(count, count, false); + return impl().readPacket(true); } -template -typename InputStream::DataBlockType InputStream::tryread(size_t count, size_t consume) +template +typename BufferedInputStream::DataBlockType BufferedInputStream::read(size_t count) { - return _impl->read(count, consume, false); + return impl().read(count, count, true); } -template -size_t InputStream::skip(size_t count) +template +typename BufferedInputStream::DataBlockType BufferedInputStream::read(size_t count, size_t consume) { - return _impl->skip(count); + return impl().read(count, consume, true); } -template -bool InputStream::enabled() const +template +typename BufferedInputStream::DataBlockType BufferedInputStream::tryread() { - return _impl->enabled(); + return impl().readPacket(false); } -template -void InputStream::enable() +template +typename BufferedInputStream::DataBlockType BufferedInputStream::tryread(size_t count) { - _impl->enable(); + return impl().read(count, count, false); } -template -void InputStream::disable() +template +typename BufferedInputStream::DataBlockType BufferedInputStream::tryread(size_t count, size_t consume) { - _impl->disable(); + return impl().read(count, consume, false); } -template -size_t InputStream::samplesAvailable() +template +size_t BufferedInputStream::skip(size_t 
count) { - return _impl->samplesAvailable(); + return impl().skip(count); } -template -bool InputStream::operator!() const +template +size_t BufferedInputStream::samplesAvailable() { - return !_impl; + return impl().samplesAvailable(); } -template -bool InputStream::operator==(const InputStream& other) const +template +bool BufferedInputStream::operator==(const BufferedInputStream& other) const { - return _impl.get() == other._impl.get(); + return _impl.get() == other._impl.get(); } -template -bool InputStream::ready() +template +bool BufferedInputStream::ready() { - return _impl->ready(); + return impl().ready(); } -template -bool InputStream::hasBufferedData() +template +typename BufferedInputStream::Impl& BufferedInputStream::impl() { - return _impl->hasBufferedData(); + return static_cast(Base::impl()); } -template -void InputStream::close() +template +const typename BufferedInputStream::Impl& BufferedInputStream::impl() const { - return _impl->close(); + return static_cast(Base::impl()); } -template class InputStream; -template class InputStream; -template class InputStream; -template class InputStream; -template class InputStream; -template class InputStream; -template class InputStream; -template class InputStream; -template class InputStream; -template class InputStream; +#define INSTANTIATE_TEMPLATE(x) \ + template class InputStream; + +#define INSTANTIATE_NUMERIC_TEMPLATE(x) \ + template class BufferedInputStream; + +FOREACH_PORT_TYPE(INSTANTIATE_TEMPLATE); +FOREACH_NUMERIC_PORT_TYPE(INSTANTIATE_NUMERIC_TEMPLATE); +INSTANTIATE_NUMERIC_TEMPLATE(BULKIO::dataBit); diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_in_stream.h b/bulkioInterfaces/libsrc/cpp/bulkio_in_stream.h deleted file mode 100644 index 209b8adba..000000000 --- a/bulkioInterfaces/libsrc/cpp/bulkio_in_stream.h +++ /dev/null @@ -1,474 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. 
- * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -#ifndef __bulkio_in_stream_h -#define __bulkio_in_stream_h - -#include -#include - -#include - -#include "bulkio_traits.h" -#include "bulkio_datablock.h" - -namespace bulkio { - - template - class InPort; - - /** - * @brief BulkIO input stream class. - * @headerfile bulkio_in_stream.h - * - * %InputStream is a smart pointer-based class that encapsulates a single - * BulkIO stream for reading. It is associated with the InPort that created - * it, providing a file-like API on top of the classic BulkIO getPacket - * model. - * - * @warning Do not declare instances of this template class directly in user - * code; the template parameter and class name are not considered - * API. Use the type-specific @c typedef instead, such as - * bulkio::InFloatStream, or the nested @c typedef StreamType from - * an %InPort. - * - * Notionally, a BulkIO stream represents a contiguous data set and its - * associated signal-related information (SRI), uniquely identified by a - * stream ID, from creation until close. The SRI may vary over time, but the - * stream ID is immutable. Only one stream with a given stream ID can be - * active at a time. 
- * - * The %InputStream class itself is a lightweight handle; it is inexpensive - * to copy or store in local variables or nested data types. Assigning one - * %InputStream to another does not copy the stream state but instead - * aliases both objects to the same underlying stream. - * - * The default constructor creates an invalid "null" %InputStream that cannot - * be used for any real operations, similar to a null pointer. A stream may - * be checked for validity with the boolean ! operator: - * - * @code - * if (!stream) { - * // handle failure - * } else { - * // use stream - * } - * @endcode - * - * InputStreams are managed by the %InPort, and created in response to the - * arrival of a new SRI. Valid InputStreams are obtained by either querying - * the port, or registering a callback. - * @see InPort::getCurrentStream(float) - * @see InPort::getStream(const std::string&) - * @see InPort::getStreams() - * @see InPort::addStreamListener(Target,Func) - * - * @par Data Buffering - * Often, signal processing algorithms prefer to work on regular, fixed-size - * blocks of data. However, because the producer is working independently, data - * may be received in entirely different packet sizes. For this use case, - * %InputStream provides a read(size_t) method that frees the user from - * managing their own data buffering. - * @par - * To maintain the requested size, partial packets may be buffered, - * or a read may span multiple packets. Packets are fetched from the %InPort - * as needed; however, if an SRI change or input queue flush is encountered, - * the operation will stop, therefore, data is only read up to that point. The next - * read operation will continue at the beginning of the packet that contains - * the new SRI or input queue flush flag. - * - * @par Time Stamps - * The data block from a successful read always includes as least one time - * stamp, at a sample offset of 0. 
Because buffered reads may not begin on a - * packet boundary, the %InputStream can interpolate a time stamp based on - * the SRI @a xdelta value and the prior time stamp. When this occurs, the - * time stamp will be marked as "synthetic." - * @par - * Reads that span multiple packets will contain more than one time stamp. - * The time stamp offsets indicate at which sample the time stamp occurs, - * taking real or complex samples into account. Only the first time stamp can - * be synthetic. - * - * @par Overlapped Reads - * Certain classes of signal processing algorithms need to preserve a portion - * of the last data set for the next iteration, such as a power spectral - * density (PSD) calculation with overlap. The read(size_t,size_t) method - * supports this mode of operation by allowing the reader to consume fewer - * samples than are read. This can be thought of as a separate read pointer - * that trails behind the stream's internal buffer. - * @par - * When an overlapped read needs to span multiple packets, but an SRI change, - * input queue flush, or end-of-stream is encountered, all of the available - * data is returned and consumed, equivalent to read(size_t). The assumption - * is that special handling is required due to the pending change, and it is - * not possible for the stream to interpret the relationship between the read - * size and consume size. - * - * @par Non-Blocking Reads - * For each @a read method, there is a corresponsing @a tryread method that - * is non-blocking. If there is not enough data currently available to - * satisfy the request, but more data could become available in the future, - * the operation will return a null data block immediately. - * - * @par End-of-Stream - * In normal usage, reading continues until the end of the stream is reached, - * at which point all future read operations will fail immediately. 
When a - * read fails, or returns fewer samples than requested, it is incumbent upon - * the caller to check the stream's end-of-stream state via eos(). Once the - * end-of-stream has been acknowledged, either by an explicit check or with a - * subsequent failed read, the stream is removed from the %InPort. If the - * %InPort has another stream with the same streamID pending, it will become - * active. - * @par - * Although an end-of-stream packet may be present in the stream's internal - * buffer or the %InPort's queue, this state is not reflected in eos(). As - * with Unix pipes or sockets, the recommended pattern is to continually read - * until a failure occurs, handling the failure as needed. - */ - template - class InputStream { - public: - /** - * @brief Default constructor. - * @see InPort::getCurrentStream() - * @see InPort::getStream(const std::string&) - * @see InPort::getStreams() - * @see InPort::addStreamListener(Target,Func) - * - * Creates a null InputStream. This stream is not associated with a stream - * from any InPort instance. No methods may be called on the %InputStream - * except for operator!, which will always return true, and operator==, - * which returns true if the other %InputStream is also null. Both operators - * will return false if the other %InputStream is also not null. - * - * To get a handle to a live stream, you must query an %InPort or register - * a callback. - */ - InputStream(); - - /** - * @brief The native type of a real sample, or the real or imaginary - * component of a complex sample. - */ - typedef typename PortTraits::DataTransferTraits::NativeDataType NativeType; - - /// @brief The type of data block returned by read methods on this stream. - typedef DataBlock DataBlockType; - - /** - * @brief Returns the stream ID. - * @pre Stream is valid. - * - * The stream ID is immutable and cannot be changed. - */ - const std::string& streamID() const; - - /** - * @brief Gets the current stream metadata. 
- * @returns Read-only reference to stream SRI. - * @pre Stream is valid. - * - * The SRI that was in effect for the last read operation is considered the - * current one. A subsequent read may update the current SRI. - */ - const BULKIO::StreamSRI& sri() const; - - /** - * @brief Checks whether this stream has ended. - * @returns True if this stream has reached the end. False if the end of stream - * has not been reached. - * @pre Stream is valid. - * - * A stream is considered at the end when it has read and consumed all data - * up to the end-of-stream marker. Once end-of-stream has been reached, all - * read operations will fail immediately, as no more data will ever be - * received for this stream. - * - * The recommended practice is to check @a eos any time a read operation - * fails or returns fewer samples than requested. When the end-of-stream - * is acknowledged, either by checking @a eos or when successive reads fail - * due to an end-of-stream, the stream is removed from the %InPort. If the - * %InPort has another stream with the same streamID pending, it will - * become active. - */ - bool eos(); - - /** - * @brief Reads the next packet. - * @returns Valid data block if successful. - * @returns Null data block if the read failed. - * @pre Stream is valid. - * - * Blocking read up to the next packet boundary. Reading a packet at a time - * is the most computationally efficent method because it does not require - * the stream to copy data into an intermediate buffer; instead, it may - * pass the original buffer along to the reader. - * - * Returns a null data block immediately if: - * @li End-of-stream has been reached - * @li The InPort is stopped - */ - DataBlockType read(); - - /** - * @brief Reads a specified number of samples. - * @param count Number of samples to read - * @returns Data block containing up to @p count samples if successful. - * @returns Null data block if the read failed. - * @pre Stream is valid. 
- * - * Blocking read of @a count samples worth of data. For signal processing - * operations that require a fixed input data size, such as fast Fourier - * transform (FFT), this simplifies buffer management by offloading it to - * the stream. This usually incurs some computational overhead to copy data - * between buffers; however, this cost is intrinsic to the algorithm, and - * the reduced complexity of implementation avoids common errors. - * - * If the SRI indicates that the data is complex, @a count is in terms of - * complex samples. - * - * If any of the following conditions are encountered while fetching packets, - * the returned data block may contain fewer samples than requested. - * @li End-of-stream - * @li SRI change - * @li Input queue flush - * - * Returns a null data block immediately if: - * @li End-of-stream has been reached - * @li The InPort is stopped - */ - DataBlockType read(size_t count); - - /** - * @brief Reads a specified number of samples, with overlap. - * @param count Number of samples to read. - * @param consume Number of samples to advance read pointer. - * @returns Data block containing up to @p count samples if successful. - * @returns Null data block if the read failed. - * @pre Stream is valid. - * @pre @p consume <= @p count - * @see read(size_t) - * - * Blocking read of @a count samples worth of data will only advance the read - * pointer by @a consume samples. The remaining @c count-consume samples - * are buffered and will be returned on the following read operation. This - * method is designed to support signal processing operations that require - * overlapping data sets, such as power spectral density (PSD). - * - * If the SRI indicates that the data is complex, @a count and @a consume - * are in terms of complex samples. - * - * If any of the following conditions are encountered while fetching packets, - * the returned data block may contain fewer samples than requested. 
- * @li End-of-stream - * @li SRI change - * @li Input queue flush - * - * When this occurs, all of the returned samples are consumed, as it is - * assumed that special handling is required. - * - * Returns a null data block immediately if: - * @li End-of-stream has been reached - * @li The InPort is stopped - */ - DataBlockType read(size_t count, size_t consume); - - /** - * @brief Non-blocking read of the next packet. - * @returns Valid data block if successful. - * @returns Null data block if the read failed. - * @pre Stream is valid. - * @see read() - * - * Non-blocking version of read(), returning a null data block immediately - * when no data is available. - */ - DataBlockType tryread(); - - /** - * @brief Non-blocking sized read. - * @param count Number of samples to read. - * @returns Data block containing up to @p count samples if successful. - * @returns Null data block if the read failed. - * @pre Stream is valid. - * @see read(size_t) - * - * Non-blocking version of read(size_t), returning a null data block - * immediately when no data is available. - */ - DataBlockType tryread(size_t count); - - /** - * @brief Non-blocking read with overlap. - * @param count Number of samples to read. - * @returns Data block containing up to @p count samples if successful. - * @returns Null data block if the read failed. - * @pre Stream is valid. - * @pre @p consume <= @p count - * @see read(size_t,size_t) - * - * Non-blocking version of read(size_t,size_t), returning a null data block - * immediately when no data is available. - */ - DataBlockType tryread(size_t count, size_t consume); - - /** - * @brief Discard a specified number of samples. - * @param count Number of samples to skip. - * @returns Actual number of samples skipped. - * @pre Stream is valid. - * @see read(size_t) - * - * Skips the next @a count samples worth of data and blocks until the - * requested amount of data is available. 
If the data is not being used, - * this is more computationally efficient than the equivalent call to - * read(size_t) because no buffering is performed. - * - * If the SRI indicates that the data is complex, @a count and the return - * value are in terms of complex samples. - * - * Skipping behaves like read(size_t) when fetching packets. If any of the following - * conditions are encountered, the returned value may be less than @a count. - * @li End-of-stream - * @li SRI change - * @li Input queue flush - * - * Returns 0 immediately if: - * @li End-of-stream has been reached - * @li The InPort is stopped - */ - size_t skip(size_t count); - - /** - * @brief Checks whether this stream can receive data. - * @returns True if this stream is enabled. False if stream is disabled. - * @pre Stream is valid. - * @see enable() - * @see disable() - * - * If a stream is enabled, packets received for its stream ID are queued - * in the InPort, and the stream may be used for reading. Conversely, - * packets for a disabled stream are discarded, and no reading may be - * performed. - */ - bool enabled() const; - - /** - * @brief Enable this stream for reading data. - * @pre Stream is valid. - * @see enabled() - * @see disable() - * - * The InPort will resume queuing packets for this stream. - */ - void enable(); - - /** - * @brief Disable this stream for reading data. - * @pre Stream is valid. - * @see enable() - * @see enabled() - * - * The InPort will discard any packets that are currently queued for this - * stream, and all future packets for this stream will be discarded upon - * receipt until an end-of-stream is received. - * - * Disabling unwanted streams may improve performance and queueing behavior - * by reducing the number of queued packets on a port. - */ - void disable(); - - /** - * @brief Estimates the number of samples that can be read immediately. - * @returns Number of samples. - * @pre Stream is valid. 
- * - * The number of samples returned by this method is an estimate based on - * the current state of the stream and the input queue. If there are any - * SRI changes or input queue flushes to report, only samples up to that - * point are considered, as a read cannot span those packets. - * - * If the SRI indicates that the data is complex, the returned value is in - * terms of complex samples. - * - * @warning The returned value is not guaranteed; if the input queue - * flushes in between calls, a subsequent call to - * @a read may block or @a tryread may fail. - */ - size_t samplesAvailable(); - - /** - * @brief Checks stream validity. - * @returns True if this stream is not valid. False if the stream is invalid. - * - * Invalid (null) InputStreams are not associated with an active stream in - * an %InPort. If this method returns true, no other methods except - * comparison or assignment may be called. - */ - bool operator! () const; - - /** - * @brief Stream equality comparison. - * @param other Another %InputStream. - * @returns True if and only if both InputStreams reference the same underlying - * stream. - */ - bool operator== (const InputStream& other) const; - - /** - * @brief Returns true if data can be read without blocking. - * @see samplesAvailable() - * - * A stream is considered ready if samplesAvailable() would return a - * non-zero value. - * - * @warning Even if this method returns true, if the input queue flushes - * in between calls, a subsequent call to @a read - * may block or @a tryread may fail. 
- */ - bool ready(); - - private: - /// @cond IMPL - friend class InPort; - InputStream(const BULKIO::StreamSRI&, InPort*); - - bool hasBufferedData(); - void close(); - - class Impl; - boost::shared_ptr _impl; - /// @endcond - }; - - typedef InputStream InCharStream; - typedef InputStream InOctetStream; - typedef InputStream InShortStream; - typedef InputStream InUShortStream; - typedef InputStream InLongStream; - typedef InputStream InULongStream; - typedef InputStream InLongLongStream; - typedef InputStream InULongLongStream; - typedef InputStream InFloatStream; - typedef InputStream InDoubleStream; - -} // end of bulkio namespace - -#endif diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_out_port.cpp b/bulkioInterfaces/libsrc/cpp/bulkio_out_port.cpp index efac87168..aafd92722 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_out_port.cpp +++ b/bulkioInterfaces/libsrc/cpp/bulkio_out_port.cpp @@ -17,154 +17,128 @@ * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ -/******************************************************************************************* +#include +#include - *******************************************************************************************/ -#include "bulkio_out_port.h" +#include "LocalTransport.h" +#include "CorbaTransport.h" #include "bulkio_p.h" -#include "bulkio_time_operators.h" -// Suppress warnings for access to "deprecated" currentSRI member--it's the -// public access that's deprecated, not the member itself +// Suppress warnings for access to deprecated currentSRI member (on gcc 4.4, at +// least, the implicit destructor call from OutPort's destructor emits a +// warning) #pragma GCC diagnostic ignored "-Wdeprecated-declarations" -namespace bulkio { - +namespace bulkio { /* OutPort Constructor Accepts connect/disconnect interfaces for notification when these events occur */ - template < typename PortTraits > - OutPortBase< PortTraits >::OutPortBase(std::string port_name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - Port_Uses_base_impl(port_name), - logger(logger) + template + OutPort::OutPort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB) : + redhawk::NegotiableUsesPort(name) { - if ( !logger ) { + if (!logger) { std::string pname("redhawk.bulkio.outport."); - pname = pname + port_name; - logger = rh_logger::Logger::getLogger(pname); + pname = pname + name; + setLogger(rh_logger::Logger::getLogger(pname)); + } else { + setLogger(logger); } if ( connectCB ) { _connectCB = boost::shared_ptr< ConnectionEventListener >( connectCB, null_deleter() ); } + addConnectListener(this, &OutPort::_connectListenerAdapter); if ( disconnectCB ) { _disconnectCB = boost::shared_ptr< ConnectionEventListener >( disconnectCB, null_deleter() ); } + addDisconnectListener(this, &OutPort::_disconnectListenerAdapter); + LOG_DEBUG( _portLog, "bulkio::OutPort::CTOR port:" << 
name ); + } - recConnectionsRefresh = false; - recConnections.length(0); - - LOG_DEBUG( logger, "bulkio::OutPort::CTOR port:" << name ); + template + OutPort::~OutPort(){ } - template < typename PortTraits > - OutPortBase< PortTraits >::OutPortBase(std::string port_name, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - Port_Uses_base_impl(port_name), - logger() + template + void OutPort::pushSRI(const BULKIO::StreamSRI& H) { + TRACE_ENTER(_portLog, "OutPort::pushSRI" ); + LOG_TRACE(_portLog, "OutPort::pushSRI" ); - std::string pname("redhawk.bulkio.outport."); - pname = pname + port_name; - logger = rh_logger::Logger::getLogger(pname); - if ( connectCB ) { - _connectCB = boost::shared_ptr< ConnectionEventListener >( connectCB, null_deleter() ); - } + const std::string sid(H.streamID); + SCOPED_LOCK lock(updatingPortsLock); // don't want to process while command information is coming in + StreamType stream; + typename StreamMap::iterator existing = streams.find(sid); + if (existing == streams.end()) { + // Insert new SRI + stream = StreamType(H, this); + streams[sid] = stream; + } else { + // Overwrite existing SRI + stream = existing->second; + stream.sri(H); + } + const BULKIO::StreamSRI& sri = stream.sri(); - if ( disconnectCB ) { - _disconnectCB = boost::shared_ptr< ConnectionEventListener >( disconnectCB, null_deleter() ); - } + if (active) { + for (TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + PortTransportType* transport = connection.transport(); + const std::string& connection_id = connection.connectionId(); - recConnectionsRefresh = false; - recConnections.length(0); + // Skip ports known to be dead + if (!transport->isAlive()) { + continue; + } - } + if (!_isStreamRoutedToConnection(sid, connection_id)) { + continue; + } - template < typename PortTraits > - OutPortBase< PortTraits >::~OutPortBase(){ + LOG_DEBUG(_portLog,"pushSRI - PORT:" << name << " CONNECTION:" 
<< connection_id << " SRI streamID:" + << stream.streamID() << " Mode:" << sri.mode << " XDELTA:" << 1.0/sri.xdelta); + try { + transport->pushSRI(sid, sri, stream.modcount()); + } catch (const redhawk::FatalTransportError& err) { + LOG_ERROR(_portLog, "PUSH-SRI FAILED " << err.what() + << " PORT/CONNECTION: " << name << "/" << connection_id); + } + } + } + TRACE_EXIT(_portLog, "OutPort::pushSRI"); } - - template < typename PortTraits > - void OutPortBase< PortTraits >::pushSRI(const BULKIO::StreamSRI& H) { - - - TRACE_ENTER(logger, "OutPort::pushSRI" ); - - - typename ConnectionsList::iterator i; - - SCOPED_LOCK lock(updatingPortsLock); // don't want to process while command information is coming in - - std::string sid( H.streamID ); - typename OutPortSriMap::iterator sri_iter; - sri_iter= currentSRIs.find( sid ); - if ( sri_iter == currentSRIs.end() ) { - SriMapStruct sri_ctx( H ); - // need to use insert since we do not have default CTOR for SriMapStruct - currentSRIs.insert( OutPortSriMap::value_type( sid, sri_ctx ) ); - sri_iter= currentSRIs.find( sid ); - } - else { - // overwrite the SRI - sri_iter->second.sri = H; - - // reset connections list to be empty - sri_iter->second.connections.clear(); - } - - if (active) { - for (i = outConnections.begin(); i != outConnections.end(); ++i) { - if (!_isStreamRoutedToConnection(sid, i->second)) { - continue; - } - - std::string cid = i->second; - LOG_DEBUG(logger,"pushSRI - PORT:" << name << " CONNECTION:" << i->second << " SRI streamID:" << H.streamID << " Mode:" << H.mode << " XDELTA:" << 1.0/H.xdelta ); - try { - i->first->pushSRI(H); - sri_iter->second.connections.insert( i->second ); - } catch( CORBA::TRANSIENT &ex ) { - if ( reportConnectionErrors(cid) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (Transient), PORT/CONNECTION: " << name << "/" << cid ); - } - } catch( CORBA::COMM_FAILURE &ex) { - if ( reportConnectionErrors(cid) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (CommFailure), PORT/CONNECTION: " << name << 
"/" << cid); - } - } catch( CORBA::SystemException &ex) { - if ( reportConnectionErrors(cid) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (SystemException), PORT/CONNECTION: " << name << "/" << cid ); - } - } catch(...) { - if ( reportConnectionErrors(cid) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED, (UnknownException), PORT/CONNECTION: " << name << "/" << cid ); - } - } + template + void OutPort::_connectListenerAdapter(const std::string& connectionId) + { + if (_connectCB) { + (*_connectCB)(connectionId.c_str()); } - } - - TRACE_EXIT(logger, "OutPort::pushSRI" ); - return; } + template + void OutPort::_disconnectListenerAdapter(const std::string& connectionId) + { + if (_disconnectCB) { + (*_disconnectCB)(connectionId.c_str()); + } + } - template < typename PortTraits > - bool OutPortBase< PortTraits >::_isStreamRoutedToConnection( + template + bool OutPort::_isStreamRoutedToConnection( const std::string& streamID, const std::string& connectionID) { @@ -183,625 +157,317 @@ namespace bulkio { return !portListed; } - template < typename PortTraits > - void OutPortBase< PortTraits >::_pushPacketToPort( - PortPtrType port, - PushArgumentType data, - const BULKIO::PrecisionUTCTime& T, - bool EOS, - const char* streamID) - { - port->pushPacket(data, T, EOS, streamID); - } - template < typename PortTraits > - void OutPortBase< PortTraits >::_sendEOS( - PortPtrType port, - const std::string& streamID) + template + typename OutPort::StreamType OutPort::_getStream(const std::string& streamID) { - port->pushPacket(PortSequenceType(), bulkio::time::utils::notSet(), true, streamID.c_str()); - } - + typename StreamMap::iterator existing = streams.find(streamID); + if (existing == streams.end()) { + LOG_TRACE(_portLog, "Creating new stream '" << streamID << "' with default SRI"); - template < typename PortTraits > - size_t OutPortBase< PortTraits >::_dataLength(PushArgumentType data) - { - return data.length(); + // No SRI associated with the stream ID, create a default one and add 
+ // it to the list; it will get pushed to downstream connections below + StreamType stream(bulkio::sri::create(streamID), this); + streams[streamID] = stream; + return stream; + } else { + return existing->second; + } } - template < typename PortTraits > - void OutPortBase< PortTraits >::_pushSinglePacket( - PushArgumentType data, + template + void OutPort::_sendPacket( + const BufferType& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { // don't want to process while command information is coming in SCOPED_LOCK lock(this->updatingPortsLock); - _pushPacketLocked(data, T, EOS, streamID); - } - - template < typename PortTraits > - void OutPortBase< PortTraits >::_pushPacketLocked( - PushArgumentType data, - const BULKIO::PrecisionUTCTime& T, - bool EOS, - const std::string& streamID) - { - // grab SRI context - typename OutPortSriMap::iterator sri_iter = currentSRIs.find( streamID ); - if (sri_iter == currentSRIs.end()) { - // No SRI associated with the stream ID, create a default one and add - // it to the list; it will get pushed to downstream connections below - SriMapStruct sri_ctx(bulkio::sri::create(streamID)); - // need to use insert since we do not have default CTOR for SriMapStruct - sri_iter = currentSRIs.insert(std::make_pair(streamID, sri_ctx)).first; - } - const size_t length = _dataLength(data); + // grab SRI context + StreamType stream = _getStream(streamID); - if (active) { - typename ConnectionsList::iterator port; - for (port = outConnections.begin(); port != outConnections.end(); port++) { - // Check whether filtering is enabled and if this connection should - // receive the stream - if (!_isStreamRoutedToConnection(streamID, port->second)) { - continue; - } + if (active) { + for (TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + PortTransportType* transport = connection.transport(); + const std::string& connection_id = connection.connectionId(); + // Skip 
ports known to be dead + if (!transport->isAlive()) { + continue; + } - if ( sri_iter != currentSRIs.end() && sri_iter->second.connections.count( port->second ) == 0 ) { - this->_pushSRI( port, sri_iter->second ); - } + // Check whether filtering is enabled and if this connection should + // receive the stream + if (!_isStreamRoutedToConnection(streamID, connection_id)) { + continue; + } - try { - _pushPacketToPort(port->first, data, T, EOS, streamID.c_str()); - stats[port->second].update(length, 0, EOS, streamID); - } catch( CORBA::TRANSIENT &ex) { - if ( reportConnectionErrors(port->second) ) { - LOG_ERROR( logger, "PUSH-PACKET FAILED (Transient), PORT/CONNECTION: " << name << "/" << port->second ); - } - } catch( CORBA::COMM_FAILURE &ex) { - if ( reportConnectionErrors(port->second) ) { - LOG_ERROR( logger, "PUSH-PACKET FAILED (CommFailure), PORT/CONNECTION: " << name << "/" << port->second ); - } - } catch( CORBA::SystemException &ex) { - if ( reportConnectionErrors(port->second) ) { - LOG_ERROR( logger, "PUSH-PACKET FAILED (SystemFailure), PORT/CONNECTION: " << name << "/" << port->second ); - } - } catch(...) 
{ - if ( reportConnectionErrors(port->second) ) { - LOG_ERROR( logger, "PUSH-PACKET FAILED, (UnknownException), PORT/CONNECTION: " << name << "/" << port->second ); - } - } + try { + transport->pushSRI(streamID, stream.sri(), stream.modcount()); + transport->pushPacket(data, T, EOS, streamID, stream.sri()); + } catch (const redhawk::FatalTransportError& err) { + LOG_ERROR(_portLog, "PUSH-PACKET FAILED " << err.what() + << " PORT/CONNECTION: " << name << "/" << connection_id); + transport->setAlive(false); + } catch (const redhawk::TransportError& err) { + LOG_ERROR(_portLog, "pushPacket error on connection '" << connection_id << "': " << err.what()); + } } - } - - // if we have end of stream removed old sri - try { - if ( EOS ) currentSRIs.erase(streamID); - } - catch(...){ - } + } + // if we have end of stream removed old sri + if (EOS) { + streams.erase(streamID); + } } - template < typename PortTraits > - BULKIO::UsesPortStatisticsSequence * OutPortBase< PortTraits >::statistics() + template + BULKIO::UsesPortStatisticsSequence* OutPort::statistics() { - SCOPED_LOCK lock(updatingPortsLock); - BULKIO::UsesPortStatisticsSequence_var recStat = new BULKIO::UsesPortStatisticsSequence(); - recStat->length(outConnections.size()); - for (unsigned int i = 0; i < outConnections.size(); i++) { - recStat[i].connectionId = CORBA::string_dup(outConnections[i].second.c_str()); - recStat[i].statistics = stats[outConnections[i].second].retrieve(); - } - return recStat._retn(); + SCOPED_LOCK lock(updatingPortsLock); + BULKIO::UsesPortStatisticsSequence_var recStat = new BULKIO::UsesPortStatisticsSequence(); + for (TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + BULKIO::UsesPortStatistics stat; + stat.connectionId = connection.connectionId().c_str(); + stat.statistics = connection.transport()->getStatistics(); + stat.statistics.portName = name.c_str(); + ossie::corba::push_back(recStat, stat); + } + return recStat._retn(); } 
- template < typename PortTraits > - BULKIO::PortUsageType OutPortBase< PortTraits >::state() + template + BULKIO::PortUsageType OutPort::state() { SCOPED_LOCK lock(updatingPortsLock); - if (outConnections.size() > 0) { - return BULKIO::ACTIVE; - } else { + if (_connections.empty()) { return BULKIO::IDLE; + } else { + return BULKIO::ACTIVE; } - - return BULKIO::BUSY; } - template < typename PortTraits > - void OutPortBase< PortTraits >::enableStats(bool enable) + template + void OutPort::enableStats(bool enable) { - for (unsigned int i = 0; i < outConnections.size(); i++) { - stats[outConnections[i].second].setEnabled(enable); - } + // Statistics are always collected } - - template < typename PortTraits > - ExtendedCF::UsesConnectionSequence * OutPortBase< PortTraits >::connections() + template + redhawk::UsesTransport* + OutPort::_createLocalTransport(PortBase* port, CORBA::Object_ptr object, const std::string& connectionId) { - SCOPED_LOCK lock(updatingPortsLock); // don't want to process while command information is coming in - if (recConnectionsRefresh) { - recConnections.length(outConnections.size()); - for (unsigned int i = 0; i < outConnections.size(); i++) { - recConnections[i].connectionId = CORBA::string_dup(outConnections[i].second.c_str()); - recConnections[i].port = CORBA::Object::_duplicate(outConnections[i].first); - } - recConnectionsRefresh = false; - } - ExtendedCF::UsesConnectionSequence_var retVal = new ExtendedCF::UsesConnectionSequence(recConnections); - // NOTE: You must delete the object that this function returns! 
- return retVal._retn(); + return LocalTransport::Factory(this, port); } - template < typename PortTraits > - void OutPortBase< PortTraits >::connectPort(CORBA::Object_ptr connection, const char* connectionId) + template + redhawk::UsesTransport* + OutPort::_createTransport(CORBA::Object_ptr object, const std::string& connectionId) { - TRACE_ENTER(logger, "OutPort::connectPort" ); - { - SCOPED_LOCK lock(updatingPortsLock); // don't want to process while command information is coming in PortVarType port; try { - port = PortType::_narrow(connection); - if (CORBA::is_nil(port)) { - throw CF::Port::InvalidPort(1, "Unable to narrow"); - } - } - catch(...) { - LOG_ERROR( logger, "CONNECT FAILED: UNABLE TO NARROW ENDPOINT, USES PORT:" << name ); - throw CF::Port::InvalidPort(1, "Unable to narrow"); - } - outConnections.push_back(std::make_pair(port, connectionId)); - if (stats.count(connectionId) == 0) { - stats.insert(std::make_pair(connectionId, linkStatistics(name, sizeof(NativeType)))); + port = PortType::_narrow(object); + if (CORBA::is_nil(port)) { + throw CF::Port::InvalidPort(1, "Unable to narrow"); + } + } catch (const CORBA::SystemException&) { + LOG_ERROR( _portLog, "CONNECT FAILED: UNABLE TO NARROW ENDPOINT, USES PORT:" << name ); + throw CF::Port::InvalidPort(1, "Unable to narrow"); } - active = true; - recConnectionsRefresh = true; - LOG_DEBUG( logger, "CONNECTION ESTABLISHED, PORT/CONNECTION_ID:" << name << "/" << connectionId ); - - } - if (_connectCB) (*_connectCB)(connectionId); - - TRACE_EXIT(logger, "OutPort::connectPort" ); + return CorbaTransportFactory::Create(this, port); } - template < typename PortTraits > - void OutPortBase< PortTraits >::disconnectPort(const char* connectionId) + template + typename OutPort::StreamType OutPort::getStream(const std::string& streamID) { - TRACE_ENTER(logger, "OutPort::disconnectPort" ); - { - SCOPED_LOCK lock(updatingPortsLock); // don't want to process while command information is coming in - - const std::string 
cid(connectionId); - for (typename ConnectionsList::iterator ii = outConnections.begin(); ii != outConnections.end(); ++ii) { - if (ii->second != connectionId) { - continue; - } - - typename OutPortSriMap::iterator cSRIs = currentSRIs.begin(); - - // send an EOS for every connection that's listed for this SRI - for (; cSRIs!=currentSRIs.end(); cSRIs++) { - std::string cSriSid(cSRIs->second.sri.streamID); - - // Check if we have sent out sri/data to the connection - if ( cSRIs->second.connections.count( cid ) != 0 ) { - if (_isStreamRoutedToConnection(cSriSid, cid)) { - try { - _sendEOS(ii->first, cSriSid); - } catch(...) { - } - } - } - - // remove connection id from sri connections list - cSRIs->second.connections.erase( cid ); - - } - LOG_DEBUG( logger, "DISCONNECT, PORT/CONNECTION: " << name << "/" << connectionId ); - stats.erase(ii->second); - outConnections.erase(ii); - break; - } - - if (outConnections.size() == 0) { - active = false; + boost::mutex::scoped_lock lock(updatingPortsLock); + typename StreamMap::iterator stream = streams.find(streamID); + if (stream != streams.end()) { + return stream->second; + } else { + return StreamType(); } - recConnectionsRefresh = true; - } - if (_disconnectCB) (*_disconnectCB)(connectionId); - - TRACE_EXIT(logger, "OutPort::disconnectPort" ); } - template < typename PortTraits > - void OutPortBase< PortTraits >::_pushSRI( typename ConnectionsList::iterator connPair, SriMapStruct &sri_ctx) + template + typename OutPort::StreamList OutPort::getStreams() { - TRACE_ENTER(logger, "OutPort::_pushSRI" ); - - // assume parent will lock us... 
- if ( connPair != outConnections.end() ) { - - std::string cid = connPair->second; - // push SRI over port instance - try { - connPair->first->pushSRI(sri_ctx.sri); - sri_ctx.connections.insert( connPair->second ); - LOG_TRACE( logger, "_pushSRI() connection_id/streamID " << connPair->second << "/" << sri_ctx.sri.streamID ); - } catch( CORBA::TRANSIENT &ex ) { - if ( reportConnectionErrors(cid) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (Transient), PORT/CONNECTION: " << name << "/" << cid ); - } - } catch( CORBA::COMM_FAILURE &ex) { - if ( reportConnectionErrors(cid) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (CommFailure), PORT/CONNECTION: " << name << "/" << cid); - } - } catch( CORBA::SystemException &ex) { - if ( reportConnectionErrors(cid) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED (SystemException), PORT/CONNECTION: " << name << "/" << cid ); - } - } catch(...) { - if ( reportConnectionErrors(cid) ) { - LOG_ERROR( logger, "PUSH-SRI FAILED, (UnknownException), PORT/CONNECTION: " << name << "/" << cid ); - } + StreamList result; + boost::mutex::scoped_lock lock(updatingPortsLock); + for (typename StreamMap::const_iterator stream = streams.begin(); stream != streams.end(); ++stream) { + result.push_back(stream->second); } - } - - TRACE_EXIT(logger, "OutPort::_pushSRI" ); - return; + return result; } - - template < typename PortTraits > - void OutPortBase< PortTraits >::_pushSRI( const std::string &connectionId, SriMapStruct &sri_ctx) + template + typename OutPort::StreamType OutPort::createStream(const std::string& streamID) { - TRACE_ENTER(logger, "OutPort::_pushSRI" ); - - typename ConnectionsList::iterator i; - - for ( i=outConnections.begin(); i != outConnections.end(); i++ ) { - if ( i->second == connectionId ) { - this->_pushSRI( i, sri_ctx ); - break; - } + boost::mutex::scoped_lock lock(updatingPortsLock); + typename StreamMap::iterator existing = streams.find(streamID); + if (existing != streams.end()) { + return existing->second; } - TRACE_EXIT(logger, 
"OutPort::_pushSRI" ); - return; + StreamType stream(bulkio::sri::create(streamID), this); + streams[streamID] = stream; + return stream; } - - template < typename PortTraits > - bool OutPortBase< PortTraits >::reportConnectionErrors( const std::string &cid ) + template + typename OutPort::StreamType OutPort::createStream(const BULKIO::StreamSRI& sri) { - TRACE_ENTER(logger, "OutPort::reportConnectionErrors" ); - bool retval=false; - try { - retval = stats[cid].connectionErrors(1) < 11; + boost::mutex::scoped_lock lock(updatingPortsLock); + const std::string streamID(sri.streamID); + typename StreamMap::iterator existing = streams.find(streamID); + if (existing != streams.end()) { + // Update the stream's SRI from the argument + existing->second.sri(sri); + return existing->second; } - catch(...) - {} - return retval; + StreamType stream(sri, this); + streams[streamID] = stream; + return stream; } - template < typename PortTraits > - bulkio::SriMap OutPortBase< PortTraits >::getCurrentSRI() + template + bulkio::SriMap OutPort::getCurrentSRI() { bulkio::SriMap ret; SCOPED_LOCK lock(updatingPortsLock); // restrict access till method completes - typename OutPortSriMap::iterator cSri = currentSRIs.begin(); - for ( ; cSri != currentSRIs.end(); cSri++ ) { - ret[cSri->first] = std::make_pair< BULKIO::StreamSRI, bool >( cSri->second.sri, false ); + for (typename StreamMap::iterator stream = streams.begin() ; stream != streams.end(); ++stream) { + ret[stream->first] = std::make_pair(stream->second.sri(), false); } return ret; } - template < typename PortTraits > - bulkio::SriList OutPortBase< PortTraits >::getActiveSRIs() + template + bulkio::SriList OutPort::getActiveSRIs() { bulkio::SriList ret; SCOPED_LOCK lock(updatingPortsLock); // restrict access till method completes - typename OutPortSriMap::iterator cSri = currentSRIs.begin(); - for ( ; cSri != currentSRIs.end(); cSri++ ) { - ret.push_back( cSri->second.sri ); + for (typename StreamMap::iterator stream = 
streams.begin() ; stream != streams.end(); ++stream) { + ret.push_back(stream->second.sri()); } return ret; } - template < typename PortTraits > - typename OutPortBase< PortTraits >::ConnectionsList OutPortBase< PortTraits >::getConnections() + template + typename OutPort::ConnectionsList OutPort::getConnections() { - SCOPED_LOCK lock(updatingPortsLock); // restrict access till method completes - return outConnections; + SCOPED_LOCK lock(updatingPortsLock); // restrict access till method completes + ConnectionsList outConnections; + + for (ConnectionList::iterator iter = _connections.begin(); iter != _connections.end(); ++iter) { + PortVarType port = ossie::corba::_narrowSafe((*iter)->objref); + outConnections.push_back(std::make_pair(port, (*iter)->connectionId)); + } + + return outConnections; } - template < typename PortTraits > - void OutPortBase< PortTraits >::setNewConnectListener(ConnectionEventListener *newListener) + template + void OutPort::setNewConnectListener(ConnectionEventListener *newListener) { _connectCB = boost::shared_ptr< ConnectionEventListener >(newListener, null_deleter()); } - template < typename PortTraits > - void OutPortBase< PortTraits >::setNewConnectListener(ConnectionEventCallbackFn newListener) + template + void OutPort::setNewConnectListener(ConnectionEventCallbackFn newListener) { _connectCB = boost::make_shared< StaticConnectionListener >( newListener ); } - template < typename PortTraits > - void OutPortBase< PortTraits >::setNewDisconnectListener(ConnectionEventListener *newListener) + template + void OutPort::setNewDisconnectListener(ConnectionEventListener *newListener) { _disconnectCB = boost::shared_ptr< ConnectionEventListener >(newListener, null_deleter()); } - template < typename PortTraits > - void OutPortBase< PortTraits >::setNewDisconnectListener(ConnectionEventCallbackFn newListener) + template + void OutPort::setNewDisconnectListener(ConnectionEventCallbackFn newListener) { _disconnectCB = boost::make_shared< 
StaticConnectionListener >( newListener ); } - template < typename PortTraits > - void OutPortBase< PortTraits >::setLogger(LOGGER_PTR newLogger) - { - logger = newLogger; - } - - template < typename PortTraits > - std::string OutPortBase< PortTraits >::getRepid() const { + template + std::string OutPort::getRepid() const { return PortType::_PD_repoId; //return "IDL:CORBA/Object:1.0"; } - - /* - * Specializations of base class methods for dataXML ports - */ - - template <> - void OutPortBase< XMLPortTraits >::_pushPacketToPort( - BULKIO::dataXML_ptr port, - const char* data, - const BULKIO::PrecisionUTCTime& /*unused*/, - bool EOS, - const char* streamID) - { - port->pushPacket(data, EOS, streamID); - } - - - template <> - void OutPortBase< XMLPortTraits >::_sendEOS( - BULKIO::dataXML_ptr port, - const std::string& streamID) - { - port->pushPacket("", true, streamID.c_str()); - } - - - template <> - size_t OutPortBase< XMLPortTraits >::_dataLength(const char* data) - { - if (!data) { - return 0; - } - return strlen(data); - } - - - /* - * Specializations of base class methods for dataFile ports - */ - - template <> - void OutPortBase< FilePortTraits >::_sendEOS( - BULKIO::dataFile_ptr port, - const std::string& streamID) - { - port->pushPacket("", bulkio::time::utils::notSet(), true, streamID.c_str()); - } - - - template <> - size_t OutPortBase< FilePortTraits >::_dataLength(const char* /*unused*/) - { - return 1; - } - - /* OutPort Constructor Accepts connect/disconnect interfaces for notification when these events occur */ - template < typename PortTraits > - OutPort< PortTraits >::OutPort(std::string port_name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - OutPortBase(port_name, logger, connectCB, disconnectCB) + template + OutNumericPort::OutNumericPort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB ) : + OutPort(name, logger, 
connectCB, disconnectCB) { } - template < typename PortTraits > - OutPort< PortTraits >::OutPort(std::string port_name, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - OutPortBase(port_name) + template + OutNumericPort::OutNumericPort(const std::string& name, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB) : + OutPort(name, LOGGER_PTR(), connectCB, disconnectCB) { } - template < typename PortTraits > - OutPort< PortTraits >::~OutPort() + template + OutNumericPort::~OutNumericPort() { } - - /* - * Push a packet whose payload cannot fit within the CORBA limit. - * The packet is broken down into sub-packets and sent via multiple pushPacket - * calls. The EOS is set to false for all of the sub-packets, except for - * the last sub-packet, who uses the input EOS argument. - */ - template < typename PortTraits > - void OutPort< PortTraits>::_pushOversizedPacket( - const TransportType* buffer, - size_t size, + template + void OutNumericPort::pushPacket( + const VectorType& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { - // don't want to process while command information is coming in - SCOPED_LOCK lock(this->updatingPortsLock); - - // Multiply by some number < 1 to leave some margin for the CORBA header - const size_t maxPayloadSize = (size_t) (bulkio::Const::MaxTransferBytes() * .9); - - size_t maxSamplesPerPush = maxPayloadSize/sizeof(TransportType); - typename OutPortSriMap::iterator sri_iter; - sri_iter = currentSRIs.find( streamID ); - // Determine xdelta for this streamID to be used for time increment for subpackets - double xdelta = 0.0; - size_t itemSize = 1; - if ( sri_iter != currentSRIs.end() ) { - xdelta = sri_iter->second.sri.xdelta; - itemSize = sri_iter->second.sri.mode?2:1; - } - - if ( sri_iter != currentSRIs.end() ) { - if (sri_iter->second.sri.subsize == 0) { - // make sure maxSamplesPerPush is even so that complex data case is handled properly - if 
(maxSamplesPerPush%2 != 0){ - maxSamplesPerPush--; - } - } else { // this is framed data, so it must be consistent with both subsize and complex - while (maxSamplesPerPush%sri_iter->second.sri.subsize != 0) { - maxSamplesPerPush -= maxSamplesPerPush%(sri_iter->second.sri.subsize); - if (maxSamplesPerPush%2 != 0){ - maxSamplesPerPush--; - } - } - } - } else { - if (maxSamplesPerPush%2 != 0){ - maxSamplesPerPush--; - } - } - - // Always do at least one push (may be empty), ensuring that all samples - // are pushed - size_t samplesRemaining = size; - - // Initialize time of first subpacket - BULKIO::PrecisionUTCTime packetTime = T; - - do { - // Don't send more samples than are remaining - const size_t pushSize = std::min(samplesRemaining, maxSamplesPerPush); - samplesRemaining -= pushSize; - - // Send end-of-stream as false for all sub-packets except for the - // last one (when there are no samples remaining after this push), - // which gets the input EOS. - bool packetEOS = false; - if (samplesRemaining == 0) { - packetEOS = EOS; - } - - // Wrap a non-owning CORBA sequence (last argument is whether to free - // the buffer on destruction) around this sub-packet's data - const PortSequenceType subPacket(pushSize, pushSize, const_cast(buffer), false); - LOG_TRACE(logger,"_pushOversizedPacket calling pushPacket with pushSize " << pushSize << " and packetTime twsec: " << packetTime.twsec << " tfsec: " << packetTime.tfsec) - this->_pushPacketLocked(subPacket, packetTime, packetEOS, streamID); - - // Synthesize the next packet timestamp - if (packetTime.tcstatus == BULKIO::TCS_VALID) { - packetTime += (pushSize/itemSize)* xdelta; - } - - // Advance buffer to next sub-packet boundary - buffer += pushSize; - } while (samplesRemaining > 0); + this->_sendPacket(BufferType::make_transient(&data[0], data.size()), T, EOS, streamID); } - - template < typename PortTraits > - void OutPort< PortTraits >::pushPacket( - NativeSequenceType & data, - const BULKIO::PrecisionUTCTime& T, - 
bool EOS, - const std::string& streamID) - { - // Use const alias to start of buffer and defer to pointer-based push - const TransportType* buffer = reinterpret_cast(&data[0]); - const size_t size = data.size(); - pushPacket(buffer, size, T, EOS, streamID); - } - - template < typename PortTraits > - void OutPort< PortTraits >::pushPacket( - const DataBufferType & data, - const BULKIO::PrecisionUTCTime& T, - bool EOS, - const std::string& streamID) - { - // Use const alias to start of buffer and defer to pointer-based push - const TransportType* buffer = reinterpret_cast(&data[0]); - const size_t size = data.size(); - pushPacket(buffer, size, T, EOS, streamID); - } - - template < typename PortTraits > - void OutPort< PortTraits >::pushPacket( + template + void OutNumericPort::pushPacket( const TransportType* data, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, - const std::string& streamID) { - - TRACE_ENTER(logger, "OutPort::pushPacket" ); - - _pushOversizedPacket(data, size, T, EOS, streamID); - - TRACE_EXIT(logger, "OutPort::pushPacket" ); - } - - template < typename PortTraits > - typename OutPort< PortTraits >::StreamType OutPort< PortTraits >::createStream(const std::string& streamID) - { - BULKIO::StreamSRI sri = bulkio::sri::create(streamID); - return createStream(sri); - } - - template < typename PortTraits > - typename OutPort< PortTraits >::StreamType OutPort< PortTraits >::createStream(const BULKIO::StreamSRI& sri) + const std::string& streamID) { - return StreamType(sri, this); + const NativeType* ptr = reinterpret_cast(data); + this->_sendPacket(BufferType::make_transient(ptr, size), T, EOS, streamID); } - OutCharPort::OutCharPort( std::string name, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ): - OutPort < CharPortTraits >(name,connectCB, disconnectCB) + OutCharPort::OutCharPort(const std::string& name, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB): + 
OutNumericPort(name,connectCB, disconnectCB) { } - OutCharPort::OutCharPort( std::string name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - OutPort < CharPortTraits >(name, logger, connectCB, disconnectCB ) + OutCharPort::OutCharPort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB) : + OutNumericPort(name, logger, connectCB, disconnectCB) { } @@ -809,106 +475,120 @@ namespace bulkio { void OutCharPort::pushPacket(const Int8* buffer, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { const TransportType* data = reinterpret_cast(buffer); - OutPort::pushPacket(data, size, T, EOS, streamID); + OutNumericPort::pushPacket(data, size, T, EOS, streamID); } void OutCharPort::pushPacket(const char* buffer, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { const TransportType* data = reinterpret_cast(buffer); - OutPort::pushPacket(data, size, T, EOS, streamID); + OutNumericPort::pushPacket(data, size, T, EOS, streamID); } void OutCharPort::pushPacket(const std::vector< Int8 >& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { const TransportType* buffer = reinterpret_cast(&data[0]); - OutPort::pushPacket(buffer, data.size(), T, EOS, streamID); + OutNumericPort::pushPacket(buffer, data.size(), T, EOS, streamID); } void OutCharPort::pushPacket(const std::vector< Char >& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { const TransportType* buffer = reinterpret_cast(&data[0]); - OutPort::pushPacket(buffer, data.size(), T, EOS, streamID); + OutNumericPort::pushPacket(buffer, data.size(), T, EOS, streamID); } - OutFilePort::OutFilePort ( std::string name, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - OutPortBase < FilePortTraits >(name,connectCB, disconnectCB ) + 
OutBitPort::OutBitPort(const std::string& name, LOGGER_PTR logger) : + OutPort(name, logger) { - } - OutFilePort::OutFilePort( std::string name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - OutPortBase < FilePortTraits >(name,logger,connectCB, disconnectCB ) + void OutBitPort::pushPacket(const redhawk::shared_bitbuffer& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { - + _sendPacket(data, T, EOS, streamID); } - void OutFilePort::pushPacket( const char* URL, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) + OutFilePort::OutFilePort(const std::string& name, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB) : + OutPort(name, LOGGER_PTR(), connectCB, disconnectCB) { - _pushSinglePacket(URL, T, EOS, streamID); } - void OutFilePort::pushPacket( const std::string& URL, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) + OutFilePort::OutFilePort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB) : + OutPort(name,logger, connectCB, disconnectCB) { - _pushSinglePacket(URL.c_str(), T, EOS, streamID); } - void OutFilePort::pushPacket( const char *data, bool EOS, const std::string& streamID) + void OutFilePort::pushPacket(const std::string& URL, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { - _pushSinglePacket(data, bulkio::time::utils::now(), EOS, streamID); + _sendPacket(URL, T, EOS, streamID); } - - OutXMLPort::OutXMLPort ( std::string name, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - OutPortBase < XMLPortTraits >(name,connectCB, disconnectCB ) + void OutFilePort::pushPacket(const char* URL, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { + std::string url_out; + if (URL) { + url_out = URL; + } + this->pushPacket(url_out, T, EOS, 
streamID); + } + void OutFilePort::pushPacket(const char *data, bool EOS, const std::string& streamID) + { + this->pushPacket(data, bulkio::time::utils::now(), EOS, streamID); } - OutXMLPort::OutXMLPort( std::string name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB, - ConnectionEventListener *disconnectCB ) : - OutPortBase < XMLPortTraits >(name,logger,connectCB, disconnectCB ) + OutXMLPort::OutXMLPort(const std::string& name, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB) : + OutPort(name, LOGGER_PTR(), connectCB, disconnectCB) { - } - void OutXMLPort::pushPacket( const char *data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) + OutXMLPort::OutXMLPort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB, + ConnectionEventListener *disconnectCB) : + OutPort(name,logger,connectCB, disconnectCB) { - _pushSinglePacket(data, T, EOS, streamID); } - void OutXMLPort::pushPacket( const char *data, bool EOS, const std::string& streamID) + void OutXMLPort::pushPacket(const char *data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { - // The time argument is never dereferenced for dataXML, so it is safe to - // pass a null - BULKIO::PrecisionUTCTime* time = 0; - _pushSinglePacket(data, *time, EOS, streamID); + std::string data_out; + if (data) { + data_out = data; + } + _sendPacket(data_out, T, EOS, streamID); } - void OutXMLPort::pushPacket( const std::string& data, bool EOS, const std::string& streamID) + void OutXMLPort::pushPacket(const std::string& data, bool EOS, const std::string& streamID) { - // The time argument is never dereferenced for dataXML, so it is safe to - // pass a null - BULKIO::PrecisionUTCTime* time = 0; - _pushSinglePacket(data.c_str(), *time, EOS, streamID); + // XML ports do not officially support timestamps, although the port + // implementation includes it (because it's templatized); always pass + // "not set" for 
consistency + _sendPacket(data, bulkio::time::utils::notSet(), EOS, streamID); + } + + void OutXMLPort::pushPacket(const char* data, bool EOS, const std::string& streamID) + { + std::string data_out; + if (data) { + data_out = data; + } + this->pushPacket(data_out, EOS, streamID); } @@ -919,24 +599,13 @@ namespace bulkio { // link against the template. // -#define INSTANTIATE_BASE_TEMPLATE(x) \ - template class OutPortBase; - #define INSTANTIATE_TEMPLATE(x) \ - INSTANTIATE_BASE_TEMPLATE(x); template class OutPort; - - INSTANTIATE_TEMPLATE(CharPortTraits); - INSTANTIATE_TEMPLATE(OctetPortTraits); - INSTANTIATE_TEMPLATE(ShortPortTraits); - INSTANTIATE_TEMPLATE(UShortPortTraits); - INSTANTIATE_TEMPLATE(LongPortTraits); - INSTANTIATE_TEMPLATE(ULongPortTraits); - INSTANTIATE_TEMPLATE(LongLongPortTraits); - INSTANTIATE_TEMPLATE(ULongLongPortTraits); - INSTANTIATE_TEMPLATE(FloatPortTraits); - INSTANTIATE_TEMPLATE(DoublePortTraits); - - INSTANTIATE_BASE_TEMPLATE(FilePortTraits); - INSTANTIATE_BASE_TEMPLATE(XMLPortTraits); + template class OutPort; + +#define INSTANTIATE_NUMERIC_TEMPLATE(x) \ + template class OutNumericPort; + + FOREACH_PORT_TYPE(INSTANTIATE_TEMPLATE); + FOREACH_NUMERIC_PORT_TYPE(INSTANTIATE_NUMERIC_TEMPLATE); } // end of bulkio namespace diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_out_port.h b/bulkioInterfaces/libsrc/cpp/bulkio_out_port.h deleted file mode 100644 index f0fe6e53d..000000000 --- a/bulkioInterfaces/libsrc/cpp/bulkio_out_port.h +++ /dev/null @@ -1,826 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -#ifndef __bulkio_out_port_h -#define __bulkio_out_port_h - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "bulkio_base.h" -#include "bulkio_traits.h" -#include "bulkio_callbacks.h" -#include "bulkio_out_stream.h" - -namespace bulkio { - - // - // OutPortBase - // - // Base template for data transfers between BULKIO ports. This class is defined by 2 trait classes - // PortTraits - This template provides the context for the port's middleware transport classes and they base data types - // passed between port objects - // - // - template < typename PortTraits > - class OutPortBase : public Port_Uses_base_impl -#ifdef BEGIN_AUTOCOMPLETE_IGNORE - , public virtual POA_BULKIO::UsesPortStatisticsProvider -#endif - { - - public: - - typedef PortTraits Traits; - - // - // Port Variable Definition - // - typedef typename Traits::PortVarType PortVarType; - - // - // BULKIO Interface Type - // - typedef typename Traits::PortType PortType; - - // - // Port pointer type - // - typedef typename PortType::_ptr_type PortPtrType; - - // - // Sequence container used during actual pushPacket call - // - typedef typename Traits::SequenceType PortSequenceType; - - // - // True type of argument to pushPacket, typically "const PortSequenceType&" - // except for dataXML and dataFile (which use "const char*") - // - typedef typename Traits::PushType PushArgumentType; - - // - // Data type of items passed into the pushPacket method - // - typedef typename Traits::NativeType NativeType; - - // - // ConnectionList Definition 
- // - typedef typename bulkio::Connections< PortVarType >::List ConnectionsList; - - // - // Mapping of Stream IDs to SRI Map/Refresh objects - // - typedef std::map< std::string, SriMapStruct > OutPortSriMap; - - - // - // OutPortBase Creates a uses port object for publishing data to the framework - // - // @param port_name name assigned to the port located in scd.xml file - // @param connectionCB callback that will be called when the connectPort method is called - // @pararm disconnectDB callback that receives notification when a disconnectPort happens - // - OutPortBase(std::string port_name, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - OutPortBase(std::string port_name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - - // - // virtual destructor to clean up resources - // - virtual ~OutPortBase(); - - // - // Interface used by framework to connect/disconnect ports together and introspection of connection states - // - - // - // connections - Return a list of connection objects and identifiers for each connection made by connectPort - // - // @return ExtendedCF::UsesConnectionSequence * List of connection objects and identifiers - // - virtual ExtendedCF::UsesConnectionSequence * connections(); - - // - // connectPort - Called by the framework to connect this port to a Provides port object, the connection is established - // via the association and identified by the connectionId string, no formal "type capatablity" or "bukio interface support" - // is resolved at this time. All data flow occurs from point A to B via the pushPacket/pushSRI interface. 
- // - // @param CORBA::Object_ptr pointer to an instance of a Provides port - // @param connectionsId identifer for this connection, allows for external users to reference the connection association - // - virtual void connectPort(CORBA::Object_ptr connection, const char* connectionId); - - // - // disconnectPort - Called by the framework to disconnect this port from the Provides port object. The port basicall removes - // the association to the provides port that was established with the connectionId. - // - // @param connectionsId identifer for this connection, allows for external users to reference the connection association - virtual void disconnectPort(const char* connectionId); - - void updateConnectionFilter(const std::vector &_filterTable) { - SCOPED_LOCK lock(updatingPortsLock); // don't want to process while command information is coming in - filterTable = _filterTable; - }; - - - template< typename T > inline - void setNewConnectListener(T &target, void (T::*func)( const char *connectionId ) ) - { - _connectCB = boost::make_shared< MemberConnectionEventListener< T > >( boost::ref(target), func ); - } - - template< typename T > inline - void setNewConnectListener(T *target, void (T::*func)( const char *connectionId ) ) - { - _connectCB = boost::make_shared< MemberConnectionEventListener< T > >( boost::ref(*target), func ); - } - - template< typename T > inline - void setNewDisconnectListener(T &target, void (T::*func)( const char *connectionId ) ) - { - _disconnectCB = boost::make_shared< MemberConnectionEventListener< T > >( boost::ref(target), func ); - } - - template< typename T > inline - void setNewDisconnectListener(T *target, void (T::*func)( const char *connectionId ) ) - { - _disconnectCB = boost::make_shared< MemberConnectionEventListener< T > >( boost::ref(*target), func ); - } - - // - // Attach listener interfaces for connect and disconnect events - // - void setNewConnectListener( ConnectionEventListener *newListener ); - void 
setNewConnectListener( ConnectionEventCallbackFn newListener ); - void setNewDisconnectListener( ConnectionEventListener *newListener ); - void setNewDisconnectListener( ConnectionEventCallbackFn newListener ); - - // - // pushSRI - called by the source component when SRI data about the stream changes, the data flow policy is this activity - // will occurr first before any data flows to the component. - // - // @param H - Incoming StreamSRI object that defines the state of the data flow portion of the stream (pushPacket) - // - virtual void pushSRI(const BULKIO::StreamSRI& H); - - - // - // statisics - returns a PortStatistics object for this uses port - // BULKIO::UsesPortStatisticsSequence: sequence of PortStatistics object - // PortStatistics - // portname - name of port - // elementsPerSecond - number of elements per second (element is based on size of port type ) - // bitsPerSecond - number of bits per second (based on element storage size in bits) - // callsPerSecond - history window -1 / time between calls to this method - // streamIds - list of active stream id values - // averageQueueDepth - the average depth of the queue for this port - // timeSinceLastCall - time since this method as invoked and the last pushPacket happened - // Keyword Sequence - deprecated - // - // @return BULKIO::UsesPortStatisticsSequenc - current data flow metrics collected for the port, the caller of the method - // is responsible for freeing this object - // - virtual BULKIO::UsesPortStatisticsSequence * statistics(); - - // - // state - returns the current state of the port as follows: - // BULKIO::BUSY - internal queue has reached FULL state - // BULKIO::IDLE - there are no items on the internal queue - // BULKIO::ACTIVE - there are items on the queue - // - // @return BULKIO::PortUsageType - current state of port - // - virtual BULKIO::PortUsageType state(); - - // - // turn on/off the port monitoring capability - // - virtual void enableStats(bool enable); - - // - // Return 
map of streamID/SRI objects - // - virtual bulkio::SriMap getCurrentSRI(); - - // - // Return list of SRI objects - // - virtual bulkio::SriList getActiveSRIs(); - - // - // Return a ConnectionsList for the current ports and connections ids establish via connectPort method - // - virtual ConnectionsList getConnections(); - - // - // Deprecation Warning - // - // The _getConnections and currentSRIs access will be deprecated in the next release of the - // the bulkio library class, in favor of getCurrentSRI and getConnections. - // - - // - // Allow access to the port's connection list - // - virtual ConnectionsList __attribute__ ((deprecated)) _getConnections() { - return outConnections; - } - - void setLogger( LOGGER_PTR newLogger ); - - std::string getRepid () const; - - protected: - - - // Map of stream ids and statistic object - typedef typename std::map _StatsMap; - - public: - // - // List of SRIs sent out by this port - // - OutPortSriMap currentSRIs __attribute__ ((deprecated)); - - protected: - // - // List of Port connections and connection identifiers - // - ConnectionsList outConnections; - - // - // List of connections returned by connections() method. Used to increase efficiency when there a large amount - // of connections for a port. 
- // - ExtendedCF::UsesConnectionSequence recConnections; - - // - // - // - bool recConnectionsRefresh; - - // - // Set of statistical collector objects for each stream id - // - _StatsMap stats; - - // - // _pushSRI - method to push given SRI to a specific connections - // - void _pushSRI( typename ConnectionsList::iterator connPair, SriMapStruct &sri_ctx); - void _pushSRI( const std::string &connectionId, SriMapStruct &sri_ctx); - - LOGGER_PTR logger; - std::vector filterTable; - boost::shared_ptr< ConnectionEventListener > _connectCB; - boost::shared_ptr< ConnectionEventListener > _disconnectCB; - - // - // Returns true if the given connection should receive SRI updates and data - // for the given stream - // - bool _isStreamRoutedToConnection(const std::string& connectionID, const std::string& streamID); - - // - // Sends the given data and metadata as a single push, for subclasses that - // will never break a push into multiple packets (XML, File); acquires and - // releases the port lock - // - void _pushSinglePacket( - PushArgumentType data, - const BULKIO::PrecisionUTCTime& T, - bool EOS, - const std::string& streamID); - - // - // Sends the given data and metadata to all appropriate connections and - // updates the associated SRI if necessary (or creates one if it does not - // exist); must be called with the port lock held - // - void _pushPacketLocked( - PushArgumentType data, - const BULKIO::PrecisionUTCTime& T, - bool EOS, - const std::string& streamID); - - // - // Sends an end-of-stream packet for the given stream to a particular port, - // for use when disconnecting; enables XML and File specialization for - // consistent end-of-stream behavior - // - void _sendEOS(PortPtrType port, - const std::string& streamID); - - // - // Low-level push of data and metadata to the given port; enables XML and - // File specialization for consistent high-level pushPacket behavior - // - void _pushPacketToPort( - PortPtrType port, - PushArgumentType data, - const 
BULKIO::PrecisionUTCTime& T, - bool EOS, - const char* streamID); - - // - // Returns the total number of elements of data in a pushPacket call, for - // statistical tracking; enables XML and File specialization, which have - // different notions of size - // - size_t _dataLength(PushArgumentType data); - - - bool reportConnectionErrors( const std::string &cid ); - - }; - - - template < typename PortTraits > - class OutPort : public OutPortBase< PortTraits > { - public: - - typedef PortTraits Traits; - - // - // Port Variable Definition - // - typedef typename Traits::PortVarType PortVarType; - - // - // BULKIO Interface Type - // - typedef typename Traits::PortType PortType; - - // - // Sequence container used during actual pushPacket call - // - typedef typename Traits::SequenceType PortSequenceType; - - // - // Data type contained in sequence container - // - typedef typename Traits::TransportType TransportType; - - // - // Data type of items passed into the pushPacket method - // - typedef typename Traits::NativeType NativeType; - - // - // Data type of the container for passing data into the pushPacket method - // - typedef std::vector< NativeType > NativeSequenceType; - - // - // Sequence of data returned from an input port and can be passed to the output port - // - typedef typename Traits::DataBufferType DataBufferType; - - // - // ConnectionList Definition - // - typedef typename bulkio::Connections< PortVarType >::List ConnectionsList; - - // - // Mapping of Stream IDs to SRI Map/Refresh objects - // - typedef std::map< std::string, SriMapStruct > OutPortSriMap; - - // - // OutputStream class - // - typedef OutputStream StreamType; - - // - // OutPort Creates a uses port object for publishing data to the framework - // - // @param port_name name assigned to the port located in scd.xml file - // @param connectionCB callback that will be called when the connectPort method is called - // @pararm disconnectDB callback that receives notification when a 
disconnectPort happens - // - OutPort(std::string port_name, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - OutPort(std::string port_name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - // - // virtual destructor to clean up resources - // - virtual ~OutPort(); - - /* - * pushPacket - * maps to data BULKIO method call for passing vectors of data - * - * data: sequence structure containing the payload to send out - * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. - * tcmode: timecode mode - * tcstatus: timecode status - * toff: fractional sample offset - * twsec: J1970 GMT - * tfsec: fractional seconds: 0.0 to 1.0 - * EOS: end-of-stream flag - * streamID: stream identifier - */ - void pushPacket( NativeSequenceType & data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - /* - * pushPacket - * maps to data BULKIO method call for passing a limited amount of data from a source vector - * - * data: pointer to a buffer of data - * size: number of data points in the buffer - * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. - * tcmode: timecode mode - * tcstatus: timecode status - * toff: fractional sample offset - * twsec: J1970 GMT - * tfsec: fractional seconds: 0.0 to 1.0 - * EOS: end-of-stream flag - * streamID: stream identifier - */ - void pushPacket( const TransportType* data, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - /* - * pushPacket - * maps to data BULKIO method call for passing an entire vector of data - * - * data: The sequence structure from an input port containing the payload to send out - * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. 
- * tcmode: timecode mode - * tcstatus: timecode status - * toff: fractional sample offset - * twsec: J1970 GMT - * tfsec: fractional seconds: 0.0 to 1.0 - * EOS: end-of-stream flag - * streamID: stream identifier - */ - void pushPacket( const DataBufferType & data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - /** - * @brief Creates a new output stream. - * @param streamID Stream identifier. - * @returns A new output stream. - * - * The returned output stream's SRI is initialized with default values. - */ - StreamType createStream(const std::string& streamID); - - /** - * @brief Creates a new stream based on an existing SRI. - * @param sri Stream SRI. - * @returns A new output stream. - */ - StreamType createStream(const BULKIO::StreamSRI& sri); - - using OutPortBase::currentSRIs; - - protected: - using OutPortBase::logger; - - void _pushOversizedPacket( - const TransportType* buffer, - size_t size, - const BULKIO::PrecisionUTCTime& T, - bool EOS, - const std::string& streamID); - }; - - // - // Character Specialization.. 
- // - // This class overrides the pushPacket method to support Int8 and char data types - // - // Output port for Int8 and char data types - class OutCharPort : public OutPort < CharPortTraits > { - public: - OutCharPort(std::string port_name, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - OutCharPort(std::string port_name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - - virtual ~OutCharPort() {}; - - // Push a vector of Int8 data - void pushPacket(const std::vector< Int8 >& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - // Push a vector of Char data - void pushPacket(const std::vector< Char >& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - // Push a subset of a vector of Int8 data - void pushPacket(const Int8* buffer, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - // Push a subset of a vector of Char data - void pushPacket(const Char* buffer, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - }; - - - // - // OutFilePort - // - // This class defines the pushPacket interface for file URL data. 
- // - // - class OutFilePort : public OutPortBase < FilePortTraits > { - - public: - - typedef FilePortTraits Traits; - - // - // Port Variable Definition - // - typedef Traits::PortVarType PortVarType; - - // - // BULKIO Interface Type - // - typedef Traits::PortType PortType; - - // - // Sequence container used during actual pushPacket call - // - typedef Traits::SequenceType PortSequenceType; - - // - // Data type contained in sequence container - // - typedef Traits::TransportType TransportType; - - // - // Data type of the container for passing data into the pushPacket method - // - typedef char* NativeSequenceType; - - // - // Data type of items passed into the pushPacket method - // - typedef Traits::NativeType NativeType; - - - OutFilePort( std::string pname, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - - OutFilePort( std::string port_name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - - - virtual ~OutFilePort() {}; - - /* - * pushPacket - * maps to dataFile BULKIO method call for passing the URL of a file - * - * data: char string containing the file URL to send out - * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. - * tcmode: timecode mode - * tcstatus: timecode status - * toff: fractional sample offset - * twsec: J1970 GMT - * tfsec: fractional seconds: 0.0 to 1.0 - * EOS: end-of-stream flag - * streamID: stream identifier - */ - void pushPacket(const char *URL, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - /* - * pushPacket - * maps to dataFile BULKIO method call for passing the URL of a file - * - * data: string containing the file URL to send out - * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. 
- * tcmode: timecode mode - * tcstatus: timecode status - * toff: fractional sample offset - * twsec: J1970 GMT - * tfsec: fractional seconds: 0.0 to 1.0 - * EOS: end-of-stream flag - * streamID: stream identifier - */ - void pushPacket(const std::string& URL, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - /* - * DEPRECATED: maps to dataXML BULKIO method call for passing strings of data - */ - void pushPacket(const char *data, bool EOS, const std::string& streamID); - - }; - - - // - // OutXMLPort - // - // This class defines the pushPacket interface for XML data. - // - // - class OutXMLPort : public OutPortBase < XMLPortTraits > { - - public: - - typedef XMLPortTraits Traits; - - typedef OutPortBase Base; - - // - // Port Variable Definition - // - typedef Traits::PortVarType PortVarType; - - // - // BULKIO Interface Type - // - typedef Traits::PortType PortType; - - // - // Sequence container used during actual pushPacket call - // - typedef Traits::SequenceType PortSequenceType; - - // - // Data type contained in sequence container - // - typedef Traits::TransportType TransportType; - - // - // Data type of the container for passing data into the pushPacket method - // - typedef char* NativeSequenceType; - - // - // Data type of items passed into the pushPacket method - // - typedef Traits::NativeType NativeType; - - - OutXMLPort( std::string pname, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - - OutXMLPort( std::string port_name, - LOGGER_PTR logger, - ConnectionEventListener *connectCB=NULL, - ConnectionEventListener *disconnectCB=NULL ); - - - - virtual ~OutXMLPort() {}; - - /* - * DEPRECATED: maps to dataFile BULKIO method call for passing strings of data - */ - void pushPacket(const char *data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); - - /* - * pushPacket - * maps to dataXML BULKIO method call for passing an XML-formatted string - * - * data: 
character string containing the XML data to send out - * EOS: end-of-stream flag - * streamID: stream identifier - */ - void pushPacket(const char *data, bool EOS, const std::string& streamID); - - /* - * pushPacket - * maps to dataXML BULKIO method call for passing an XML-formatted string - * - * data: string containing the XML data to send out - * EOS: end-of-stream flag - * streamID: stream identifier - */ - void pushPacket(const std::string& data, bool EOS, const std::string& streamID); - - }; - - - /* - Uses Port Definitions for All Bulk IO port definitions - * - */ - // Bulkio octet (UInt8) output - typedef OutPort< OctetPortTraits > OutOctetPort; - // Bulkio UInt8 output - typedef OutOctetPort OutUInt8Port; - // Bulkio short output - typedef OutPort< ShortPortTraits > OutShortPort; - // Bulkio unsigned short output - typedef OutPort< UShortPortTraits > OutUShortPort; - // Bulkio Int16 output - typedef OutShortPort OutInt16Port; - // Bulkio UInt16 output - typedef OutUShortPort OutUInt16Port; - // Bulkio long output - typedef OutPort< LongPortTraits > OutLongPort; - // Bulkio unsigned long output - typedef OutPort< ULongPortTraits > OutULongPort; - // Bulkio Int32 output - typedef OutLongPort OutInt32Port; - // Bulkio UInt32 output - typedef OutULongPort OutUInt32Port; - // Bulkio long long output - typedef OutPort< LongLongPortTraits > OutLongLongPort; - // Bulkio unsigned long long output - typedef OutPort< ULongLongPortTraits > OutULongLongPort; - // Bulkio Int64 output - typedef OutLongLongPort OutInt64Port; - // Bulkio UInt64 output - typedef OutULongLongPort OutUInt64Port; - // Bulkio float output - typedef OutPort< FloatPortTraits > OutFloatPort; - // Bulkio double output - typedef OutPort< DoublePortTraits > OutDoublePort; - // Bulkio URL output - typedef OutFilePort OutURLPort; - -} // end of bulkio namespace - -inline bool operator>>= (const CORBA::Any& a, bulkio::connection_descriptor_struct& s) { - CF::Properties* temp; - if (!(a >>= temp)) return 
false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("connectionTable::connection_id", props[idx].id)) { - if (!(props[idx].value >>= s.connection_id)) return false; - } else if (!strcmp("connectionTable::stream_id", props[idx].id)) { - if (!(props[idx].value >>= s.stream_id)) return false; - } else if (!strcmp("connectionTable::port_name", props[idx].id)) { - if (!(props[idx].value >>= s.port_name)) return false; - } - } - return true; -}; - -inline void operator<<= (CORBA::Any& a, const bulkio::connection_descriptor_struct& s) { - CF::Properties props; - props.length(3); - props[0].id = CORBA::string_dup("connectionTable::connection_id"); - props[0].value <<= s.connection_id; - props[1].id = CORBA::string_dup("connectionTable::stream_id"); - props[1].value <<= s.stream_id; - props[2].id = CORBA::string_dup("connectionTable::port_name"); - props[2].value <<= s.port_name; - a <<= props; -}; - -inline bool operator== (const bulkio::connection_descriptor_struct& s1, const bulkio::connection_descriptor_struct& s2) { - if (s1.connection_id!=s2.connection_id) - return false; - if (s1.stream_id!=s2.stream_id) - return false; - if (s1.port_name!=s2.port_name) - return false; - return true; -}; - -inline bool operator!= (const bulkio::connection_descriptor_struct& s1, const bulkio::connection_descriptor_struct& s2) { - return !(s1==s2); -}; - -#endif diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_out_stream.cpp b/bulkioInterfaces/libsrc/cpp/bulkio_out_stream.cpp index c56925fd6..aded8c05d 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_out_stream.cpp +++ b/bulkioInterfaces/libsrc/cpp/bulkio_out_stream.cpp @@ -20,6 +20,8 @@ #include "bulkio_out_stream.h" #include "bulkio_out_port.h" +#include "bulkio_time_operators.h" +#include "bulkio_p.h" using bulkio::OutputStream; @@ -37,301 +39,581 @@ namespace { }; } -template -class OutputStream::Impl { +template +class OutputStream::Impl : public StreamBase::Impl { 
public: - typedef typename PortTraits::DataTransferTraits::NativeDataType ScalarType; - typedef std::complex ComplexType; - typedef typename PortTraits::DataTransferTraits::TransportType TransportType; - - Impl(const std::string& streamID, bulkio::OutPort* port) : - _streamID(streamID), - _port(port), - _sri(bulkio::sri::create(streamID)), - _sriUpdated(true) - { - } - - Impl(const BULKIO::StreamSRI& sri, bulkio::OutPort* port) : - _streamID(sri.streamID), - _port(port), - _sri(sri), - _sriUpdated(true) - { - } - - const std::string& streamID() const - { - return _streamID; - } - - const BULKIO::StreamSRI& sri() const - { - return _sri; - } - - void setSRI(const BULKIO::StreamSRI& sri) - { - // Copy the new SRI, except for the stream ID, which is immutable - _sri = sri; - _sri.streamID = _streamID.c_str(); - _sriUpdated = true; - } - - void setXDelta(double delta) - { - _setStreamMetadata(_sri.xdelta, delta); - } - - void setComplex(bool mode) - { - _setStreamMetadata(_sri.mode, mode?1:0); - } - - void setBlocking(bool blocking) - { - _setStreamMetadata(_sri.blocking, blocking?1:0); - } - - void setKeywords(const _CORBA_Unbounded_Sequence& properties) - { - _sri.keywords = properties; - _sriUpdated = true; - } - - void setKeyword(const std::string& name, const CORBA::Any& value) - { - redhawk::PropertyMap::cast(_sri.keywords)[name] = value; - _sriUpdated = true; - } - - void eraseKeyword(const std::string& name) - { - redhawk::PropertyMap::cast(_sri.keywords).erase(name); - _sriUpdated = true; - } - - template - void write(const Sample* data, size_t count, const std::list& times) - { - std::list::const_iterator timestamp = times.begin(); - if (timestamp == times.end()) { - throw std::logic_error("no timestamps given"); + typedef StreamBase::Impl ImplBase; + typedef typename BufferTraits::BufferType BufferType; + + Impl(const BULKIO::StreamSRI& sri, OutPortType* port) : + ImplBase(sri), + _modcount(0), + _port(port) + { } - size_t first = 0; - while (first < count) 
{ - size_t last = 0; - const BULKIO::PrecisionUTCTime& when = timestamp->time; - if (++timestamp == times.end()) { - last = count; - } else { - last = timestamp->offset; - if (_sri.mode != 0 && !is_complex::value) { - // If the stream is complex but the data type is not, adjust sample - // offset to account for the fact that each real/imaginary pair is - // actually two values - last *= 2; - } - } - const size_t pass = last-first; - write(data+first, pass, when); - first += pass; + virtual ~Impl() + { + } + + virtual void close() + { + // Send an empty packet with an end-of-stream marker; since there is no + // sample data, the timestamp does not matter + _send(BufferType(), bulkio::time::utils::notSet(), true); } - } - void write(const ScalarType* data, size_t count, const BULKIO::PrecisionUTCTime& time) - { - if (_sriUpdated) { - _port->pushSRI(_sri); - _sriUpdated = false; + void setXStart(double start) + { + _setStreamMetadata(_sri->xstart, start); } - _send(reinterpret_cast(data), count, time, false); - } - void write(const ComplexType* data, size_t count, const BULKIO::PrecisionUTCTime& time) - { - if (_sri.mode == 0) { - throw std::logic_error("stream mode is not complex"); + void setXDelta(double delta) + { + _setStreamMetadata(_sri->xdelta, delta); } - write(reinterpret_cast(data), count*2, time); - } - void close() - { - // Send an empty packet with an end-of-stream marker; since there is no - // sample data, the timestamp does not matter - _send(0, 0, bulkio::time::utils::notSet(), true); - } + void setXUnits(short units) + { + _setStreamMetadata(_sri->xunits, units); + } -private: - void _send(const TransportType* data, size_t count, const BULKIO::PrecisionUTCTime& time, bool eos) - { - _port->pushPacket(data, count, time, eos, _streamID); - } - - template - void _setStreamMetadata(Field& field, Value value) - { - if (field == value) { - return; - } - field = value; - _sriUpdated = true; - } - - const std::string _streamID; - OutPort* _port; - 
BULKIO::StreamSRI _sri; - bool _sriUpdated; + void setSubsize(int size) + { + _setStreamMetadata(_sri->subsize, size); + } + + void setYStart(double start) + { + _setStreamMetadata(_sri->ystart, start); + } + + void setYDelta(double delta) + { + _setStreamMetadata(_sri->ydelta, delta); + } + + void setYUnits(short units) + { + _setStreamMetadata(_sri->yunits, units); + } + + void setComplex(bool mode) + { + _setStreamMetadata(_sri->mode, mode?1:0); + } + + void setBlocking(bool mode) + { + _setStreamMetadata(_sri->blocking, mode?1:0); + } + + void setKeywords(const _CORBA_Unbounded_Sequence& properties) + { + _modifyingStreamMetadata(); + _sri->keywords = properties; + ++_modcount; + } + + void setKeyword(const std::string& name, const CORBA::Any& value) + { + _modifyingStreamMetadata(); + redhawk::PropertyMap::cast(_sri->keywords)[name] = value; + ++_modcount; + } + + void eraseKeyword(const std::string& name) + { + _modifyingStreamMetadata(); + redhawk::PropertyMap::cast(_sri->keywords).erase(name); + ++_modcount; + } + + void setSRI(const BULKIO::StreamSRI& sri) + { + _modifyingStreamMetadata(); + // Copy the new SRI, except for the stream ID, which is immutable + *_sri = sri; + _sri->streamID = _streamID.c_str(); + ++_modcount; + } + + virtual void write(const BufferType& data, const BULKIO::PrecisionUTCTime& time) + { + _send(data, time, false); + } + + int modcount() const + { + return _modcount; + } + +protected: + virtual void _modifyingStreamMetadata() + { + // By default, do nothing + } + + template + void _setStreamMetadata(Field& field, Value value) + { + if (field != value) { + _modifyingStreamMetadata(); + field = value; + ++_modcount; + } + } + + void _send(const BufferType& data, const BULKIO::PrecisionUTCTime& time, bool eos) + { + _port->_sendPacket(data, time, eos, _streamID); + } + + int _modcount; + OutPortType* _port; }; -template -OutputStream::OutputStream() : - _impl() +template +OutputStream::OutputStream() : + StreamBase() { } -template 
-OutputStream::OutputStream(const BULKIO::StreamSRI& sri, bulkio::OutPort* port) : - _impl(new Impl(sri, port)) +template +OutputStream::OutputStream(const BULKIO::StreamSRI& sri, OutPortType* port) : + StreamBase(boost::make_shared(sri, port)) { } -template -const std::string& OutputStream::streamID() const +template +OutputStream::OutputStream(boost::shared_ptr impl) : + StreamBase(impl) { - return _impl->streamID(); } -template -const BULKIO::StreamSRI& OutputStream::sri() const +template +void OutputStream::sri(const BULKIO::StreamSRI& sri) { - return _impl->sri(); + impl().setSRI(sri); } -template -void OutputStream::sri(const BULKIO::StreamSRI& sri) +template +void OutputStream::xstart(double start) { - _impl->setSRI(sri); + impl().setXStart(start); } -template -double OutputStream::xdelta() const +template +void OutputStream::xdelta(double delta) { - return _impl->sri().xdelta; + impl().setXDelta(delta); } -template -void OutputStream::xdelta(double delta) +template +void OutputStream::xunits(short units) { - _impl->setXDelta(delta); + impl().setXUnits(units); } -template -bool OutputStream::complex() const +template +void OutputStream::subsize(int size) { - return (_impl->sri().mode != 0); + impl().setSubsize(size); } -template -void OutputStream::complex(bool mode) +template +void OutputStream::ystart(double start) { - _impl->setComplex(mode); + impl().setYStart(start); } -template -bool OutputStream::blocking() const +template +void OutputStream::ydelta(double delta) { - return _impl->sri().blocking; + impl().setYDelta(delta); } -template -void OutputStream::blocking(bool mode) +template +void OutputStream::yunits(short units) { - _impl->setBlocking(mode); + impl().setYUnits(units); } -template -const redhawk::PropertyMap& OutputStream::keywords() const +template +void OutputStream::complex(bool mode) { - return redhawk::PropertyMap::cast(_impl->sri().keywords); + impl().setComplex(mode); } -template -void OutputStream::keywords(const 
_CORBA_Unbounded_Sequence& props) +template +void OutputStream::blocking(bool mode) { - _impl->setKeywords(props); + impl().setBlocking(mode); } -template -bool OutputStream::hasKeyword(const std::string& name) const +template +void OutputStream::keywords(const _CORBA_Unbounded_Sequence& props) { - return keywords().contains(name); + impl().setKeywords(props); } -template -const redhawk::Value& OutputStream::getKeyword(const std::string& name) const +template +void OutputStream::setKeyword(const std::string& name, const CORBA::Any& value) { - return keywords()[name]; + impl().setKeyword(name, value); } -template -void OutputStream::setKeyword(const std::string& name, const CORBA::Any& value) +template +void OutputStream::setKeyword(const std::string& name, const redhawk::Value& value) { - _impl->setKeyword(name, value); + impl().setKeyword(name, value); } -template -void OutputStream::setKeyword(const std::string& name, const redhawk::Value& value) +template +void OutputStream::eraseKeyword(const std::string& name) { - _impl->setKeyword(name, value); + impl().eraseKeyword(name); } -template -void OutputStream::eraseKeyword(const std::string& name) +template +void OutputStream::close() { - _impl->eraseKeyword(name); + impl().close(); + _impl.reset(); } -template -void OutputStream::write(const ScalarType* data, size_t count, const BULKIO::PrecisionUTCTime& time) +template +OutputStream::operator unspecified_bool_type() const { - _impl->write(data, count, time); + return _impl?static_cast(&OutputStream::impl):0; } -template -void OutputStream::write(const ScalarType* data, size_t count, const std::list& times) +template +bool OutputStream::operator==(const OutputStream& other) const { - _impl->write(data, count, times); + return _impl == other._impl; } -template -void OutputStream::write(const ComplexType* data, size_t count, const BULKIO::PrecisionUTCTime& time) +template +bool OutputStream::operator!=(const OutputStream& other) const { - _impl->write(data, count, 
time); + return !(*this == other); } -template -void OutputStream::write(const ComplexType* data, size_t count, const std::list& times) +template +typename OutputStream::Impl& OutputStream::impl() { - _impl->write(data, count, times); + return static_cast(*this->_impl); } -template -void OutputStream::close() +template +const typename OutputStream::Impl& OutputStream::impl() const { - _impl->close(); - _impl.reset(); + return static_cast(*this->_impl); } -template class OutputStream; -template class OutputStream; -template class OutputStream; -template class OutputStream; -template class OutputStream; -template class OutputStream; -template class OutputStream; -template class OutputStream; -template class OutputStream; -template class OutputStream; +template +int OutputStream::modcount() const +{ + return impl().modcount(); +} + + +using bulkio::BufferedOutputStream; + +template +class BufferedOutputStream::Impl : public Base::Impl { +public: + typedef typename Base::Impl ImplBase; + + typedef typename BufferTraits::BufferType BufferType; + typedef typename BufferTraits::MutableBufferType MutableBufferType; + + using ImplBase::_sri; + using ImplBase::_streamID; + + Impl(const BULKIO::StreamSRI& sri, OutPortType* port) : + ImplBase::Impl(sri, port), + _bufferSize(0), + _bufferOffset(0) + { + } + + void write(const BufferType& data, const BULKIO::PrecisionUTCTime& time) + { + // If buffering is disabled, or the buffer is empty and the input data is + // large enough for a full buffer, send it immediately + if ((_bufferSize == 0) || (_bufferOffset == 0 && (data.size() >= _bufferSize))) { + ImplBase::write(data, time); + } else { + _doBuffer(data, time); + } + } + + size_t bufferSize() const + { + return _bufferSize; + } + + void setBufferSize(size_t samples) + { + // Avoid needless thrashing + if (samples == _bufferSize) { + return; + } + _bufferSize = samples; + + // If the new buffer size is less than (or exactly equal to) the + // currently buffered data size, 
flush + if (_bufferSize <= _bufferOffset) { + flush(); + } else if (_bufferSize > _buffer.size()) { + // The buffer size is increasing beyond the existing allocation + _buffer.resize(_bufferSize); + } + } + + void flush() + { + if (_bufferOffset == 0) { + return; + } + + _flush(false); + } + + virtual void close() + { + if (_bufferOffset > 0) { + // Add the end-of-stream marker to the buffered data and its timestamp + _flush(true); + } else { + ImplBase::close(); + } + } + +private: + virtual void _modifyingStreamMetadata() + { + // Flush any data queued with the old SRI + flush(); + } + + void _flush(bool eos) + { + // Push out all buffered data, which must be less than the full allocated + // size otherwise it would have already been sent + this->_send(_buffer.slice(0, _bufferOffset), _bufferTime, eos); + + // Allocate a new buffer and reset the offset index + _buffer = MutableBufferType(_bufferSize); + _bufferOffset = 0; + } + + void _doBuffer(const BufferType& data, const BULKIO::PrecisionUTCTime& time) + { + // If this is the first data being queued, use its timestamp for the start + // time of the buffered data + if (_bufferOffset == 0) { + _bufferTime = time; + } + + // Only buffer up to the currently configured buffer size + size_t count = std::min(data.size(), _bufferSize - _bufferOffset); + _buffer.replace(_bufferOffset, count, data); + + // Advance buffer offset, flushing if the buffer is full + _bufferOffset += count; + if (_bufferOffset >= _bufferSize) { + _flush(false); + } + + // Handle remaining data + if (count < data.size()) { + BULKIO::PrecisionUTCTime next = time + (_sri->xdelta * count); + _doBuffer(data.slice(count), next); + } + } + + MutableBufferType _buffer; + BULKIO::PrecisionUTCTime _bufferTime; + size_t _bufferSize; + size_t _bufferOffset; +}; + +template +BufferedOutputStream::BufferedOutputStream() : + Base() +{ +} + +template +BufferedOutputStream::BufferedOutputStream(const BULKIO::StreamSRI& sri, OutPortType* port) : + 
Base(boost::make_shared(sri, port)) +{ +} + +template +size_t BufferedOutputStream::bufferSize() const +{ + return impl().bufferSize(); +} + +template +void BufferedOutputStream::setBufferSize(size_t samples) +{ + impl().setBufferSize(samples); +} + +template +void BufferedOutputStream::flush() +{ + impl().flush(); +} + +template +void BufferedOutputStream::write(const BufferType& data, const BULKIO::PrecisionUTCTime& time) +{ + impl().write(data, time); +} + +template +typename BufferedOutputStream::Impl& BufferedOutputStream::impl() +{ + return static_cast(*this->_impl); +} + +template +const typename BufferedOutputStream::Impl& BufferedOutputStream::impl() const +{ + return static_cast(*this->_impl); +} + +// +// Numeric streams add addtional complex/scalar and extended timestamp methods +// +using bulkio::NumericOutputStream; + +template +NumericOutputStream::NumericOutputStream() : + Base() +{ +} + +template +NumericOutputStream::NumericOutputStream(const BULKIO::StreamSRI& sri, OutPortType* port) : + Base(sri, port) +{ +} + +template +void NumericOutputStream::write(const ScalarBuffer& data, const BULKIO::PrecisionUTCTime& time) +{ + Base::write(data, time); +} + +template +void NumericOutputStream::write(const ScalarBuffer& data, const std::list& times) +{ + _writeMultiple(data, times); +} + +template +void NumericOutputStream::write(const ComplexBuffer& data, const BULKIO::PrecisionUTCTime& time) +{ + if (!this->complex()) { + throw std::logic_error("stream mode is not complex"); + } + write(ScalarBuffer::recast(data), time); +} + +template +void NumericOutputStream::write(const ComplexBuffer& data, const std::list& times) +{ + _writeMultiple(data, times); +} + +template +template +inline void NumericOutputStream::_writeMultiple(const redhawk::shared_buffer& data, + const std::list& times) +{ + std::list::const_iterator timestamp = times.begin(); + if (timestamp == times.end()) { + throw std::logic_error("no timestamps given"); + } + size_t first = 0; + 
while (first < data.size()) { + size_t last = 0; + const BULKIO::PrecisionUTCTime& when = timestamp->time; + if (++timestamp == times.end()) { + last = data.size(); + } else { + last = timestamp->offset < data.size()? timestamp->offset : data.size(); + if (!is_complex::value && this->complex()) { + // If the stream is complex but the data type is not, adjust sample + // offset to account for the fact that each real/imaginary pair is + // actually two values + last *= 2; + } + } + write(data.slice(first, last), when); + first = last; + } +} + +// +// XML +// +using bulkio::OutXMLStream; + +OutXMLStream::OutXMLStream() : + Base() +{ +} + +OutXMLStream::OutXMLStream(const BULKIO::StreamSRI& sri, OutPortType* port) : + Base(sri, port) +{ +} + +void OutXMLStream::write(const std::string& xmlString) +{ + // XML ports do not officially support timestamps, although the port + // implementation includes it (because it's templatized); always pass + // "not set" for consistency + impl().write(xmlString, bulkio::time::utils::notSet()); +} + +// +// File +// +using bulkio::OutFileStream; + +OutFileStream::OutFileStream() : + Base() +{ +} + +OutFileStream::OutFileStream(const BULKIO::StreamSRI& sri, OutPortType* port) : + Base(sri, port) +{ +} + +void OutFileStream::write(const std::string& URL, const BULKIO::PrecisionUTCTime& time) +{ + impl().write(URL, time); +} + +#define INSTANTIATE_TEMPLATE(x) \ + template class OutputStream; + +#define INSTANTIATE_NUMERIC_TEMPLATE(x) \ + template class BufferedOutputStream; \ + template class NumericOutputStream; + +FOREACH_PORT_TYPE(INSTANTIATE_TEMPLATE); +FOREACH_NUMERIC_PORT_TYPE(INSTANTIATE_NUMERIC_TEMPLATE); +// Bit data gets output stream buffering, but does not support scalar/complex +// data APIs +template class BufferedOutputStream; diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_out_stream.h b/bulkioInterfaces/libsrc/cpp/bulkio_out_stream.h deleted file mode 100644 index c67533759..000000000 --- 
a/bulkioInterfaces/libsrc/cpp/bulkio_out_stream.h +++ /dev/null @@ -1,480 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -#ifndef __bulkio_out_stream_h -#define __bulkio_out_stream_h - -#include -#include -#include - -#include -#include - -#include "bulkio_traits.h" -#include "bulkio_datablock.h" - -namespace bulkio { - - template - class OutPort; - - /** - * @brief BulkIO output stream class. - * @headerfile bulkio_out_stream.h - * - * %OutputStream is a smart pointer-based class that encapsulates a single - * BulkIO stream for writing. It is associated with the OutPort that created - * it, providing a file-like API on top of the classic BulkIO pushPacket - * model. - * - * @warning Do not declare instances of this template class directly in user - * code; the template parameter and class name are not considered - * API. Use the type-specific @c typedef instead, such as - * bulkio::OutFloatStream, or the nested @c typedef StreamType from - * an %OutPort. 
- * - * Notionally, a BulkIO stream represents a contiguous data set and its - * associated signal-related information (SRI), uniquely identified by a - * stream ID, from creation until close. The SRI may vary over time, but the - * stream ID is immutable. Only one stream with a given stream ID can be - * active at a time. - * - * OutputStreams help manage the stream lifetime by tying that SRI with an - * %OutPort and ensuring that all data is associated with a valid stream. - * When the stream is complete, it may be closed, notifying downstream - * receivers that no more data is expected. - * - * The %OutputStream class itself is a lightweight handle; it is inexpensive - * to copy or store in local variables or nested data types. Assigning one - * %OutputStream to another does not copy the stream state, but instead, it - * aliases both objects to the same underlying stream. - * - * The default constructor creates an invalid "null" %InputStream that cannot - * be used for any real operations, similar to a null pointer. A stream may - * be checked for validity with the boolean ! operator: - * - * @code - * if (!stream) { - * // handle failure - * } else { - * // use stream - * } - * @endcode - * - * OutputStreams must be created via an %OutPort. A stream cannot be - * associated with more than one port. - * @see OutPort::createStream(const std::string&) - * @see OutPort::createStream(const BULKIO::StreamSRI&) - * - * @par SRI Changes - * Updates to the stream that modify its SRI are cached locally until the - * next write to minimize the number of updates that are published. When - * there are pending SRI changes, the %OutputStream pushes the updated SRI - * first, followed by the data. - */ - template - class OutputStream { - public: - /// @brief The native type of a real sample. - typedef typename PortTraits::DataTransferTraits::NativeDataType ScalarType; - - /// @brief The native type of a complex sample. 
- typedef std::complex ComplexType; - - /** - * @brief Default constructor. - * @see OutPort::createStream(const std::string&) - * @see OutPort::createStream(const BULKIO::StreamSRI&) - * - * Create a null OutputStream. This stream is not associated with a stream - * from any OutPort instance. No methods may be called on the %OutputStream - * except for operator!, which will always return true; and operator==, - * which returns true if the other %OutputStream is also null. Both operators - * will return false if the other %OutputStream is also not null. - * - * New, valid streams are created via an %OutPort. - */ - OutputStream(); - - /** - * @brief Returns the stream ID. - * @pre Stream is valid. - * - * The stream ID is immutable and cannot be changed. - */ - const std::string& streamID() const; - - /** - * @brief Gets the stream metadata. - * @returns Read-only reference to stream SRI. - * @pre Stream is valid. - */ - const BULKIO::StreamSRI& sri() const; - - /** - * @brief Update the SRI. - * @param sri New SRI. - * @pre Stream is valid. - * - * Overwrites all SRI fields except for @c streamID, which is immutable. - * The updated SRI will be pushed on the next call to write(). - */ - void sri(const BULKIO::StreamSRI& sri); - - /** - * @brief Gets the X-axis delta. - * @returns The distance between two adjacent samples in the X direction. - * @pre Stream is valid. - * - * Because the X-axis is commonly in terms of time (that is, @c sri.xunits is - * @c BULKIO::UNITS_TIME), this is typically the reciprocal of the sample - * rate. - */ - double xdelta() const; - - /** - * @brief Sets the X-axis delta. - * @param delta The distance between two adjacent samples in the X - * direction. - * @pre Stream is valid. - * @see xdelta() const - * - * Changing the %xdelta updates the SRI, which will be pushed on the next - * call to write(). - */ - void xdelta(double delta); - - /** - * @brief Gets the complex mode of this stream. - * @returns True if data is complex. 
False if data is not complex. - * @pre Stream is valid. - * - * A stream is considered complex if @c sri.mode is non-zero. - */ - bool complex() const; - - /** - * @brief Sets the complex mode of this stream. - * @param mode True if data is complex. False if data is not complex. - * @pre Stream is valid. - * @see complex() const - * - * Changing the %complex mode indicates that all subsequent data is real or - * complex based on the value of @a mode. The updated SRI will be pushed on - * the next call to write(). - */ - void complex(bool mode); - - /** - * @brief Gets the blocking mode of this stream. - * @returns True if this stream is blocking. False if stream is non-blocking. - * @pre Stream is valid. - */ - bool blocking() const; - - /** - * @brief Sets the blocking mode of this stream. - * @param mode True if blocking. False if stream is non-blocking. - * @pre Stream is valid. - * - * Changing the %blocking mode updates the SRI, which will be pushed on the - * next call to write(). - */ - void blocking(bool mode); - - /** - * @brief Read-only access to the set of SRI keywords. - * @returns A read-only reference to the SRI keywords. - * @pre Stream is valid. - * - * The SRI keywords are reinterpreted as const reference to a PropertyMap, - * which provides a higher-level interface than the default CORBA sequence. - */ - const redhawk::PropertyMap& keywords() const; - - /** - * @brief Overwrites the SRI keywords. - * @param props New SRI keywords. - * @pre Stream is valid. - * @see setKeyword - * - * The current SRI keywords are replaced with @a props. The updated SRI - * will be pushed on the next call to write(). - */ - void keywords(const _CORBA_Unbounded_Sequence& props); - - /** - * @brief Checks for the presence of a keyword in the SRI. - * @param name The name of the keyword. - * @returns True if the keyword is found. False if keyword is not found. - * @pre Stream is valid. 
- */ - bool hasKeyword(const std::string& name) const; - - /** - * @brief Gets the current value of a keyword in the SRI. - * @param name The name of the keyword. - * @returns A read-only reference to the keyword's value. - * @throw std::invalid_argument If no keyword @a name exists. - * @pre Stream is valid. - * @see hasKeyword - * - * Allows for easy lookup of keyword values in the SRI. To avoid exceptions - * on missing keywords, the presence of a keyword can be checked with - * hasKeyword(). - */ - const redhawk::Value& getKeyword(const std::string& name) const; - - /** - * @brief Sets the current value of a keyword in the SRI. - * @param name The name of the keyword. - * @param value The new value. - * @pre Stream is valid. - * @see setKeyword(const std::string&, const redhawk::Value&) - * @see setKeyword(const std::string&, const T&) - * - * If the keyword @a name already exists, its value is updated to @a value. - * If the keyword @a name does not exist, the new keyword is appended. - * - * Setting a keyword updates the SRI, which will be pushed on the next - * call to write(). - */ - void setKeyword(const std::string& name, const CORBA::Any& value); - - /** - * @brief Sets the current value of a keyword in the SRI. - * @param name The name of the keyword. - * @param value The new value. - * @pre Stream is valid. - * @see setKeyword(const std::string&, const T&) - * - * If the keyword @a name already exists, its value is updated to @a value. - * If the keyword @a name does not exist, the new keyword is appended. - * - * Setting a keyword updates the SRI, which will be pushed on the next - * call to write(). - */ - void setKeyword(const std::string& name, const redhawk::Value& value); - - /** - * @brief Sets the current value of a keyword in the SRI. - * @param name The name of the keyword. - * @param value The new value. - * @tparam T Any type that can be converted to a redhawk::Value. - * @pre Stream is valid. 
- * - * If the keyword @a name already exists, its value is updated to @a value. - * If the keyword @a name does not exist, the new keyword is appended. - * - * Setting a keyword updates the SRI, which will be pushed on the next - * call to write(). - */ - template - void setKeyword(const std::string& name, const T& value) - { - setKeyword(name, redhawk::Value(value)); - } - - /** - * @brief Removes a keyword from the SRI. - * @param name The name of the keyword. - * @pre Stream is valid. - * - * Erases the keyword named @a name from the SRI keywords. If no keyword - * @a name is found, the keywords are not modified. - * - * Removing a keyword updates the SRI, which will be pushed on the next - * call to write(). - */ - void eraseKeyword(const std::string& name); - - /** - * @brief Writes a packet of data. - * @tparam T Sample type (must be ScalarType or ComplexType). - * @param data Vector containing real or complex sample data. - * @param time Time stamp of first sample. - * @pre Stream is valid. - * @throw std::logic_error If @p T is complex but stream is not. - * @see write(const ScalarType*,size_t,const BULKIO::PrecisionUTCTime&) - * @see write(const ComplexType*,size_t,const BULKIO::PrecisionUTCTime&) - * - * Sends the contents of a real or complex vector as a single packet. This - * is a convenience wrapper that defers to one of the write methods that - * takes a pointer and size, depending on whether @a T is real or - * complex. - */ - template - void write(const std::vector& data, const BULKIO::PrecisionUTCTime& time) - { - write(&data[0], data.size(), time); - } - - /** - * @brief Writes one or more packets. - * @tparam T Sample type (must be ScalarType or ComplexType). - * @param data Vector containing real or complex sample data. - * @param times List of time stamps, with offsets. - * @pre Stream is valid. - * @throw std::logic_error If @p T is complex but stream is not. - * @throw std::logic_error If @p times is empty. 
- * @see write(const ScalarType*,size_t,const std::list&) - * @see write(const ComplexType*,size_t,const std::list&) - * - * Sends the contents of a real or complex vector as one or more packets. - * This is a convenience wrapper that defers to one of the write methods - * that takes a pointer and size, depending on whether @a T is real or - * complex. - */ - template - void write(const std::vector& data, const std::list& times) - { - write(&data[0], data.size(), times); - } - - /** - * @brief Writes a packet of real data. - * @param data Pointer to real sample data. - * @param count Number of samples to write. - * @param time Time stamp of first sample. - * @pre Stream is valid. - * - * Sends @a count samples of scalar data as single packet with the time - * stamp @a time via the associated OutPort. - * - * If there are any pending SRI changes, the new SRI is pushed first. - */ - void write(const ScalarType* data, size_t count, const BULKIO::PrecisionUTCTime& time); - - /** - * @brief Writes one or more packets of real data. - * @param data Pointer to real sample data. - * @param count Number of samples to write. - * @param times List of time stamps, with offsets. - * @pre Stream is valid. - * @pre @p times is sorted in order of offset. - * @throw std::logic_error If @p times is empty. - * @see write(const ScalarType*,size_t,const BULKIO::PrecisionUTCTime&) - * - * Writes @a count samples of scalar data to the stream, where each element - * of @a times gives the offset and time stamp of an individual packet. The - * offset of the first time stamp is ignored and assumed to be 0, while - * subsequent offsets determine the length of the prior packet. All offsets - * should be less than @a count. - * - * For example, given three time stamps with offsets 0, 10, and 20, and a - * @a count of 25, @a data is broken into three packets of size 10, 10, and - * 5 samples. - * - * If there are any pending SRI changes, the new SRI is pushed first. 
- * - * @note This method may be used when the stream is configured for complex - * data, though this usage is not recommended. In this case, the - * offsets in @a times are interpreted in terms of complex samples. - */ - void write(const ScalarType* data, size_t count, const std::list& times); - - /** - * @brief Writes a packet of complex data. - * @param data Pointer to complex sample data. - * @param count Number of samples to write. - * @param time Time stamp of first sample. - * @throw std::logic_error If stream is not configured for complex data. - * @pre Stream is valid. - * - * Sends @a count samples of complex data as single packet with the time - * stamp @a time via the associated OutPort. - * - * If there are any pending SRI changes, the new SRI is pushed first. - */ - void write(const ComplexType* data, size_t count, const BULKIO::PrecisionUTCTime& time); - - /** - * @brief Writes one or more packets of complex data. - * @param data Pointer to complex sample data. - * @param count Number of samples to write. - * @param times List of time stamps, with offsets. - * @pre Stream is valid. - * @pre @p times is sorted in order of offset. - * @throw std::logic_error If stream is not configured for complex data. - * @throw std::logic_error If @p times is empty. - * @see write(const ComplexType*,size_t,const BULKIO::PrecisionUTCTime&) - * - * Writes @a count samples of complex data to the stream, where each element - * of @a times gives the offset and time stamp of an individual packet. The - * offset of the first time stamp is ignored and assumed to be 0, while - * subsequent offsets determine the length of the prior packet. All offsets - * should be less than @a count. - * - * For example, given three time stamps with offsets 0, 10, and 20, and a - * @a count of 25, @a data is broken into three packets of size 10, 10, and - * 5 samples. - * - * If there are any pending SRI changes, the new SRI is pushed first. 
- */ - void write(const ComplexType* data, size_t count, const std::list& times); - - /** - * @brief Closes this stream and sends an end-of-stream. - * @pre Stream is valid. - * @post Stream is invalid. - * - * Closing a stream sends an end-of-stream packet and resets the stream - * handle. No further operations may be made on the stream. - */ - void close(); - - /** - * @brief Checks stream validity. - * @returns True if this stream is not valid, false if it is valid. - * - * Invalid (null) OutputStreams are not associated with an active stream in - * an %OutPort. If this method returns true, no other methods except - * comparison or assignment may be called. - */ - bool operator! () const - { - return !_impl; - } - - private: - /// @cond IMPL - friend class OutPort; - OutputStream(const BULKIO::StreamSRI& sri, OutPort* port); - - class Impl; - boost::shared_ptr _impl; - /// @endcond - }; - - typedef OutputStream OutCharStream; - typedef OutputStream OutOctetStream; - typedef OutputStream OutShortStream; - typedef OutputStream OutUShortStream; - typedef OutputStream OutLongStream; - typedef OutputStream OutULongStream; - typedef OutputStream OutLongLongStream; - typedef OutputStream OutULongLongStream; - typedef OutputStream OutFloatStream; - typedef OutputStream OutDoubleStream; - -} // end of bulkio namespace - -#endif diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_p.h b/bulkioInterfaces/libsrc/cpp/bulkio_p.h index 81f7e8762..6e7e959a3 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_p.h +++ b/bulkioInterfaces/libsrc/cpp/bulkio_p.h @@ -92,6 +92,23 @@ namespace bulkio { } // end of namespace +#define FOREACH_NUMERIC_PORT_TYPE(x) \ + x(BULKIO::dataChar); \ + x(BULKIO::dataOctet); \ + x(BULKIO::dataShort); \ + x(BULKIO::dataUshort); \ + x(BULKIO::dataLong); \ + x(BULKIO::dataUlong); \ + x(BULKIO::dataLongLong); \ + x(BULKIO::dataUlongLong); \ + x(BULKIO::dataFloat); \ + x(BULKIO::dataDouble); + +#define FOREACH_PORT_TYPE(x) \ + FOREACH_NUMERIC_PORT_TYPE(x) \ + 
x(BULKIO::dataBit); \ + x(BULKIO::dataFile); \ + x(BULKIO::dataXML); #endif // __bulkio_p_h__ diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_stream.cpp b/bulkioInterfaces/libsrc/cpp/bulkio_stream.cpp new file mode 100644 index 000000000..07ef14eaf --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/bulkio_stream.cpp @@ -0,0 +1,113 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "bulkio_stream.h" + +using bulkio::StreamBase; + +StreamBase::StreamBase() : + _impl() +{ +} + +StreamBase::StreamBase(const boost::shared_ptr& impl) : + _impl(impl) +{ +} + +const std::string& StreamBase::streamID() const +{ + return _impl->streamID(); +} + +const BULKIO::StreamSRI& StreamBase::sri() const +{ + return _impl->sri(); +} + +StreamBase::operator const BULKIO::StreamSRI& () const +{ + return sri(); +} + +double StreamBase::xstart() const +{ + return sri().xstart; +} + +double StreamBase::xdelta() const +{ + return sri().xdelta; +} + +short StreamBase::xunits() const +{ + return sri().xunits; +} + +int StreamBase::subsize() const +{ + return sri().subsize; +} + +double StreamBase::ystart() const +{ + return sri().ystart; +} + +double StreamBase::ydelta() const +{ + return sri().ydelta; +} + +short StreamBase::yunits() const +{ + return sri().yunits; +} + +bool StreamBase::complex() const +{ + return _impl->complex(); +} + +bool StreamBase::blocking() const +{ + return _impl->blocking(); +} + +bool StreamBase::operator!() const +{ + return !_impl; +} + +const redhawk::PropertyMap& StreamBase::keywords() const +{ + return redhawk::PropertyMap::cast(sri().keywords); +} + +bool StreamBase::hasKeyword(const std::string& name) const +{ + return keywords().contains(name); +} + +const redhawk::Value& StreamBase::getKeyword(const std::string& name) const +{ + return keywords()[name]; +} diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_time_helpers.cpp b/bulkioInterfaces/libsrc/cpp/bulkio_time_helpers.cpp index 1b3586f5a..e9b81d184 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_time_helpers.cpp +++ b/bulkioInterfaces/libsrc/cpp/bulkio_time_helpers.cpp @@ -38,24 +38,21 @@ namespace bulkio { namespace utils { - BULKIO::PrecisionUTCTime create( const double wholeSecs, const double fractionalSecs, const bulkio::Int16 tsrc ) { - - double wsec = wholeSecs; - double fsec = fractionalSecs; - if ( wsec < 0.0 || fsec < 0.0 ) { - struct timeval tmp_time; - 
struct timezone tmp_tz; - gettimeofday(&tmp_time, &tmp_tz); - wsec = tmp_time.tv_sec; - fsec = tmp_time.tv_usec / 1e6; + BULKIO::PrecisionUTCTime create(double wsec, double fsec, CORBA::Short tsrc) + { + if ((wsec < 0.0) || (fsec < 0.0)) { + struct timespec tod; + clock_gettime(CLOCK_REALTIME, &tod); + wsec = tod.tv_sec; + fsec = tod.tv_nsec * 1e-9; } - BULKIO::PrecisionUTCTime tstamp = BULKIO::PrecisionUTCTime(); - tstamp.tcmode = tsrc; - tstamp.tcstatus = BULKIO::TCS_VALID; - tstamp.toff = 0.0; - tstamp.twsec = wsec; - tstamp.tfsec = fsec; - return tstamp; + BULKIO::PrecisionUTCTime tstamp = BULKIO::PrecisionUTCTime(); + tstamp.tcmode = tsrc; + tstamp.tcstatus = BULKIO::TCS_VALID; + tstamp.toff = 0.0; + tstamp.twsec = wsec; + tstamp.tfsec = fsec; + return tstamp; } BULKIO::PrecisionUTCTime now() { diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_traits.h b/bulkioInterfaces/libsrc/cpp/bulkio_traits.h deleted file mode 100644 index 3ce3b36f7..000000000 --- a/bulkioInterfaces/libsrc/cpp/bulkio_traits.h +++ /dev/null @@ -1,230 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -#ifndef __bulkio_traits_h -#define __bulkio_traits_h - -#include - -#include // for _seqVector -#include - -#include "BULKIO_Interfaces.h" -#include "bulkio_base.h" - -namespace bulkio { - -template < typename TT, typename AT=_seqVector::seqVectorAllocator< TT > > - class DataTransferBuffer - { - private: - DataTransferBuffer(void) {}; - public: - typedef TT TransportType; - typedef AT AllocatorType; - typedef std::vector< TransportType, AllocatorType > Type; -}; - - -// -// DataTransferTraits -// -// Traits template definition used to define input and output types used the port -// classes -// -template < typename PST, typename TT, typename NDT=TT, class DBT=std::vector< NDT >, class PAT=const PST& > -struct DataTransferTraits { - typedef PST PortSequenceType; // Port Sequence type used by middleware - typedef PAT PushArgumentType; // Type of data argument to pushPacket - typedef TT TransportType; // Transport Type contained in the Port Sequence container - typedef NDT NativeDataType; // Native c++ mapping of Transport Type - typedef DBT DataBufferType; // Container defintion to hold data from Input port - typedef typename DBT::allocator_type AllocatorType; -}; - - -typedef DataTransferTraits< PortTypes::CharSequence, CORBA::Char, Int8 > CharDataTransferTraits; -typedef DataTransferTraits< CF::OctetSequence, CORBA::Octet > OctetDataTransferTraits; -typedef DataTransferTraits< PortTypes::ShortSequence, CORBA::Short > ShortDataTransferTraits; -typedef DataTransferTraits< PortTypes::UshortSequence, CORBA::UShort > UShortDataTransferTraits; -typedef DataTransferTraits< PortTypes::LongSequence, CORBA::Long > LongDataTransferTraits; -typedef DataTransferTraits< PortTypes::UlongSequence, CORBA::ULong > ULongDataTransferTraits; -typedef DataTransferTraits< PortTypes::LongLongSequence, CORBA::LongLong > LongLongDataTransferTraits; -typedef DataTransferTraits< PortTypes::UlongLongSequence, CORBA::ULongLong > ULongLongDataTransferTraits; -typedef DataTransferTraits< 
PortTypes::FloatSequence, CORBA::Float > FloatDataTransferTraits; -typedef DataTransferTraits< PortTypes::DoubleSequence, CORBA::Double > DoubleDataTransferTraits; -typedef DataTransferTraits< Char *, Char, Char, std::string, const char* > StringDataTransferTraits; - - -// -// DataTransfer -// -// This is the packet of information returned from an InPort's getPacket method. The DataTransferTraits class -// defines the type context for this structure. -// -// This class tries to implement as efficient as possible data movement from the supplied PortSequenceType object. -// The supplied PortSequenceType's data buffer is used to set the start/end/length attributes of the dataBuffer object that will -// be used by the component. This class takes ownership of the PortSequenceType's memory buffer and assigns it the -// the dataBuffer's start address. The DataBufferType allows developers to use standard -// stl iterators and algorithms against the data in this buffer. -// -// All remaining member variables use each type's assignment/copy methods. It is assumed the -// PrecisionUTCTime and StreamSRI object will perform a "deep" copy. 
-// -// -template < typename DataTransferTraits > -struct DataTransfer { - - typedef DataTransferTraits Traits; - typedef typename Traits::PortSequenceType PortSequenceType; - typedef typename Traits::TransportType TransportType; - typedef typename Traits::NativeDataType NativeDataType; - typedef typename Traits::DataBufferType DataBufferType; - - // - // Construct a DataTransfer object to be returned from an InPort's getPacket method - // - DataTransfer(const PortSequenceType & data, const BULKIO::PrecisionUTCTime &_T, bool _EOS, const char* _streamID, BULKIO::StreamSRI &_H, bool _sriChanged, bool _inputQueueFlushed); - - DataBufferType dataBuffer; - BULKIO::PrecisionUTCTime T; - bool EOS; - std::string streamID; - BULKIO::StreamSRI SRI; - bool sriChanged; - bool inputQueueFlushed; - - redhawk::PropertyMap& getKeywords() - { - return redhawk::PropertyMap::cast(SRI.keywords); - } - - const redhawk::PropertyMap& getKeywords() const - { - return redhawk::PropertyMap::cast(SRI.keywords); - } -}; - - -template < > -struct DataTransfer< StringDataTransferTraits > -{ - typedef StringDataTransferTraits Traits; - typedef Traits::PortSequenceType PortSequenceType; - typedef Traits::DataBufferType DataBufferType; - - DataTransfer(const char *data, const BULKIO::PrecisionUTCTime &_T, bool _EOS, const char* _streamID, BULKIO::StreamSRI &_H, bool _sriChanged, bool _inputQueueFlushed) - { - if ( data != NULL ) dataBuffer = data; - T = _T; - EOS = _EOS; - streamID = _streamID; - SRI = _H; - sriChanged = _sriChanged; - inputQueueFlushed = _inputQueueFlushed; - } - DataTransfer(const char * data, bool _EOS, const char* _streamID, BULKIO::StreamSRI &_H, bool _sriChanged, bool _inputQueueFlushed) - { - if ( data != NULL ) dataBuffer = data; - EOS = _EOS; - streamID = _streamID; - SRI = _H; - sriChanged = _sriChanged; - inputQueueFlushed = _inputQueueFlushed; - } - DataBufferType dataBuffer; - BULKIO::PrecisionUTCTime T; - bool EOS; - std::string streamID; - BULKIO::StreamSRI SRI; - 
bool sriChanged; - bool inputQueueFlushed; - -}; - -typedef DataTransfer< CharDataTransferTraits > CharDataTransfer; -typedef DataTransfer< OctetDataTransferTraits > OctetDataTransfer; -typedef DataTransfer< ShortDataTransferTraits > ShortDataTransfer; -typedef DataTransfer< UShortDataTransferTraits > UShortDataTransfer; -typedef DataTransfer< LongDataTransferTraits > LongDataTransfer; -typedef DataTransfer< ULongDataTransferTraits > ULongDataTransfer; -typedef DataTransfer< LongLongDataTransferTraits > LongLongDataTransfer; -typedef DataTransfer< ULongLongDataTransferTraits > ULongLongDataTransfer; -typedef DataTransfer< FloatDataTransferTraits > FloatDataTransfer; -typedef DataTransfer< DoubleDataTransferTraits > DoubleDataTransfer; -typedef DataTransfer< StringDataTransferTraits > StringDataTransfer; - -// -// PortTraits -// This template defines the set of traits used by Input and Output port template classes -// -// POA = Portable Object Adapter Class -// PT - BULKIO Port Type -// DTT DataTransferTraits associated with port type -// TransportType - TransportType defined by middleware -// NativeType - TransportType mapped to native type -// PortSequenceType - Data container used by middleware to transfer TransportType objects -// DataBufferType - Data Container of the DataTransfer object returned from getPacket -// - -template < typename POA, typename PT, typename DTT > -struct PortTraits { - typedef POA POAPortType; - typedef PT PortType; - typedef DTT DataTransferTraits; - typedef typename PortType::_var_type PortVarType; - typedef typename DTT::TransportType TransportType; - typedef typename DTT::NativeDataType NativeType; - typedef typename DTT::PortSequenceType SequenceType; - typedef typename DTT::PushArgumentType PushType; - typedef typename DTT::DataBufferType DataBufferType; -}; - - -typedef PortTraits< POA_BULKIO::dataChar, BULKIO::dataChar, CharDataTransferTraits > CharPortTraits; -typedef PortTraits< POA_BULKIO::dataOctet, BULKIO::dataOctet, 
OctetDataTransferTraits > OctetPortTraits; -typedef PortTraits< POA_BULKIO::dataShort, BULKIO::dataShort, ShortDataTransferTraits > ShortPortTraits; -typedef PortTraits< POA_BULKIO::dataUshort, BULKIO::dataUshort, UShortDataTransferTraits > UShortPortTraits; -typedef PortTraits< POA_BULKIO::dataLong, BULKIO::dataLong, LongDataTransferTraits > LongPortTraits; -typedef PortTraits< POA_BULKIO::dataUlong, BULKIO::dataUlong, ULongDataTransferTraits > ULongPortTraits; -typedef PortTraits< POA_BULKIO::dataLongLong, BULKIO::dataLongLong, LongLongDataTransferTraits > LongLongPortTraits; -typedef PortTraits< POA_BULKIO::dataUlongLong, BULKIO::dataUlongLong, ULongLongDataTransferTraits > ULongLongPortTraits; -typedef PortTraits< POA_BULKIO::dataFloat, BULKIO::dataFloat, FloatDataTransferTraits > FloatPortTraits; -typedef PortTraits< POA_BULKIO::dataDouble, BULKIO::dataDouble, DoubleDataTransferTraits > DoublePortTraits; - -typedef PortTraits< POA_BULKIO::dataFile, BULKIO::dataFile, StringDataTransferTraits > URLPortTraits; -typedef PortTraits< POA_BULKIO::dataFile, BULKIO::dataFile, StringDataTransferTraits > FilePortTraits; -typedef PortTraits< POA_BULKIO::dataXML, BULKIO::dataXML, StringDataTransferTraits > XMLPortTraits; - - -typedef CharPortTraits Int8PortTraits; -typedef OctetPortTraits UInt8PortTraits; -typedef ShortPortTraits Int16PortTraits; -typedef UShortPortTraits Unt16PortTraits; -typedef LongPortTraits Int32PortTraits; -typedef ULongPortTraits Unt32PortTraits; -typedef LongLongPortTraits Int64PortTraits; -typedef ULongLongPortTraits Unt64PortTraits; - - -} // end of bulkio namespace - - -#endif diff --git a/bulkioInterfaces/libsrc/cpp/BULKIO_Interfaces.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/BULKIO_Interfaces.h similarity index 97% rename from bulkioInterfaces/libsrc/cpp/BULKIO_Interfaces.h rename to bulkioInterfaces/libsrc/cpp/include/bulkio/BULKIO_Interfaces.h index 25fb6e24e..bf28b8434 100644 --- a/bulkioInterfaces/libsrc/cpp/BULKIO_Interfaces.h +++ 
b/bulkioInterfaces/libsrc/cpp/include/bulkio/BULKIO_Interfaces.h @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/BulkioTransport.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/BulkioTransport.h new file mode 100644 index 000000000..3bb1cedbb --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/BulkioTransport.h @@ -0,0 +1,172 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef __bulkio_BulkioTransport_h +#define __bulkio_BulkioTransport_h + +#include + +#include "bulkio_base.h" +#include "bulkio_typetraits.h" + +namespace bulkio { + + template + class InPort; + + template + class OutPort; + + template + class OutputTransport : public redhawk::UsesTransport + { + public: + typedef typename BufferTraits::BufferType BufferType; + + virtual ~OutputTransport(); + + virtual void disconnect(); + + void pushSRI(const std::string& streamID, const BULKIO::StreamSRI& sri, int version); + + void pushPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID, + const BULKIO::StreamSRI& sri); + + BULKIO::PortStatistics getStatistics(); + + protected: + typedef OutPort OutPortType; + typedef typename PortType::_ptr_type PtrType; + typedef typename PortType::_var_type VarType; + typedef typename NativeTraits::NativeType NativeType; + + OutputTransport(OutPortType* port, PtrType objref); + + virtual void _pushSRI(const BULKIO::StreamSRI& sri) = 0; + + virtual void _sendPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID, + const BULKIO::StreamSRI& sri); + + virtual void _pushPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID) = 0; + + void _recordPush(const std::string& streamID, size_t elements, bool endOfStream); + + virtual redhawk::PropertyMap _getExtendedStatistics(); + + // + // Returns the total number of elements of data in a pushPacket call, for + // statistical tracking; enables XML and File specialization, which have + // different notions of size + // + size_t _dataLength(const BufferType& data); + + OutPortType* _port; + VarType _objref; + typedef std::map VersionMap; + VersionMap _sriVersions; + + private: + linkStatistics _stats; + }; + + template + class InputTransport : public redhawk::ProvidesTransport + { + protected: + typedef InPort InPortType; + typedef 
typename BufferTraits::BufferType BufferType; + + InputTransport(InPortType* port, const std::string& transportId); + + inline void _queuePacket(const BufferType& data, const BULKIO::PrecisionUTCTime& T, bool eos, const std::string& streamID) + { + _port->queuePacket(data, T, eos, streamID); + } + + InPortType* _port; + }; + + template + class OutputManager : public redhawk::UsesTransportManager + { + public: + typedef typename PortType::_ptr_type PtrType; + + virtual OutputTransport* createOutputTransport(PtrType object, + const std::string& connectionId, + const redhawk::PropertyMap& properties) = 0; + protected: + typedef OutPort OutPortType; + + OutputManager(OutPortType* port); + + OutPortType* _port; + + private: + virtual redhawk::UsesTransport* createUsesTransport(CORBA::Object_ptr object, + const std::string& connectionId, + const redhawk::PropertyMap& properties); + }; + + template + class InputManager : public redhawk::ProvidesTransportManager + { + public: + virtual InputTransport* createInputTransport(const std::string& transportId, + const redhawk::PropertyMap& properties) = 0; + protected: + typedef InPort InPortType; + + InputManager(InPortType* port); + + InPortType* _port; + + private: + virtual redhawk::ProvidesTransport* createProvidesTransport(const std::string& transportId, + const redhawk::PropertyMap& properties); + }; + + template + class BulkioTransportFactory : public redhawk::TransportFactory + { + public: + typedef InPort InPortType; + typedef OutPort OutPortType; + + virtual std::string repoId(); + + virtual InputManager* createInputManager(InPortType* port) = 0; + virtual OutputManager* createOutputManager(OutPortType* port) = 0; + + private: + virtual redhawk::ProvidesTransportManager* createProvidesManager(redhawk::NegotiableProvidesPortBase* port); + virtual redhawk::UsesTransportManager* createUsesManager(redhawk::NegotiableUsesPort* port); + }; +} + +#endif // __bulkio_BulkioTransport_h diff --git 
a/bulkioInterfaces/libsrc/cpp/bulkio.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio.h similarity index 100% rename from bulkioInterfaces/libsrc/cpp/bulkio.h rename to bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio.h diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_attachable_base.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_attachable_base.h similarity index 99% rename from bulkioInterfaces/libsrc/cpp/bulkio_attachable_base.h rename to bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_attachable_base.h index 10d0001a5..1919280eb 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_attachable_base.h +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_attachable_base.h @@ -107,7 +107,7 @@ namespace bulkio { InAttachablePort(std::string port_name, - LOGGER_PTR logger, + LOGGER_PTR new_logger, InAttachablePort::Callback *attach_detach_cb = NULL, bulkio::sri::Compare sriCmp = bulkio::sri::DefaultComparator, bulkio::time::Compare timeCmp = bulkio::time::DefaultComparator, @@ -294,8 +294,6 @@ namespace bulkio { // statistics linkStatistics *stats; - - LOGGER_PTR logger; SRICallback newSRICallback; @@ -567,14 +565,14 @@ namespace bulkio { ConnectionEventListener *disconnectCB=NULL ); OutAttachablePort(std::string port_name, - LOGGER_PTR logger, + LOGGER_PTR new_logger, ConnectionEventListener *connectCB=NULL, ConnectionEventListener *disconnectCB=NULL ); virtual ~OutAttachablePort(); // - // Allow users to set own logger + // Allow users to set own Logger // void setLogger( LOGGER_PTR newLogger ); @@ -750,8 +748,6 @@ namespace bulkio { std::vector filterTable; - LOGGER_PTR logger; - private: boost::shared_ptr< ConnectionEventListener > _connectCB; boost::shared_ptr< ConnectionEventListener > _disconnectCB; diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_base.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_base.h similarity index 98% rename from bulkioInterfaces/libsrc/cpp/bulkio_base.h rename to 
bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_base.h index e9f28888a..3b5979739 100644 --- a/bulkioInterfaces/libsrc/cpp/bulkio_base.h +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_base.h @@ -162,7 +162,7 @@ namespace bulkio { { public: - linkStatistics( std::string &portName, const int nbytes=1 ); + linkStatistics(const std::string& portName, const int nbytes=1); linkStatistics(); @@ -291,7 +291,7 @@ namespace bulkio { /* * Create a time stamp object from the provided input... */ - BULKIO::PrecisionUTCTime create( const double wholeSecs=-1.0, const double fractionalSecs=-1.0, const Int16 tsrc= BULKIO::TCM_CPU ); + BULKIO::PrecisionUTCTime create(double wholeSecs=-1.0, double fractionalSecs=-1.0, CORBA::Short tsrc=BULKIO::TCM_CPU); /* * Create a time stamp object from the current time of day reported by the system @@ -425,8 +425,6 @@ namespace bulkio { }; - - } // end of bulkio namespace diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_callbacks.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_callbacks.h similarity index 100% rename from bulkioInterfaces/libsrc/cpp/bulkio_callbacks.h rename to bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_callbacks.h diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_compat.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_compat.h similarity index 100% rename from bulkioInterfaces/libsrc/cpp/bulkio_compat.h rename to bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_compat.h diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_datablock.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_datablock.h new file mode 100644 index 000000000..31c140416 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_datablock.h @@ -0,0 +1,631 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_datablock_h +#define __bulkio_datablock_h + +#include +#include + +#include + +#include +#include + +#include +#include "bulkio_base.h" + +namespace bulkio { + + class StreamDescriptor; + + /** + * @brief Extended time stamp container. + * + * SampleTimestamp adds additional context to a BULKIO::PrecisionUTCTime + * time stamp. When data is read from an sample-oriented input stream, it + * may span more than one packet, or its start may not be on a packet + * boundary. In these cases, the @a offset and @a synthetic fields allow + * more sophisticated handling of time information. + * + * The @a offset indicates at which sample @a time applies. If the sample + * data is complex, @a offset should be interpreted in terms of complex + * samples (i.e., two real values per index). + * + * A %SampleTimestamp is considered synthetic if it was generated by an + * input stream because there was no received time stamp available at that + * sample offset. This occurs when the prior read did not end on a packet + * boundary; only the first time stamp in a DataBlock can be synthetic. + */ + struct SampleTimestamp + { + /** + * @brief Constructor. + * @param time Time stamp. + * @param offset Sample offset. 
+ * @param synthetic False if @p time was received, true if + * interpolated. + */ + SampleTimestamp(const BULKIO::PrecisionUTCTime& time, size_t offset=0, bool synthetic=false) : + time(time), + offset(offset), + synthetic(synthetic) + { + } + + /// @brief The time at which the referenced sample was created. + BULKIO::PrecisionUTCTime time; + + /// @brief The 0-based index of the sample at which @a time applies. + size_t offset; + + /// @brief Indicates whether @a time was interpolated. + bool synthetic; + }; + + /** + * @brief Container for sample data and stream metadata read from an + * input stream. + * @headerfile bulkio_datablock.h + * + * %DataBlock is a smart pointer-based class that encapsulates the result + * of a read operation on an input stream. It contains both data, which + * varies with the input stream type, and metadata, including signal- + * related information (SRI). + * + * @warning Do not declare instances of this template class directly in user + * code; the template parameter and class name are not considered + * API. Use the type-specific @c typedef instead, such as + * bulkio::FloatDataBlock, or the nested @c typedef BlockType from + * an %InputStream. + * + * DataBlocks have reference semantics; in other words, assigning one block + * to another does not make a copy but rather shares the same sample data + * and metadata. When the last reference expires, the memory is released to + * the system to prevent memory leaks. Additionally, blocks are inexpensive + * to return by value, reassign, or store in nested data types. + * + * The default constructor creates an invalid (null) block. Likewise, input + * stream read operations may return an invalid block if the operation + * cannot be completed. 
When receiving a data block, you must always check + * for validity before accessing the sample data or metadata: + * @code + * if (!block) { + * // handle failure + * } else { + * // access data and metadata + * } + * @endcode + * + * While it is possible to generate DataBlocks in user code, they are usually + * obtained by reading from an input stream. + * @see InputStream::read + * @see InputStream::tryread + */ + template + class DataBlock + { + public: + /** + * @brief Default constructor. + * @see InputStream::read + * @see InputStream::tryread + * + * Create a null block. This block has no data nor metadata associated + * with it. No methods may be called on a null %DataBlock except for + * boolean checks, which will always indicate that the block is not + * valid, and operator==, which returns true if and only if the other + * %DataBlock is also null. + * + * DataBlocks are typically obtained by reading from an input stream. + */ + DataBlock(); + + /** + * @brief Construct a %DataBlock with data. + * @param sri Stream descriptor for the data. + * @param buffer The block data. + * + * Creates a new, valid data block that references the data contained + * in @a buffer. + * + * @note This method is typically used by input streams. + */ + explicit DataBlock(const StreamDescriptor& sri, const T& buffer=T()); + + /** + * @brief Copies this block's data and metadata. + * @returns A new block. + * + * Makes a complete copy of this block, which returns a unique block + * that does not share this block's data or metadata. + * + * If this block is invalid, returns a new null block. + */ + DataBlock copy() const; + + /** + * @brief Gets the stream metadata. + * @returns Read-only reference to stream SRI. + * @pre Block is valid. + * + * The SRI represents the stream metadata at the time the block was + * read. + */ + const BULKIO::StreamSRI& sri() const; + + /** + * @brief Gets the X-axis delta. 
+ * @returns The distance between two adjacent samples in the X + * direction. + * @pre Block is valid. + * + * Because the X-axis is commonly in terms of time (that is, + * @c sri.xunits is @c BULKIO::UNITS_TIME), this is typically the + * reciprocal of the sample rate. + */ + double xdelta() const; + + /** + * @brief Read-only access to block data. + * @returns Read-only reference to the data buffer. + * @pre Block is valid. + */ + const T& buffer() const; + + /** + * @brief Replaces the data contents of this block. + * @param other New data. + * @pre Block is valid. + * + * @note This method is typically used by InputStream. + */ + void buffer(const T& other); + + /** + * @brief Checks whether the SRI has changed since the last read from + * the same stream. + * @returns True if the SRI has changed. False is SRI is unchanged. + * @pre Block is valid. + * @see sriChangeFlags() + */ + bool sriChanged() const; + + /** + * @brief Checks which SRI fields have changed since the last read + * from the same stream. + * @returns Bit mask representing changed fields. + * @pre Block is valid. + * @see sriChanged() + * + * If no SRI change has occurred since the last read, the returned + * value is @c bulkio::sri::NONE (equal to 0). Otherwise, the returned + * value is the bitwise OR of one or more of the following flags: + * @li @c bulkio::sri::HVERSION + * @li @c bulkio::sri::XSTART + * @li @c bulkio::sri::XDELTA + * @li @c bulkio::sri::XUNITS + * @li @c bulkio::sri::SUBSIZE + * @li @c bulkio::sri::YSTART + * @li @c bulkio::sri::YDELTA + * @li @c bulkio::sri::YUNITS + * @li @c bulkio::sri::MODE + * @li @c bulkio::sri::STREAMID + * @li @c bulkio::sri::BLOCKING + * @li @c bulkio::sri::KEYWORDS + * + * The @c HVERSION and @c STREAMID flags are not set in normal + * operation. + */ + int sriChangeFlags() const; + + /** + * @brief Sets the flags for which SRI fields have changed since the + * last read from the same stream. 
+ * @param flags Bit mask representing changed fields. + * @pre Block is valid. + * @see sriChangeFlags() + * + * @note This method is typically called by the input stream. + */ + void sriChangeFlags(int flags); + + /** + * @brief Checks whether the input queue has flushed since the last + * read. + * @returns True if an input queue flush occurred. False if no flush + * has occurred. + * @pre Block is valid. + * + * An input queue flush indicates that the InPort was unable to keep up + * with incoming packets for non-blocking streams and emptied the queue + * to catch up. + * + * The input port reports a flush once, on the next queued packet. This + * is typically reflected in the next %DataBlock read from any input + * stream associated with the port; however, this does not necessarily + * mean that any packets for that stream were discarded. + */ + bool inputQueueFlushed() const; + + /** + * @brief Set the input queue flush flag. + * @param flush True if an input queue flush occurred. False if no + * flush has occurred. + * @pre Block is valid. + * @see inputQueueFlushed() + * + * @note This method is typically called by the input stream. + */ + void inputQueueFlushed(bool flush); + + /** + * @brief Add a time stamp in sorted order. + * @param timestamp The new time stamp. + * @pre Block is valid. + * + * Inserts @a timestamp into the list of timestamps, sorted in sample + * offset order. + * + * If complex() is true, @a timestamp.offset is interpreted in terms of + * complex samples. + * + * @note No validity checks are performed on @a timestamp. + */ + void addTimestamp(const SampleTimestamp& timestamp); + + /** + * @brief Returns the time stamp for the first sample. + * @pre Block is valid. + */ + const BULKIO::PrecisionUTCTime& getStartTime() const; + + /** + * @brief Returns the time stamps for the sample data. + * + * If complex() is true, the offsets of the returned time stamps should + * be interpreted in terms of complex samples. 
+ * + * Valid %DataBlocks obtained by reading from an input stream are + * guaranteed to have at least one time stamp, at offset 0. If the read + * spanned more than one packet, each packet's time stamp is included + * with the packet's respective offset from the first sample. + * + * When the %DataBlock is read from an input stream, only the first + * time stamp may be synthetic. This occurs when the prior read did not + * consume a full packet worth of data. In this case, the input stream + * linearly interpolates the time stamp based on the stream's xdelta + * value. + * + * @note The list is returned as a temporary value. If you plan to + * iterate through the returned list, it must be stored in a + * local variable. + */ + std::list getTimestamps() const; + + /** + * @brief Calculates the difference between the expected and actual + * value of the last time stamp + * @returns Difference, in seconds, between expected and actual value. + * @pre Block is valid. + * @see getMaxTimeDrift() + * @see xdelta() + * + * If this %DataBlock contains more than one time stamp, this method + * compares the last time stamp to a linearly interpolated value based + * on the initial time stamp, the StreamSRI xdelta, and the sample + * offset. This difference gives a rough estimate of the deviation + * between the nominal and actual sample rates over the sample period. + * + * @note If the SRI X-axis is not in units of time, this value has no + * meaning. + */ + double getNetTimeDrift() const; + + /** + * @brief Calculates the largest difference between expected and + * actual time stamps in the block. + * @returns Greatest difference, in seconds, between expected and + * actual time stamps. + * @pre Block is valid. 
+ * @see getNetTimeDrift() + * @see xdelta() + * + * If this %DataBlock contains more than one time stamp, this method + * compares each time stamp to its linearly interpolated equivalent + * time stamp, based on the initial time stamp, the StreamSRI xdelta, + * and the sample offset. The greatest deviation is reported; this + * difference gives a rough indication of how severely the actual + * sample rate deviates from the nominal sample rate on a + * packet-to-packet basis. + * + * @note If the SRI X-axis is not in units of time, this value has no + * meaning. + */ + double getMaxTimeDrift() const; + + /** + * @brief Checks block validity. + * @returns True if this block is invalid. False if the block is + * valid. + * @see operator unspecified_bool_type() + * + * Invalid (null) blocks do not contain any sample data or metadata. An + * input stream read operation may return a null block if there is no + * data available or the operation is interrupted. + * + * If this method returns true, no other methods except comparison or + * assignment may be called. + */ + bool operator! () const + { + return !_impl; + } + + protected: + /// @cond IMPL + struct Impl; + boost::shared_ptr _impl; + + typedef boost::shared_ptr DataBlock::*unspecified_bool_type; + /// @endcond + + public: + /** + * @brief Checks block validity. + * @returns Value convertible to true if this block is valid. + * Value convertible to false if this block is invalid. + * @see operator!() + * + * This operator supports affirmative boolean checks: + * @code + * if (block) { + * // operate on block + * } + * @endcode + * + * If this method returns true, any method may be called on this block. + */ + operator unspecified_bool_type() const; + }; + + /** + * @brief Extended container for sample data types. + * + * %SampleDataBlock provides additional methods for accessing the stored + * data as either real or complex samples. 
+ * + * @warning Do not declare instances of this template class directly in user + * code; the template parameter and class name are not considered + * API. Use the type-specific @c typedef instead, such as + * bulkio::FloatDataBlock, or the nested @c typedef BlockType from + * an %InputStream. + * + * @par Upgrading to 2.2.0 + * Enhancements to BulkIO in REDHAWK 2.2.0 to support shared address space + * components and shared memory IPC changed the ownership model of data + * from strongly-owned to shared. As a result, methods that may provide + * write access to the underlying data must make a local copy of the data + * to preserve data integrity. For best performance, the buffer() and + * cxbuffer() methods should be used in place of data() and cxdata() to + * access block data: + * @code + * const float* data = block.data(); + * size_t count = block.size(); + * @endcode + * becomes: + * @code + * const redhawk::shared_buffer buffer = block.buffer(); + * const float* data = buffer.data(); + * size_t count = buffer.size(); + * @endcode + * + * @par Real vs. Complex Samples + * Because BulkIO streams support both real and complex sample data, blocks + * store data internally as an array of real samples, and provide methods + * that allow the user to interpret the data as either real or complex. + * When the complex mode changes, this is typically indicated with the + * corresponding SRI change flag (see sriChangeFlags()). 
On a per-block + * basis, the complex() method indicates whether the sample data is + * intended to be handled as real or complex: + * @code + * if (block.complex()) { + * const redhawk::shared_buffer > buffer = block.cxbuffer(); + * for (size_t index = 0; index < buffer.size(); ++index) { + * // do something with buffer[index] + * } + * } else { + * const redhawk::shared_buffer buffer = block.buffer(); + * for (size_t index = 0; index < buffer.size(); ++index) { + * // do something with buffer[index] + * } + * } + * @endcode + */ + template + class SampleDataBlock : public DataBlock > + { + public: + /// @brief The native type of a real sample. + typedef T ScalarType; + + /// @brief The native type of a complex sample. + typedef std::complex ComplexType; + + /// @brief The shared_buffer type for real data. + typedef redhawk::shared_buffer ScalarBuffer; + + /// @brief The shared_buffer type for complex data. + typedef redhawk::shared_buffer ComplexBuffer; + + /** + * @brief Default constructor. + * @see InputStream::read + * @see InputStream::tryread + * + * Create a null block. This block has no data nor metadata associated + * with it. No methods may be called on a null %SampleDataBlock except + * for boolean checks, which will always indicate that the block is not + * valid, and operator==, which returns true if and only if the other + * %DataBlock is also null. + * + * SampleDataBlocks are typically obtained by reading from an input + * stream. + */ + SampleDataBlock(); + + /** + * @brief Construct a %SampleDataBlock with a backing buffer. + * @param sri Stream descriptor for the data. + * @param buffer The %shared_buffer containing sample data. + * + * Creates a new, valid data block that references the data contained + * in @a buffer. + * + * @note This method is typically used by input streams. + */ + explicit SampleDataBlock(const StreamDescriptor& sri, const ScalarBuffer& buffer=ScalarBuffer()); + + /** + * @deprecated Do not use. 
+ */ + SampleDataBlock(const BULKIO::StreamSRI& sri, size_t size=0); + + /** + * @brief Copies this block's data and metadata. + * @returns A new block. + * + * Makes a complete copy of this block, which returns a unique block + * that does not share this block's data or metadata. + * + * If this block is invalid, returns a new null block. + */ + SampleDataBlock copy() const; + + /** + * @deprecated Do not use read/write access methods. + */ + ScalarType* data(); + + /** + * @deprecated Use buffer() for access to real sample data. + */ + const ScalarType* data() const; + + /** + * @brief Gets the size of the data in terms of real samples. + * @returns Number of real samples. + * @pre Block is valid. + * @see cxsize() + */ + size_t size() const; + + /** + * @deprecated Do not use. + */ + void resize(size_t count); + + /** + * @brief Checks whether data should be interpreted as complex + * samples. + * @returns True if data is complex. False if data is real. + * @pre Block is valid. + * + * The sample data is considered complex if @c sri.mode is non-zero. + * + * If the data is complex, the offsets for the time stamps returned by + * getTimestamps() are in terms of complex samples. + */ + bool complex() const; + + /** + * @deprecated Do not use read/write access methods. + */ + ComplexType* cxdata(); + + /** + * @deprecated Use cxbuffer() for access to complex sample data. + */ + const ComplexType* cxdata() const; + + /** + * @brief Gets the size of the data in terms of complex samples. + * @returns Number of complex samples. + * @pre Block is valid. + * @see size() + */ + size_t cxsize() const; + + /** + * @deprecated Do not use. + */ + void swap(std::vector& other); + + /** + * @brief Read-only access to real sample data. + * @returns Read-only reference to %shared_buffer of real samples. + * @pre Block is valid. + * @see cxbuffer() const + * + * Interprets the internal buffer as real samples. The underlying data + * may be shared with multiple consumers. 
+ * + * To interpret the data as complex samples, use cxbuffer() const. + */ + const ScalarBuffer& buffer() const; + + /** + * @brief Read-only access to complex sample data. + * @returns %shared_buffer of complex samples. + * @pre Block is valid. + * @see buffer() const + * + * Interprets the internal buffer as complex samples. The underlying + * data may be shared with multiple consumers. + * + * To interpret the data as real samples, use buffer() const. + */ + ComplexBuffer cxbuffer() const; + + using DataBlock::buffer; + + private: + /// @cond IMPL + typedef DataBlock Base; + using Base::_impl; + /// @endcond + }; + + typedef SampleDataBlock CharDataBlock; + typedef SampleDataBlock OctetDataBlock; + typedef SampleDataBlock ShortDataBlock; + typedef SampleDataBlock UShortDataBlock; + typedef SampleDataBlock LongDataBlock; + typedef SampleDataBlock ULongDataBlock; + typedef SampleDataBlock LongLongDataBlock; + typedef SampleDataBlock ULongLongDataBlock; + typedef SampleDataBlock FloatDataBlock; + typedef SampleDataBlock DoubleDataBlock; + typedef DataBlock BitDataBlock; + typedef DataBlock StringDataBlock; + +} // end of bulkio namespace + +#endif diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_datatransfer.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_datatransfer.h new file mode 100644 index 000000000..a8943d461 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_datatransfer.h @@ -0,0 +1,188 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_datatransfer_h +#define __bulkio_datatransfer_h + +#include + +#include +#include +#include + +#include + +namespace bulkio { + + // + // DataTransfer + // + // This is the packet of information returned from an InPort's getPacket method. The DataTransferTraits class + // defines the type context for this structure. + // + // This class tries to implement as efficient as possible data movement from the supplied PortSequenceType object. + // The supplied PortSequenceType's data buffer is used to set the start/end/length attributes of the dataBuffer object that will + // be used by the component. This class takes ownership of the PortSequenceType's memory buffer and assigns it the + // the dataBuffer's start address. The DataBufferType allows developers to use standard + // stl iterators and algorithms against the data in this buffer. + // + // All remaining member variables use each type's assignment/copy methods. It is assumed the + // PrecisionUTCTime and StreamSRI object will perform a "deep" copy. 
+ // + // + template + struct DataTransferBase { + DataTransferBase(const BufferType& data, const BULKIO::PrecisionUTCTime& T, bool EOS, + const char* streamID, const BULKIO::StreamSRI& H, bool sriChanged, bool inputQueueFlushed) : + dataBuffer(data), + T(T), + EOS(EOS), + streamID(streamID), + SRI(H), + sriChanged(sriChanged), + inputQueueFlushed(inputQueueFlushed) + { + } + + BufferType dataBuffer; + BULKIO::PrecisionUTCTime T; + bool EOS; + std::string streamID; + BULKIO::StreamSRI SRI; + bool sriChanged; + bool inputQueueFlushed; + + redhawk::PropertyMap& getKeywords() + { + return redhawk::PropertyMap::cast(SRI.keywords); + } + + const redhawk::PropertyMap& getKeywords() const + { + return redhawk::PropertyMap::cast(SRI.keywords); + } + }; + + template + struct DataTransfer : public DataTransferBase + { + typedef BufferType DataBufferType; + + DataTransfer(const BufferType& data, const BULKIO::PrecisionUTCTime& T, bool EOS, + const char* streamID, const BULKIO::StreamSRI& H, bool sriChanged, bool inputQueueFlushed) : + DataTransferBase(data, T, EOS, streamID, H, sriChanged, inputQueueFlushed) + { + } + }; + + template + struct DataTransfer< std::vector > : public DataTransferBase< std::vector > { + typedef std::vector DataBufferType; + + // + // Construct a DataTransfer object to be returned from an InPort's getPacket method + // + DataTransfer(const redhawk::shared_buffer& data, const BULKIO::PrecisionUTCTime& T, bool EOS, + const char* streamID, const BULKIO::StreamSRI& H, bool sriChanged, bool inputQueueFlushed) : + DataTransferBase(DataBufferType(), T, EOS, streamID, H, sriChanged, inputQueueFlushed) + { + // To preserve data integrity, copy the contents of the shared + // buffer to the vector + this->dataBuffer.assign(data.begin(), data.end()); + } + + template + DataTransfer(const _CORBA_Sequence& data, const BULKIO::PrecisionUTCTime& T, bool EOS, + const char* streamID, const BULKIO::StreamSRI& H, bool sriChanged, bool inputQueueFlushed) : + 
DataTransferBase(DataBufferType(), T, EOS, streamID, H, sriChanged, inputQueueFlushed) + { + assign(this->dataBuffer, data); + } + + private: + template + static void assign(std::vector& dest, const _CORBA_Sequence& src) + { + if (src.release()) { + _CORBA_Sequence& in = const_cast<_CORBA_Sequence&>(src); + const size_t length = in.length(); + typedef typename std::_Vector_base::_Vector_impl* VectorPtr; + VectorPtr vectorPtr = (VectorPtr)(&dest); + vectorPtr->_M_start = reinterpret_cast(in.get_buffer(1)); + vectorPtr->_M_finish = vectorPtr->_M_start + length; + vectorPtr->_M_end_of_storage = vectorPtr->_M_finish; + } else { + dest.assign(src.get_buffer(), src.get_buffer() + src.length()); + } + } + }; + + template <> + struct DataTransfer : public DataTransferBase { + typedef std::string DataBufferType; + + // + // Construct a DataTransfer object to be returned from an InPort's getPacket method + // + DataTransfer(const std::string& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const char* streamID, + const BULKIO::StreamSRI& H, bool sriChanged, bool inputQueueFlushed) : + DataTransferBase(data, T, EOS, streamID, H, sriChanged, inputQueueFlushed) + { + } + + DataTransfer(const char* data, const BULKIO::PrecisionUTCTime& T, bool EOS, const char* streamID, + const BULKIO::StreamSRI& H, bool sriChanged, bool inputQueueFlushed) : + DataTransferBase(toString(data), T, EOS, streamID, H, sriChanged, inputQueueFlushed) + { + } + + DataTransfer(const char* data, bool EOS, const char* streamID, const BULKIO::StreamSRI& H, + bool sriChanged, bool inputQueueFlushed) : + DataTransferBase(toString(data), BULKIO::PrecisionUTCTime(), EOS, streamID, H, + sriChanged, inputQueueFlushed) + { + } + + private: + static inline std::string toString(const char* src) + { + if (!src) { + return std::string(); + } + return src; + } + }; + + + typedef DataTransfer > CharDataTransfer; + typedef DataTransfer > OctetDataTransfer; + typedef DataTransfer > ShortDataTransfer; + typedef 
DataTransfer > UShortDataTransfer; + typedef DataTransfer > LongDataTransfer; + typedef DataTransfer > ULongDataTransfer; + typedef DataTransfer > LongLongDataTransfer; + typedef DataTransfer > ULongLongDataTransfer; + typedef DataTransfer > FloatDataTransfer; + typedef DataTransfer > DoubleDataTransfer; + typedef DataTransfer StringDataTransfer; + typedef DataTransfer BitDataTransfer; +} + +#endif // __bulkio_datatransfer_h diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_in_port.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_in_port.h new file mode 100644 index 000000000..91ad02a4d --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_in_port.h @@ -0,0 +1,668 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef __bulkio_in_port_h +#define __bulkio_in_port_h + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "bulkio_base.h" +#include "bulkio_typetraits.h" +#include "bulkio_datatransfer.h" +#include "bulkio_in_stream.h" +#include "bulkio_callbacks.h" + +namespace bulkio { + + template + class LocalTransport; + + template + class InputTransport; + + template + struct InStreamTraits { + typedef BufferedInputStream InStreamType; + }; + + template <> + struct InStreamTraits { + typedef InXMLStream InStreamType; + }; + + template <> + struct InStreamTraits { + typedef InFileStream InStreamType; + }; + + // + // InPort + // Base template for data transfers between BULKIO ports. This class is defined by 2 trait classes + // DataTransferTraits: This template trait defines the DataTranfer object that is returned by the getPacket method + // PortTraits - This template provides the context for the port's middleware transport classes and they base data types + // passed between port objects + // + template + class InPort : public redhawk::NegotiableProvidesPortBase +#ifdef BEGIN_AUTOCOMPLETE_IGNORE + , public virtual CorbaTraits::POATypeExt +#endif + { + public: + // The CORBA interface of this port (nested typedef for template parameter) + typedef PortType CorbaType; + + // Transport Sequence Type use to during push packet + typedef typename CorbaTraits::SequenceType PortSequenceType; + + // + // Transport type used by this port + // + typedef typename CorbaTraits::TransportType TransportType; + + // + // Declaration of DataTransfer class from TransportType trait and DataBuffer type trait + // + typedef typename BufferTraits::VectorType VectorType; + typedef DataTransfer DataTransferType; + + // backwards compatible definition + typedef DataTransferType dataTransfer; + + // Input stream interface used by this port + typedef typename InStreamTraits::InStreamType StreamType; + + // List type for input streams provided by this 
port + typedef std::list StreamList; + + // + // ~InPort - call the virtual destructor to remove all allocated memebers + // + virtual ~InPort(); + + /* + * getPacket - interface used by components to grab data from the port's internal queue object for processing. The timeout parameter allows + * the calling component to perform blocking and non-blocking retrievals. + * + * @param timeout - timeout == bulkio::Const::NON_BLOCKING (0.0) non-blocking io + * timeout == bulkio::Const::BLOCKING (-1) block until data arrives or lock is broken on exit + * timeout > 0.0 wait until time expires. + * @return dataTranfer * pointer to a data transfer object from the port's work queue + * @return NULL - no data available + */ + DataTransferType *getPacket(float timeout); + + /* + * getPacket - interface used by components to grab data from the port's internal queue object for a specified streamID + * + * @param timeout - timeout == bulkio::Const::NON_BLOCKING (0.0) non-blocking io + * timeout == bulkio::Const::BLOCKING (-1) block until data arrives or lock is broken on exit + * timeout > 0.0 wait until time expires. + * @param streamID stream id to match on for when pulling data from the port's work queue + * @return dataTranfer * pointer to a data transfer object from the port's work queue + * @return NULL - no data available + */ + DataTransferType *getPacket(float timeout, const std::string& streamID); + + // + // BULKIO IDL interface for pushing Floating Point vectors between components + // + + /* + * pushSRI - called by the source component when SRI data about the stream changes, the data flow policy is this activity + * will occurr first before any data flows to the component. 
+ * + * @param H - Incoming StreamSRI object that defines the state of the data flow portion of the stream (pushPacket) + */ + virtual void pushSRI(const BULKIO::StreamSRI& H); + + // + // Port Statistics Interface + // + + /* + * turn on/off the port monitoring capability + */ + void enableStats(bool enable); + + // + // state - returns the current state of the port as follows: + // BULKIO::BUSY - internal queue has reached FULL state + // BULKIO::IDLE - there are no items on the internal queue + // BULKIO::ACTIVE - there are items on the queue + // + // @return BULKIO::PortUsageType - current state of port + // + virtual BULKIO::PortUsageType state(); + + // + // statisics - returns a PortStatistics object for this provides port + // PortStatistics: + // portname - name of port + // elementsPerSecond - number of elements per second (element is based on size of port type ) + // bitsPerSecond - number of bits per second (based on element storage size in bits) + // callsPerSecond - history window -1 / time between calls to this method + // streamIds - list of active stream id values + // averageQueueDepth - the average depth of the queue for this port + // timeSinceLastCall - time since this method as invoked and the last pushPacket happened + // Keyword Sequence - deprecated + // + // @return BULKIO::PortStatistics - current data flow metrics collected for the port. 
+ // the caller of the method is responsible for freeing this object + // + virtual BULKIO::PortStatistics* statistics(); + + // + // activeSRIs - returns a sequence of BULKIO::StreamSRI objectsPort + // + // @return BULKIO::StreamSRISequence - list of activte SRI objects for this port + // the caller of the method is responsible for freeing this object + // + virtual BULKIO::StreamSRISequence* activeSRIs(); + + /* + * getCurrentQueueDepth - returns the current number of elements in the queue + * + * @return int - number of items in the queue + */ + int getCurrentQueueDepth(); + + /* + * getMaxQueueDepth - returns the maximum size of the queue , if this water mark is reached the queue will be purged, and the + * component of the port will be notified in getPacket method + * @return int - maximum size the queue can reach before purging occurs + */ + int getMaxQueueDepth(); + + /* + * setMaxQueueDepth - allow users of this port to modify the maximum number of allowable vectors on the queue. + */ + void setMaxQueueDepth(int newDepth); + + // + // Allow the component to control the flow of data from the port to the component. Block will restrict the flow of data back into the + // component. Call in component's stop method + // + void block(); + + // + // Allow the component to control the flow of data from the port to the component. Unblock will release the flow of data back into the + // component. Called in component's start method. + // + void unblock(); + + // + // Support function for automatic component-managed start. Calls unblock. + // + virtual void startPort(); + + // + // Support function for automatic component-managed stop. Calls block. + // + virtual void stopPort(); + + /* + * blocked + * + * @return bool returns state of breakBlock variable used to release any upstream blocking pushPacket calls + */ + bool blocked(); + + /** + * @brief Registers a callback for new streams. + * @param target Class instance. + * @param func Member function pointer. 
+ */ + template + void addStreamListener(Target target, Func func) { + streamAdded.add(target, func); + } + + /** + * @brief Unregisters a callback for new streams. + * @param target Class instance. + * @param func Member function pointer. + */ + template + void removeStreamListener(Target target, Func func) { + streamAdded.remove(target, func); + } + + /** + * @brief Gets the stream that should be used for the next basic read. + * @param timeout Seconds to wait for a stream; a negative value waits + * indefinitely. + * @returns Input stream ready for reading on success. + * @returns Null input stream if timeout expires or port is stopped. + */ + StreamType getCurrentStream(float timeout=bulkio::Const::BLOCKING); + + /** + * @brief Get the active stream with the given stream ID. + * @param streamID Stream identifier. + * @returns Input stream for @p streamID if it exists. + * @returns Null input stream if no such stream ID exits. + */ + StreamType getStream(const std::string& streamID); + + /** + * @brief Gets the current set of active streams. + * @returns List of streams. + */ + StreamList getStreams(); + + /* + * Assign a callback for notification when a new SRI StreamId is received + */ + template + inline void setNewStreamListener(Target target, Func func) { + newStreamCallback.assign(target, func); + } + + /* + * Assign a callback for notification when a new SRI StreamId is received + */ + template + inline void setNewStreamListener(Func func) { + newStreamCallback = func; + } + + void setNewStreamListener(SriListener *newListener); + + // Return the interface that this Port supports + std::string getRepid () const; + + protected: + // + // InPort - creates a provides port that can accept data vectors from a source + // + // @param port_name name of the port taken from .scd.xml file + // @param sriCmp comparator function that accepts to StreamSRI objects and compares their contents, + // if all members match then return true, otherwise false. 
This is used during the pushSRI method + // @param newStreamCB interface that is called when new SRI.streamID is received + InPort(std::string port_name, + LOGGER_PTR logger, + bulkio::sri::Compare sriCmp = bulkio::sri::DefaultComparator, + SriListener *newStreamCB = NULL ); + + typedef typename BufferTraits::BufferType BufferType; + + struct Packet { + Packet(const BufferType& buffer, const BULKIO::PrecisionUTCTime& T, bool EOS, const StreamDescriptor& SRI, bool sriChanged, bool inputQueueFlushed) : + buffer(buffer), + T(T), + EOS(EOS), + SRI(SRI), + sriChanged(sriChanged), + inputQueueFlushed(inputQueueFlushed), + streamID(SRI.streamID()) + { + } + + BufferType buffer; + BULKIO::PrecisionUTCTime T; + bool EOS; + StreamDescriptor SRI; + bool sriChanged; + bool inputQueueFlushed; + std::string streamID; + }; + + // + // FIFO of data vectors and time stamps waiting to be processed by a component + // + typedef std::deque PacketQueue; + PacketQueue packetQueue; + + // + // SRI compare method used by pushSRI method to determine how to match incoming SRI objects and streamsID + // + bulkio::sri::Compare sri_cmp; + + // + // Callback for notifications when new SRI streamID's are received + // + redhawk::callback newStreamCallback; + + // + // List of SRI objects managed by StreamID + // + typedef std::map > SriTable; + SriTable currentHs; + + // + // synchronizes access to the workQueue member + // + MUTEX dataBufferLock; + CONDITION dataAvailable; + CONDITION queueAvailable; + size_t maxQueue; + + // + // synchronizes access to the currentHs member + // + MUTEX sriUpdateLock; + + // + // used to control data flow from getPacket call + // + bool breakBlock; + + // + // Transfers blocking request from data provider to this port that will block pushPacket calls if queue has reached a maximum value + // + bool blocking; + + // + // Statistics provider object used by the port monitoring interface + // + linkStatistics *stats; + + // + // Synchronized waiter list for use in 
poll() + // + redhawk::signal packetWaiters; + + // + // Notification for new stream creation + // + ossie::notification streamAdded; + + // + // Streams that are currently active + // + typedef std::map StreamMap; + StreamMap streams; + boost::mutex streamsMutex; + + // Streams that have the same stream ID as an active stream, when an + // end-of-stream has been queued but not yet read + std::multimap pendingStreams; + + // Allow non-CORBA data ingress (shared memory, VITA49) + friend class InputTransport; + + // + // Queues a packet received via pushPacket; in most cases, this method maps + // exactly to pushPacket, except for dataFile + // + void queuePacket(const BufferType& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const std::string& streamID); + + // Allow local transport classes to directly queue packets + friend class LocalTransport; + + // + // Fetches the next packet for the given stream ID, blocking for up to + // timeout seconds for one to be available + // + Packet* nextPacket(float timeout, const std::string& streamID); + + // + // Returns a pointer to the first packet in the queue, blocking for up to + // timeout seconds for one to be available + // + Packet* peekPacket(float timeout, boost::unique_lock& lock); + + Packet* fetchPacket(const std::string& streamID); + + // Discard currently queued packets for the given stream ID, up to the + // first end-of-stream + void discardPacketsForStream(const std::string& streamID); + + friend class InputStream; + size_t samplesAvailable(const std::string& streamID, bool firstPacket); + + void createStream(const std::string& streamID, const StreamDescriptor& sri); + void removeStream(const std::string& streamID); + + bool isStreamActive(const std::string& streamID); + bool isStreamEnabled(const std::string& streamID); + + // Purges the input queue, discarding existing packets while preserving + // end-of-stream and SRI change flags; must hold both dataBufferLock and + // sriUpdateLock + void 
_flushQueue(); + + // Checks whether the packet should be queued or discarded; also handles + // notifying disabled streams of end-of-stream if the packet is being + // discarded + bool _acceptPacket(const std::string& streamID, bool EOS); + + // Stops tracking the SRI for streamID, returning true if the stream was + // the last blocking stream, indicating that blocking can be turned off + // for the work queue + bool _handleEOS(const std::string& streamID); + + // + // Returns the total number of elements of data in a pushPacket call, for + // statistical tracking; enables XML and File specialization, which have + // different notions of size + // + int _getElementLength(const BufferType& data); + }; + + template + class InNumericPort : public InPort + { + public: + // Transport Sequence Type use to during push packet + typedef typename InPort::PortSequenceType PortSequenceType; + + // + // Transport type used by this port + // + typedef typename InPort::TransportType TransportType; + + // + // Native type mapping of TransportType + // + typedef typename NativeTraits::NativeType NativeType; + + typedef typename InPort::StreamType StreamType; + + typedef typename InPort::StreamList StreamList; + + // + // InNumericPort - creates a provides port that can accept data vectors from a source + // + // @param port_name name of the port taken from .scd.xml file + // @param sriCmp comparator function that accepts to StreamSRI objects and compares their contents, + // if all members match then return true, otherwise false. 
This is used during the pushSRI method + // @param newStreamCB interface that is called when new SRI.streamID is received + InNumericPort(std::string port_name, + LOGGER_PTR logger, + bulkio::sri::Compare sriCmp = bulkio::sri::DefaultComparator, + SriListener *newStreamCB = NULL); + + InNumericPort(std::string port_name, + bulkio::sri::Compare sriCmp = bulkio::sri::DefaultComparator, + SriListener *newStreamCB = NULL); + + InNumericPort(std::string port_name, void *); + + // + // pushPacket called by the source component when pushing a vector of data into a component. This method will save off the data + // vector, timestamp, EOS and streamID onto a queue for consumption by the component via the getPacket method + // + // @param data - the vector of data to be consumed + // @param T - a time stamp for the data, the time represents the associated time value for the first entry of the data vector + // @param EOS - indicator that the stream has ended, (stream is identified by streamID) + // @param streamID - name of the stream the vector and stream context data are associated with + virtual void pushPacket(const PortSequenceType& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID); + + // + // Stream-based input API + // + + StreamList pollStreams(float timeout); + StreamList pollStreams(StreamList& pollset, float timeout); + + StreamList pollStreams(size_t samples, float timeout); + StreamList pollStreams(StreamList& pollset, size_t samples, float timeout); + + protected: + // Shared buffer type used for local transfers + typedef typename InPort::BufferType BufferType; + + typedef InPort super; + using super::packetWaiters; + using super::_portLog; + typedef typename super::StreamMap StreamMap; + using super::streams; + using super::streamsMutex; + typedef typename super::Packet Packet; + + StreamList getReadyStreams(size_t samples); + }; + + class InBitPort : public InPort + { + public: + InBitPort(const std::string& name, LOGGER_PTR 
logger=LOGGER_PTR()); + + virtual void pushPacket(const BULKIO::BitSequence& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID); + }; + + // + // InStringPort + // Base template for simple data transfers between Input/Output ports. This class is defined by 2 trait classes + // DataTransferTraits: This template trait defines the DataTranfer object that is returned by the getPacket method + // PortTraits - This template provides the context for the port's middleware transport classes and they base data types + // passed between port objects + // + // Both classes have a simlar types of TransportType and SequenceType and the DataTransferTraits defines the the type for the + // data buffer used to store incoming streams of data. These 2 class should be combined to described InputPortTraits. + // + + + class InFilePort : public InPort + { + public: + // + // InStringPort - creates a provides port that can accept floating point vectors from a source + // + // @param port_name name of the port taken from .scd.xml file + // @param SriCompareFunc comparator function that accepts to StreamSRI objects and compares their contents, + // if all members match then return true, otherwise false. This is used during the pushSRI method + // @param newStreamCB interface that is called when new SRI.streamID is received + + InFilePort(std::string port_name, + LOGGER_PTR logger, + bulkio::sri::Compare=bulkio::sri::DefaultComparator, + SriListener* newStreamCB=0); + + InFilePort(std::string port_name, + bulkio::sri::Compare=bulkio::sri::DefaultComparator, + SriListener* newStreamCB=0); + + InFilePort(std::string port_name, void*); + + // + // pushPacket called by the source component when pushing a vector of data into a component. 
This method will save off the data + // vector, timestamp, EOS and streamID onto a queue for consumption by the component via the getPacket method + // + // @param data - the vector of data to be consumed + // @param T - a time stamp for the data, the time represents the associated time value for the first entry of the data vector + // @param EOS - indicator that the stream has ended, (stream is identified by streamID) + // @param streamID - name of the stream the vector and stream context data are associated with + virtual void pushPacket(const char *data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID); + }; + + + class InXMLPort : public InPort + { + public: + InXMLPort(std::string port_name, LOGGER_PTR logger, + bulkio::sri::Compare=bulkio::sri::DefaultComparator, + SriListener* newStreamCB=NULL); + + InXMLPort(std::string port_name, + bulkio::sri::Compare=bulkio::sri::DefaultComparator, + SriListener* newStreamCB=NULL); + + InXMLPort(std::string port_name, void*); + + // + // pushPacket called by the source component when pushing a vector of data into a component. 
This method will save off the data + // vector, timestamp, EOS and streamID onto a queue for consumption by the component via the getPacket method + // + // @param data - the vector of data to be consumed + // @param EOS - indicator that the stream has ended, (stream is identified by streamID) + // @param streamID - name of the stream the vector and stream context data are associated with + virtual void pushPacket(const char *data, CORBA::Boolean EOS, const char* streamID); + + void pushPacket(const char* data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) __attribute__ ((deprecated)); + }; + + + /* + Provides Port Definitions for All Bulk IO pushPacket Port definitions + * + */ + // Bulkio char (Int8) input + typedef InNumericPort InCharPort; + // Bulkio octet (UInt8) input + typedef InNumericPort InOctetPort; + // Bulkio Int8 input + typedef InCharPort InInt8Port; + // Bulkio UInt8 input + typedef InOctetPort InUInt8Port; + // Bulkio short (Int16) input + typedef InNumericPort InShortPort; + // Bulkio unsigned short (UInt16) input + typedef InNumericPort InUShortPort; + // Bulkio Int16 input + typedef InShortPort InInt16Port; + // Bulkio UInt16 input + typedef InUShortPort InUInt16Port; + // Bulkio long (Int32) input + typedef InNumericPort InLongPort; + // Bulkio unsigned long (UInt32) input + typedef InNumericPort InULongPort; + // Bulkio Int32 input + typedef InLongPort InInt32Port; + // Bulkio UInt32 input + typedef InULongPort InUInt32Port; + // Bulkio long long (Int64) input + typedef InNumericPort InLongLongPort; + // Bulkio unsigned long long (UInt64) input + typedef InNumericPort InULongLongPort; + // Bulkio Int64 input + typedef InLongLongPort InInt64Port; + // Bulkio UInt64 input + typedef InULongLongPort InUInt64Port; + // Bulkio float input + typedef InNumericPort InFloatPort; + // Bulkio double input + typedef InNumericPort InDoublePort; + // Maintained for backwards compatibility + typedef InFilePort InURLPort; + +} 
// end of bulkio namespace + + +#endif diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_in_stream.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_in_stream.h new file mode 100644 index 000000000..a83847020 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_in_stream.h @@ -0,0 +1,577 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_in_stream_h +#define __bulkio_in_stream_h + +#include +#include + +#include + +#include "bulkio_typetraits.h" +#include "bulkio_datablock.h" +#include "bulkio_stream.h" + +namespace bulkio { + + template + class InPort; + + template + struct BlockTraits { + typedef SampleDataBlock::NativeType> DataBlockType; + }; + + template <> + struct BlockTraits { + typedef BitDataBlock DataBlockType; + }; + + template <> + struct BlockTraits { + typedef StringDataBlock DataBlockType; + }; + + template <> + struct BlockTraits { + typedef StringDataBlock DataBlockType; + }; + + /** + * @brief Basic BulkIO input stream class. + * @headerfile bulkio_in_stream.h + * + * %InputStream is a smart pointer-based class that encapsulates a single + * BulkIO stream for reading. 
It is associated with the input port that + * created it, providing a file-like API on top of the classic BulkIO + * getPacket model. + * + * @warning Do not declare instances of this template class directly in user + * code; the template parameter and class name are not considered + * API. Use the type-specific @c typedef instead, such as + * bulkio::InFloatStream, or the nested @c typedef StreamType from + * an %InPort. + * + * Notionally, a BulkIO stream represents a contiguous data set and its + * associated signal-related information (SRI), uniquely identified by a + * stream ID, from creation until close. The SRI may vary over time, but the + * stream ID is immutable. Only one stream with a given stream ID can be + * active at a time. + * + * The %InputStream class itself is a lightweight handle; it is inexpensive + * to copy or store in local variables or nested data types. Assigning one + * %InputStream to another does not copy the stream state but instead + * aliases both objects to the same underlying stream. + * + * The default constructor creates an invalid "null" %InputStream that cannot + * be used for any real operations, similar to a null pointer. A stream may + * be checked for validity with boolean tests: + * + * @code + * if (!stream) { + * // handle failure + * } + * @endcode + * or + * @code + * if (stream) { + * // operate on stream + * } + * @endcode + * + * + * Input streams are managed by the input port, and created in response to + * the arrival of a new SRI. Valid input streams are obtained by either + * querying the port, or registering a callback. + * @see InPort::getCurrentStream(float) + * @see InPort::getStream(const std::string&) + * @see InPort::getStreams() + * @see InPort::addStreamListener(Target,Func) + * + * @par End-of-Stream + * In normal usage, reading continues until the end of the stream is + * reached, at which point all future read operations will fail + * immediately. 
When a read fails, it is incumbent upon the caller to check + * the stream's end-of-stream state via eos(). Once the end-of-stream has + * been acknowledged, either by an explicit check or with a subsequent + * failed read, the stream is removed from the input port. If the input + * port has another stream with the same streamID pending, it will become + * active. + * @par + * Although the input port may have received and end-of-stream packet, this + * state is not reflected in eos(). As with Unix pipes or sockets, the + * recommended pattern is to continually read until a failure occurs, + * handling the failure as needed. + */ + template + class InputStream : public StreamBase { + public: + /** + * @brief The native type of a real sample, or the real or imaginary + * component of a complex sample. + */ + typedef typename NativeTraits::NativeType NativeType; + + /// @brief The type of data block returned by read methods on this stream. + typedef typename BlockTraits::DataBlockType DataBlockType; + + /** + * @brief Reads the next packet. + * @returns Valid data block if successful. + * @returns Null data block if the read failed. + * @pre Stream is valid. + * + * Blocking read of the next packet for this stream. + * + * Returns a null data block immediately if: + * @li End-of-stream has been reached + * @li The input port is stopped + */ + DataBlockType read(); + + /** + * @brief Non-blocking read of the next packet. + * @returns Valid data block if successful. + * @returns Null data block if the read failed. + * @pre Stream is valid. + * @see read() + * + * Non-blocking version of read(), returning a null data block + * immediately when no data is available. + */ + DataBlockType tryread(); + + /** + * @brief Checks whether this stream can receive data. + * @returns True if this stream is enabled. False if stream is + * disabled. + * @pre Stream is valid. 
+ * @see enable() + * @see disable() + * + * If a stream is enabled, packets received for its stream ID are + * queued in the input port, and the stream may be used for reading. + * Conversely, packets for a disabled stream are discarded, and no + * reading may be performed. + */ + bool enabled() const; + + /** + * @brief Enable this stream for reading data. + * @pre Stream is valid. + * @see enabled() + * @see disable() + * + * The input port will resume queuing packets for this stream. + */ + void enable(); + + /** + * @brief Disable this stream for reading data. + * @pre Stream is valid. + * @see enable() + * @see enabled() + * + * The input port will discard any packets that are currently queued + * for this stream, and all future packets for this stream will be + * discarded upon receipt until an end-of-stream is received. + * + * Disabling unwanted streams may improve performance and queueing + * behavior by reducing the number of queued packets on a port. + */ + void disable(); + + /** + * @brief Checks whether this stream has ended. + * @returns True if this stream has reached the end. False if the end + * of stream has not been reached. + * @pre Stream is valid. + * + * A stream is considered at the end when it has read and consumed all + * data up to the end-of-stream marker. Once end-of-stream has been + * reached, all read operations will fail immediately, as no more data + * will ever be received for this stream. + * + * The recommended practice is to check @a eos any time a read + * operation fails or returns fewer samples than requested. When the + * end-of-stream is acknowledged, either by checking @a eos or when + * successive reads fail due to an end-of-stream, the stream is removed + * from the input port. If the input port has another stream with the + * same streamID pending, it will become active. 
+ */ + bool eos(); + + protected: + /// @cond IMPL + typedef InPort InPortType; + + class Impl; + Impl& impl(); + const Impl& impl() const; + + typedef const Impl& (InputStream::*unspecified_bool_type)() const; + + InputStream(); + InputStream(const boost::shared_ptr& impl); + + // Allow matching InPort class to create instances of this stream type + friend class InPort; + InputStream(const StreamDescriptor& sri, InPortType* port); + + bool hasBufferedData(); + + void close(); + /// @endcond + public: + /** + * @brief Checks stream validity. + * @returns Value convertible to true if this stream is valid. + * Value convertible to false if this stream is invalid. + * @see StreamBase::operator!() const + * + * This operator supports affirmative boolean tests: + * @code + * if (stream) { + * // operate on stream + * } + * @endcode + * + * If this method returns true, it is safe to call any method. + */ + operator unspecified_bool_type() const; + }; + + /** + * @brief BulkIO input stream class with data buffering. + * @headerfile bulkio_in_stream.h + * + * %BufferedInputStream extends InputStream with additional methods for + * data buffering and overlapped reads. + * + * @par Data Buffering + * Often, signal processing algorithms prefer to work on regular, + * fixed-size blocks of data. However, because the producer is working + * independently, data may be received in entirely different packet + * sizes. For this use case, %BufferedInputStream provides a read(size_t) + * method that frees the user from managing their own data buffering. + * @par + * To maintain the requested size, partial packets may be buffered, or a + * read may span multiple packets. Packets are fetched from the input port + * as needed. If an SRI change or input queue flush is encountered during + * the fetch, the operation will stop and return the data up to that + * point. 
The next read operation will continue at the beginning of the + * packet that contains the new SRI or input queue flush flag. + * + * @par Time Stamps + * The data block from a successful read always includes as least one time + * stamp, at a sample offset of 0. Because buffered reads may not begin on + * a packet boundary, the input stream can interpolate a time stamp based + * on the SRI @a xdelta value and the prior time stamp. When this occurs, + * the time stamp will be marked as "synthetic." + * @par + * Reads that span multiple packets will contain more than one time stamp. + * The time stamp offsets indicate at which sample the time stamp occurs, + * taking real or complex samples into account. Only the first time stamp + * can be synthetic. + * + * @par Overlapped Reads + * Certain classes of signal processing algorithms need to preserve a + * portion of the last data set for the next iteration, such as a power + * spectral density (PSD) calculation with overlap. The read(size_t,size_t) + * method supports this mode of operation by allowing the reader to consume + * fewer samples than are read. This can be thought of as a separate read + * pointer that trails behind the stream's internal buffer. + * @par + * When an overlapped read needs to span multiple packets, but an SRI + * change, input queue flush, or end-of-stream is encountered, all of the + * available data is returned and consumed, equivalent to read(size_t). The + * assumption is that special handling is required due to the pending + * change, and it is not possible for the stream to interpret the + * relationship between the read size and consume size. + * @par + * When the consume length is zero, the read operation becomes a peek. It + * returns data following the normal rules but no data is consumed, even in + * the case of SRI change, input queue flush, or end-of-stream. 
+ * + * @par Non-Blocking Reads + * For each @a read method, there is a corresponsing @a tryread method that + * is non-blocking. If there is not enough data currently available to + * satisfy the request, but more data could become available in the future, + * the operation will return a null data block immediately. + * + * @par End-of-Stream + * The end-of-stream behavior of %BufferedInputStream is consistent with + * %InputStream, with the additional caveat that a read may return fewer + * samples than requested if an end-of-stream packet is encountered. + */ + template + class BufferedInputStream : public InputStream { + public: + /// @brief The type of data block returned by read methods on this stream. + typedef typename InputStream::DataBlockType DataBlockType; + + /** + * @brief Default constructor. + * @see InPort::getCurrentStream() + * @see InPort::getStream(const std::string&) + * @see InPort::getStreams() + * @see InPort::addStreamListener(Target,Func) + * + * Creates a null %BufferedInputStream. This stream is not associated + * with a stream from any InPort instance. No methods may be called on + * the %BufferedInputStream except for boolean tests and comparison. + * A null stream will always test as not valid, and will compare equal + * to another stream if and only if the other stream is also null. + * + * To get a handle to a live stream, you must query an input port or + * register a callback. + */ + BufferedInputStream(); + + /** + * @brief Reads the next packet. + * @returns Valid data block if successful. + * @returns Null data block if the read failed. + * @pre Stream is valid. + * + * Blocking read up to the next packet boundary. Reading a packet at a + * time is the most computationally efficent method because it does not + * require the stream to copy data into an intermediate buffer; + * instead, it may pass the original buffer along to the reader. 
+ * + * Returns a null data block immediately if: + * @li End-of-stream has been reached + * @li The input port is stopped + */ + DataBlockType read(); + + /** + * @brief Reads a specified number of samples. + * @param count Number of samples to read. + * @returns Data block containing up to @p count samples if + * successful. + * @returns Null data block if the read failed. + * @pre Stream is valid. + * + * Blocking read of @a count samples worth of data. For signal + * processing operations that require a fixed input data size, such as + * fast Fourier transform (FFT), this simplifies buffer management by + * offloading it to the stream. This usually incurs some computational + * overhead to copy data between buffers; however, this cost is + * intrinsic to the algorithm, and the reduced complexity of + * implementation avoids common errors. + * + * If the SRI indicates that the data is complex, @a count is in terms + * of complex samples. + * + * If any of the following conditions are encountered while fetching + * packets, the returned data block may contain fewer samples than + * requested: + * @li End-of-stream + * @li SRI change + * @li Input queue flush + * + * Returns a null data block immediately if: + * @li End-of-stream has been reached + * @li The input port is stopped + */ + DataBlockType read(size_t count); + + /** + * @brief Reads a specified number of samples, with overlap. + * @param count Number of samples to read. + * @param consume Number of samples to advance read pointer. + * @returns Data block containing up to @p count samples if + * successful. + * @returns Null data block if the read failed. + * @pre Stream is valid. + * @pre @p consume <= @p count + * @see read(size_t) + * + * Blocking read of @a count samples worth of data will only advance + * the read pointer by @a consume samples. The remaining @c + * count-consume samples are buffered and will be returned on the + * following read operation. 
This method is designed to support signal + * processing operations that require overlapping data sets, such as + * power spectral density (PSD). + * + * If the SRI indicates that the data is complex, @a count and @a + * consume are in terms of complex samples. + * + * If any of the following conditions are encountered while fetching + * packets, the returned data block may contain fewer samples than + * requested: + * @li End-of-stream + * @li SRI change + * @li Input queue flush + * + * When this occurs, all of the returned samples are consumed unless + * @a consume is 0, as it is assumed that special handling is required. + * + * Returns a null data block immediately if: + * @li End-of-stream has been reached + * @li The input port is stopped + */ + DataBlockType read(size_t count, size_t consume); + + /** + * @brief Non-blocking read of the next packet. + * @returns Valid data block if successful. + * @returns Null data block if the read failed. + * @pre Stream is valid. + * @see read() + * + * Non-blocking version of read(), returning a null data block + * immediately when no data is available. + */ + DataBlockType tryread(); + + /** + * @brief Non-blocking sized read. + * @param count Number of samples to read. + * @returns Data block containing up to @p count samples if + * successful. + * @returns Null data block if the read failed. + * @pre Stream is valid. + * @see read(size_t) + * + * Non-blocking version of read(size_t), returning a null data block + * immediately when no data is available. + */ + DataBlockType tryread(size_t count); + + /** + * @brief Non-blocking read with overlap. + * @param count Number of samples to read. + * @param consume Number of samples to advance read pointer. + * @returns Data block containing up to @p count samples if + * successful. + * @returns Null data block if the read failed. + * @pre Stream is valid. 
+ * @pre @p consume <= @p count + * @see read(size_t,size_t) + * + * Non-blocking version of read(size_t,size_t), returning a null data + * block immediately when no data is available. + */ + DataBlockType tryread(size_t count, size_t consume); + + /** + * @brief Discard a specified number of samples. + * @param count Number of samples to skip. + * @returns Actual number of samples skipped. + * @pre Stream is valid. + * @see read(size_t) + * + * Skips the next @a count samples worth of data and blocks until the + * requested amount of data is available. If the data is not being + * used, this is more computationally efficient than the equivalent + * call to read(size_t) because no buffering is performed. + * + * If the SRI indicates that the data is complex, @a count and the + * return value are in terms of complex samples. + * + * Skipping behaves like read(size_t) when fetching packets. If any of + * the following conditions are encountered, the returned value may be + * less than @a count: + * @li End-of-stream + * @li SRI change + * @li Input queue flush + * + * Returns 0 immediately if: + * @li End-of-stream has been reached + * @li The input port is stopped + */ + size_t skip(size_t count); + + /** + * @brief Estimates the number of samples that can be read + * immediately. + * @returns Number of samples. + * @pre Stream is valid. + * + * The number of samples returned by this method is an estimate based + * on the current state of the stream and the input queue. If there are + * any SRI changes or input queue flushes to report, only samples up to + * that point are considered, as a read cannot span those packets. + * + * If the SRI indicates that the data is complex, the returned value is + * in terms of complex samples. + * + * @warning The returned value is not guaranteed; if the input queue + * flushes in between calls, a subsequent call to @a read may + * block or @a tryread may fail. 
+ */ + size_t samplesAvailable(); + + /** + * @brief Stream equality comparison. + * @param other Another %BufferedInputStream. + * @returns True if and only if both BufferedInputStreams reference + * the same underlying stream. + */ + bool operator== (const BufferedInputStream& other) const; + + /** + * @brief Returns true if data can be read without blocking. + * @see samplesAvailable() + * + * A stream is considered ready if samplesAvailable() would return a + * non-zero value. + * + * @warning Even if this method returns true, if the input queue + * flushes in between calls, a subsequent call to @a read may + * block or @a tryread may fail. + */ + bool ready(); + + private: + /// @cond IMPL + typedef InputStream Base; + using Base::_impl; + + friend class InPort; + typedef InPort InPortType; + BufferedInputStream(const StreamDescriptor&, InPortType*); + + class Impl; + Impl& impl(); + const Impl& impl() const; + /// @endcond + }; + + typedef BufferedInputStream InCharStream; + typedef BufferedInputStream InOctetStream; + typedef BufferedInputStream InShortStream; + typedef BufferedInputStream InUShortStream; + typedef BufferedInputStream InLongStream; + typedef BufferedInputStream InULongStream; + typedef BufferedInputStream InLongLongStream; + typedef BufferedInputStream InULongLongStream; + typedef BufferedInputStream InFloatStream; + typedef BufferedInputStream InDoubleStream; + typedef BufferedInputStream InBitStream; + typedef InputStream InXMLStream; + typedef InputStream InFileStream; + +} // end of bulkio namespace + +#endif diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_out_port.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_out_port.h new file mode 100644 index 000000000..384545912 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_out_port.h @@ -0,0 +1,629 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. 
+ * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_out_port_h +#define __bulkio_out_port_h + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "bulkio_base.h" +#include "bulkio_typetraits.h" +#include "bulkio_callbacks.h" +#include "bulkio_out_stream.h" +#include "BulkioTransport.h" + +namespace bulkio { + + template + struct OutStreamTraits + { + typedef NumericOutputStream OutStreamType; + }; + + template <> + struct OutStreamTraits { + typedef OutBitStream OutStreamType; + }; + + template <> + struct OutStreamTraits { + typedef OutXMLStream OutStreamType; + }; + + template <> + struct OutStreamTraits { + typedef OutFileStream OutStreamType; + }; + + // + // OutPort + // + // Base template for data transfers between BULKIO ports. 
This class is defined by 2 trait classes + // PortTraits - This template provides the context for the port's middleware transport classes and they base data types + // passed between port objects + // + // + template + class OutPort : public redhawk::NegotiableUsesPort +#ifdef BEGIN_AUTOCOMPLETE_IGNORE + , public virtual POA_BULKIO::internal::UsesPortStatisticsProviderExt +#endif + { + + public: + // The CORBA interface of this port (nested typedef for template parameter) + typedef PortType CorbaType; + + // + // Port Variable Definition + // + typedef typename PortType::_var_type PortVarType; + + // + // Sequence container used during actual pushPacket call + // + typedef typename CorbaTraits::SequenceType PortSequenceType; + + // + // OutputStream class + // + typedef typename OutStreamTraits::OutStreamType StreamType; + + // + // ConnectionList Definition + // + typedef typename bulkio::Connections< PortVarType >::List ConnectionsList; + + // + // Mapping of Stream IDs to SRI Map/Refresh objects + // + typedef std::map< std::string, SriMapStruct > OutPortSriMap; + + // + // OutPort Creates a uses port object for publishing data to the framework + // + // @param name name assigned to the port located in scd.xml file + // @param logger logger to receive port logging output + // @param connectionCB callback that will be called when the connectPort method is called + // @pararm disconnectDB callback that receives notification when a disconnectPort happens + // + OutPort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL); + + + // + // virtual destructor to clean up resources + // + virtual ~OutPort(); + + void updateConnectionFilter(const std::vector &_filterTable) { + SCOPED_LOCK lock(updatingPortsLock); // don't want to process while command information is coming in + filterTable = _filterTable; + }; + + + template< typename T > inline + void setNewConnectListener(T &target, void 
(T::*func)( const char *connectionId ) ) + { + _connectCB = boost::make_shared< MemberConnectionEventListener< T > >( boost::ref(target), func ); + } + + template< typename T > inline + void setNewConnectListener(T *target, void (T::*func)( const char *connectionId ) ) + { + _connectCB = boost::make_shared< MemberConnectionEventListener< T > >( boost::ref(*target), func ); + } + + template< typename T > inline + void setNewDisconnectListener(T &target, void (T::*func)( const char *connectionId ) ) + { + _disconnectCB = boost::make_shared< MemberConnectionEventListener< T > >( boost::ref(target), func ); + } + + template< typename T > inline + void setNewDisconnectListener(T *target, void (T::*func)( const char *connectionId ) ) + { + _disconnectCB = boost::make_shared< MemberConnectionEventListener< T > >( boost::ref(*target), func ); + } + + // + // Attach listener interfaces for connect and disconnect events + // + void setNewConnectListener( ConnectionEventListener *newListener ); + void setNewConnectListener( ConnectionEventCallbackFn newListener ); + void setNewDisconnectListener( ConnectionEventListener *newListener ); + void setNewDisconnectListener( ConnectionEventCallbackFn newListener ); + + // + // pushSRI - called by the source component when SRI data about the stream changes, the data flow policy is this activity + // will occurr first before any data flows to the component. 
+ // + // @param H - Incoming StreamSRI object that defines the state of the data flow portion of the stream (pushPacket) + // + virtual void pushSRI(const BULKIO::StreamSRI& H); + + + // + // statisics - returns a PortStatistics object for this uses port + // BULKIO::UsesPortStatisticsSequence: sequence of PortStatistics object + // PortStatistics + // portname - name of port + // elementsPerSecond - number of elements per second (element is based on size of port type ) + // bitsPerSecond - number of bits per second (based on element storage size in bits) + // callsPerSecond - history window -1 / time between calls to this method + // streamIds - list of active stream id values + // averageQueueDepth - the average depth of the queue for this port + // timeSinceLastCall - time since this method as invoked and the last pushPacket happened + // Keyword Sequence - deprecated + // + // @return BULKIO::UsesPortStatisticsSequenc - current data flow metrics collected for the port, the caller of the method + // is responsible for freeing this object + // + virtual BULKIO::UsesPortStatisticsSequence * statistics(); + + // + // state - returns the current state of the port as follows: + // BULKIO::BUSY - internal queue has reached FULL state + // BULKIO::IDLE - there are no items on the internal queue + // BULKIO::ACTIVE - there are items on the queue + // + // @return BULKIO::PortUsageType - current state of port + // + virtual BULKIO::PortUsageType state(); + + /** + * @brief Gets the current set of active streams. + * @returns List of streams. + */ + typedef std::list StreamList; + StreamList getStreams(); + + /** + * @brief Get the active stream with the given stream ID. + * @param streamID Stream identifier. + * @returns Output stream for @p streamID if it exists. + * @returns Null output stream if no such stream ID exists. + */ + StreamType getStream(const std::string& streamID); + + /** + * @brief Creates a new output stream. + * @param streamID Stream identifier. 
+ * @returns A new output stream. + * + * The returned output stream's SRI is initialized with default values. + */ + StreamType createStream(const std::string& streamID); + + /** + * @brief Creates a new stream based on an existing SRI. + * @param sri Stream SRI. + * @returns A new output stream. + */ + StreamType createStream(const BULKIO::StreamSRI& sri); + + // + // turn on/off the port monitoring capability + // + void enableStats(bool enable); + + // + // Return map of streamID/SRI objects + // + bulkio::SriMap getCurrentSRI(); + + // + // Return list of SRI objects + // + bulkio::SriList getActiveSRIs(); + + // + // Return a ConnectionsList for the current ports and connections ids establish via connectPort method + // + ConnectionsList getConnections(); + + // + // Deprecation Warning + // + // The _getConnections and currentSRIs access will be deprecated in the next release of the + // the bulkio library class, in favor of getCurrentSRI and getConnections. + // + + // + // Allow access to the port's connection list + // + ConnectionsList __attribute__ ((deprecated)) _getConnections() { + return getConnections(); + } + + std::string getRepid () const; + + // + // List of SRIs sent out by this port + // + OutPortSriMap currentSRIs __attribute__ ((deprecated)); + + protected: + // + // Shared buffer type used to transfer data without copies, where possible + // + typedef typename BufferTraits::BufferType BufferType; + + // + // Lookup table for connections to input ports in the same process space + // + typedef OutputTransport PortTransportType; + + virtual redhawk::UsesTransport* _createLocalTransport(PortBase* port, CORBA::Object_ptr object, const std::string& connectionId); + + virtual redhawk::UsesTransport* _createTransport(CORBA::Object_ptr object, const std::string& connectionId); + + typedef redhawk::UsesPort::TransportIteratorAdapter TransportIterator; + + typedef std::map StreamMap; + StreamMap streams; + + std::vector filterTable; + 
boost::shared_ptr< ConnectionEventListener > _connectCB; + boost::shared_ptr< ConnectionEventListener > _disconnectCB; + + void _connectListenerAdapter(const std::string& connectionId); + void _disconnectListenerAdapter(const std::string& connectionId); + + // + // Returns true if the given connection should receive SRI updates and data + // for the given stream + // + bool _isStreamRoutedToConnection(const std::string& connectionID, const std::string& streamID); + + + // + // Sends data and metadata to all connections enabled for the given stream + // + friend class OutputStream; + void _sendPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID); + + StreamType _getStream(const std::string& streamID); + }; + + + template + class OutNumericPort : public OutPort { + public: + // + // Data type contained in sequence container + // + typedef typename CorbaTraits::TransportType TransportType; + + // + // Data type of items passed into the pushPacket method + // + typedef typename NativeTraits::NativeType NativeType; + + // + // Data type of the container for passing data into the pushPacket method + // + typedef typename BufferTraits::VectorType VectorType; + typedef VectorType NativeSequenceType; + + // + // OutNumericPort Creates a uses port object for publishing data to the framework + // + // @param port_name name assigned to the port located in scd.xml file + // @param connectionCB callback that will be called when the connectPort method is called + // @pararm disconnectDB callback that receives notification when a disconnectPort happens + // + OutNumericPort(const std::string& name, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL); + + OutNumericPort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL); + + // + // virtual destructor to clean up resources + // + virtual ~OutNumericPort(); 
+ + /* + * pushPacket + * maps to data BULKIO method call for passing a limited amount of data from a source vector + * + * data: pointer to a buffer of data + * size: number of data points in the buffer + * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. + * tcmode: timecode mode + * tcstatus: timecode status + * toff: fractional sample offset + * twsec: J1970 GMT + * tfsec: fractional seconds: 0.0 to 1.0 + * EOS: end-of-stream flag + * streamID: stream identifier + */ + void pushPacket( const TransportType* data, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + /* + * pushPacket + * maps to data BULKIO method call for passing an entire vector of data + * + * data: The sequence structure from an input port containing the payload to send out + * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. + * tcmode: timecode mode + * tcstatus: timecode status + * toff: fractional sample offset + * twsec: J1970 GMT + * tfsec: fractional seconds: 0.0 to 1.0 + * EOS: end-of-stream flag + * streamID: stream identifier + */ + void pushPacket(const VectorType& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + protected: + typedef typename OutPort::BufferType BufferType; + }; + + // + // Character Specialization.. 
+ // + // This class overrides the pushPacket method to support Int8 and char data types + // + // Output port for Int8 and char data types + class OutCharPort : public OutNumericPort { + public: + OutCharPort(const std::string& name, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL ); + + OutCharPort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL ); + + // Push a vector of Int8 data + void pushPacket(const std::vector< Int8 >& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + // Push a vector of Char data + void pushPacket(const std::vector< Char >& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + // Push a subset of a vector of Int8 data + void pushPacket(const Int8* buffer, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + // Push a subset of a vector of Char data + void pushPacket(const Char* buffer, size_t size, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + }; + + + class OutBitPort : public OutPort { + public: + typedef char* NativeSequenceType; + + OutBitPort(const std::string& name, LOGGER_PTR logger=LOGGER_PTR()); + + void pushPacket(const redhawk::shared_bitbuffer& data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + }; + + // + // OutFilePort + // + // This class defines the pushPacket interface for file URL data. 
+ // + // + class OutFilePort : public OutPort { + public: + typedef char* NativeSequenceType; + + OutFilePort(const std::string& name, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL); + + + OutFilePort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL); + + /* + * pushPacket + * maps to dataFile BULKIO method call for passing the URL of a file + * + * data: string containing the file URL to send out + * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. + * tcmode: timecode mode + * tcstatus: timecode status + * toff: fractional sample offset + * twsec: J1970 GMT + * tfsec: fractional seconds: 0.0 to 1.0 + * EOS: end-of-stream flag + * streamID: stream identifier + */ + void pushPacket(const std::string& URL, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + /* + * pushPacket + * maps to dataFile BULKIO method call for passing the URL of a file + * + * data: string containing the file URL to send out + * T: constant of type BULKIO::PrecisionUTCTime containing the timestamp for the outgoing data. + * tcmode: timecode mode + * tcstatus: timecode status + * toff: fractional sample offset + * twsec: J1970 GMT + * tfsec: fractional seconds: 0.0 to 1.0 + * EOS: end-of-stream flag + * streamID: stream identifier + */ + void pushPacket(const char* URL, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + /* + * DEPRECATED: maps to dataXML BULKIO method call for passing strings of data + */ + void pushPacket(const char *data, bool EOS, const std::string& streamID); + + }; + + + // + // OutXMLPort + // + // This class defines the pushPacket interface for XML data. 
+ // + // + class OutXMLPort : public OutPort { + public: + typedef char* NativeSequenceType; + + OutXMLPort(const std::string& name, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL); + + + OutXMLPort(const std::string& name, + LOGGER_PTR logger, + ConnectionEventListener *connectCB=NULL, + ConnectionEventListener *disconnectCB=NULL); + + /* + * DEPRECATED: maps to dataFile BULKIO method call for passing strings of data + */ + void pushPacket(const char *data, const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID); + + /* + * pushPacket + * maps to dataXML BULKIO method call for passing an XML-formatted string + * + * data: string containing the XML data to send out + * EOS: end-of-stream flag + * streamID: stream identifier + */ + void pushPacket(const std::string& data, bool EOS, const std::string& streamID); + + /* + * pushPacket + * maps to dataXML BULKIO method call for passing an XML-formatted string + * + * data: string containing the XML data to send out + * EOS: end-of-stream flag + * streamID: stream identifier + */ + void pushPacket(const char* data, bool EOS, const std::string& streamID); + + }; + + + /* + Uses Port Definitions for All Bulk IO port definitions + * + */ + // Bulkio octet (UInt8) output + typedef OutNumericPort OutOctetPort; + // Bulkio UInt8 output + typedef OutOctetPort OutUInt8Port; + // Bulkio short output + typedef OutNumericPort OutShortPort; + // Bulkio unsigned short output + typedef OutNumericPort OutUShortPort; + // Bulkio Int16 output + typedef OutShortPort OutInt16Port; + // Bulkio UInt16 output + typedef OutUShortPort OutUInt16Port; + // Bulkio long output + typedef OutNumericPort OutLongPort; + // Bulkio unsigned long output + typedef OutNumericPort OutULongPort; + // Bulkio Int32 output + typedef OutLongPort OutInt32Port; + // Bulkio UInt32 output + typedef OutULongPort OutUInt32Port; + // Bulkio long long output + typedef OutNumericPort OutLongLongPort; + // Bulkio 
unsigned long long output + typedef OutNumericPort OutULongLongPort; + // Bulkio Int64 output + typedef OutLongLongPort OutInt64Port; + // Bulkio UInt64 output + typedef OutULongLongPort OutUInt64Port; + // Bulkio float output + typedef OutNumericPort OutFloatPort; + // Bulkio double output + typedef OutNumericPort OutDoublePort; + // Bulkio URL output + typedef OutFilePort OutURLPort; +} // end of bulkio namespace + +inline bool operator>>= (const CORBA::Any& a, bulkio::connection_descriptor_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + CF::Properties& props = *temp; + for (unsigned int idx = 0; idx < props.length(); idx++) { + if (!strcmp("connectionTable::connection_id", props[idx].id)) { + if (!(props[idx].value >>= s.connection_id)) return false; + } else if (!strcmp("connectionTable::stream_id", props[idx].id)) { + if (!(props[idx].value >>= s.stream_id)) return false; + } else if (!strcmp("connectionTable::port_name", props[idx].id)) { + if (!(props[idx].value >>= s.port_name)) return false; + } + } + return true; +}; + +inline void operator<<= (CORBA::Any& a, const bulkio::connection_descriptor_struct& s) { + CF::Properties props; + props.length(3); + props[0].id = CORBA::string_dup("connectionTable::connection_id"); + props[0].value <<= s.connection_id; + props[1].id = CORBA::string_dup("connectionTable::stream_id"); + props[1].value <<= s.stream_id; + props[2].id = CORBA::string_dup("connectionTable::port_name"); + props[2].value <<= s.port_name; + a <<= props; +}; + +inline bool operator== (const bulkio::connection_descriptor_struct& s1, const bulkio::connection_descriptor_struct& s2) { + if (s1.connection_id!=s2.connection_id) + return false; + if (s1.stream_id!=s2.stream_id) + return false; + if (s1.port_name!=s2.port_name) + return false; + return true; +}; + +inline bool operator!= (const bulkio::connection_descriptor_struct& s1, const bulkio::connection_descriptor_struct& s2) { + return !(s1==s2); +}; + +#endif diff --git 
a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_out_stream.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_out_stream.h new file mode 100644 index 000000000..652c4fcf8 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_out_stream.h @@ -0,0 +1,828 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_out_stream_h +#define __bulkio_out_stream_h + +#include +#include +#include + +#include +#include +#include + +#include "bulkio_typetraits.h" +#include "bulkio_datablock.h" +#include "bulkio_stream.h" + +namespace bulkio { + + template + class OutPort; + + /** + * @brief Abstract BulkIO output stream class. + * @headerfile bulkio_out_stream.h + * + * %OutputStream is a smart pointer-based class that encapsulates a single + * BulkIO stream for writing. It is associated with the output port that + * created it, providing a file-like API on top of the classic BulkIO + * pushPacket model. + * + * @warning Do not declare instances of this template class directly in + * user code; the template parameter and class name are not + * considered API. 
Use the type-specific @c typedef instead, such + * as bulkio::OutFloatStream, or the nested @c typedef StreamType + * from an %OutPort. + * + * Notionally, a BulkIO stream represents a contiguous data set and its + * associated signal-related information (SRI), uniquely identified by a + * stream ID, from creation until close. The SRI may vary over time, but + * the stream ID is immutable. Only one stream with a given stream ID can + * be active at a time. + * + * OutputStreams help manage the stream lifetime by tying that SRI with an + * output port and ensuring that all data is associated with a valid stream. + * When the stream is complete, it may be closed, notifying downstream + * receivers that no more data is expected. + * + * The %OutputStream class itself is a lightweight handle; it is inexpensive + * to copy or store in local variables or nested data types. Assigning one + * %OutputStream to another does not copy the stream state, but instead, it + * aliases both objects to the same underlying stream. + * + * The default constructor creates an invalid "null" %OutputStream that cannot + * be used for any real operations, similar to a null pointer. A stream may + * be checked for validity with boolean tests: + * + * @code + * if (!stream) { + * // handle failure + * } + * @endcode + * or + * @code + * if (stream) { + * // operate on stream + * } + * @endcode + * + * OutputStreams must be created via an %OutPort. A stream cannot be + * associated with more than one port. + * @see OutPort::createStream(const std::string&) + * @see OutPort::createStream(const BULKIO::StreamSRI&) + * + * @par SRI Changes + * Updates to the stream that modify its SRI are cached locally until the + * next write to minimize the number of updates that are published. When + * there are pending SRI changes, the %OutputStream pushes the updated SRI + * first, followed by the data. 
+ */ + template + class OutputStream : public StreamBase { + public: + using StreamBase::sri; + + /** + * @brief Update the SRI. + * @param sri New SRI. + * @pre Stream is valid. + * + * Overwrites all SRI fields except for @c streamID, which is + * immutable. The updated SRI will be pushed on the next write. + */ + void sri(const BULKIO::StreamSRI& sri); + + using StreamBase::xstart; + + /** + * @brief Sets the X-axis start value. + * @param start Starting coordinate of the first sample in the X + * direction. + * @pre Stream is valid. + * @see xstart() const + * + * Changing @c xstart updates the SRI, which will be pushed on the next + * write. + */ + void xstart(double start); + + using StreamBase::xdelta; + + /** + * @brief Sets the X-axis delta. + * @param delta The distance between two adjacent samples in the X + * direction. + * @pre Stream is valid. + * @see xdelta() const + * + * Changing @c xdelta updates the SRI, which will be pushed on the next + * write. + */ + void xdelta(double delta); + + using StreamBase::xunits; + + /** + * @brief Sets the X-axis units. + * @param units Unit code for @c xstart and @c xdelta. + * @pre Stream is valid. + * @see xunits() const + * + * Changing @c xunits updates the SRI, which will be pushed on the next + * write. + */ + void xunits(short units); + + using StreamBase::subsize; + + /** + * @brief Sets the frame size. + * @param size Length of a row for framed data, or 0 for contiguous + * data. + * @pre Stream is valid. + * @see subsize() const + * + * Changing @c subsize updates the SRI, which will be pushed on the + * next write. + */ + void subsize(int size); + + using StreamBase::ystart; + + /** + * @brief Sets the Y-axis start value. + * @param start Starting coordinate of the first frame in the Y + * direction. + * @pre Stream is valid. + * @see ystart() const + * + * Changing @c ystart updates the SRI, which will be pushed on the next + * write. 
+ */ + void ystart(double start); + + using StreamBase::ydelta; + + /** + * @brief Sets the Y-axis delta. + * @param delta The distance between two adjacent frames in the Y + * direction. + * @pre Stream is valid. + * @see ydelta() const + * + * Changing @c ydelta updates the SRI, which will be pushed on the next + * write. + */ + void ydelta(double delta); + + using StreamBase::yunits; + + /** + * @brief Sets the Y-axis units. + * @returns Unit code for @c ystart and @c ydelta. + * @pre Stream is valid. + * @see yunits() const + * + * Changing @c yunits updates the SRI, which will be pushed on the next + * write. + */ + void yunits(short units); + + using StreamBase::complex; + + /** + * @brief Sets the complex mode of this stream. + * @param mode True if data is complex. False if data is not complex. + * @pre Stream is valid. + * @see complex() const + * + * Changing the %complex mode indicates that all subsequent data is + * real or complex based on the value of @a mode. The updated SRI will + * be pushed on the next write. + */ + void complex(bool mode); + + using StreamBase::blocking; + + /** + * @brief Sets the blocking mode of this stream. + * @param mode True if blocking. False if stream is non-blocking. + * @pre Stream is valid. + * + * Changing the %blocking mode updates the SRI, which will be pushed on + * the next write. + */ + void blocking(bool mode); + + using StreamBase::keywords; + + /** + * @brief Overwrites the SRI keywords. + * @param props New SRI keywords. + * @pre Stream is valid. + * @see setKeyword + * + * The current SRI keywords are replaced with @a props. The updated SRI + * will be pushed on the next write. + */ + void keywords(const _CORBA_Unbounded_Sequence& props); + + /** + * @brief Sets the current value of a keyword in the SRI. + * @param name The name of the keyword. + * @param value The new value. + * @pre Stream is valid. 
+ * @see setKeyword(const std::string&, const redhawk::Value&) + * @see setKeyword(const std::string&, const T&) + * + * If the keyword @a name already exists, its value is updated to + * @a value. If the keyword @a name does not exist, the new keyword is + * appended. + * + * Setting a keyword updates the SRI, which will be pushed on the next + * write. + */ + void setKeyword(const std::string& name, const CORBA::Any& value); + + /** + * @brief Sets the current value of a keyword in the SRI. + * @param name The name of the keyword. + * @param value The new value. + * @pre Stream is valid. + * @see setKeyword(const std::string&, const T&) + * + * If the keyword @a name already exists, its value is updated to + * @a value. If the keyword @a name does not exist, the new keyword is + * appended. + * + * Setting a keyword updates the SRI, which will be pushed on the next + * write. + */ + void setKeyword(const std::string& name, const redhawk::Value& value); + + /** + * @brief Sets the current value of a keyword in the SRI. + * @param name The name of the keyword. + * @param value The new value. + * @tparam T Any type that can be converted to a redhawk::Value. + * @pre Stream is valid. + * + * If the keyword @a name already exists, its value is updated to + * @a value. If the keyword @a name does not exist, the new keyword is + * appended. + * + * Setting a keyword updates the SRI, which will be pushed on the next + * write. + */ + template + void setKeyword(const std::string& name, const T& value) + { + setKeyword(name, redhawk::Value(value)); + } + + /** + * @brief Removes a keyword from the SRI. + * @param name The name of the keyword. + * @pre Stream is valid. + * + * Erases the keyword named @a name from the SRI keywords. If no + * keyword @a name is found, the keywords are not modified. + * + * Removing a keyword updates the SRI, which will be pushed on the next + * write. 
+ */ + void eraseKeyword(const std::string& name); + + /** + * @brief Closes this stream and sends an end-of-stream. + * @pre Stream is valid. + * @post Stream is invalid. + * + * Closing a stream sends an end-of-stream packet and resets the stream + * handle. No further operations may be made on the stream. + */ + void close(); + + protected: + /// @cond IMPL + typedef OutPort OutPortType; + + class Impl; + Impl& impl(); + const Impl& impl() const; + + OutputStream(); + OutputStream(const BULKIO::StreamSRI& sri, OutPortType* port); + OutputStream(boost::shared_ptr impl); + + int modcount() const; + + typedef const Impl& (OutputStream::*unspecified_bool_type)() const; + /// @endcond + public: + /** + * @brief Checks stream validity. + * @returns Value convertible to true if this stream is valid. + * Value convertible to false if this stream is invalid. + * @see StreamBase::operator!() const + * + * This operator supports affirmative boolean tests: + * @code + * if (stream) { + * // operate on stream + * } + * @endcode + * + * If this method returns true, it is safe to call any method. + */ + operator unspecified_bool_type() const; + + /* + * @brief Stream equality comparison. + * @param other Another %OutputStream. + * @returns True if and only if both OutputStreams reference the same + * underlying stream. + */ + bool operator==(const OutputStream& other) const; + + bool operator!=(const OutputStream& other) const; + }; + + + /** + * @brief BulkIO output stream class with data buffering. + * @headerfile bulkio_out_stream.h + * + * %BufferedOutputStream can use an internal buffer to queue up multiple + * packets worth of data into a single push. By default, buffering is + * disabled. + * + * @warning Do not declare instances of this template class directly in + * user code; the template parameter and class name are not + * considered API. 
Use the type-specific @c typedef instead, such + * as bulkio::OutFloatStream, or the nested @c typedef StreamType + * from an %OutPort. + * + * @par Data Buffering + * + * BufferedOutputStreams can combine multiple small chunks of data into a + * single packet for reduced I/O overhead. Data buffering is enabled by + * setting a non-zero buffer size via the setBufferSize() method. The + * output stream creates an internal buffer of the requested size; the + * stream's complex mode is not taken into account. + * + * With buffering enabled, each write copies its data into the internal + * buffer, up to the maximum of the buffer size. When the internal buffer + * is full, a packet is sent via the output port, using the time stamp of + * the first buffered sample. After the packet is sent, the internal + * buffer is reset to its initial state. If there is any remaining data + * from the write, it is copied into a new buffer and a new starting time + * stamp is interpolated. + * + * @par Time Stamps + * + * When buffering is enabled, the time stamps provided to the write() + * methods may be discarded. Furthermore, when write sizes do not align + * exactly with the buffer size, the output time stamp may be interpolated. + * If precise time stamps are required, buffering should not be used. + */ + template + class BufferedOutputStream : public OutputStream { + public: + /// @brief Data type for write(). + typedef typename BufferTraits::BufferType BufferType; + + /** + * @brief Default constructor. + * @see OutPort::createStream(const std::string&) + * @see OutPort::createStream(const BULKIO::StreamSRI&) + * + * Create a null %BufferedOutputStream. This stream is not associated + * with a stream from any output port. No methods may be called on the + * the %BufferedOutputStream except for boolean tests and comparison. + * A null stream will always test as not valid, and will compare equal + * to another stream if and only if the other stream is also null. 
+ * + * New, valid streams are created via an output port. + */ + BufferedOutputStream(); + + /** + * @brief Gets the internal buffer size. + * @returns Number of real samples to buffer per push. + * @pre Stream is valid. + * + * The buffer size is in terms of real samples, ignoring the complex + * mode of the stream. Complex samples count as two real samples for + * the purposes of buffering. + * + * A buffer size of 0 indicates that buffering is disabled. + */ + size_t bufferSize() const; + + /** + * @brief Sets the internal buffer size. + * @param samples Number of real samples to buffer per push. + * @pre Stream is valid. + * @see bufferSize() const + * + * The internal buffer is flushed if @a samples is less than the number + * of real samples currently buffered. + * + * A buffer size of 0 disables buffering, flushing any buffered data. + */ + void setBufferSize(size_t samples); + + /** + * @brief Flushes the internal buffer. + * @pre Stream is valid. + * + * Any data in the internal buffer is sent to the port to be pushed. + */ + void flush(); + + /** + * @brief Writes data to the stream. + * @param data The data to write. + * @param time Time stamp of first element. + * + * If buffering is disabled, @a data is sent as a single packet with + * the given time stamp. + * + * When buffering is enabled, @a data is copied into the internal + * buffer. If the internal buffer exceeds the configured buffer size, + * one or more packets will be sent. + * + * If there are any pending SRI changes, the new SRI is pushed first. + */ + void write(const BufferType& data, const BULKIO::PrecisionUTCTime& time); + + protected: + /// @cond IMPL + typedef OutputStream Base; + + friend class OutPort; + typedef OutPort OutPortType; + BufferedOutputStream(const BULKIO::StreamSRI& sri, OutPortType* port); + + class Impl; + Impl& impl(); + const Impl& impl() const; + /// @endcond + }; + + + /** + * @brief BulkIO output stream class for numeric data types. 
+ * @headerfile bulkio_out_stream.h + * + * %NumericOutputStream provides overloaded write methods for both real and + * complex sample data. + * + * @warning Do not declare instances of this template class directly in + * user code; the template parameter and class name are not + * considered API. Use the type-specific @c typedef instead, such + * as bulkio::OutFloatStream, or the nested @c typedef StreamType + * from an %OutPort. + */ + template + class NumericOutputStream : public BufferedOutputStream { + public: + /// @brief The native type of a real sample. + typedef typename NativeTraits::NativeType ScalarType; + /// @brief The native type of a complex sample. + typedef std::complex ComplexType; + + /// @brief The shared_buffer type for real data. + typedef redhawk::shared_buffer ScalarBuffer; + /// @brief The shared_buffer type for complex data. + typedef redhawk::shared_buffer ComplexBuffer; + + /** + * @brief Default constructor. + * @see OutPort::createStream(const std::string&) + * @see OutPort::createStream(const BULKIO::StreamSRI&) + * + * Create a null %NumericOutputStream. This stream is not associated + * with a stream from any output port. No methods may be called on the + * the %NumericOutputStream except for boolean tests and comparison. A + * null stream will always test as not valid, and will compare equal to + * another stream if and only if the other stream is also null. + * + * New, valid streams are created via an output port. + */ + NumericOutputStream(); + + /* + * @brief Write real sample data to the stream. + * @param data %shared_buffer containing real data. + * @param time Time stamp of first sample. + + * Sends the real data in @a data as single packet with the time stamp + * @a time via the associated output port. + * + * If there are any pending SRI changes, the new SRI is pushed first. + */ + void write(const ScalarBuffer& data, const BULKIO::PrecisionUTCTime& time); + + /** + * @brief Write real sample data to the stream. 
+ * @param data %shared_buffer containing real data. + * @param times List of time stamps, with offsets. + * @pre Stream is valid. + * @pre @p times is sorted in order of offset. + * @throw std::logic_error If @p times is empty. + * + * Writes the real data in @a data to the stream, where each element of + * @a times gives the offset and time stamp of an individual packet. + * The offset of the first time stamp is ignored and assumed to be 0, + * while subsequent offsets determine the length of the prior packet. + * All offsets should be less than @a data.size(). + * + * For example, given @a data with size 25 and three time stamps with + * offsets 0, 10, and 20, @a data is broken into three packets of size + * 10, 10, and 5 samples. + * + * If there are any pending SRI changes, the new SRI is pushed first. + */ + void write(const ScalarBuffer& data, const std::list& times); + + /** + * @brief Write complex sample data to the stream. + * @param data %shared_buffer containing complex data. + * @param time Time stamp of the first sample. + * @throw std::logic_error If stream is not configured for complex + * data. + * + * Sends the complex data in @a data as single packet with the time + * stamp @a time via the associated output port. + * + * If there are any pending SRI changes, the new SRI is pushed first. + */ + void write(const ComplexBuffer& data, const BULKIO::PrecisionUTCTime& time); + + /** + * @brief Write complex data to the stream. + * @param data %shared_buffer containing complex data. + * @param times List of time stamps, with offsets. + * @pre Stream is valid. + * @pre @p times is sorted in order of offset. + * @throw std::logic_error If stream is not configured for complex + * data. + * @throw std::logic_error If @p times is empty. + * + * Writes the complex data in @a data to the stream, where each element + * of @a times gives the offset and time stamp of an individual packet. 
+ * The offset of the first time stamp is ignored and assumed to be 0, + * while subsequent offsets determine the length of the prior packet. + * All offsets should be less than @a data.size(). + * + * For example, given @a data with size 25 and three time stamps with + * offsets 0, 10, and 20, @a data is broken into three packets of size + * 10, 10, and 5 samples. + * + * If there are any pending SRI changes, the new SRI is pushed first. + */ + void write(const ComplexBuffer& data, const std::list& times); + + /** + * @brief Writes a packet of data. + * @tparam T Sample type (must be ScalarType or ComplexType). + * @param data Vector containing real or complex sample data. + * @param time Time stamp of first sample. + * @pre Stream is valid. + * @throw std::logic_error If @p T is complex but stream is not. + * @see write(const ScalarType*,size_t,const BULKIO::PrecisionUTCTime&) + * @see write(const ComplexType*,size_t,const BULKIO::PrecisionUTCTime&) + * + * Sends the contents of a real or complex vector as a single packet. + * This is a convenience wrapper that defers to one of the write + * methods that takes a pointer and size, depending on whether @a T is + * real or complex. + */ + template + void write(const std::vector& data, const BULKIO::PrecisionUTCTime& time) + { + write(&data[0], data.size(), time); + } + + /** + * @brief Writes one or more packets. + * @tparam T Sample type (must be ScalarType or ComplexType). + * @param data Vector containing real or complex sample data. + * @param times List of time stamps, with offsets. + * @pre Stream is valid. + * @throw std::logic_error If @p T is complex but stream is not. + * @throw std::logic_error If @p times is empty. + * @see write(const ScalarType*,size_t,const std::list&) + * @see write(const ComplexType*,size_t,const std::list&) + * + * Sends the contents of a real or complex vector as one or more + * packets. 
This is a convenience wrapper that defers to one of the + * write methods that takes a pointer and size, depending on whether + * @a T is real or complex. + */ + template + void write(const std::vector& data, const std::list& times) + { + write(&data[0], data.size(), times); + } + + /** + * @brief Writes a packet of real data. + * @param data Pointer to real sample data. + * @param count Number of samples to write. + * @param time Time stamp of first sample. + * @pre Stream is valid. + * + * Convenience wrapper for write(const ScalarBuffer&,const BULKIO::PrecisionUTCTime&) + * that creates a transient buffer from @a data and @a count. + */ + void write(const ScalarType* data, size_t count, const BULKIO::PrecisionUTCTime& time) + { + write(ScalarBuffer::make_transient(data, count), time); + } + + /** + * @brief Writes one or more packets of real data. + * @param data Pointer to real sample data. + * @param count Number of samples to write. + * @param times List of time stamps, with offsets. + * @pre Stream is valid. + * @pre @p times is sorted in order of offset. + * @throw std::logic_error If @p times is empty. + * + * Convenience wrapper for write(const ScalarBuffer&,const std::list&) + * that creates a transient buffer from @a data and @a count. + */ + void write(const ScalarType* data, size_t count, const std::list& times) + { + write(ScalarBuffer::make_transient(data, count), times); + } + + /** + * @brief Writes a packet of complex data. + * @param data Pointer to complex sample data. + * @param count Number of samples to write. + * @param time Time stamp of first sample. + * @throw std::logic_error If stream is not configured for complex + * data. + * @pre Stream is valid. + * + * Convenience wrapper for write(const ComplexBuffer&,const BULKIO::PrecisionUTCTime&) + * that creates a transient buffer from @a data and @a count. 
+ */ + void write(const ComplexType* data, size_t count, const BULKIO::PrecisionUTCTime& time) + { + write(ComplexBuffer::make_transient(data, count), time); + } + + /** + * @brief Writes one or more packets of complex data. + * @param data Pointer to complex sample data. + * @param count Number of samples to write. + * @param times List of time stamps, with offsets. + * @pre Stream is valid. + * @pre @p times is sorted in order of offset. + * @throw std::logic_error If stream is not configured for complex + * data. + * @throw std::logic_error If @p times is empty. + * + * Convenience wrapper for write(const ComplexBuffer&,const std::list&) + * that creates a transient buffer from @a data and @a count. + */ + void write(const ComplexType* data, size_t count, const std::list& times) + { + write(ComplexBuffer::make_transient(data, count), times); + } + + private: + /// @cond IMPL + typedef BufferedOutputStream Base; + + friend class OutPort; + typedef OutPort OutPortType; + NumericOutputStream(const BULKIO::StreamSRI& sri, OutPortType* port); + + template + inline void _writeMultiple(const redhawk::shared_buffer& data, + const std::list& times); + /// @endcond + }; + + + /** + * @brief BulkIO XML output stream class. + * @headerfile bulkio_out_stream.h + */ + class OutXMLStream : public OutputStream { + public: + /** + * @brief Default constructor. + * @see OutPort::createStream(const std::string&) + * @see OutPort::createStream(const BULKIO::StreamSRI&) + * + * Create a null %OutXMLStream. This stream is not associated with a + * stream from any output port. No methods may be called on the the + * %OutXMLStream except for boolean tests and comparison. A null stream + * will always test as not valid, and will compare equal to another + * stream if and only if the other stream is also null. + * + * New, valid streams are created via an output port. + */ + OutXMLStream(); + + /** + * @brief Writes XML data to the stream. + * @param xmlString An XML string. 
+ * + * The XML string @a data is sent as a single packet. + */ + void write(const std::string& xmlString); + + private: + /// @cond IMPL + typedef OutputStream Base; + + friend class OutPort; + typedef OutPort OutPortType; + OutXMLStream(const BULKIO::StreamSRI& sri, OutPortType* port); + /// @endcond IMPL + }; + + + /** + * @brief BulkIO file output stream class. + * @headerfile bulkio_out_stream.h + */ + class OutFileStream : public OutputStream { + public: + /** + * @brief Default constructor. + * @see OutPort::createStream(const std::string&) + * @see OutPort::createStream(const BULKIO::StreamSRI&) + * + * Create a null %OutFileStream. This stream is not associated with a + * stream from any output port. No methods may be called on the the + * %OutFileStream except for boolean tests and comparison. A null + * stream will always test as not valid, and will compare equal to + * another stream if and only if the other stream is also null. + * + * New, valid streams are created via an output port. + */ + OutFileStream(); + + /** + * @brief Writes a file URI to the stream. + * @param URL The file URI to write. + * @param time Time stamp of file data. + * + * The URI is sent as a single packet with the given time stamp. 
+ */ + void write(const std::string& URL, const BULKIO::PrecisionUTCTime& time); + + private: + /// @cond IMPL + typedef OutputStream Base; + + friend class OutPort; + typedef OutPort OutPortType; + OutFileStream(const BULKIO::StreamSRI& sri, OutPortType* port); + /// @endcond + }; + + + typedef BufferedOutputStream OutBitStream; + typedef NumericOutputStream OutCharStream; + typedef NumericOutputStream OutOctetStream; + typedef NumericOutputStream OutShortStream; + typedef NumericOutputStream OutUShortStream; + typedef NumericOutputStream OutLongStream; + typedef NumericOutputStream OutULongStream; + typedef NumericOutputStream OutLongLongStream; + typedef NumericOutputStream OutULongLongStream; + typedef NumericOutputStream OutFloatStream; + typedef NumericOutputStream OutDoubleStream; + +} // end of bulkio namespace + +#endif diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_stream.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_stream.h new file mode 100644 index 000000000..3ffa049e5 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_stream.h @@ -0,0 +1,299 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. 
If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_stream_h +#define __bulkio_stream_h + +#include +#include + +#include + +#include + +namespace bulkio { + + /** + * @brief Shared ownership container for StreamSRI. + * + * %StreamDescriptor adds shared ownership to the StreamSRI class to + * provide read-only access to the same underlying SRI instance for + * multiple readers. + */ + class StreamDescriptor { + public: + StreamDescriptor() : + _sri() + { + } + + StreamDescriptor(const BULKIO::StreamSRI& sri) : + _sri(boost::make_shared(sri)) + { + } + + std::string streamID() const + { + return std::string(_sri->streamID); + } + + bool blocking() const + { + return _sri->blocking; + } + + bool complex() const + { + return (_sri->mode != 0); + } + + const BULKIO::StreamSRI& sri() + { + return *_sri; + } + + bool operator! () const + { + return !_sri; + } + + protected: + boost::shared_ptr _sri; + }; + + /** + * @brief Base class for input and output streams. + * + * %StreamBase is a smart-pointer based class that encapsulates a single + * BulkIO stream. It implements the basic common API for input and output + * streams, providing accessor methods for StreamSRI fields. + * + * @note User code should typically use the type-specific input and output + * stream classes. + */ + class StreamBase { + public: + /** + * @brief Returns the stream ID. + * @pre Stream is valid. + * + * The stream ID is immutable and cannot be changed. + */ + const std::string& streamID() const; + + /** + * @brief Gets the current stream metadata. + * @returns Read-only reference to stream SRI. + * @pre Stream is valid. + */ + const BULKIO::StreamSRI& sri() const; + + /** + * @brief Implicit conversion to read-only StreamSRI. + * @pre Stream is valid. + */ + operator const BULKIO::StreamSRI& () const; + + /** + * @brief Gets the X-axis start value. + * @returns Starting coordinate of the first sample in the X + * direction. + * @pre Stream is valid. 
+ * + * For contiguous data, this is the start of the stream in terms of + * @c xunits. For framed data, this specifies the starting abscissa + * value, in terms of @c xunits, associated with the first element in + * each frame. + */ + double xstart() const; + + /** + * @brief Gets the X-axis delta. + * @returns The distance between two adjacent samples in the X + * direction. + * @pre Stream is valid. + * + * Because the X-axis is commonly in terms of time (that is, + * @c sri.xunits is @c BULKIO::UNITS_TIME), this is typically the + * reciprocal of the sample rate. + * + * For framed data, this is the interval between consecutive samples in + * a frame. + */ + double xdelta() const; + + /** + * @brief Gets the X-axis units. + * @returns The unit code for the xstart and xdelta values. + * @pre Stream is valid. + * + * Axis units are specified using constants in the BULKIO namespace. + * For contiguous data, the X-axis is commonly in terms of time, + * @c BULKIO::UNITS_TIME. For framed data, the X-axis is often in terms + * of frequency, @c BULKIO::UNITS_FREQUENCY. + */ + short xunits() const; + + /** + * @brief Gets the frame size. + * @returns The length of a row for framed data, or 0 if the data is + * contiguous. + * @pre Stream is valid. + * + * A subsize of 0 indicates that the data is contiguous; this is the + * default setting. For contiguous data, only the X-axis fields are + * applicable. + * + * A non-zero subsize indicates that the data is framed, with each row + * having a length of @c subsize. For framed data, both the X-axis and + * Y-axis fields are applicable. + */ + int subsize() const; + + /** + * @brief Gets the Y-axis start value. + * @returns Starting coordinate of the first frame in the Y direction. + * @pre Stream is valid. + * @see subsize() + * + * @note Y-axis fields are only applicable when subsize is non-zero. + * + * This specifies the start of the stream in terms of @c yunits. 
+ */ + double ystart() const; + + /** + * @brief Gets the Y-axis delta. + * @returns The distance between two adjacent frames in the Y + * direction. + * @pre Stream is valid. + * @see subsize() + * + * @note Y-axis fields are only applicable when subsize is non-zero. + * + * This specifies the interval between frames in terms of @c yunits. + */ + double ydelta() const; + + /** + * @brief Gets the Y-axis units. + * @returns The unit code for the ystart and ydelta values. + * @pre Stream is valid. + * @see subsize() + * @see xunits() + * + * @note Y-axis fields are only applicable when subsize is non-zero. + * + * Axis units are specified using constants in the BULKIO namespace. + */ + short yunits() const; + + /** + * @brief Gets the complex mode of this stream. + * @returns True if data is complex. False if data is not complex. + * @pre Stream is valid. + * + * A stream is considered complex if @c sri.mode is non-zero. + */ + bool complex() const; + + /** + * @brief Gets the blocking mode of this stream. + * @returns True if this stream is blocking. False if stream is non- + * blocking. + * @pre Stream is valid. + */ + bool blocking() const; + + /** + * @brief Read-only access to the set of SRI keywords. + * @returns A read-only reference to the SRI keywords. + * @pre Stream is valid. + * + * The SRI keywords are reinterpreted as const reference to a + * PropertyMap, which provides a higher-level interface than the + * default CORBA sequence. + */ + const redhawk::PropertyMap& keywords() const; + + /** + * @brief Checks for the presence of a keyword in the SRI. + * @param name The name of the keyword. + * @returns True if the keyword is found. False if keyword is not + * found. + * @pre Stream is valid. + */ + bool hasKeyword(const std::string& name) const; + + /** + * @brief Gets the current value of a keyword in the SRI. + * @param name The name of the keyword. + * @returns A read-only reference to the keyword's value. 
+ * @throw std::invalid_argument If no keyword @a name exists. + * @pre Stream is valid. + * @see hasKeyword + * + * Allows for easy lookup of keyword values in the SRI. To avoid + * exceptions on missing keywords, the presence of a keyword can be + * checked with hasKeyword(). + */ + const redhawk::Value& getKeyword(const std::string& name) const; + + /** + * @brief Checks stream validity. + * @returns True if this stream is not valid. False if the stream is + * invalid. + * + * Invalid (null) streams are not associated with an active stream in a + * port. If this method returns true, no other methods except + * comparison or assignment may be called. + */ + bool operator! () const; + + protected: + /// @cond IMPL + class Impl : public StreamDescriptor { + public: + Impl(const StreamDescriptor& sri) : + StreamDescriptor(sri), + _streamID(sri.streamID()) + { + } + + const std::string& streamID() const + { + return _streamID; + } + + virtual ~Impl() { } + + protected: + const std::string _streamID; + }; + + StreamBase(); + StreamBase(const boost::shared_ptr& impl); + + boost::shared_ptr _impl; + /// @endcond + }; +} + +#endif // __bulkio_stream_h diff --git a/bulkioInterfaces/libsrc/cpp/bulkio_time_operators.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_time_operators.h similarity index 100% rename from bulkioInterfaces/libsrc/cpp/bulkio_time_operators.h rename to bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_time_operators.h diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_traits.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_traits.h new file mode 100644 index 000000000..c6b2910f9 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_traits.h @@ -0,0 +1,128 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_traits_h +#define __bulkio_traits_h + +#include + +#include // for _seqVector + +#include "BULKIO_Interfaces.h" +#include "bulkio_base.h" +#include "bulkio_datatransfer.h" + +namespace bulkio { + +template < typename TT, typename AT=_seqVector::seqVectorAllocator< TT > > + class DataTransferBuffer + { + private: + DataTransferBuffer(void) {}; + public: + typedef TT TransportType; + typedef AT AllocatorType; + typedef std::vector< TransportType, AllocatorType > Type; +}; + + +// +// DataTransferTraits +// +// Traits template definition used to define input and output types used the port +// classes +// +template < typename PST, typename TT, typename NDT=TT, class DBT=std::vector< NDT > > +struct DataTransferTraits { + typedef PST PortSequenceType; // Port Sequence type used by middleware + typedef TT TransportType; // Transport Type contained in the Port Sequence container + typedef NDT NativeDataType; // Native c++ mapping of Transport Type + typedef DBT DataBufferType; // Container defintion to hold data from Input port + typedef typename DBT::allocator_type AllocatorType; +}; + + +typedef DataTransferTraits< PortTypes::CharSequence, CORBA::Char, Int8 > CharDataTransferTraits; +typedef DataTransferTraits< CF::OctetSequence, CORBA::Octet > 
OctetDataTransferTraits; +typedef DataTransferTraits< PortTypes::ShortSequence, CORBA::Short > ShortDataTransferTraits; +typedef DataTransferTraits< PortTypes::UshortSequence, CORBA::UShort > UShortDataTransferTraits; +typedef DataTransferTraits< PortTypes::LongSequence, CORBA::Long > LongDataTransferTraits; +typedef DataTransferTraits< PortTypes::UlongSequence, CORBA::ULong > ULongDataTransferTraits; +typedef DataTransferTraits< PortTypes::LongLongSequence, CORBA::LongLong > LongLongDataTransferTraits; +typedef DataTransferTraits< PortTypes::UlongLongSequence, CORBA::ULongLong > ULongLongDataTransferTraits; +typedef DataTransferTraits< PortTypes::FloatSequence, CORBA::Float > FloatDataTransferTraits; +typedef DataTransferTraits< PortTypes::DoubleSequence, CORBA::Double > DoubleDataTransferTraits; +typedef DataTransferTraits< Char *, Char, Char, std::string > StringDataTransferTraits; + +// +// PortTraits +// This template defines the set of traits used by Input and Output port template classes +// +// POA = Portable Object Adapter Class +// PT - BULKIO Port Type +// DTT DataTransferTraits associated with port type +// TransportType - TransportType defined by middleware +// NativeType - TransportType mapped to native type +// PortSequenceType - Data container used by middleware to transfer TransportType objects +// DataBufferType - Data Container of the DataTransfer object returned from getPacket +// + +template < typename POA, typename PT, typename DTT > +struct PortTraits { + typedef POA POAPortType; + typedef PT PortType; + typedef DTT DataTransferTraits; + typedef typename PortType::_var_type PortVarType; + typedef typename DTT::TransportType TransportType; + typedef typename DTT::NativeDataType NativeType; + typedef typename DTT::PortSequenceType SequenceType; + typedef typename DTT::DataBufferType DataBufferType; +}; + + +typedef PortTraits< POA_BULKIO::dataChar, BULKIO::dataChar, CharDataTransferTraits > CharPortTraits; +typedef PortTraits< 
POA_BULKIO::dataOctet, BULKIO::dataOctet, OctetDataTransferTraits > OctetPortTraits; +typedef PortTraits< POA_BULKIO::dataShort, BULKIO::dataShort, ShortDataTransferTraits > ShortPortTraits; +typedef PortTraits< POA_BULKIO::dataUshort, BULKIO::dataUshort, UShortDataTransferTraits > UShortPortTraits; +typedef PortTraits< POA_BULKIO::dataLong, BULKIO::dataLong, LongDataTransferTraits > LongPortTraits; +typedef PortTraits< POA_BULKIO::dataUlong, BULKIO::dataUlong, ULongDataTransferTraits > ULongPortTraits; +typedef PortTraits< POA_BULKIO::dataLongLong, BULKIO::dataLongLong, LongLongDataTransferTraits > LongLongPortTraits; +typedef PortTraits< POA_BULKIO::dataUlongLong, BULKIO::dataUlongLong, ULongLongDataTransferTraits > ULongLongPortTraits; +typedef PortTraits< POA_BULKIO::dataFloat, BULKIO::dataFloat, FloatDataTransferTraits > FloatPortTraits; +typedef PortTraits< POA_BULKIO::dataDouble, BULKIO::dataDouble, DoubleDataTransferTraits > DoublePortTraits; + +typedef PortTraits< POA_BULKIO::dataFile, BULKIO::dataFile, StringDataTransferTraits > URLPortTraits; +typedef PortTraits< POA_BULKIO::dataFile, BULKIO::dataFile, StringDataTransferTraits > FilePortTraits; +typedef PortTraits< POA_BULKIO::dataXML, BULKIO::dataXML, StringDataTransferTraits > XMLPortTraits; + + +typedef CharPortTraits Int8PortTraits; +typedef OctetPortTraits UInt8PortTraits; +typedef ShortPortTraits Int16PortTraits; +typedef UShortPortTraits Unt16PortTraits; +typedef LongPortTraits Int32PortTraits; +typedef ULongPortTraits Unt32PortTraits; +typedef LongLongPortTraits Int64PortTraits; +typedef ULongLongPortTraits Unt64PortTraits; + +} // end of bulkio namespace + + +#endif diff --git a/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_typetraits.h b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_typetraits.h new file mode 100644 index 000000000..e53b521e2 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/include/bulkio/bulkio_typetraits.h @@ -0,0 +1,114 @@ +/* + * This file is protected by Copyright. 
Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_typetraits_h +#define __bulkio_typetraits_h + +#include +#include + +#include +#include + +#include "BULKIO_Interfaces.h" +#include +#include "bulkio_base.h" + +namespace bulkio { + + template + struct CorbaTraits { + }; + +#define DEFINE_CORBA_TRAITS(NAME,TT,ST) \ + template <> \ + struct CorbaTraits { \ + typedef POA_BULKIO::NAME POAType; \ + typedef POA_BULKIO::internal::NAME##Ext POATypeExt; \ + typedef TT TransportType; \ + typedef ST SequenceType; \ + static const char* name() { return #NAME; } \ + }; + + DEFINE_CORBA_TRAITS(dataChar, CORBA::Char, PortTypes::CharSequence); + DEFINE_CORBA_TRAITS(dataOctet, CORBA::Octet, CF::OctetSequence); + DEFINE_CORBA_TRAITS(dataShort, CORBA::Short, PortTypes::ShortSequence); + DEFINE_CORBA_TRAITS(dataUshort, CORBA::UShort, PortTypes::UshortSequence); + DEFINE_CORBA_TRAITS(dataLong, CORBA::Long, PortTypes::LongSequence); + DEFINE_CORBA_TRAITS(dataUlong, CORBA::ULong, PortTypes::UlongSequence); + DEFINE_CORBA_TRAITS(dataLongLong, CORBA::LongLong, PortTypes::LongLongSequence); + DEFINE_CORBA_TRAITS(dataUlongLong, CORBA::ULongLong, PortTypes::UlongLongSequence); + 
DEFINE_CORBA_TRAITS(dataFloat, CORBA::Float, PortTypes::FloatSequence); + DEFINE_CORBA_TRAITS(dataDouble, CORBA::Double, PortTypes::DoubleSequence); + DEFINE_CORBA_TRAITS(dataBit, CORBA::Octet, BULKIO::BitSequence); + DEFINE_CORBA_TRAITS(dataFile, char, char*); + DEFINE_CORBA_TRAITS(dataXML, char, char*); + +#undef DEFINE_CORBA_TRAITS + + template + struct NativeTraits { + typedef typename CorbaTraits::TransportType NativeType; + static const size_t bits = sizeof(NativeType) * 8; + }; + + template <> + struct NativeTraits { + typedef int8_t NativeType; + static const size_t bits = 8; + }; + + template <> + struct NativeTraits { + typedef redhawk::shared_bitbuffer::data_type NativeType; + static const size_t bits = 1; + }; + + template + struct BufferTraits { + typedef typename NativeTraits::NativeType NativeType; + typedef std::vector VectorType; + typedef redhawk::shared_buffer BufferType; + typedef redhawk::buffer MutableBufferType; + }; + + template <> + struct BufferTraits { + typedef redhawk::shared_bitbuffer VectorType; + typedef redhawk::shared_bitbuffer BufferType; + typedef redhawk::bitbuffer MutableBufferType; + }; + + struct StringBufferTraits { + typedef std::string VectorType; + typedef std::string BufferType; + typedef std::string MutableBufferType; + }; + + template <> + struct BufferTraits : public StringBufferTraits { + }; + + template <> + struct BufferTraits : public StringBufferTraits { + }; +} + +#endif // __bulkio_typetraits_h diff --git a/bulkioInterfaces/libsrc/cpp/shm/FifoIPC.cpp b/bulkioInterfaces/libsrc/cpp/shm/FifoIPC.cpp new file mode 100644 index 000000000..27800baf9 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/shm/FifoIPC.cpp @@ -0,0 +1,241 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "FifoIPC.h" + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +namespace bulkio { + + namespace { + static std::string getErrorMessage() + { + char temp[1024]; + char* msg = strerror_r(errno, temp, sizeof(temp)); + return msg; + } + } + + TempFifo::TempFifo() : + _filename(_makeUniqueName()) + { + // Create the FIFO on the local filesystem + if (mkfifo(_filename.c_str(), 0666)) { + throw std::runtime_error("mkfifo " + _filename + ": " + getErrorMessage()); + } + } + + TempFifo::~TempFifo() + { + // Always unlink the FIFO; while it may have been explicitly unlinked, + // for exception cleanup it's best to be sure. + unlink(); + } + + const std::string& TempFifo::filename() const + { + return _filename; + } + + void TempFifo::unlink() + { + ::unlink(_filename.c_str()); + } + + std::string TempFifo::_makeUniqueName() + { + // Generate a unique name for the FIFO using the process ID and the + // address of the this object (as a hex number). There can only be one + // object at a given address at a time in a single process, so there + // should be no name collisions. 
+ std::ostringstream oss; + oss << "/tmp/fifo-" << getpid() << '-' << std::hex << (size_t) this; + return oss.str(); + } + + + Pipe::Pipe() : + _fd(-1), + _blockSize(32768) + { + // Setting a default block size of 32K seems to give the best performance + } + + Pipe::~Pipe() + { + close(); + } + + void Pipe::open(const std::string& filename, int flags) + { + _fd = ::open(filename.c_str(), flags); + if (_fd < 0) { + throw std::runtime_error("open " + filename + ": " + getErrorMessage()); + } + } + + void Pipe::setFlags(int flags) + { + fcntl(_fd, F_SETFL, fcntl(_fd, F_GETFL) | flags); + } + + void Pipe::clearFlags(int flags) + { + fcntl(_fd, F_SETFL, fcntl(_fd, F_GETFL) & ~flags); + } + + void Pipe::close() + { + if (_fd >= 0) { + ::close(_fd); + _fd = -1; + } + } + + bool Pipe::poll(int events, int timeout) + { + struct pollfd pfd; + pfd.fd = _fd; + pfd.events = events; + + return ::poll(&pfd, 1, timeout) == 1; + } + + size_t Pipe::read(void* buffer, size_t bytes) + { + char* ptr = static_cast(buffer); + size_t remain = bytes; + while (remain > 0) { + boost::this_thread::interruption_point(); + size_t todo = std::min(remain, _blockSize); + ssize_t pass = ::read(_fd, ptr, todo); + if (pass <= 0) { + if (pass < 0) { + throw std::runtime_error("read failed: " + getErrorMessage()); + } + break; + } + remain -= pass; + ptr += pass; + } + return (bytes - remain); + } + + void Pipe::write(const void* data, size_t bytes) + { + const char* ptr = static_cast(data); + size_t remain = bytes; + while (remain > 0) { + boost::this_thread::interruption_point(); + size_t todo = std::min(remain, _blockSize); + ssize_t pass = ::write(_fd, ptr, todo); + if (pass <= 0) { + if (pass < 0) { + throw std::runtime_error("write failed: " + getErrorMessage()); + } + return; + } + ptr += pass; + remain -= pass; + } + } + + + FifoEndpoint::FifoEndpoint() : + _fifo(), + _read(), + _write() + { + // For read mode, set non-blocking mode; this allows open() to return + // immediately, even though 
the write side has not been opened yet. + _read.open(_fifo.filename(), O_RDONLY|O_NONBLOCK); + + // Clear the non-blocking flag now, as future reads should be blocking. + _read.clearFlags(O_NONBLOCK); + } + + FifoEndpoint::~FifoEndpoint() + { + } + + const std::string& FifoEndpoint::name() const + { + return _fifo.filename(); + } + + void FifoEndpoint::connect(const std::string& name) + { + // Open the filename for write and Send a sync character to allow the + // read end to resynchronize. + _write.open(name, O_WRONLY); + char token = 'w'; + write(&token, sizeof(token)); + } + + void FifoEndpoint::sync(int timeout) + { + if (!_read.poll(POLLIN, timeout)) { + throw std::runtime_error("sync timed out"); + } + + // Read the sync character. Even though the non-blocking flag has been + // cleared, read may return immediately with no data, if the write side + // has not been opened yet. Retrying the read until it returns data + // ensures that the FIFO is fully connected. + char token; + if (read(&token, sizeof(token)) != sizeof(token)) { + throw std::runtime_error("read failed"); + } + + if (token != 'w') { + throw std::runtime_error("open failed"); + } + + // Once both ends have been established, it's safe to remove the FIFO + // from the file system. + _fifo.unlink(); + } + + size_t FifoEndpoint::read(void* buffer, size_t size) + { + return _read.read(buffer, size); + } + + void FifoEndpoint::write(const void* data, size_t bytes) + { + _write.write(data, bytes); + } + + void FifoEndpoint::disconnect() + { + _write.close(); + } + +} diff --git a/bulkioInterfaces/libsrc/cpp/shm/FifoIPC.h b/bulkioInterfaces/libsrc/cpp/shm/FifoIPC.h new file mode 100644 index 000000000..2455329e8 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/shm/FifoIPC.h @@ -0,0 +1,89 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_ipcfifo_h +#define __bulkio_ipcfifo_h + +#include + +namespace bulkio { + + class TempFifo { + public: + TempFifo(); + ~TempFifo(); + + const std::string& filename() const; + + void unlink(); + + private: + std::string _makeUniqueName(); + + const std::string _filename; + }; + + class Pipe { + public: + Pipe(); + ~Pipe(); + + void open(const std::string& filename, int flags); + void close(); + + void setFlags(int flags); + void clearFlags(int flags); + + bool poll(int events, int timeout); + + size_t read(void* buffer, size_t bytes); + void write(const void* data, size_t bytes); + + private: + int _fd; + + // Maximum size to read/write on a single pass + size_t _blockSize; + }; + + class FifoEndpoint { + public: + FifoEndpoint(); + ~FifoEndpoint(); + + const std::string& name() const; + + void connect(const std::string& name); + void sync(int timeout); + + size_t read(void* buffer, size_t bytes); + void write(const void* data, size_t bytes); + + void disconnect(); + void unlink(); + + private: + TempFifo _fifo; + Pipe _read; + Pipe _write; + }; +} + +#endif // __bulkio_ipcfifo_h diff --git a/bulkioInterfaces/libsrc/cpp/shm/MessageBuffer.h b/bulkioInterfaces/libsrc/cpp/shm/MessageBuffer.h new file mode 100644 index 000000000..4530f2354 --- /dev/null 
+++ b/bulkioInterfaces/libsrc/cpp/shm/MessageBuffer.h @@ -0,0 +1,122 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __bulkio_messagebuffer_h +#define __bulkio_messagebuffer_h + +#include + +namespace bulkio { + class MessageBuffer { + public: + MessageBuffer(size_t bytes=0) : + _offset(0) + { + _data.resize(bytes); + } + + char* buffer() + { + return &_data[0]; + } + + const char* buffer() const + { + return &_data[0]; + } + + size_t size() const + { + return _data.size(); + } + + size_t offset() const + { + return _offset; + } + + void resize(size_t size) + { + _data.resize(size); + } + + template + void read(U& val) + { + _checkRead(sizeof(U)); + val = *(reinterpret_cast(&_data[_offset])); + _offset += sizeof(U); + } + + void read(std::string& val) + { + size_t length; + read(length); + _checkRead(length); + const char* begin = &_data[_offset]; + const char* end = begin + length; + val.assign(begin, end); + _offset += length; + } + + template + void write(const U& val) + { + size_t offset = _data.size(); + _data.resize(offset + sizeof(U)); + *(reinterpret_cast(&_data[offset])) = val; + } + + void write(const std::string& 
val) + { + _writeString(val.size(), val.data()); + } + + void write(const char* val) + { + _writeString(strlen(val), val); + } + + private: + inline void _writeString(size_t length, const char* data) + { + write(length); + size_t offset = _data.size(); + _data.resize(offset + length); + strncpy(&_data[offset], data, length); + } + + inline void _checkRead(size_t bytes) + { + if ((_offset + bytes) > _data.size()) { + if (_offset >= _data.size()) { + throw std::runtime_error("read from empty buffer"); + } else { + throw std::runtime_error("read exceeds buffer size"); + } + } + } + + std::vector _data; + size_t _offset; + }; +} + +#endif // __bulkio_messagebuffer_h diff --git a/bulkioInterfaces/libsrc/cpp/shm/ShmInputTransport.cpp b/bulkioInterfaces/libsrc/cpp/shm/ShmInputTransport.cpp new file mode 100644 index 000000000..d3e12ca83 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/shm/ShmInputTransport.cpp @@ -0,0 +1,289 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "ShmInputTransport.h" +#include "FifoIPC.h" +#include "MessageBuffer.h" + +#include + +#include +#include + +#include +#include + +#include "bulkio_p.h" + +namespace bulkio { + + template + class ShmInputTransport : public InputTransport + { + public: + typedef typename NativeTraits::NativeType NativeType; + typedef typename BufferTraits::BufferType BufferType; + + ShmInputTransport(InPort* port, const std::string& transportId, + const std::string& writePath) : + InputTransport(port, transportId), + _running(false), + _fifo() + { + _fifo.connect(writePath); + } + + ~ShmInputTransport() + { + _fifo.disconnect(); + + // The HeapClient is automatically detached by its destructor, + // ensuring that any heap(s) it was attached to can be cleaned up + } + + std::string transportType() const + { + return "shmipc"; + } + + void startTransport() + { + _running = true; + _thread = boost::thread(&ShmInputTransport::_run, this); + } + + void stopTransport() + { + { + boost::mutex::scoped_lock lock(_mutex); + if (!_running) { + return; + } + + _running = false; + _thread.interrupt(); + } + _thread.join(); + } + + const std::string& getFifoName() const + { + return _fifo.name(); + } + + protected: + bool _isRunning() + { + boost::mutex::scoped_lock lock(_mutex); + return _running; + } + + void _run() + { + // Give the FIFO up to a second to sychronize with the other + // side. This method is being run on a thread that gets started + // when the transport is negotiated, so the uses side may take a + // moment to receive the result and connect on its end. 
+ try { + _fifo.sync(1000); + } catch (const std::exception& exc) { + RH_NL_ERROR("ShmTransport", "Synchronization failed on BulkIO input transport: " << exc.what()); + return; + } + + while (_isRunning()) { + if (!_receiveMessage()) { + return; + } + } + } + + bool _receiveMessage() + { + size_t msg_length; + if (_fifo.read(&msg_length, sizeof(msg_length)) != sizeof(msg_length)) { + return false; + } + + MessageBuffer msg(msg_length); + if (_fifo.read(msg.buffer(), msg.size()) != msg_length){ + return false; + } + + std::string message_name; + try { + msg.read(message_name); + if (message_name == "pushPacket") { + _receivePushPacket(msg); + } else { + // Unknown message type, send error response back + throw std::logic_error("invalid message type"); + } + } catch (const std::exception& exc) { + RH_NL_ERROR("ShmTransport", "Error handling message '" << message_name << "': " << exc.what()); + size_t status = 1; + _fifo.write(&status, sizeof(size_t)); + } + + return true; + } + + void _receivePushPacket(MessageBuffer& msg) + { + size_t count; + msg.read(count); + + BULKIO::PrecisionUTCTime T; + msg.read(T); + + bool EOS; + msg.read(EOS); + + std::string streamID; + msg.read(streamID); + + BufferType buffer; + if (count > 0) { + bool inband_data; + msg.read(inband_data); + + if (inband_data) { + redhawk::buffer temp(count); + _fifo.read(temp.data(), temp.size() * sizeof(NativeType)); + buffer = temp; + } else { + _receiveSharedBuffer(msg, buffer, count); + } + } + + if (msg.offset() < msg.size()) { + std::cerr << "Message bytes left over" << std::endl; + } + + this->_queuePacket(buffer, T, EOS, streamID); + + // Send response back + size_t status = 0; + _fifo.write(&status, sizeof(size_t)); + } + + void _receiveSharedBuffer(MessageBuffer& msg, BufferType& buffer, size_t size) + { + redhawk::shm::MemoryRef ref; + msg.read(ref.heap); + msg.read(ref.superblock); + msg.read(ref.offset); + + size_t offset; + msg.read(offset); + + void* base = _heapClient.fetch(ref); + + // 
Find the first element, which may be offset from the base + // pointer. If so, start with a larger buffer and then trim the + // elements off of the front. + size_t start = offset / sizeof(NativeType); + if ((start * sizeof(NativeType)) != offset) { + // The starting element is not aligned from the base, which + // will require some additional care to adjust. Start with a + // char buffer, trim that to the correct starting point, and + // then recast to the desired type. + char* ptr = reinterpret_cast(base); + size_t bytes = offset + size * sizeof(NativeType); + redhawk::shared_buffer temp(ptr, bytes, &redhawk::shm::HeapClient::deallocate, redhawk::detail::process_shared_tag()); + temp.trim(offset); + + buffer = BufferType::recast(temp); + } else { + // Normal alignment, include the start offset (if any) in the + // initial buffer size, then trim to the desired start. + NativeType* ptr = reinterpret_cast(base); + size += start; + buffer = BufferType(ptr, size, &redhawk::shm::HeapClient::deallocate, redhawk::detail::process_shared_tag()); + if (start > 0) { + buffer.trim(start); + } + } + } + + volatile bool _running; + boost::mutex _mutex; + boost::thread _thread; + FifoEndpoint _fifo; + redhawk::shm::HeapClient _heapClient; + }; + + template + ShmInputManager::ShmInputManager(InPort* port) : + InputManager(port) + { + } + + template + std::string ShmInputManager::transportType() + { + return "shmipc"; + } + + template + CF::Properties ShmInputManager::transportProperties() + { + CF::Properties properties; + + char host[HOST_NAME_MAX+1]; + gethostname(host, sizeof(host)); + + ossie::corba::push_back(properties, redhawk::PropertyType("hostname", std::string(host))); + return properties; + } + + template + InputTransport* ShmInputManager::createInputTransport(const std::string& transportId, + const redhawk::PropertyMap& properties) + { + if (!properties.contains("fifo")) { + throw redhawk::FatalTransportError("invalid properties for shared memory connection"); + } + 
const std::string location = properties["fifo"].toString(); + try { + return new ShmInputTransport(this->_port, transportId, location); + } catch (const std::exception& exc) { + throw redhawk::FatalTransportError("failed to connect to FIFO " + location); + } + } + + template + redhawk::PropertyMap ShmInputManager::getNegotiationProperties(redhawk::ProvidesTransport* providesTransport) + { + InputTransportType* transport = dynamic_cast(providesTransport); + if (!transport) { + throw std::logic_error("invalid provides transport instance"); + } + redhawk::PropertyMap properties; + properties["fifo"] = transport->getFifoName(); + return properties; + } + +#define INSTANTIATE_NUMERIC_TEMPLATE(x) \ + template class ShmInputTransport; \ + template class ShmInputManager; + + FOREACH_NUMERIC_PORT_TYPE(INSTANTIATE_NUMERIC_TEMPLATE); +} diff --git a/bulkioInterfaces/libsrc/cpp/shm/ShmInputTransport.h b/bulkioInterfaces/libsrc/cpp/shm/ShmInputTransport.h new file mode 100644 index 000000000..951c03916 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/shm/ShmInputTransport.h @@ -0,0 +1,53 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef __bulkio_shminputtransport_h +#define __bulkio_shminputtransport_h + +#include +#include + +#include + +namespace bulkio { + + template + class ShmInputTransport; + + template + class ShmInputManager : public InputManager + { + public: + typedef ShmInputTransport InputTransportType; + + ShmInputManager(InPort* port); + + virtual std::string transportType(); + + virtual CF::Properties transportProperties(); + + virtual InputTransport* createInputTransport(const std::string& transportId, + const redhawk::PropertyMap& properties); + + virtual redhawk::PropertyMap getNegotiationProperties(redhawk::ProvidesTransport* providesTransport); + }; +} + +#endif // __bulkio_shminputtransport_h diff --git a/bulkioInterfaces/libsrc/cpp/shm/ShmOutputTransport.cpp b/bulkioInterfaces/libsrc/cpp/shm/ShmOutputTransport.cpp new file mode 100644 index 000000000..e2c7b535f --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/shm/ShmOutputTransport.cpp @@ -0,0 +1,361 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "ShmOutputTransport.h" +#include "FifoIPC.h" +#include "MessageBuffer.h" + +#include + +#include + +#include +#include + +#include "bulkio_p.h" + +namespace bulkio { + + struct ShmStatPoint { + ShmStatPoint() : + shmTransfer(0), + copied(0) + { + } + + ShmStatPoint(bool shmTransfer, bool copied) : + shmTransfer(shmTransfer), + copied(copied) + { + } + + ShmStatPoint operator+ (const ShmStatPoint& other) const + { + ShmStatPoint result(*this); + result += other; + return result; + } + + ShmStatPoint& operator+= (const ShmStatPoint& other) + { + shmTransfer += other.shmTransfer; + copied += other.copied; + return *this; + } + + int shmTransfer; + int copied; + }; + + template + class ShmOutputTransport : public OutputTransport + { + public: + typedef typename PortType::_ptr_type PtrType; + typedef typename OutputTransport::BufferType BufferType; + typedef typename BufferType::value_type ElementType; + typedef typename CorbaTraits::TransportType TransportType; + + ShmOutputTransport(OutPort* parent, PtrType port) : + OutputTransport(parent, port), + _fifo() + { + } + + ~ShmOutputTransport() + { + } + + virtual std::string transportType() const + { + return "shmipc"; + } + + virtual CF::Properties transportInfo() const + { + return CF::Properties(); + } + + const std::string& getFifoName() + { + return _fifo.name(); + } + + void finishConnect(const std::string& filename) + { + _fifo.connect(filename); + + // The provides side should have already opened its write end, so + // if the FIFO doesn't sync immediately, something is wrong. 
+ _fifo.sync(0); + } + + virtual void disconnect() + { + OutputTransport::disconnect(); + _fifo.disconnect(); + } + + protected: + virtual void _pushSRI(const BULKIO::StreamSRI& sri) + { + try { + this->_objref->pushSRI(sri); + } catch (const CORBA::SystemException& exc) { + throw redhawk::FatalTransportError(ossie::corba::describeException(exc)); + } + } + + virtual void _pushPacket(const BufferType& data, + const BULKIO::PrecisionUTCTime& T, + bool EOS, + const std::string& streamID) + { + MessageBuffer header; + header.write("pushPacket"); + + header.write(data.size()); + header.write(T); + header.write(EOS); + header.write(streamID); + + // Temporary buffer to ensure that if a copy is made, it gets + // released after the transfer + BufferType copy; + + // Data may be sent over the FIFO if shared memory is unavailable; + // this is slower but provides a more graceful failure mode + const void* body = 0; + size_t body_size = 0; + + // If the packet is non-empty, write the additional shared memory + // information for the remote side to pick up + if (!data.empty()) { + // Track whether the buffer was able to be transferred via a + // shared memory object, or if it has to be copied + bool shm_transfer = false; + + // Check that the buffer is already in shared memory (this is + // hoped to be the common case); if not, copy it into another + if (data.get_memory().is_process_shared()) { + // Include the offset from the start of allocated memory to + // the first element of the buffer + const void* base = data.get_memory().address(); + size_t offset = reinterpret_cast(data.data()) - reinterpret_cast(base); + shm_transfer = _transferBuffer(header, base, offset); + } else { + // Try to explicitly allocate from shared memory via the + // global function (which will return a null pointer on + // failure, as opposed to throwing an exception) + size_t count = data.size(); + size_t bytes = count * sizeof(ElementType); + ElementType* ptr = 
static_cast(redhawk::shm::allocate(bytes)); + if (ptr) { + // Make a copy of the data into the new shared memory, + // ensuring it gets cleaned up appropriately + std::memcpy(ptr, data.data(), bytes); + copy = BufferType(ptr, count, redhawk::shm::deallocate); + shm_transfer = _transferBuffer(header, ptr, 0); + } else { + // Shared memory must be exhausted, fall back to using + // in-band transfer + shm_transfer = false; + } + } + + // If we weren't able to transfer the buffer using shared + // memory, set up to copy it via the FIFO + if (!shm_transfer) { + header.write(true); + body = data.data(); + body_size = data.size() * sizeof(data[0]); + } + } + + _sendMessage(header.buffer(), header.size(), body, body_size); + + ShmStatPoint stat(body_size == 0, !copy.empty()); + _recordExtendedStatistics(stat); + } + + virtual redhawk::PropertyMap _getExtendedStatistics() + { + ShmStatPoint stats = std::accumulate(_extendedStats.begin(), _extendedStats.end(), ShmStatPoint()); + double copy_rate = 0.0; + double shm_rate = 0.0; + if (!_extendedStats.empty()) { + copy_rate = stats.copied * 100.0 / _extendedStats.size(); + shm_rate = stats.shmTransfer * 100.0 / _extendedStats.size(); + } + + redhawk::PropertyMap statistics; + statistics["shm::copy_rate"] = copy_rate; + statistics["shm::shm_rate"] = shm_rate; + return statistics; + } + + private: + void _recordExtendedStatistics(const ShmStatPoint& stat) + { + _extendedStats.push_back(stat); + if (_extendedStats.size() > 10) { + _extendedStats.pop_front(); + } + } + + bool _transferBuffer(MessageBuffer& header, const void* base, size_t offset) + { + redhawk::shm::MemoryRef ref = redhawk::shm::Heap::getRef(base); + if (!ref) { + // The allocator was unable to use shared memory + return false; + } + + header.write(false); + header.write(ref.heap); + header.write(ref.superblock); + header.write(ref.offset); + header.write(offset); + + return true; + } + + void _sendMessage(const void* header, size_t hsize, const void* body, size_t 
bsize) + { + try { + _fifo.write(&hsize, sizeof(hsize)); + _fifo.write(header, hsize); + if (bsize > 0) { + _fifo.write(body, bsize); + } + } catch (const std::exception& exc) { + throw redhawk::FatalTransportError(exc.what()); + } + + size_t status = 0; + size_t count = 0; + try { + count = _fifo.read(&status, sizeof(size_t)); + } catch (const std::exception& exc) { + throw redhawk::FatalTransportError(exc.what()); + } + + if (count != sizeof(size_t)) { + throw redhawk::FatalTransportError("failed to read response"); + } else if (status != 0) { + throw redhawk::TransportError("call failed"); + } + } + + FifoEndpoint _fifo; + + std::deque _extendedStats; + }; + + template + ShmOutputManager::ShmOutputManager(OutPort* port) : + OutputManager(port) + { + char host[HOST_NAME_MAX+1]; + gethostname(host, sizeof(host)); + _hostname = host; + } + + template + std::string ShmOutputManager::transportType() + { + return "shmipc"; + } + + template + CF::Properties ShmOutputManager::transportProperties() + { + CF::Properties properties; + ossie::corba::push_back(properties, redhawk::PropertyType("hostname", _hostname)); + return properties; + } + + template + OutputTransport* + ShmOutputManager::createOutputTransport(PtrType object, + const std::string& connectionId, + const redhawk::PropertyMap& properties) + { + // For testing, allow disabling + const char* shm_env = getenv("BULKIO_SHM"); + if (shm_env && (strcmp(shm_env, "disable") == 0)) { + return 0; + } + + // If the other end of the connection has a different hostname, it + // is reasonable to assume that we cannot use shared memory + if (properties.get("hostname", "").toString() != _hostname) { + RH_NL_TRACE("ShmTransport", "Connection '" << connectionId << "' is on another host"); + return 0; + } + + // Check whether shared memory is enabled--there may not be enough free + // space to create the heap. The degraded send-via-FIFO mode is usually + // slower CORBA. 
+ if (!redhawk::shm::isEnabled()) { + RH_NL_DEBUG("ShmTransport", "Cannot create SHM transport, shared memory is not available"); + return 0; + } + + return new ShmOutputTransport(this->_port, object); + } + + template + redhawk::PropertyMap ShmOutputManager::getNegotiationProperties(redhawk::UsesTransport* transport) + { + TransportType* shm_transport = dynamic_cast(transport); + if (!shm_transport) { + throw std::logic_error("invalid transport type"); + } + + redhawk::PropertyMap properties; + properties["fifo"] = shm_transport->getFifoName(); + return properties; + } + + template + void ShmOutputManager::setNegotiationResult(redhawk::UsesTransport* transport, + const redhawk::PropertyMap& properties) + { + TransportType* shm_transport = dynamic_cast(transport); + if (!shm_transport) { + throw std::logic_error("invalid transport type"); + } + + if (!properties.contains("fifo")) { + throw redhawk::FatalTransportError("invalid properties for shared memory connection"); + } + + std::string fifo_name = properties["fifo"].toString(); + RH_NL_DEBUG("ShmTransport", "Connecting to provides port FIFO: " << fifo_name); + shm_transport->finishConnect(fifo_name); + } + +#define INSTANTIATE_TEMPLATE(x) \ + template class ShmOutputTransport; \ + template class ShmOutputManager; + + FOREACH_NUMERIC_PORT_TYPE(INSTANTIATE_TEMPLATE); +} diff --git a/bulkioInterfaces/libsrc/cpp/shm/ShmOutputTransport.h b/bulkioInterfaces/libsrc/cpp/shm/ShmOutputTransport.h new file mode 100644 index 000000000..4ea287716 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/shm/ShmOutputTransport.h @@ -0,0 +1,60 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef __bulkio_shmoutputtransport_h +#define __bulkio_shmoutputtransport_h + +#include +#include + +#include + +namespace bulkio { + + template + class ShmOutputTransport; + + template + class ShmOutputManager : public OutputManager + { + public: + typedef ShmOutputTransport TransportType; + typedef typename PortType::_ptr_type PtrType; + + ShmOutputManager(OutPort* port); + + virtual std::string transportType(); + + virtual CF::Properties transportProperties(); + + virtual OutputTransport* createOutputTransport(PtrType object, + const std::string& connectionId, + const redhawk::PropertyMap& properties); + + virtual redhawk::PropertyMap getNegotiationProperties(redhawk::UsesTransport* transport); + + virtual void setNegotiationResult(redhawk::UsesTransport* transport, const redhawk::PropertyMap& properties); + + private: + std::string _hostname; + }; + +} + +#endif // __bulkio_shmoutputtransport_h diff --git a/bulkioInterfaces/libsrc/cpp/shm/ShmTransportFactory.cpp b/bulkioInterfaces/libsrc/cpp/shm/ShmTransportFactory.cpp new file mode 100644 index 000000000..37b1c3542 --- /dev/null +++ b/bulkioInterfaces/libsrc/cpp/shm/ShmTransportFactory.cpp @@ -0,0 +1,71 @@ +/* + * This file is protected by Copyright. 
Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "ShmOutputTransport.h" +#include "ShmInputTransport.h" + +#include "bulkio_p.h" + +namespace bulkio { + + template + class ShmTransportFactory : public BulkioTransportFactory + { + public: + ShmTransportFactory() + { + } + + virtual std::string transportType() + { + return "shmipc"; + } + + virtual int defaultPriority() + { + return 1; + } + + virtual InputManager* createInputManager(InPort* port) + { + return new ShmInputManager(port); + } + + virtual OutputManager* createOutputManager(OutPort* port) + { + return new ShmOutputManager(port); + } + }; + + static int initializeModule() + { +#define REGISTER_FACTORY(x) \ + { \ + static ShmTransportFactory factory; \ + redhawk::TransportRegistry::RegisterTransport(&factory); \ + } + + FOREACH_NUMERIC_PORT_TYPE(REGISTER_FACTORY); + + return 0; + } + + static int initialized = initializeModule(); +} diff --git a/bulkioInterfaces/libsrc/java/META-INF/MANIFEST.MF.src.in b/bulkioInterfaces/libsrc/java/META-INF/MANIFEST.MF.src.in deleted file mode 100644 index 470d5afd6..000000000 --- a/bulkioInterfaces/libsrc/java/META-INF/MANIFEST.MF.src.in +++ /dev/null @@ -1,7 +0,0 @@ 
-Bundle-ManifestVersion: 2 -Bundle-Name: BULKIO Base Class Library Source -Bundle-SymbolicName: bulkio.src -Bundle-Version: @BULKIO_API_VERSION@ -Bundle-RequiredExecutionEnvironment: JavaSE-1.6 -Bundle-Vendor: REDHAWK -Eclipse-SourceBundle: bulkio;version=@BULKIO_API_VERSION@ diff --git a/bulkioInterfaces/libsrc/java/sed/Char.sed b/bulkioInterfaces/libsrc/java/sed/Char.sed new file mode 100644 index 000000000..4f90d6424 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/Char.sed @@ -0,0 +1,4 @@ +s/@name@/Char/g +s/@type@/char/g +s/@idl@/dataChar/g +s/@size@/8/g diff --git a/bulkioInterfaces/libsrc/java/sed/Double.sed b/bulkioInterfaces/libsrc/java/sed/Double.sed new file mode 100644 index 000000000..60ea843dc --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/Double.sed @@ -0,0 +1,4 @@ +s/@name@/Double/g +s/@type@/double/g +s/@idl@/dataDouble/g +s/@size@/64/g diff --git a/bulkioInterfaces/libsrc/java/sed/Float.sed b/bulkioInterfaces/libsrc/java/sed/Float.sed new file mode 100644 index 000000000..743cd7f43 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/Float.sed @@ -0,0 +1,4 @@ +s/@name@/Float/g +s/@type@/float/g +s/@idl@/dataFloat/g +s/@size@/32/g diff --git a/bulkioInterfaces/libsrc/java/sed/Long.sed b/bulkioInterfaces/libsrc/java/sed/Long.sed new file mode 100644 index 000000000..dd736fabf --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/Long.sed @@ -0,0 +1,4 @@ +s/@name@/Long/g +s/@type@/int/g +s/@idl@/dataLong/g +s/@size@/32/g diff --git a/bulkioInterfaces/libsrc/java/sed/LongLong.sed b/bulkioInterfaces/libsrc/java/sed/LongLong.sed new file mode 100644 index 000000000..3af7a60c0 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/LongLong.sed @@ -0,0 +1,4 @@ +s/@name@/LongLong/g +s/@type@/long/g +s/@idl@/dataLongLong/g +s/@size@/64/g diff --git a/bulkioInterfaces/libsrc/java/sed/Octet.sed b/bulkioInterfaces/libsrc/java/sed/Octet.sed new file mode 100644 index 000000000..6664a6d01 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/Octet.sed @@ 
-0,0 +1,4 @@ +s/@name@/Octet/g +s/@type@/byte/g +s/@idl@/dataOctet/g +s/@size@/8/g diff --git a/bulkioInterfaces/libsrc/java/sed/Short.sed b/bulkioInterfaces/libsrc/java/sed/Short.sed new file mode 100644 index 000000000..8fcdb9375 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/Short.sed @@ -0,0 +1,4 @@ +s/@name@/Short/g +s/@type@/short/g +s/@idl@/dataShort/g +s/@size@/16/g diff --git a/bulkioInterfaces/libsrc/java/sed/ULong.sed b/bulkioInterfaces/libsrc/java/sed/ULong.sed new file mode 100644 index 000000000..49d4e7d61 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/ULong.sed @@ -0,0 +1,4 @@ +s/@name@/ULong/g +s/@type@/int/g +s/@idl@/dataUlong/g +s/@size@/32/g diff --git a/bulkioInterfaces/libsrc/java/sed/ULongLong.sed b/bulkioInterfaces/libsrc/java/sed/ULongLong.sed new file mode 100644 index 000000000..f89296bff --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/ULongLong.sed @@ -0,0 +1,4 @@ +s/@name@/ULongLong/g +s/@type@/long/g +s/@idl@/dataUlongLong/g +s/@size@/64/g diff --git a/bulkioInterfaces/libsrc/java/sed/UShort.sed b/bulkioInterfaces/libsrc/java/sed/UShort.sed new file mode 100644 index 000000000..11da9a51d --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/UShort.sed @@ -0,0 +1,4 @@ +s/@name@/UShort/g +s/@type@/short/g +s/@idl@/dataUshort/g +s/@size@/16/g diff --git a/bulkioInterfaces/libsrc/java/sed/deprecated/Int16.sed b/bulkioInterfaces/libsrc/java/sed/deprecated/Int16.sed new file mode 100644 index 000000000..6ca5a1059 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/deprecated/Int16.sed @@ -0,0 +1,2 @@ +s/@name@/Int16/g +s/@alias@/Short/g diff --git a/bulkioInterfaces/libsrc/java/sed/deprecated/Int32.sed b/bulkioInterfaces/libsrc/java/sed/deprecated/Int32.sed new file mode 100644 index 000000000..656a32b05 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/deprecated/Int32.sed @@ -0,0 +1,2 @@ +s/@name@/Int32/g +s/@alias@/Long/g diff --git a/bulkioInterfaces/libsrc/java/sed/deprecated/Int64.sed 
b/bulkioInterfaces/libsrc/java/sed/deprecated/Int64.sed new file mode 100644 index 000000000..c36b4256d --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/deprecated/Int64.sed @@ -0,0 +1,2 @@ +s/@name@/Int64/g +s/@alias@/LongLong/g diff --git a/bulkioInterfaces/libsrc/java/sed/deprecated/Int8.sed b/bulkioInterfaces/libsrc/java/sed/deprecated/Int8.sed new file mode 100644 index 000000000..ec9fa079e --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/deprecated/Int8.sed @@ -0,0 +1,2 @@ +s/@name@/Int8/g +s/@alias@/Char/g diff --git a/bulkioInterfaces/libsrc/java/sed/deprecated/UInt16.sed b/bulkioInterfaces/libsrc/java/sed/deprecated/UInt16.sed new file mode 100644 index 000000000..fcefe62a9 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/deprecated/UInt16.sed @@ -0,0 +1,2 @@ +s/@name@/UInt16/g +s/@alias@/UShort/g diff --git a/bulkioInterfaces/libsrc/java/sed/deprecated/UInt32.sed b/bulkioInterfaces/libsrc/java/sed/deprecated/UInt32.sed new file mode 100644 index 000000000..9e0c71774 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/deprecated/UInt32.sed @@ -0,0 +1,2 @@ +s/@name@/UInt32/g +s/@alias@/ULong/g diff --git a/bulkioInterfaces/libsrc/java/sed/deprecated/UInt64.sed b/bulkioInterfaces/libsrc/java/sed/deprecated/UInt64.sed new file mode 100644 index 000000000..f242e172a --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/deprecated/UInt64.sed @@ -0,0 +1,2 @@ +s/@name@/UInt64/g +s/@alias@/ULongLong/g diff --git a/bulkioInterfaces/libsrc/java/sed/deprecated/UInt8.sed b/bulkioInterfaces/libsrc/java/sed/deprecated/UInt8.sed new file mode 100644 index 000000000..0fb9ba5ca --- /dev/null +++ b/bulkioInterfaces/libsrc/java/sed/deprecated/UInt8.sed @@ -0,0 +1,2 @@ +s/@name@/UInt8/g +s/@alias@/Octet/g diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/.gitignore b/bulkioInterfaces/libsrc/java/src/bulkio/.gitignore new file mode 100644 index 000000000..3f8561397 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/.gitignore @@ -0,0 +1,32 @@ 
+CharDataHelper.java +DoubleDataHelper.java +FloatDataHelper.java +ShortDataHelper.java +LongDataHelper.java +LongLongDataHelper.java +OctetDataHelper.java +UShortDataHelper.java +ULongDataHelper.java +ULongLongDataHelper.java +InDoublePort.java +InFloatPort.java +InShortPort.java +InLongPort.java +InLongLongPort.java +InCharPort.java +InUShortPort.java +InULongPort.java +InULongLongPort.java +InOctetPort.java +OutDoublePort.java +OutFloatPort.java +OutShortPort.java +OutLongPort.java +OutLongLongPort.java +OutCharPort.java +OutUShortPort.java +OutULongPort.java +OutULongLongPort.java +OutOctetPort.java +In*Int*Port.java +Out*Int*Port.java diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/BitDataHelper.java b/bulkioInterfaces/libsrc/java/src/bulkio/BitDataHelper.java new file mode 100644 index 000000000..8fef76448 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/BitDataHelper.java @@ -0,0 +1,57 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +package bulkio; + +import java.util.Arrays; + +class BitDataHelper implements DataHelper { + public int bitSize() { + return 1; + } + + public int arraySize(BULKIO.BitSequence data) { + return data.bits; + } + + public boolean isEmpty(BULKIO.BitSequence data) + { + return (data.bits == 0); + } + + public BULKIO.BitSequence emptyArray() { + BULKIO.BitSequence array = new BULKIO.BitSequence(); + array.data = new byte[0]; + array.bits = 0; + return array; + } + + public BULKIO.BitSequence slice(BULKIO.BitSequence data, int start, int end) { + // Without a bit array API, limit slicing to byte boundaries + if (start % 8 != 0) { + throw new IllegalArgumentException("start index <" + start + "> is not byte-aligned"); + } else if (end % 8 != 0) { + throw new IllegalArgumentException("end index <" + end + "> is not byte-aligned"); + } + BULKIO.BitSequence result = new BULKIO.BitSequence(); + result.data = Arrays.copyOfRange(data.data, start/8, end/8); + result.bits = end - start; + return result; + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/ChunkingOutPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/ChunkingOutPort.java new file mode 100644 index 000000000..9436e34e0 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/ChunkingOutPort.java @@ -0,0 +1,101 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package bulkio; + +import org.apache.log4j.Logger; + +import BULKIO.PrecisionUTCTime; + +/** + * Adds chunking of pushes to fit within the maximum CORBA transfer size for + * BulkIO data types that support it. + */ +abstract class ChunkingOutPort extends OutDataPort { + /** + * CORBA transfer limit in bytes + */ + // Multiply by some number < 1 to leave some margin for the CORBA header + protected static final int MAX_PAYLOAD_SIZE = (int)(Const.MAX_TRANSFER_BYTES * 0.9); + + /** + * CORBA transfer limit in samples + */ + protected int maxSamplesPerPush; + + protected ChunkingOutPort(String portName, Logger logger, ConnectionEventListener connectionListener, DataHelper helper) { + super(portName, logger, connectionListener, helper); + this.maxSamplesPerPush = (8 * MAX_PAYLOAD_SIZE) / helper.bitSize(); + } + + protected void pushPacketData(A data, PrecisionUTCTime time, boolean endOfStream, String streamID) { + pushOversizedPacket(data, time, endOfStream, streamID); + } + + private void pushOversizedPacket(A data, PrecisionUTCTime time, boolean endOfStream, String streamID) { + final int length = helper.arraySize(data); + + // If there is no need to break data into smaller packets, skip + // straight to the pushPacket call and return. 
+ if (length <= maxSamplesPerPush) { + this.pushSinglePacket(data, time, endOfStream, streamID); + return; + } + + BULKIO.StreamSRI sri = this.currentSRIs.get(streamID).sri; + double xdelta = sri.xdelta; + int item_size = 1; + if (sri.mode != 0) { + item_size = 2; + } + int frame_size = item_size; + if (sri.subsize > 0) { + frame_size *= sri.subsize; + } + // Quantize the push size (in terms of scalars) to the nearest frame, + // which takes both the complex mode and subsize into account + final int max_push_size = (maxSamplesPerPush/frame_size) * frame_size; + + // Initialize time of first subpacket + PrecisionUTCTime packetTime = time; + for (int offset = 0; offset < length;) { + // Don't send more samples than are remaining + final int pushSize = java.lang.Math.min(length-offset, max_push_size); + + // Copy the range for this sub-packet and advance the offset + A subPacket = helper.slice(data, offset, offset+pushSize); + offset += pushSize; + + // Send end-of-stream as false for all sub-packets except for the + // last one (when there are no samples remaining after this push), + // which gets the input EOS. + boolean packetEOS = false; + if (offset == length) { + packetEOS = endOfStream; + } + + if (logger != null) { + logger.trace("bulkio.OutPort pushOversizedPacket() calling pushPacket with pushSize " + pushSize + " and packetTime twsec: " + packetTime.twsec + " tfsec: " + packetTime.tfsec); + } + this.pushSinglePacket(subPacket, packetTime, packetEOS, streamID); + int data_xfer_len = pushSize / item_size; + packetTime = bulkio.time.utils.addSampleOffset(packetTime, data_xfer_len, xdelta); + } + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/DataHelper.java b/bulkioInterfaces/libsrc/java/src/bulkio/DataHelper.java new file mode 100644 index 000000000..a0bf0df60 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/DataHelper.java @@ -0,0 +1,28 @@ +/* + * This file is protected by Copyright. 
Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package bulkio; + +interface DataHelper { + int bitSize(); + E emptyArray(); + boolean isEmpty(E data); + int arraySize(E data); + E slice(E data, int start, int end); +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/DeprecatedInPort.java.template b/bulkioInterfaces/libsrc/java/src/bulkio/DeprecatedInPort.java.template new file mode 100644 index 000000000..9410f95df --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/DeprecatedInPort.java.template @@ -0,0 +1,55 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from DeprecatedInPort.java.template. + * Do not modify directly. + */ +package bulkio; + +import org.apache.log4j.Logger; + +@Deprecated +public class In@name@Port extends In@alias@Port { + + public In@name@Port(String portName) { + super(portName); + } + + public In@name@Port(String portName, bulkio.sri.Comparator compareSRI) { + super(portName, compareSRI); + } + + public In@name@Port(String portName, + bulkio.sri.Comparator compareSRI, + bulkio.SriListener sriCallback) { + super(portName, compareSRI, sriCallback); + } + + public In@name@Port(String portName, Logger logger) { + super(portName, logger); + } + + public In@name@Port(String portName, + Logger logger, + bulkio.sri.Comparator compareSRI, + bulkio.SriListener sriCallback) { + super(portName, logger, compareSRI, sriCallback); + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/DeprecatedOutPort.java.template b/bulkioInterfaces/libsrc/java/src/bulkio/DeprecatedOutPort.java.template new file mode 100644 index 000000000..8803c6d25 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/DeprecatedOutPort.java.template @@ -0,0 +1,42 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from DeprecatedOutPort.java.template. + * Do not modify directly. + */ +package bulkio; + +import org.apache.log4j.Logger; + +@Deprecated +public class Out@name@Port extends Out@alias@Port { + + public Out@name@Port(String portName) { + super(portName); + } + + public Out@name@Port(String portName, Logger logger) { + super(portName, logger); + } + + public Out@name@Port(String portName, Logger logger, ConnectionEventListener eventCB) { + super(portName, logger, eventCB); + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/FileDataHelper.java b/bulkioInterfaces/libsrc/java/src/bulkio/FileDataHelper.java new file mode 100644 index 000000000..99733b397 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/FileDataHelper.java @@ -0,0 +1,43 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package bulkio; + +class FileDataHelper implements DataHelper { + public int bitSize() { + return 8; + } + + public int arraySize(String data) { + return data.length(); + } + + public boolean isEmpty(String data) + { + return data.isEmpty(); + } + + public String emptyArray() { + return ""; + } + + public String slice(String data, int start, int end) { + return data.substring(start, end); + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InBitPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InBitPort.java new file mode 100644 index 000000000..3602b82c3 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/InBitPort.java @@ -0,0 +1,171 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +package bulkio; + +import org.apache.log4j.Logger; + +import org.ossie.component.RHLogger; + +import BULKIO.PrecisionUTCTime; +import BULKIO.StreamSRI; +import BULKIO.PortStatistics; +import BULKIO.PortUsageType; + +/** + * + */ +public class InBitPort extends BULKIO.jni.dataBitPOA implements InDataPort { + + /** + * A class to hold packet data. + * + */ + public class Packet extends DataTransfer { + + public Packet(BULKIO.BitSequence data, PrecisionUTCTime timeStamp, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { + super(data,timeStamp,endOfStream,streamID,H,sriChanged,inputQueueFlushed); + }; + }; + + private InPortImpl impl; + + /** + * + */ + public InBitPort(String name) { + this(name, null); + } + + public InBitPort(String name, Logger logger) { + impl = new InPortImpl(name, logger, new bulkio.sri.DefaultComparator(), null, new BitDataHelper()); + } + + public Logger getLogger() { + return impl.getLogger(); + } + + public void setLogger(Logger logger) { + impl.setLogger(logger); + } + + public void setLogger(RHLogger logger) + { + impl.setLogger(logger); + } + + /** + * + */ + public void setSriListener(bulkio.SriListener sriCallback) { + impl.setSriListener(sriCallback); + } + + /** + * + */ + public String getName() { + return impl.getName(); + } + + /** + * + */ + public void enableStats(boolean enable) { + impl.enableStats(enable); + } + + /** + * + */ + public PortStatistics statistics() { + return impl.statistics(); + } + + /** + * + */ + public PortUsageType state() { + return impl.state(); + } + + /** + * + */ + public StreamSRI[] activeSRIs() { + return impl.activeSRIs(); + } + + /** + * + */ + public int getCurrentQueueDepth() { + return impl.getCurrentQueueDepth(); + } + + /** + * + */ + public int getMaxQueueDepth() { + return impl.getMaxQueueDepth(); + } + + /** + * + */ + public void setMaxQueueDepth(int newDepth) { + impl.setMaxQueueDepth(newDepth); + } + + /** + * + */ + public void 
pushSRI(StreamSRI header) { + impl.pushSRI(header); + } + + /** + * + */ + public void pushPacket(BULKIO.BitSequence data, PrecisionUTCTime time, boolean eos, String streamID) + { + impl.pushPacket(data, time, eos, streamID); + } + + /** + * + */ + public Packet getPacket(long wait) + { + DataTransfer p = impl.getPacket(wait); + if (p == null) { + return null; + } else { + return new Packet(p.getData(), p.getTime(), p.getEndOfStream(), p.getStreamID(), p.getSRI(), p.sriChanged(), p.inputQueueFlushed()); + } + } + + public String getDirection() { + return CF.PortSet.DIRECTION_PROVIDES; + } + + public String getRepid() { + return BULKIO.dataBitHelper.id(); + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InCharPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InCharPort.java deleted file mode 100644 index 7f9cd8d80..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InCharPort.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; -import org.apache.log4j.Logger; -/** - * - */ -public class InCharPort extends InInt8Port { - - /** - * - */ - public InCharPort( String portName ) { - super( portName ); - } - - public InCharPort( String portName, - bulkio.sri.Comparator compareSRI ) { - super( portName, null, compareSRI, null ); - } - - public InCharPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, null, compareSRI, sriCallback); - } - - public InCharPort( String portName, Logger logger ) { - super( portName, logger ); - } - - public InCharPort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, logger, compareSRI, sriCallback); - } - - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InDataPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InDataPort.java new file mode 100644 index 000000000..84a6fcadc --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/InDataPort.java @@ -0,0 +1,41 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +package bulkio; + +import org.apache.log4j.Logger; + +import org.ossie.component.PortBase; + +public interface InDataPort extends PortBase { + public String getName(); + + public Logger getLogger(); + public void setLogger(Logger logger); + + public void setSriListener(bulkio.SriListener sriCallback); + + public int getCurrentQueueDepth(); + public int getMaxQueueDepth(); + public void setMaxQueueDepth(int newDepth); + + public DataTransfer getPacket(long wait); + + public void enableStats(boolean enable); +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InDoublePort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InDoublePort.java deleted file mode 100644 index b2bf1649b..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InDoublePort.java +++ /dev/null @@ -1,529 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.DoubleSize; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InDoublePort extends BULKIO.jni.dataDoublePOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. - * - */ - public class Packet extends DataTransfer < double[] > { - - public Packet(double[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - protected Logger logger = null; - - - /** - * This queue stores all packets received from pushPacket. 
- * - */ - private ArrayDeque< Packet > workQueue; - - - - /** - * - */ - public InDoublePort( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InDoublePort( String portName, bulkio.sri.Comparator compareSRI ) { - this( portName, null, compareSRI, null ); - } - - public InDoublePort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, null ); - } - - public InDoublePort( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InDoublePort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new DoubleSize() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - 
} - } - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { 
- if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket(double[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - 
- synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, 
TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataDoubleHelper.id(); - } - - public String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InFilePort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InFilePort.java index 23778270c..5a2bb5793 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InFilePort.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/InFilePort.java @@ -19,36 +19,19 @@ */ package bulkio; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; +import org.apache.log4j.Logger; + +import org.ossie.component.RHLogger; + import BULKIO.PrecisionUTCTime; import BULKIO.StreamSRI; import BULKIO.PortStatistics; import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import org.ossie.component.PortBase; - -import 
bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.Int8Size; - - /** * */ -public class InFilePort extends BULKIO.jni.dataFilePOA implements PortBase { +public class InFilePort extends BULKIO.jni.dataFilePOA implements InDataPort { /** * A class to hold packet data. @@ -61,74 +44,7 @@ public Packet( String data, PrecisionUTCTime timeStamp, boolean endOfStream, Str }; }; - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. 
- * - */ - private ArrayDeque< Packet > workQueue; - - + private InPortImpl impl; /** * @@ -157,369 +73,114 @@ public InFilePort( String portName, Logger logger, bulkio.sri.Comparator compareSRI, bulkio.SriListener sriCallback ){ - - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new Int8Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - + impl = new InPortImpl(portName, logger, compareSRI, sriCallback, new FileDataHelper()); } - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } + public Logger getLogger() { + return impl.getLogger(); } + public void setLogger(Logger logger) { + impl.setLogger(logger); + } + public void setLogger(RHLogger logger) { + impl.setLogger(logger); + } /** * */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } + public void setSriListener(bulkio.SriListener sriCallback) { + impl.setSriListener(sriCallback); } /** * */ public String getName() { - return this.name; + return impl.getName(); } /** * */ public void enableStats(boolean enable) { - this.stats.setEnabled(enable); + impl.enableStats(enable); } /** * */ public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } + return impl.statistics(); } /** * */ public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - 
queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } + return impl.state(); } /** * */ public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } + return impl.activeSRIs(); } - + /** * */ public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } + return impl.getCurrentQueueDepth(); } - + /** * */ public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } + return impl.getMaxQueueDepth(); } - + /** * */ public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - queueSem = new Semaphore(newDepth); - } + impl.setMaxQueueDepth(newDepth); } - /** * */ public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - 
if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } + impl.pushSRI(header); } - - /** * */ - public void pushPacket( String data, PrecisionUTCTime time, boolean eos, String streamID) + public void pushPacket(String data, PrecisionUTCTime time, boolean eos, String streamID) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - 
- try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - synchronized (this.dataBufferLock) { - this.stats.update(data.length(), this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length(), 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length(), this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - + impl.pushPacket(data, time, eos, streamID); } - + /** * */ - public Packet getPacket(long wait) + public Packet getPacket(long wait) { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - 
this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } + DataTransfer p = impl.getPacket(wait); + if (p == null) { return null; + } else { + return new Packet(p.getData(), p.getTime(), p.getEndOfStream(), p.getStreamID(), p.getSRI(), p.sriChanged(), p.inputQueueFlushed()); } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; } public String getDirection() { - return "Provides"; + return CF.PortSet.DIRECTION_PROVIDES; } public String getRepid() { diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InFloatPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InFloatPort.java deleted file mode 100644 index 8c274c431..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InFloatPort.java +++ /dev/null @@ -1,535 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.DoubleSize; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InFloatPort extends BULKIO.jni.dataFloatPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. 
- * - */ - public class Packet extends DataTransfer < float[] > { - - public Packet(float[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. - * - */ - private ArrayDeque< Packet > workQueue; - - - - /** - * - */ - public InFloatPort( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InFloatPort( String portName, - bulkio.sri.Comparator compareSRI ) { - this( portName, null, compareSRI, null ); - } - - public InFloatPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InFloatPort( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InFloatPort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new FloatSize() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new 
Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - } - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - - - /** - * - */ - public String getName() { - return this.name; - } - - - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } - } - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth 
= newDepth; - queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket(float[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - 
boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - 
this.stats.update(data.length, 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket containsKey " + p.getStreamID() + " res:" + - this.currentHs.containsKey(p.getStreamID()) ); - } - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - 
- if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataFloatHelper.id(); - } - - public String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InInt16Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InInt16Port.java deleted file mode 100644 index 15ff2f18e..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InInt16Port.java +++ /dev/null @@ -1,521 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.Int16Size; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InInt16Port extends BULKIO.jni.dataShortPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. - * - */ - public class Packet extends DataTransfer < short[] > { - - public Packet(short[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. 
- * - */ - private ArrayDeque< Packet > workQueue; - - - /** - * - */ - public InInt16Port( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InInt16Port( String portName, - bulkio.sri.Comparator compareSRI ){ - this( portName, null, compareSRI, null ); - } - - public InInt16Port( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InInt16Port( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InInt16Port( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new Int16Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } - 
} - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - if ( 
sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket(short[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - synchronized (this.dataBufferLock) { - this.stats.update(data.length, 
this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( 
logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataShortHelper.id(); - } - - public String getDirection() - { - return "Provides"; - } -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InInt32Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InInt32Port.java deleted file mode 100644 index d8b0731a3..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InInt32Port.java +++ /dev/null @@ -1,527 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.Int32Size; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InInt32Port extends BULKIO.jni.dataLongPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. 
- * - */ - public class Packet extends DataTransfer < int[] > { - - public Packet(int[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. - * - */ - private ArrayDeque< Packet > workQueue; - - /** - * - */ - public InInt32Port( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InInt32Port( String portName, - bulkio.sri.Comparator compareSRI ){ - this( portName, null, compareSRI, null ); - } - - public InInt32Port( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InInt32Port( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InInt32Port( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new Int32Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); 
- this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - } - - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } - } - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; 
- queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket(int[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = 
false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 0, eos, 
streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataLongHelper.id(); - } - - public 
String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InInt64Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InInt64Port.java deleted file mode 100644 index ebcefebb7..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InInt64Port.java +++ /dev/null @@ -1,527 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.Int64Size; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InInt64Port extends BULKIO.jni.dataLongLongPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. - * - */ - public class Packet extends DataTransfer < long[] > { - - public Packet(long[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. 
- * - */ - private ArrayDeque< Packet > workQueue; - - /** - * - */ - public InInt64Port( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InInt64Port( String portName, - bulkio.sri.Comparator compareSRI ){ - this( portName, null, compareSRI, null ); - } - - public InInt64Port( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InInt64Port( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InInt64Port( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new Int64Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } - 
} - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - if 
( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket(long[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - 
synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, 
TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataLongLongHelper.id(); - } - - public String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InInt8Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InInt8Port.java deleted file mode 100644 index 7f7418373..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InInt8Port.java +++ /dev/null @@ -1,532 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import org.apache.log4j.Logger; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; - - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.Int8Size; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InInt8Port extends BULKIO.jni.dataCharPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. 
- * - */ - public class Packet extends DataTransfer < char[] > { - - public Packet( char[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - /** - * This queue stores all packets received from pushPacket. - * - */ - private ArrayDeque< Packet > workQueue; - - /** - * - */ - public InInt8Port( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InInt8Port( String portName, - bulkio.sri.Comparator compareSRI ){ - this( portName, null, compareSRI, null ); - } - - public InInt8Port( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InInt8Port( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InInt8Port( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new Int8Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - 
this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } - } - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - 
queueSem = new Semaphore(newDepth); - } - } - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket( char[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged 
= false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 0, eos, 
streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataCharHelper.id(); - } - - 
public String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InLongLongPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InLongLongPort.java deleted file mode 100644 index eb768ec14..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InLongLongPort.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import org.apache.log4j.Logger; - -/** - * - */ -public class InLongLongPort extends InInt64Port { - - /** - * - */ - public InLongLongPort( String portName ) { - super( portName ); - } - - public InLongLongPort( String portName, - bulkio.sri.Comparator compareSRI ) { - super( portName, null, compareSRI, null ); - } - - public InLongLongPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - - super( portName, null, compareSRI, sriCallback); - } - - public InLongLongPort( String portName, Logger logger ) { - super( portName, logger ); - } - - public InLongLongPort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, logger, compareSRI, sriCallback); - } - - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InLongPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InLongPort.java deleted file mode 100644 index a1a34410f..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InLongPort.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; -/** - * - */ -public class InLongPort extends InInt32Port { - - /** - - * - */ - public InLongPort( String portName ) { - super( portName); - } - - public InLongPort( String portName, - bulkio.sri.Comparator compareSRI ) { - super(portName, compareSRI ); - } - - public InLongPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super(portName, compareSRI, sriCallback ); - } - - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InOctetPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InOctetPort.java deleted file mode 100644 index d099de4a3..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InOctetPort.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; -import org.apache.log4j.Logger; -/** - * - */ -public class InOctetPort extends InUInt8Port { - - public InOctetPort( String portName ) { - super( portName ); - } - - public InOctetPort( String portName, - bulkio.sri.Comparator compareSRI ) { - super( portName, null, compareSRI, null ); - } - - public InOctetPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, null, compareSRI, sriCallback); - } - - public InOctetPort( String portName, Logger logger ) { - super( portName, logger ); - } - - public InOctetPort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, logger, compareSRI, sriCallback); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InPort.java.template b/bulkioInterfaces/libsrc/java/src/bulkio/InPort.java.template new file mode 100644 index 000000000..b4b9cfe63 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/InPort.java.template @@ -0,0 +1,197 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +/* + * WARNING: This file is generated from InPort.java.template. + * Do not modify directly. + */ +package bulkio; + +import org.apache.log4j.Logger; + +import org.ossie.component.RHLogger; + +import BULKIO.PortStatistics; +import BULKIO.PortUsageType; +import BULKIO.PrecisionUTCTime; +import BULKIO.StreamSRI; + +/** + * + */ +public class In@name@Port extends BULKIO.jni.@idl@POA implements InDataPort { + + /** + * A class to hold packet data. + * + */ + public class Packet extends DataTransfer < @type@[] > { + + public Packet(@type@[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { + super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); + }; + }; + + private InPortImpl<@type@[]> impl; + + /** + * + */ + public In@name@Port( String portName ) { + this( portName, null, new bulkio.sri.DefaultComparator(), null ); + } + + public In@name@Port( String portName, + bulkio.sri.Comparator compareSRI ){ + this( portName, null, compareSRI, null ); + } + + public In@name@Port( String portName, + bulkio.sri.Comparator compareSRI, + bulkio.SriListener sriCallback + ) { + this( portName, null, compareSRI, sriCallback ); + } + + public In@name@Port( String portName, Logger logger ) { + this( portName, logger, new bulkio.sri.DefaultComparator(), null ); + } + + public In@name@Port( String portName, + Logger logger, + bulkio.sri.Comparator compareSRI, + bulkio.SriListener sriCallback ) { + impl = new InPortImpl<@type@[]>(portName, logger, compareSRI, sriCallback, new @name@DataHelper()); + } + + public Logger getLogger() { + return impl.getLogger(); + } + + public void setLogger(Logger logger){ + impl.setLogger(logger); + } + + public void setLogger(RHLogger logger) + { + impl.setLogger(logger); + } + + /** + * + */ + public void setSriListener(bulkio.SriListener sriCallback) { + impl.setSriListener(sriCallback); + } + + /** + * + */ + public String getName() { + return 
impl.getName(); + } + + /** + * + */ + public void enableStats(boolean enable) { + impl.enableStats(enable); + } + + /** + * + */ + public PortStatistics statistics() { + return impl.statistics(); + } + + /** + * + */ + public PortUsageType state() { + return impl.state(); + } + + /** + * + */ + public StreamSRI[] activeSRIs() { + return impl.activeSRIs(); + } + + /** + * + */ + public int getCurrentQueueDepth() { + return impl.getCurrentQueueDepth(); + } + + /** + * + */ + public int getMaxQueueDepth() { + return impl.getMaxQueueDepth(); + } + + /** + * + */ + public void setMaxQueueDepth(int newDepth) { + impl.setMaxQueueDepth(newDepth); + } + + /** + * + */ + public void pushSRI(StreamSRI header) { + impl.pushSRI(header); + } + + /** + * + */ + public void pushPacket(@type@[] data, PrecisionUTCTime time, boolean eos, String streamID) + { + impl.pushPacket(data, time, eos, streamID); + } + + /** + * + */ + public Packet getPacket(long wait) + { + DataTransfer<@type@[]> p = impl.getPacket(wait); + if (p == null) { + return null; + } else { + return new Packet(p.getData(), p.getTime(), p.getEndOfStream(), p.getStreamID(), p.getSRI(), p.sriChanged(), p.inputQueueFlushed()); + } + } + + public String getRepid() + { + return BULKIO.@idl@Helper.id(); + } + + public String getDirection() + { + return CF.PortSet.DIRECTION_PROVIDES; + } + +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InPortImpl.java b/bulkioInterfaces/libsrc/java/src/bulkio/InPortImpl.java new file mode 100644 index 000000000..d2ab5cb26 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/InPortImpl.java @@ -0,0 +1,533 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package bulkio; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Iterator; +import java.util.Map; +import java.util.ArrayDeque; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +import org.apache.log4j.Logger; + +import CF.DataType; +import org.ossie.component.RHLogger; + +import BULKIO.PortStatistics; +import BULKIO.PortUsageType; +import BULKIO.PrecisionUTCTime; +import BULKIO.StreamSRI; + +/** + * + */ +class InPortImpl { + + /** + * + */ + protected String name; + + /** + * + */ + protected linkStatistics stats; + + /** + * + */ + protected Object sriUpdateLock; + + /** + * + */ + protected Object statUpdateLock; + + /** + * + */ + protected Map currentHs; + + /** + * + */ + protected Object dataBufferLock; + + /** + * + */ + protected int maxQueueDepth; + + /** + * + */ + protected Semaphore queueSem; + + /** + * + */ + protected Semaphore dataSem; + + /** + * + */ + protected boolean blocking; + + /** + * + */ + protected Logger logger = null; + + protected bulkio.sri.Comparator sri_cmp; + + protected bulkio.SriListener sriCallback; + + + /** + * This queue stores all packets received from pushPacket. 
+ * + */ + private ArrayDeque> workQueue; + + private DataHelper helper; + + /** + * + */ + public InPortImpl(String portName, Logger logger, bulkio.sri.Comparator compareSRI, bulkio.SriListener sriCallback, DataHelper helper) { + this.name = portName; + this.logger = logger; + this.stats = new linkStatistics(this.name, 1); + // Update bit size from the helper, because element size does not take + // sub-byte elements (i.e., dataBit) into account. + this.stats.setBitSize(helper.bitSize()); + this.sriUpdateLock = new Object(); + this.statUpdateLock = new Object(); + this.currentHs = new HashMap(); + this.dataBufferLock = new Object(); + this.maxQueueDepth = 100; + this.queueSem = new Semaphore(this.maxQueueDepth); + this.dataSem = new Semaphore(0); + this.blocking = false; + this.helper = helper; + + this.workQueue = new ArrayDeque>(); + + sri_cmp = compareSRI; + sriCallback = sriCallback; + + if ( this.logger == null ) { + this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); + } + this.logger.debug( "bulkio::InPort CTOR port: " + portName ); + } + + public Logger getLogger() { + synchronized (this.sriUpdateLock) { + return logger; + } + } + + public void setLogger(Logger newlogger) { + synchronized (this.sriUpdateLock) { + logger = newlogger; + } + } + + public void setLogger(RHLogger logger) { + if (logger != null) { + setLogger(logger.getL4Logger()); + } else { + setLogger((Logger) null); + } + } + + /** + * + */ + public void setSriListener( bulkio.SriListener sriCallback ) { + synchronized(this.sriUpdateLock) { + this.sriCallback = sriCallback; + } + } + + /** + * + */ + public String getName() { + return this.name; + } + + /** + * + */ + public void enableStats(boolean enable) { + this.stats.setEnabled(enable); + } + + /** + * + */ + public PortStatistics statistics() { + synchronized (statUpdateLock) { + return this.stats.retrieve(); + } + } + + /** + * + */ + public PortUsageType state() { + int queueSize = 0; + synchronized (dataBufferLock) { 
+ queueSize = workQueue.size(); + if (queueSize == maxQueueDepth) { + return PortUsageType.BUSY; + } else if (queueSize == 0) { + return PortUsageType.IDLE; + } + return PortUsageType.ACTIVE; + } + } + + /** + * + */ + public StreamSRI[] activeSRIs() { + synchronized (this.sriUpdateLock) { + ArrayList sris = new ArrayList(); + Iterator iter = this.currentHs.values().iterator(); + while(iter.hasNext()) { + sris.add(iter.next().getSRI()); + } + return sris.toArray(new StreamSRI[sris.size()]); + } + } + + /** + * + */ + public int getCurrentQueueDepth() { + synchronized (this.dataBufferLock) { + return workQueue.size(); + } + } + + /** + * + */ + public int getMaxQueueDepth() { + synchronized (this.dataBufferLock) { + return this.maxQueueDepth; + } + } + + /** + * + */ + public void setMaxQueueDepth(int newDepth) { + synchronized (this.dataBufferLock) { + this.maxQueueDepth = newDepth; + queueSem = new Semaphore(newDepth); + } + } + + /** + * + */ + public void pushSRI(StreamSRI header) { + + if ( logger != null ) { + logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); + } + + synchronized (sriUpdateLock) { + if (!currentHs.containsKey(header.streamID)) { + if ( logger != null ) { + logger.debug("pushSRI PORT:" + name + " NEW SRI:" + + header.streamID ); + } + if ( sriCallback != null ) { sriCallback.newSRI(header); } + currentHs.put(header.streamID, new sriState(header, true)); + if (header.blocking) { + //If switching to blocking we have to set the semaphore + synchronized (dataBufferLock) { + if (!blocking) { + try { + queueSem.acquire(workQueue.size()); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + blocking = true; + } + } + } else { + StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); + boolean cval = false; + if ( sri_cmp != null ) { + cval = sri_cmp.compare( header, oldSri ); + } + if ( cval == false ) { + if ( sriCallback != null ) { sriCallback.changedSRI(header); } + this.currentHs.put(header.streamID, new 
sriState(header, true)); + if (header.blocking) { + //If switching to blocking we have to set the semaphore + synchronized (dataBufferLock) { + if (!blocking) { + try { + queueSem.acquire(workQueue.size()); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + blocking = true; + } + } + } + } + } + if ( logger != null ) { + logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); + } + } + + /** + * + */ + public void pushPacket(A data, PrecisionUTCTime time, boolean eos, String streamID) + { + if ( logger != null ) { + logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); + } + + // Discard empty packets if EOS is not set, as there is no useful data + // or metadata to be had--since T applies to the 1st sample (which does + // not exist), all we have is a stream ID + if (helper.isEmpty(data) && !eos) { + return; + } + + synchronized (this.dataBufferLock) { + if (this.maxQueueDepth == 0) { + if ( logger != null ) { + logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); + } + return; + } + } + + boolean portBlocking = false; + StreamSRI tmpH = null; + boolean sriChanged = false; + synchronized (this.sriUpdateLock) { + if (this.currentHs.containsKey(streamID)) { + tmpH = this.currentHs.get(streamID).getSRI(); + sriChanged = this.currentHs.get(streamID).isChanged(); + if ( eos == false ) { + this.currentHs.get(streamID).setChanged(false); + } + portBlocking = blocking; + } else { + if (logger != null) { + logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); + } + tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); + if (sriCallback != null) { + sriCallback.newSRI(tmpH); + } + sriChanged = true; + currentHs.put(streamID, new sriState(tmpH, false)); + } + } + + // determine whether to block and wait for an empty space in the queue + int elements = helper.arraySize(data); + + if (portBlocking) { + DataTransfer p = new 
DataTransfer(data, time, eos, streamID, tmpH, sriChanged, false); + + try { + queueSem.acquire(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + synchronized (this.dataBufferLock) { + this.stats.update(elements, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); + this.workQueue.add(p); + this.dataSem.release(); + } + } else { + synchronized (this.dataBufferLock) { + boolean flushToReport = false; + if ((this.maxQueueDepth >= 0) && (this.workQueue.size() >= this.maxQueueDepth)) { + if ( logger != null ) { + logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); + } + flushToReport = true; + + // Need to hold the SRI mutex while flushing the queue + // because it may update SRI change state + synchronized (this.sriUpdateLock) { + this._flushQueue(); + + // Update the SRI change flag for this stream, which + // may have been modified during the queue flush + sriState currH = this.currentHs.get(streamID); + sriChanged = currH.isChanged(); + currH.setChanged(false); + } + } + this.stats.update(elements, (this.workQueue.size()+1)/(float)this.maxQueueDepth, eos, streamID, flushToReport); + if ( logger != null ) { + logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); + } + DataTransfer p = new DataTransfer(data, time, eos, streamID, tmpH, sriChanged, false); + this.workQueue.add(p); + // If a flush occurred, always set the flag on the first + // packet; this may not be the packet that was just inserted if + // there were any EOS packets on the queue + if (flushToReport) { + DataTransfer first = this.workQueue.removeFirst(); + first = new DataTransfer(first.dataBuffer, first.T, first.EOS, first.streamID, first.SRI, first.sriChanged, true); + this.workQueue.addFirst(first); + } + this.dataSem.release(); + } + } + + if ( logger != null ) { + logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); + } + return; + + } + + private void 
_flushQueue() + { + Set sri_changed = new HashSet(); + ArrayDeque> saved_packets = new ArrayDeque>(); + for (DataTransfer packet : this.workQueue) { + if (packet.EOS) { + // Remove the SRI change flag for this stream, as further SRI + // changes apply to a different stream; set the SRI change flag + // for the EOS packet if there was one for this stream earlier + // in the queue + boolean sri_flag = packet.sriChanged; + if (sri_changed.remove(packet.streamID)) { + sri_flag = true; + } + + // Discard data and preserve the EOS packet + DataTransfer modified_packet = new DataTransfer(this.helper.emptyArray(), packet.T, true, packet.streamID, packet.SRI, sri_flag, false); + saved_packets.addLast(modified_packet); + } else if (packet.sriChanged) { + sri_changed.add(packet.streamID); + } + } + this.workQueue = saved_packets; + + // Save any SRI change flags that were collected and not applied to an + // EOS packet + for (String stream_id : sri_changed) { + // It should be safe to assume that an entry exists for the stream + // ID, but just in case, check the result of get + sriState currH = this.currentHs.get(stream_id); + if (currH != null) { + currH.setChanged(true); + } + } + } + + /** + * + */ + public DataTransfer getPacket(long wait) + { + + if ( logger != null ) { + logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); + } + + try { + if (wait < 0) { + if ( logger != null ) { + logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); + } + this.dataSem.acquire(); + } else { + if ( logger != null ) { + logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); + } + this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); + } + } catch (InterruptedException ex) { + if ( logger != null ) { + logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); + } + return null; + } + + DataTransfer p = null; + synchronized (this.dataBufferLock) { + p = this.workQueue.poll(); + } + + if (p != null) { + if 
(p.getEndOfStream()) { + synchronized (this.sriUpdateLock) { + if (this.currentHs.containsKey(p.getStreamID())) { + sriState rem = this.currentHs.remove(p.getStreamID()); + + if (rem.getSRI().blocking) { + boolean stillBlocking = false; + Iterator iter = currentHs.values().iterator(); + while (iter.hasNext()) { + if (iter.next().getSRI().blocking) { + stillBlocking = true; + break; + } + } + + if (!stillBlocking) { + blocking = false; + } + } + } + } + } + + if (blocking) { + queueSem.release(); + } + } + + if ( logger != null ) { + logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); + } + return p; + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InSDDSPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InSDDSPort.java index 9623236a7..fcdbab970 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InSDDSPort.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/InSDDSPort.java @@ -19,18 +19,15 @@ */ package bulkio; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; -import java.util.Iterator; import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; + import org.apache.log4j.Logger; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; + +import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; + import BULKIO.PrecisionUTCTime; import BULKIO.StreamSRI; import BULKIO.PortStatistics; @@ -41,17 +38,10 @@ import BULKIO.dataSDDSPackage.InputUsageState; import BULKIO.dataSDDSPackage.StreamInputError; -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.Int16Size; - -import org.ossie.component.PortBase; - /** * @generated */ -public class InSDDSPort extends BULKIO.jni.dataSDDSPOA implements org.ossie.component.PortBase { +public class InSDDSPort extends BULKIO.jni.dataSDDSPOA implements PortBase { public interface Callback { @@ -107,6 +97,8 @@ public 
void detach(String attachId) */ protected Logger logger = null; + public RHLogger _portLog = null; + // callback when SDDS Stream Requests happen protected Callback attach_detach_callback; @@ -142,7 +134,7 @@ public InSDDSPort( String portName, bulkio.time.Comparator timeCmp ) { this.name = portName; - this.stats = new linkStatistics(this.name, new Int8Size() ); + this.stats = new linkStatistics(this.name, 1); this.sriUpdateLock = new Object(); this.statUpdateLock = new Object(); this.attachedStreamMap = new HashMap(); @@ -168,6 +160,13 @@ public void setLogger( Logger newlogger ){ } } + public void setLogger(RHLogger logger) + { + synchronized (this.sriUpdateLock) { + this._portLog = logger; + } + } + /** * */ @@ -279,8 +278,8 @@ public StreamSRI[] activeSRIs() { */ public void pushSRI(StreamSRI H, PrecisionUTCTime T) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); } synchronized (this.sriUpdateLock) { @@ -306,8 +305,8 @@ public void pushSRI(StreamSRI H, PrecisionUTCTime T) { } } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); } } @@ -319,22 +318,22 @@ public void pushSRI(StreamSRI H, PrecisionUTCTime T) { */ public String attach(SDDSStreamDefinition stream, String userid) throws AttachError, StreamInputError { - if ( logger != null ) { - logger.trace("bulkio.InPort attach ENTER (port=" + name +")" ); - logger.debug("SDDS PORT: ATTACH REQUEST STREAM/USER:" + stream.id +"/" + userid ); + if ( _portLog != null ) { + _portLog.trace("bulkio.InPort attach ENTER (port=" + name +")" ); + _portLog.debug("SDDS PORT: ATTACH REQUEST STREAM/USER:" + stream.id +"/" + userid ); } String attachId = null; if ( attach_detach_callback != null ) { - if ( logger != null ) { - logger.debug("SDDS PORT: 
CALLING ATTACH CALLBACK, STREAM/USER:" + stream.id +"/" + userid ); + if ( _portLog != null ) { + _portLog.debug("SDDS PORT: CALLING ATTACH CALLBACK, STREAM/USER:" + stream.id +"/" + userid ); } try { attachId = attach_detach_callback.attach(stream, userid); } catch(Exception e) { - if ( logger != null ) { - logger.error("SDDS PORT: CALLING ATTACH EXCEPTION, STREAM/USER:" + stream.id +"/" + userid ); + if ( _portLog != null ) { + _portLog.error("SDDS PORT: CALLING ATTACH EXCEPTION, STREAM/USER:" + stream.id +"/" + userid ); } throw new AttachError("Callback Failed"); } @@ -346,9 +345,9 @@ public String attach(SDDSStreamDefinition stream, String userid) throws AttachEr this.attachedUsers.put(attachId, userid); - if ( logger != null ) { - logger.debug("SDDS PORT: ATTACH COMPLETED, ID:" + attachId + " STREAM/USER:" + stream.id +"/" + userid ); - logger.trace("bulkio.InPort attach EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.debug("SDDS PORT: ATTACH COMPLETED, ID:" + attachId + " STREAM/USER:" + stream.id +"/" + userid ); + _portLog.trace("bulkio.InPort attach EXIT (port=" + name +")" ); } return attachId; @@ -359,21 +358,21 @@ public String attach(SDDSStreamDefinition stream, String userid) throws AttachEr */ public void detach(String attachId) throws DetachError, StreamInputError { - if ( logger != null ) { - logger.trace("bulkio.InPort detach ENTER (port=" + name +")" ); - logger.debug("SDDS PORT: DETACH REQUEST ID:" + attachId ); + if ( _portLog != null ) { + _portLog.trace("bulkio.InPort detach ENTER (port=" + name +")" ); + _portLog.debug("SDDS PORT: DETACH REQUEST ID:" + attachId ); } if ( attach_detach_callback != null ) { try { - if ( logger != null ) { - logger.debug("SDDS PORT: CALLING DETACH CALLBACK ID:" + attachId ); + if ( _portLog != null ) { + _portLog.debug("SDDS PORT: CALLING DETACH CALLBACK ID:" + attachId ); } attach_detach_callback.detach(attachId); } catch( Exception e ) { - if ( logger != null ) { - logger.error("SDDS PORT: 
DETACH CALLBACK EXCEPTION, ID:" + attachId ); + if ( _portLog != null ) { + _portLog.error("SDDS PORT: DETACH CALLBACK EXCEPTION, ID:" + attachId ); } throw new DetachError(); } @@ -381,9 +380,9 @@ public void detach(String attachId) throws DetachError, StreamInputError { this.attachedStreamMap.remove(attachId); this.attachedUsers.remove(attachId); - if ( logger != null ) { - logger.debug("SDDS PORT: DETACH SUCCESS, ID:" + attachId ); - logger.trace("bulkio.InPort detach EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.debug("SDDS PORT: DETACH SUCCESS, ID:" + attachId ); + _portLog.trace("bulkio.InPort detach EXIT (port=" + name +")" ); } } @@ -452,7 +451,7 @@ public String getRepid() public String getDirection() { - return "Provides"; + return CF.PortSet.DIRECTION_PROVIDES; } } diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InShortPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InShortPort.java deleted file mode 100644 index 776a4320f..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InShortPort.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; -import org.apache.log4j.Logger; -/** - * - */ -public class InShortPort extends InInt16Port { - - /** - * - */ - public InShortPort( String portName ) { - super( portName ); - } - - public InShortPort( String portName, - bulkio.sri.Comparator compareSRI ) { - super( portName, null, compareSRI, null ); - } - - public InShortPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, null, compareSRI, sriCallback); - } - - public InShortPort( String portName, Logger logger ) { - super( portName, logger ); - } - - public InShortPort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, logger, compareSRI, sriCallback); - } - - - - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InUInt16Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InUInt16Port.java deleted file mode 100644 index 6e419d024..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InUInt16Port.java +++ /dev/null @@ -1,522 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.UInt16Size; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InUInt16Port extends BULKIO.jni.dataUshortPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. - * - */ - public class Packet extends DataTransfer < short[] > { - - public Packet(short[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. 
- * - */ - private ArrayDeque< Packet > workQueue; - - - /** - * - */ - public InUInt16Port( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InUInt16Port( String portName, - bulkio.sri.Comparator compareSRI ){ - this( portName, null, compareSRI, null ); - } - - public InUInt16Port( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InUInt16Port( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InUInt16Port( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - - this.name = portName; - this.stats = new linkStatistics(this.name, new UInt16Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } - } - - /** - * - 
*/ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - if ( sriCallback != 
null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket(short[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, 
streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket 
EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataUshortHelper.id(); - } - - public String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InUInt32Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InUInt32Port.java deleted file mode 100644 index b0f21da63..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InUInt32Port.java +++ /dev/null @@ -1,526 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. 
- * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.UInt32Size; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InUInt32Port extends BULKIO.jni.dataUlongPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. - * - */ - public class Packet extends DataTransfer < int[] > { - - public Packet(int[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. 
- * - */ - private ArrayDeque< Packet > workQueue; - - /** - * - */ - public InUInt32Port( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InUInt32Port( String portName, - bulkio.sri.Comparator compareSRI ){ - this( portName, null, compareSRI, null ); - } - - public InUInt32Port( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InUInt32Port( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InUInt32Port( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new UInt32Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - 
} - } - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { 
- if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket(int[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - 
synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, 
TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataUlongHelper.id(); - } - - public String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InUInt64Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InUInt64Port.java deleted file mode 100644 index fd8c53fc6..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InUInt64Port.java +++ /dev/null @@ -1,527 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.UInt64Size; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InUInt64Port extends BULKIO.jni.dataUlongLongPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. 
- * - */ - public class Packet extends DataTransfer < long[] > { - - public Packet(long[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. - * - */ - private ArrayDeque< Packet > workQueue; - - - /** - * - */ - public InUInt64Port( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InUInt64Port( String portName, - bulkio.sri.Comparator compareSRI ){ - this( portName, null, compareSRI, null ); - } - - public InUInt64Port( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InUInt64Port( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InUInt64Port( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new UInt64Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = 
new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } - } - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth 
= newDepth; - queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket(long[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean 
sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 
0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataUlongLongHelper.id(); - } 
- - public String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InUInt8Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InUInt8Port.java deleted file mode 100644 index 03c616ddb..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InUInt8Port.java +++ /dev/null @@ -1,536 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import BULKIO.PrecisionUTCTime; -import BULKIO.StreamSRI; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.UInt8Size; - -import org.ossie.component.PortBase; - -/** - * - */ -public class InUInt8Port extends BULKIO.jni.dataOctetPOA implements org.ossie.component.PortBase { - - /** - * A class to hold packet data. - * - */ - public class Packet extends DataTransfer < byte[] > { - - public Packet( byte[] data, PrecisionUTCTime time, boolean endOfStream, String streamID, StreamSRI H, boolean sriChanged, boolean inputQueueFlushed ) { - super(data,time,endOfStream,streamID,H,sriChanged,inputQueueFlushed); - }; - }; - - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. 
- * - */ - private ArrayDeque< Packet > workQueue; - - - - /** - * - */ - public InUInt8Port( String portName ) { - this( portName, null, new bulkio.sri.DefaultComparator(), null ); - } - - public InUInt8Port( String portName, - bulkio.sri.Comparator compareSRI ){ - this( portName, null, compareSRI, null ); - } - - public InUInt8Port( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback - ) { - this( portName, null, compareSRI, sriCallback ); - } - - public InUInt8Port( String portName, Logger logger ) { - this( portName, logger, new bulkio.sri.DefaultComparator(), null ); - } - - public InUInt8Port( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new UInt8Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); - - sri_cmp = compareSRI; - sriCallback = sriCallback; - - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); - - } - - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } - } - - /** - * - */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } - } - - /** - * - */ - public String getName() { - return this.name; - } - - - - /** - * - */ - public void enableStats(boolean enable) { - this.stats.setEnabled(enable); - } - - /** - * - */ - public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); 
- } - } - - /** - * - */ - public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } - } - - /** - * - */ - public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } - } - - /** - * - */ - public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } - } - - /** - * - */ - public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } - } - - /** - * - */ - public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - queueSem = new Semaphore(newDepth); - } - } - - - /** - * - */ - public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) 
{ - if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } - } - - - - /** - * - */ - public void pushPacket( byte[] data, PrecisionUTCTime time, boolean eos, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - 
- synchronized (this.dataBufferLock) { - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length, 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length, this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - - - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - /** - * - */ - public Packet getPacket(long wait) - { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - 
this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return null; - } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; - } - - public String getRepid() - { - return BULKIO.dataOctetHelper.id(); - } - - public String getDirection() - { - return "Provides"; - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InULongLongPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InULongLongPort.java deleted file mode 100644 index 8b94dc9c2..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InULongLongPort.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class InULongLongPort extends InUInt64Port { - - /** - * - */ - public InULongLongPort( String portName ) { - super( portName ); - } - - public InULongLongPort( String portName, - bulkio.sri.Comparator compareSRI ) { - super( portName, null, compareSRI, null ); - } - - public InULongLongPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, null, compareSRI, sriCallback); - } - - public InULongLongPort( String portName, Logger logger ) { - super( portName, logger ); - } - - public InULongLongPort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, logger, compareSRI, sriCallback); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InULongPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InULongPort.java deleted file mode 100644 index f7a9c9b46..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InULongPort.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class InULongPort extends InUInt32Port { - - /** - * - */ - public InULongPort( String portName ) { - super( portName ); - } - - public InULongPort( String portName, - bulkio.sri.Comparator compareSRI ) { - super( portName, null, compareSRI, null ); - } - - public InULongPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, null, compareSRI, sriCallback); - } - - public InULongPort( String portName, Logger logger ) { - super( portName, logger ); - } - - public InULongPort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, logger, compareSRI, sriCallback); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InUShortPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InUShortPort.java deleted file mode 100644 index 3e14774ef..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InUShortPort.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class InUShortPort extends InUInt16Port { - - /** - * - */ - public InUShortPort( String portName ) { - super( portName ); - } - - public InUShortPort( String portName, - bulkio.sri.Comparator compareSRI ) { - super( portName, null, compareSRI, null ); - } - - public InUShortPort( String portName, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, null, compareSRI, sriCallback); - } - - public InUShortPort( String portName, Logger logger ) { - super( portName, logger ); - } - - public InUShortPort( String portName, - Logger logger, - bulkio.sri.Comparator compareSRI, - bulkio.SriListener sriCallback ) { - super( portName, logger, compareSRI, sriCallback); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InVITA49Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/InVITA49Port.java index 7ac5ef230..18a60865f 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InVITA49Port.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/InVITA49Port.java @@ -19,18 +19,15 @@ */ package bulkio; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Iterator; import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; + import org.apache.log4j.Logger; -import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; + +import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; + import 
BULKIO.PrecisionUTCTime; import BULKIO.StreamSRI; import BULKIO.PortStatistics; @@ -41,17 +38,10 @@ import BULKIO.dataVITA49Package.InputUsageState; import BULKIO.dataVITA49Package.StreamInputError; -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.Int16Size; - -import org.ossie.component.PortBase; - /** * @generated */ -public class InVITA49Port extends BULKIO.jni.dataVITA49POA implements org.ossie.component.PortBase { +public class InVITA49Port extends BULKIO.jni.dataVITA49POA implements PortBase { public interface Callback { @@ -107,6 +97,8 @@ public void detach(String attachId) */ protected Logger logger = null; + public RHLogger _portLog = null; + // callback when VITA49 Stream Requests happen protected Callback attach_detach_callback; @@ -142,7 +134,7 @@ public InVITA49Port( String portName, bulkio.time.Comparator timeCmp ) { this.name = portName; - this.stats = new linkStatistics(this.name, new Int8Size() ); + this.stats = new linkStatistics(this.name, 1); this.sriUpdateLock = new Object(); this.statUpdateLock = new Object(); this.attachedStreamMap = new HashMap(); @@ -169,6 +161,13 @@ public void setLogger( Logger newlogger ){ } } + public void setLogger(RHLogger logger) + { + synchronized (this.sriUpdateLock) { + this._portLog = logger; + } + } + /** * */ @@ -280,8 +279,8 @@ public StreamSRI[] activeSRIs() { */ public void pushSRI(StreamSRI H, PrecisionUTCTime T) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); } synchronized (this.sriUpdateLock) { @@ -307,8 +306,8 @@ public void pushSRI(StreamSRI H, PrecisionUTCTime T) { } } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); } } @@ -320,22 +319,22 @@ public void pushSRI(StreamSRI 
H, PrecisionUTCTime T) { */ public String attach(VITA49StreamDefinition stream, String userid) throws AttachError, StreamInputError { - if ( logger != null ) { - logger.trace("bulkio.InPort attach ENTER (port=" + name +")" ); - logger.debug("VITA49 PORT: ATTACH REQUEST STREAM/USER:" + stream.id +"/" + userid ); + if ( _portLog != null ) { + _portLog.trace("bulkio.InPort attach ENTER (port=" + name +")" ); + _portLog.debug("VITA49 PORT: ATTACH REQUEST STREAM/USER:" + stream.id +"/" + userid ); } String attachId = null; if ( attach_detach_callback != null ) { - if ( logger != null ) { - logger.debug("VITA49 PORT: CALLING ATTACH CALLBACK, STREAM/USER:" + stream.id +"/" + userid ); + if ( _portLog != null ) { + _portLog.debug("VITA49 PORT: CALLING ATTACH CALLBACK, STREAM/USER:" + stream.id +"/" + userid ); } try { attachId = attach_detach_callback.attach(stream, userid); } catch(Exception e) { - if ( logger != null ) { - logger.error("VITA49 PORT: CALLING ATTACH EXCEPTION, STREAM/USER:" + stream.id +"/" + userid ); + if ( _portLog != null ) { + _portLog.error("VITA49 PORT: CALLING ATTACH EXCEPTION, STREAM/USER:" + stream.id +"/" + userid ); } throw new AttachError("Callback Failed"); } @@ -347,9 +346,9 @@ public String attach(VITA49StreamDefinition stream, String userid) throws Attach this.attachedUsers.put(attachId, userid); - if ( logger != null ) { - logger.debug("VITA49 PORT: ATTACH COMPLETED, ID:" + attachId + " STREAM/USER:" + stream.id +"/" + userid ); - logger.trace("bulkio.InPort attach EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.debug("VITA49 PORT: ATTACH COMPLETED, ID:" + attachId + " STREAM/USER:" + stream.id +"/" + userid ); + _portLog.trace("bulkio.InPort attach EXIT (port=" + name +")" ); } return attachId; @@ -360,21 +359,21 @@ public String attach(VITA49StreamDefinition stream, String userid) throws Attach */ public void detach(String attachId) throws DetachError, StreamInputError { - if ( logger != null ) { - 
logger.trace("bulkio.InPort detach ENTER (port=" + name +")" ); - logger.debug("VITA49 PORT: DETACH REQUEST ID:" + attachId ); + if ( _portLog != null ) { + _portLog.trace("bulkio.InPort detach ENTER (port=" + name +")" ); + _portLog.debug("VITA49 PORT: DETACH REQUEST ID:" + attachId ); } if ( attach_detach_callback != null ) { try { - if ( logger != null ) { - logger.debug("VITA49 PORT: CALLING DETACH CALLBACK ID:" + attachId ); + if ( _portLog != null ) { + _portLog.debug("VITA49 PORT: CALLING DETACH CALLBACK ID:" + attachId ); } attach_detach_callback.detach(attachId); } catch( Exception e ) { - if ( logger != null ) { - logger.error("VITA49 PORT: DETACH CALLBACK EXCEPTION, ID:" + attachId ); + if ( _portLog != null ) { + _portLog.error("VITA49 PORT: DETACH CALLBACK EXCEPTION, ID:" + attachId ); } throw new DetachError(); } @@ -382,9 +381,9 @@ public void detach(String attachId) throws DetachError, StreamInputError { this.attachedStreamMap.remove(attachId); this.attachedUsers.remove(attachId); - if ( logger != null ) { - logger.debug("VITA49 PORT: DETACH SUCCESS, ID:" + attachId ); - logger.trace("bulkio.InPort detach EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.debug("VITA49 PORT: DETACH SUCCESS, ID:" + attachId ); + _portLog.trace("bulkio.InPort detach EXIT (port=" + name +")" ); } } @@ -453,7 +452,7 @@ public String getRepid() public String getDirection() { - return "Provides"; + return CF.PortSet.DIRECTION_PROVIDES; } } diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/InXMLPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/InXMLPort.java index 763de8a4b..faf36d4f8 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/InXMLPort.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/InXMLPort.java @@ -19,34 +19,19 @@ */ package bulkio; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; 
-import CF.DataType; -import java.util.ArrayDeque; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; +import org.apache.log4j.Logger; + +import org.ossie.component.RHLogger; + import BULKIO.PrecisionUTCTime; import BULKIO.StreamSRI; import BULKIO.PortStatistics; import BULKIO.PortUsageType; -import org.apache.log4j.Logger; - -import bulkio.sriState; -import bulkio.linkStatistics; -import bulkio.DataTransfer; -import bulkio.Int8Size; - -import org.ossie.component.PortBase; /** * */ -public class InXMLPort extends BULKIO.jni.dataXMLPOA implements PortBase { +public class InXMLPort extends BULKIO.jni.dataXMLPOA implements InDataPort { /** * A class to hold packet data. @@ -59,72 +44,7 @@ public Packet( String data, PrecisionUTCTime timeStamp, boolean endOfStream, Str }; }; - /** - * - */ - protected String name; - - /** - * - */ - protected linkStatistics stats; - - /** - * - */ - protected Object sriUpdateLock; - - /** - * - */ - protected Object statUpdateLock; - - /** - * - */ - protected Map currentHs; - - /** - * - */ - protected Object dataBufferLock; - - /** - * - */ - protected int maxQueueDepth; - - /** - * - */ - protected Semaphore queueSem; - - /** - * - */ - protected Semaphore dataSem; - - /** - * - */ - protected boolean blocking; - - /** - * - */ - protected Logger logger = null; - - protected bulkio.sri.Comparator sri_cmp; - - protected bulkio.SriListener sriCallback; - - - /** - * This queue stores all packets received from pushPacket. 
- * - */ - private ArrayDeque< Packet > workQueue; - + private InPortImpl impl; /** * @@ -153,294 +73,98 @@ public InXMLPort( String portName, Logger logger, bulkio.sri.Comparator compareSRI, bulkio.SriListener sriCallback ){ - this.name = portName; - this.logger = logger; - this.stats = new linkStatistics(this.name, new Int8Size() ); - this.sriUpdateLock = new Object(); - this.statUpdateLock = new Object(); - this.currentHs = new HashMap(); - this.dataBufferLock = new Object(); - this.maxQueueDepth = 100; - this.queueSem = new Semaphore(this.maxQueueDepth); - this.dataSem = new Semaphore(0); - this.blocking = false; - - this.workQueue = new ArrayDeque< Packet >(); + impl = new InPortImpl(portName, logger, compareSRI, sriCallback, new XMLDataHelper()); + } - sri_cmp = compareSRI; - sriCallback = sriCallback; + public Logger getLogger() { + return impl.getLogger(); + } - if ( this.logger == null ) { - this.logger = Logger.getLogger("redhawk.bulkio.inport."+portName); - } - this.logger.debug( "bulkio::InPort CTOR port: " + portName ); + public void setLogger(Logger logger){ + impl.setLogger(logger); } - public void setLogger( Logger newlogger ){ - synchronized (this.sriUpdateLock) { - logger = newlogger; - } + public void setLogger(RHLogger logger) + { + impl.setLogger(logger); } /** * */ - public void setSriListener( bulkio.SriListener sriCallback ) { - synchronized(this.sriUpdateLock) { - this.sriCallback = sriCallback; - } + public void setSriListener(bulkio.SriListener sriCallback) { + impl.setSriListener(sriCallback); } /** * */ public String getName() { - return this.name; + return impl.getName(); } - - /** * */ public void enableStats(boolean enable) { - this.stats.setEnabled(enable); + impl.enableStats(enable); } /** * */ public PortStatistics statistics() { - synchronized (statUpdateLock) { - return this.stats.retrieve(); - } + return impl.statistics(); } /** * */ public PortUsageType state() { - int queueSize = 0; - synchronized (dataBufferLock) { - 
queueSize = workQueue.size(); - if (queueSize == maxQueueDepth) { - return PortUsageType.BUSY; - } else if (queueSize == 0) { - return PortUsageType.IDLE; - } - return PortUsageType.ACTIVE; - } + return impl.state(); } /** * */ public StreamSRI[] activeSRIs() { - synchronized (this.sriUpdateLock) { - ArrayList sris = new ArrayList(); - Iterator iter = this.currentHs.values().iterator(); - while(iter.hasNext()) { - sris.add(iter.next().getSRI()); - } - return sris.toArray(new StreamSRI[sris.size()]); - } + return impl.activeSRIs(); } - + /** * */ public int getCurrentQueueDepth() { - synchronized (this.dataBufferLock) { - return workQueue.size(); - } + return impl.getCurrentQueueDepth(); } - + /** * */ public int getMaxQueueDepth() { - synchronized (this.dataBufferLock) { - return this.maxQueueDepth; - } + return impl.getMaxQueueDepth(); } - + /** * */ public void setMaxQueueDepth(int newDepth) { - synchronized (this.dataBufferLock) { - this.maxQueueDepth = newDepth; - queueSem = new Semaphore(newDepth); - } + impl.setMaxQueueDepth(newDepth); } - /** * */ public void pushSRI(StreamSRI header) { - - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI ENTER (port=" + name +")" ); - } - - synchronized (sriUpdateLock) { - if (!currentHs.containsKey(header.streamID)) { - if ( logger != null ) { - logger.debug("pushSRI PORT:" + name + " NEW SRI:" + - header.streamID ); - } - if ( sriCallback != null ) { sriCallback.newSRI(header); } - currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } else { - StreamSRI oldSri = currentHs.get(header.streamID).getSRI(); - boolean cval = false; - if ( sri_cmp != null ) { - cval = sri_cmp.compare( header, oldSri ); - } - if ( cval == false ) { - 
if ( sriCallback != null ) { sriCallback.changedSRI(header); } - this.currentHs.put(header.streamID, new sriState(header, true)); - if (header.blocking) { - //If switching to blocking we have to set the semaphore - synchronized (dataBufferLock) { - if (!blocking) { - try { - queueSem.acquire(workQueue.size()); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - blocking = true; - } - } - } - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushSRI EXIT (port=" + name +")" ); - } + impl.pushSRI(header); } - - /** * */ - public void pushPacket( String data, boolean eos, String streamID) + public void pushPacket(String data, boolean eos, String streamID) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket ENTER (port=" + name +")" ); - } - synchronized (this.dataBufferLock) { - if (this.maxQueueDepth == 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - } - } - - boolean portBlocking = false; - StreamSRI tmpH = null; - boolean sriChanged = false; - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(streamID)) { - tmpH = this.currentHs.get(streamID).getSRI(); - sriChanged = this.currentHs.get(streamID).isChanged(); - if ( eos == false ) { - this.currentHs.get(streamID).setChanged(false); - } - portBlocking = blocking; - } else { - if (logger != null) { - logger.warn("bulkio.InPort pushPacket received data from stream '" + streamID + "' with no SRI"); - } - tmpH = new StreamSRI(1, 0.0, 1.0, (short)1, 0, 0.0, 0.0, (short)0, (short)0, streamID, false, new DataType[0]); - if (sriCallback != null) { - sriCallback.newSRI(tmpH); - } - sriChanged = true; - currentHs.put(streamID, new sriState(tmpH, false)); - } - } - - // determine whether to block and wait for an empty space in the queue - Packet p = null; - BULKIO.PrecisionUTCTime time=null; - - if (portBlocking) { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - - try { - 
queueSem.acquire(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - synchronized (this.dataBufferLock) { - this.stats.update(data.length(), this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - this.workQueue.add(p); - this.dataSem.release(); - } - } else { - synchronized (this.dataBufferLock) { - if (this.workQueue.size() == this.maxQueueDepth) { - if ( logger != null ) { - logger.debug( "bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE" + this.workQueue.size() + ")" ); - } - boolean sriChangedHappened = false; - boolean flagEOS = false; - for (Iterator< Packet > itr = this.workQueue.iterator(); itr.hasNext();) { - if (sriChangedHappened && flagEOS) { - break; - } - Packet currentPacket = itr.next(); - if (currentPacket.sriChanged) { - sriChangedHappened = true; - } - if (currentPacket.EOS) { - flagEOS = true; - } - } - if (sriChangedHappened) { - sriChanged = true; - } - if (flagEOS) { - eos = true; - } - this.workQueue.clear(); - p = new Packet( data, time, eos, streamID, tmpH, sriChanged, true); - this.stats.update(data.length(), 0, eos, streamID, true); - } else { - p = new Packet(data, time, eos, streamID, tmpH, sriChanged, false); - this.stats.update(data.length(), this.workQueue.size()/(float)this.maxQueueDepth, eos, streamID, false); - } - if ( logger != null ) { - logger.trace( "bulkio::InPort pushPacket NEW Packet (QUEUE=" + workQueue.size() + ")"); - } - this.workQueue.add(p); - this.dataSem.release(); - } - } - if ( logger != null ) { - logger.trace("bulkio.InPort pushPacket EXIT (port=" + name +")" ); - } - return; - + impl.pushPacket(data, null, eos, streamID); } /** @@ -448,68 +172,12 @@ public void pushPacket( String data, boolean eos, String streamID) */ public Packet getPacket(long wait) { - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket ENTER (port=" + name +")" ); - } - - try { - if (wait < 0) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" Block 
until data arrives" ); - } - this.dataSem.acquire(); - } else { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket PORT:" + name +" TIMED WAIT:" + wait ); - } - this.dataSem.tryAcquire(wait, TimeUnit.MILLISECONDS); - } - } catch (InterruptedException ex) { - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } + DataTransfer p = impl.getPacket(wait); + if (p == null) { return null; + } else { + return new Packet(p.getData(), p.getTime(), p.getEndOfStream(), p.getStreamID(), p.getSRI(), p.sriChanged(), p.inputQueueFlushed()); } - - Packet p = null; - synchronized (this.dataBufferLock) { - p = this.workQueue.poll(); - } - - if (p != null) { - if (p.getEndOfStream()) { - synchronized (this.sriUpdateLock) { - if (this.currentHs.containsKey(p.getStreamID())) { - sriState rem = this.currentHs.remove(p.getStreamID()); - - if (rem.getSRI().blocking) { - boolean stillBlocking = false; - Iterator iter = currentHs.values().iterator(); - while (iter.hasNext()) { - if (iter.next().getSRI().blocking) { - stillBlocking = true; - break; - } - } - - if (!stillBlocking) { - blocking = false; - } - } - } - } - } - - if (blocking) { - queueSem.release(); - } - } - - if ( logger != null ) { - logger.trace("bulkio.InPort getPacket EXIT (port=" + name +")" ); - } - return p; } public String getRepid() @@ -519,8 +187,6 @@ public String getRepid() public String getDirection() { - return "Provides"; + return CF.PortSet.DIRECTION_PROVIDES; } - } - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/NumericDataHelper.java.template b/bulkioInterfaces/libsrc/java/src/bulkio/NumericDataHelper.java.template new file mode 100644 index 000000000..b17e028a3 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/NumericDataHelper.java.template @@ -0,0 +1,48 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. 
+ * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from NumericDataHelper.java.template. + * Do not modify directly. + */ +package bulkio; + +import java.util.Arrays; + +class @name@DataHelper implements DataHelper<@type@[]> { + public int bitSize() { + return @size@; + } + + public int arraySize(@type@[] data) { + return data.length; + } + + public boolean isEmpty(@type@[] data) { + return (data.length == 0); + } + + public @type@[] emptyArray() { + return new @type@[0]; + } + + public @type@[] slice(@type@[] data, int start, int end) { + return Arrays.copyOfRange(data, start, end); + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutBitPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutBitPort.java new file mode 100644 index 000000000..e3271a810 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/OutBitPort.java @@ -0,0 +1,58 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package bulkio; + +import org.apache.log4j.Logger; + +import BULKIO.PrecisionUTCTime; +import BULKIO.dataBitOperations; + +/** + * BulkIO output port implementation for dataBit. + */ +public class OutBitPort extends ChunkingOutPort { + + public OutBitPort(String portName) { + this(portName, null, null); + } + + public OutBitPort(String portName, Logger logger) { + this(portName, logger, null); + } + + public OutBitPort(String portName, Logger logger, ConnectionEventListener eventCB) { + super(portName, logger, eventCB, new BitDataHelper()); + if (this.logger != null) { + this.logger.debug("bulkio.OutPort CTOR port: " + portName); + } + } + + protected dataBitOperations narrow(org.omg.CORBA.Object obj) { + return BULKIO.jni.dataBitHelper.narrow(obj); + } + + protected void sendPacket(dataBitOperations port, BULKIO.BitSequence data, PrecisionUTCTime time, boolean endOfStream, String streamID) { + port.pushPacket(data, time, endOfStream, streamID); + } + + public String getRepid() { + return BULKIO.dataBitHelper.id(); + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutCharPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutCharPort.java deleted file mode 100644 index 1a0dbded4..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutCharPort.java +++ 
/dev/null @@ -1,48 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class OutCharPort extends OutInt8Port { - - /** - * @generated - */ - public OutCharPort(String portName) - { - super(portName ); - } - - public OutCharPort(String portName, Logger logger) - { - super(portName, logger ); - } - - - public OutCharPort(String portName, Logger logger, ConnectionEventListener eventCB ) - { - super(portName, logger, eventCB ); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutDataPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutDataPort.java index bcfd01d25..92a550b78 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutDataPort.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/OutDataPort.java @@ -30,30 +30,14 @@ import BULKIO.StreamSRI; public abstract class OutDataPort extends OutPortBase { - /** - * CORBA transfer limit in bytes - */ - // Multiply by some number < 1 to leave some margin for the CORBA header - protected static final int MAX_PAYLOAD_SIZE = (int)(Const.MAX_TRANSFER_BYTES * 0.9); - /** * Size of a single element */ - 
protected final SizeOf sizeof; - - /** - * CORBA transfer limit in samples - */ - protected int maxSamplesPerPush; - - protected List filterTable = null; + protected final DataHelper helper; - protected OutDataPort(String portName, Logger logger, ConnectionEventListener connectionListener, SizeOf size) { + protected OutDataPort(String portName, Logger logger, ConnectionEventListener connectionListener, DataHelper helper) { super(portName, logger, connectionListener); - this.sizeof = size; - // Make sure max samples per push is even so that complex data case is - // handled properly - this.maxSamplesPerPush = (MAX_PAYLOAD_SIZE/this.sizeof.sizeof()) & 0xFFFFFFFE; + this.helper = helper; } /** @@ -61,26 +45,47 @@ protected OutDataPort(String portName, Logger logger, ConnectionEventListener co */ public void connectPort(final org.omg.CORBA.Object connection, final String connectionId) throws CF.PortPackage.InvalidPort, CF.PortPackage.OccupiedPort { - if (logger != null) { - logger.trace("bulkio.OutPort connectPort ENTER (port=" + name +")"); + if (_portLog != null) { + _portLog.trace("bulkio.OutPort connectPort ENTER (port=" + name +")"); } + if (connection == null) { + throw new CF.PortPackage.InvalidPort((short) 1, "Nil object reference"); + } + + // Attempt to check the type of the remote object to reject invalid + // types; note this does not require the lock + final String repo_id = getRepid(); + boolean valid; + try { + valid = connection._is_a(repo_id); + } catch (Exception exc) { + // If _is_a throws an exception, assume the remote object is + // unreachable (probably dead) + throw new CF.PortPackage.InvalidPort((short) 1, "Object unreachable"); + } + + if (!valid) { + throw new CF.PortPackage.InvalidPort((short) 1, "Object does not support "+repo_id); + } + + final E port = this.narrow(connection); + + // Acquire the state lock before modifying the container synchronized (this.updatingPortsLock) { - final E port; - try { - port = this.narrow(connection); - } 
catch (final Exception ex) { - if (logger != null) { - logger.error("bulkio.OutPort CONNECT PORT: " + name + " PORT NARROW FAILED"); - } - throw new CF.PortPackage.InvalidPort((short)1, "Invalid port for connection '" + connectionId + "'"); + // Prevent duplicate connection IDs + if (this.outConnections.containsKey(connectionId)) { + throw new CF.PortPackage.OccupiedPort(); } this.outConnections.put(connectionId, port); this.active = true; - this.stats.put(connectionId, new linkStatistics(this.name, this.sizeof)); - - if (logger != null) { - logger.debug("bulkio.OutPort CONNECT PORT: " + name + " CONNECTION '" + connectionId + "'"); + linkStatistics stats = new linkStatistics(this.name, 1); + // Update bit size from the helper, because element size does not + // take sub-byte elements (i.e., dataBit) into account. + stats.setBitSize(helper.bitSize()); + this.stats.put(connectionId, stats); + if (_portLog != null) { + _portLog.debug("bulkio.OutPort CONNECT PORT: " + name + " CONNECTION '" + connectionId + "'"); } } @@ -88,38 +93,40 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn callback.connect(connectionId); } - if (logger != null) { - logger.trace("bulkio.OutPort connectPort EXIT (port=" + name +")"); + if (_portLog != null) { + _portLog.trace("bulkio.OutPort connectPort EXIT (port=" + name +")"); } } /** * Breaks a connection. 
*/ - public void disconnectPort(String connectionId) { - if (logger != null) { - logger.trace("bulkio.OutPort disconnectPort ENTER (port=" + name +")"); + public void disconnectPort(String connectionId) + { + if (_portLog != null) { + _portLog.trace("bulkio.OutPort disconnectPort ENTER (port=" + name +")"); } synchronized (this.updatingPortsLock) { final E port = this.outConnections.remove(connectionId); - if (port != null) - { - // Create an empty data packet with an invalid timestamp to - // send with the end-of-stream - final A data = emptyArray(); - final BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.notSet(); - for (Map.Entry entry: this.currentSRIs.entrySet()) { - final String streamID = entry.getKey(); - - final SriMapStruct sriMap = entry.getValue(); - if (sriMap.connections.contains(connectionId)) { - try { - sendPacket(port, data, tstamp, true, streamID); - } catch(Exception e) { - if (logger != null) { - logger.error("Call to pushPacket failed on port " + name + " connection " + connectionId); - } + if (port == null) { + throw new IllegalArgumentException("No connection "+connectionId); + } + + // Create an empty data packet with an invalid timestamp to send + // with the end-of-stream + final A data = helper.emptyArray(); + final BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.notSet(); + for (Map.Entry entry: this.currentSRIs.entrySet()) { + final String streamID = entry.getKey(); + + final SriMapStruct sriMap = entry.getValue(); + if (sriMap.connections.contains(connectionId)) { + try { + sendPacket(port, data, tstamp, true, streamID); + } catch(Exception e) { + if (_portLog != null) { + _portLog.error("Call to pushPacket failed on port " + name + " connection " + connectionId); } } } @@ -133,10 +140,10 @@ public void disconnectPort(String connectionId) { entry.getValue().connections.remove(connectionId); } - if (logger != null) { - logger.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); + if (_portLog != 
null) { + _portLog.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); for(Map.Entry entry: this.currentSRIs.entrySet()) { - logger.trace("bulkio.OutPort updated currentSRIs key=" + entry.getKey() + ", value.sri=" + entry.getValue().sri + ", value.connections=" + entry.getValue().connections); + _portLog.trace("bulkio.OutPort updated currentSRIs key=" + entry.getKey() + ", value.sri=" + entry.getValue().sri + ", value.connections=" + entry.getValue().connections); } } } @@ -145,8 +152,8 @@ public void disconnectPort(String connectionId) { callback.disconnect(connectionId); } - if (logger != null) { - logger.trace("bulkio.OutPort disconnectPort EXIT (port=" + name +")"); + if (_portLog != null) { + _portLog.trace("bulkio.OutPort disconnectPort EXIT (port=" + name +")"); } } @@ -155,8 +162,8 @@ public void disconnectPort(String connectionId) { */ public void pushPacket(A data, PrecisionUTCTime time, boolean endOfStream, String streamID) { - if (logger != null) { - logger.trace("bulkio.OutPort pushPacket ENTER (port=" + this.name +")"); + if (_portLog != null) { + _portLog.trace("bulkio.OutPort pushPacket ENTER (port=" + this.name +")"); } synchronized(this.updatingPortsLock) { @@ -166,14 +173,19 @@ public void pushPacket(A data, PrecisionUTCTime time, boolean endOfStream, Strin this.pushSRI(header); } - pushOversizedPacket(data, time, endOfStream, streamID); + pushPacketData(data, time, endOfStream, streamID); } - if (logger != null) { - logger.trace("bulkio.OutPort pushPacket EXIT (port=" + this.name +")"); + if (_portLog != null) { + _portLog.trace("bulkio.OutPort pushPacket EXIT (port=" + this.name +")"); } } + protected void pushPacketData(A data, PrecisionUTCTime time, boolean endOfStream, String streamID) + { + pushSinglePacket(data, time, endOfStream, streamID); + } + /** * Sends out SRI describing the data payload. 
* @@ -192,14 +204,14 @@ public void pushPacket(A data, PrecisionUTCTime time, boolean endOfStream, Strin */ public void pushSRI(StreamSRI header) { - if (logger != null) { - logger.trace("bulkio.OutPort pushSRI ENTER (port=" + name +")"); + if (_portLog != null) { + _portLog.trace("bulkio.OutPort pushSRI ENTER (port=" + name +")"); } // Header cannot be null if (header == null) { - if (logger != null) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")"); + if (_portLog != null) { + _portLog.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")"); } return; } @@ -234,8 +246,8 @@ public void pushSRI(StreamSRI header) this.updateStats(connectionID); } catch (Exception e) { if ( this.reportConnectionErrors(connectionID)) { - if (this.logger != null) { - logger.error("Call to pushSRI failed on port " + name + " connection " + connectionID); + if (this._portLog != null) { + _portLog.error("Call to pushSRI failed on port " + name + " connection " + connectionID); } } } @@ -243,117 +255,14 @@ public void pushSRI(StreamSRI header) } } - if (logger != null) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")"); - } - } - - public void updateConnectionFilter(List _filterTable) { - this.filterTable = _filterTable; - } - - protected boolean isStreamRoutedToConnection(final String streamID, final String connectionID) - { - // Is this port listed in the filter table? - boolean portListed = false; - - // Check the filter table for this stream/connection pair. 
- for (connection_descriptor_struct filter : bulkio.utils.emptyIfNull(this.filterTable)) { - // Ignore filters for other ports - if (!this.name.equals(filter.port_name.getValue())) { - continue; - } - // Filtering is in effect for this port - portListed = true; - - if (connectionID.equals(filter.connection_id.getValue()) && - streamID.equals(filter.stream_id.getValue())) { - if (logger != null) { - logger.trace("OutPort FilterMatch port:" + this.name + " connection:" + connectionID + - " streamID:" + streamID); - } - return true; - } - } - - // If the port was not listed and we made it to here, there is no - // filter in effect, so send the packet or SRI; otherwise, it was - // listed and there is no route. - if (!portListed) { - if (logger != null) { - logger.trace("OutPort NO Filter port:" + this.name + " connection:" + connectionID + - " streamID:" + streamID); - } - return true; - } else { - return false; - } - } - - private void pushOversizedPacket(A data, PrecisionUTCTime time, boolean endOfStream, String streamID) { - final int length = arraySize(data); - - // If there is no need to break data into smaller packets, skip - // straight to the pushPacket call and return. 
- SriMapStruct sriStruct = this.currentSRIs.get(streamID); - if (sriStruct.sri.subsize != 0) { - if (this.maxSamplesPerPush%sriStruct.sri.subsize != 0) { - this.maxSamplesPerPush = (MAX_PAYLOAD_SIZE/this.sizeof.sizeof()) & 0xFFFFFFFE; - while (this.maxSamplesPerPush%sriStruct.sri.subsize != 0) { - this.maxSamplesPerPush -= this.maxSamplesPerPush%sriStruct.sri.subsize; - if (this.maxSamplesPerPush%2 != 0){ - this.maxSamplesPerPush--; - } - } - } - } - if (length <= this.maxSamplesPerPush) { - this.pushSinglePacket(data, time, endOfStream, streamID); - return; - } - - // Determine xdelta for this streamID to be used for time increment for subpackets - SriMapStruct sriMap = this.currentSRIs.get(streamID); - double xdelta = 0.0; - if (sriMap != null){ - xdelta = sriMap.sri.xdelta; - } - - // Initialize time of first subpacket - PrecisionUTCTime packetTime = time; - for (int offset = 0; offset < length;) { - // Don't send more samples than are remaining - final int pushSize = java.lang.Math.min(length-offset, this.maxSamplesPerPush); - - // Copy the range for this sub-packet and advance the offset - A subPacket = copyOfRange(data, offset, offset+pushSize); - offset += pushSize; - - // Send end-of-stream as false for all sub-packets except for the - // last one (when there are no samples remaining after this push), - // which gets the input EOS. 
- boolean packetEOS = false; - if (offset == length) { - packetEOS = endOfStream; - } - - if (logger != null) { - logger.trace("bulkio.OutPort pushOversizedPacket() calling pushPacket with pushSize " + pushSize + " and packetTime twsec: " + packetTime.twsec + " tfsec: " + packetTime.tfsec); - } - this.pushSinglePacket(subPacket, packetTime, packetEOS, streamID); - int data_xfer_len = pushSize; - if (sriMap != null){ - if (sriMap.sri.mode == 1) { - data_xfer_len = data_xfer_len / 2; - } - } - packetTime = bulkio.time.utils.addSampleOffset(packetTime, data_xfer_len, xdelta); + if (_portLog != null) { + _portLog.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")"); } } - private void pushSinglePacket(A data, PrecisionUTCTime time, boolean endOfStream, String streamID) + protected void pushSinglePacket(A data, PrecisionUTCTime time, boolean endOfStream, String streamID) { - final int length = arraySize(data); + final int length = helper.arraySize(data); SriMapStruct sriStruct = this.currentSRIs.get(streamID); if (this.active) { for (Entry entry : this.outConnections.entrySet()) { @@ -376,8 +285,8 @@ private void pushSinglePacket(A data, PrecisionUTCTime time, boolean endOfStream this.stats.get(connectionID).update(length, (float)0.0, endOfStream, streamID, false); } catch (Exception e) { if ( this.reportConnectionErrors(connectionID)) { - if ( this.logger != null ) { - logger.error("Call to pushPacket failed on port " + name + " connection " + connectionID); + if ( this._portLog != null ) { + _portLog.error("Call to pushPacket failed on port " + name + " connection " + connectionID); } } } @@ -393,7 +302,4 @@ private void pushSinglePacket(A data, PrecisionUTCTime time, boolean endOfStream protected abstract E narrow(org.omg.CORBA.Object obj); protected abstract void sendPacket(E port, A data, PrecisionUTCTime time, boolean endOfStream, String streamID); - protected abstract A copyOfRange(A array, int start, int end); - protected abstract int arraySize(A array); - 
protected abstract A emptyArray(); } diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutDoublePort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutDoublePort.java deleted file mode 100644 index b619904ad..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutDoublePort.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataDoubleOperations; - -/** - * - */ -public class OutDoublePort extends OutDataPort { - - public OutDoublePort(String portName) { - this(portName, null, null); - } - - public OutDoublePort(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutDoublePort(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new DoubleSize()); - - if ( this.logger != null ) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - - } - - protected dataDoubleOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataDoubleHelper.narrow(obj); - } - - protected void sendPacket(dataDoubleOperations port, double[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected double[] copyOfRange(double[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(double[] array) { - return array.length; - } - - protected double[] emptyArray() { - return new double[0]; - } - - public String getRepid() { - return BULKIO.dataDoubleHelper.id(); - } -} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutFilePort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutFilePort.java index 09a267fe7..e303a7f0a 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutFilePort.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/OutFilePort.java @@ -19,387 +19,41 @@ */ package bulkio; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Set; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; import org.apache.log4j.Logger; -import CF.DataType; + import BULKIO.PrecisionUTCTime; import BULKIO.StreamSRI; import BULKIO.dataFileOperations; -import bulkio.linkStatistics; 
-import bulkio.Int8Size; -import bulkio.ConnectionEventListener; -import bulkio.connection_descriptor_struct; -import bulkio.SriMapStruct; -import org.ossie.properties.*; - /** - * + * BulkIO output port implementation for dataFile. */ -public class OutFilePort extends OutPortBase { - - protected List filterTable = null; - +public class OutFilePort extends OutDataPort { - public OutFilePort(String portName ){ - this( portName, null, null ); + public OutFilePort(String portName) { + this(portName, null, null); } - public OutFilePort(String portName, - Logger logger ) { - this( portName, logger, null ); + public OutFilePort(String portName, Logger logger) { + this(portName, logger, null); } - /** - * @generated - */ - public OutFilePort(String portName, - Logger logger, - ConnectionEventListener eventCB ) { - super(portName, logger, eventCB); - filterTable = null; - if ( this.logger != null ) { - this.logger.debug( "bulkio.OutPort CTOR port: " + portName ); + public OutFilePort(String portName, Logger logger, ConnectionEventListener eventCB) { + super(portName, logger, eventCB, new FileDataHelper()); + if (this.logger != null) { + this.logger.debug("bulkio.OutPort CTOR port: " + portName); } } - /** - * pushSRI - * description: send out SRI describing the data payload - * - * H: structure of type BULKIO.StreamSRI with the SRI for this stream - * hversion - * xstart: start time of the stream - * xdelta: delta between two samples - * xunits: unit types from Platinum specification - * subsize: 0 if the data is one-dimensional - * ystart - * ydelta - * yunits: unit types from Platinum specification - * mode: 0-scalar, 1-complex - * streamID: stream identifier - * sequence keywords: unconstrained sequence of key-value pairs for additional description - * @generated - */ - public void pushSRI(StreamSRI header) - { - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI ENTER (port=" + name +")" ); - } - - // Header cannot be null - if (header == null) { - if ( 
logger != null ) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); - } - return; - } - - if (header.streamID == null) { - throw new NullPointerException("SRI streamID cannot be null"); - } - - // Header cannot have null keywords - if (header.keywords == null) header.keywords = new DataType[0]; - - synchronized(this.updatingPortsLock) { // don't want to process while command information is coming in - this.currentSRIs.put(header.streamID, new SriMapStruct(header)); - if (this.active) { - // state if this port is not listed in the filter table... then pushSRI down stream - boolean portListed = false; - - // for each connection - for (Entry p : this.outConnections.entrySet()) { - - // if connection is in the filter table - for (connection_descriptor_struct ftPtr : bulkio.utils.emptyIfNull(this.filterTable) ) { - - // if there is an entry for this port in the filter table....so save that state - if (ftPtr.port_name.getValue().equals(this.name)) { - portListed = true; - } - - if ( logger != null ) { - logger.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + - " streamID:" + header.streamID ); - } - - if ( (ftPtr.port_name.getValue().equals(this.name)) && - (ftPtr.connection_id.getValue().equals(p.getKey())) && - (ftPtr.stream_id.getValue().equals(header.streamID))) { - try { - if ( logger != null ) { - logger.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + - " streamID:" + header.streamID ); - } - p.getValue().pushSRI(header); - //Update entry in currentSRIs - this.currentSRIs.get(header.streamID).connections.add(p.getKey()); - this.updateStats(p.getKey()); - } catch(Exception e) { - if ( this.reportConnectionErrors(p.getKey()) ) { - if ( logger != null ) { - logger.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); - } - } - } - } - } - } - - // no entry exists for this port in the filter table so all connections get SRI data - if (!portListed ) { - for 
(Entry p : this.outConnections.entrySet()) { - try { - if ( logger != null ) { - logger.trace( "pushSRI - NO Filter port:" + this.name + " connection:" + p.getKey() + - " streamID:" + header.streamID ); - } - p.getValue().pushSRI(header); - //Update entry in currentSRIs - this.currentSRIs.get(header.streamID).connections.add(p.getKey()); - this.updateStats(p.getKey()); - } catch(Exception e) { - if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); - } - } - } - } - } - - - } - - - } // don't want to process while command information is coming in - - - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); - } - return; - } - - public void updateConnectionFilter(List _filterTable) { - this.filterTable = _filterTable; - }; - - /** - * @generated - */ - public void pushPacket(String data, PrecisionUTCTime time, boolean endOfStream, String streamID) - { - if ( logger != null ) { - logger.trace("bulkio.OutPort pushPacket ENTER (port=" + name +")" ); - } - - if (!this.currentSRIs.containsKey(streamID)) { - StreamSRI header = bulkio.sri.utils.create(); - header.streamID = streamID; - this.pushSRI(header); - } - SriMapStruct sriStruct = this.currentSRIs.get(streamID); - - synchronized(this.updatingPortsLock) { // don't want to process while command information is coming in - String odata = data; - if (this.active) { - boolean portListed = false; - for (Entry p : this.outConnections.entrySet()) { - - for (connection_descriptor_struct ftPtr : bulkio.utils.emptyIfNull(this.filterTable) ) { - - if (ftPtr.port_name.getValue().equals(this.name)) { - portListed = true; - } - if ( (ftPtr.port_name.getValue().equals(this.name)) && - (ftPtr.connection_id.getValue().equals(p.getKey())) && - (ftPtr.stream_id.getValue().equals(streamID)) ) { - try { - //If SRI for given streamID has not been pushed to this connection, push it - if 
(!sriStruct.connections.contains(p.getKey())){ - p.getValue().pushSRI(sriStruct.sri); - sriStruct.connections.add(p.getKey()); - } - p.getValue().pushPacket( odata, time, endOfStream, streamID); - this.stats.get(p.getKey()).update( odata.length(), (float)0.0, endOfStream, streamID, false); - } catch(Exception e) { - if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushPacket failed on port " + name + " connection " + p.getKey() ); - } - } - } - } - } - } - - if (!portListed ){ - for (Entry p : this.outConnections.entrySet()) { - try { - //If SRI for given streamID has not been pushed to this connection, push it - if (!sriStruct.connections.contains(p.getKey())){ - p.getValue().pushSRI(sriStruct.sri); - sriStruct.connections.add(p.getKey()); - } - p.getValue().pushPacket( odata, time, endOfStream, streamID); - this.stats.get(p.getKey()).update( odata.length(), (float)0.0, endOfStream, streamID, false); - } catch(Exception e) { - if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushPacket failed on port " + name + " connection " + p.getKey() ); - } - } - } - } - } - } - - if ( endOfStream ) { - if ( this.currentSRIs.containsKey(streamID) ) { - this.currentSRIs.remove(streamID); - } - } - - } // don't want to process while command information is coming in - - if ( logger != null ) { - logger.trace("bulkio.OutPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - - /** - * @generated - */ - public void connectPort(final org.omg.CORBA.Object connection, final String connectionId) throws CF.PortPackage.InvalidPort, CF.PortPackage.OccupiedPort - { - - if ( logger != null ) { - logger.trace("bulkio.OutPort connectPort ENTER (port=" + name +")" ); - } - - synchronized (this.updatingPortsLock) { - final dataFileOperations port; - try { - port = BULKIO.jni.dataFileHelper.narrow(connection); - } catch (final Exception ex) { - if ( logger != null ) { - 
logger.error("bulkio.OutPort CONNECT PORT: " + name + " PORT NARROW FAILED"); - } - throw new CF.PortPackage.InvalidPort((short)1, "Invalid port for connection '" + connectionId + "'"); - } - this.outConnections.put(connectionId, port); - this.active = true; - this.stats.put(connectionId, new linkStatistics( this.name, new Int8Size() ) ); - - if ( logger != null ) { - logger.debug("bulkio.OutPort CONNECT PORT: " + name + " CONNECTION '" + connectionId + "'"); - } - } - - if ( logger != null ) { - logger.trace("bulkio.OutPort connectPort EXIT (port=" + name +")" ); - } - - if ( callback != null ) { - callback.connect(connectionId); - } + public String getRepid() { + return BULKIO.dataFileHelper.id(); } - /** - * @generated - */ - public void disconnectPort(String connectionId) { - if ( logger != null ) { - logger.trace("bulkio.OutPort disconnectPort ENTER (port=" + name +")" ); - } - synchronized (this.updatingPortsLock) { - boolean portListed = false; - for (connection_descriptor_struct ftPtr : bulkio.utils.emptyIfNull(this.filterTable) ) { - if (ftPtr.port_name.getValue().equals(this.name)) { - portListed = true; - break; - } - } - dataFileOperations port = this.outConnections.remove(connectionId); - if (port != null) - { - String odata = ""; - BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.notSet(); - for (Map.Entry entry: this.currentSRIs.entrySet()) { - String streamID = entry.getKey(); - if (entry.getValue().connections.contains(connectionId)) { - if (portListed) { - for (connection_descriptor_struct ftPtr : bulkio.utils.emptyIfNull(this.filterTable)) { - if ( (ftPtr.port_name.getValue().equals(this.name)) && - (ftPtr.connection_id.getValue().equals(connectionId)) && - (ftPtr.stream_id.getValue().equals(streamID))) { - try { - port.pushPacket(odata,tstamp,true,streamID); - this.updateStats(connectionId); - } catch(Exception e) { - if ( this.reportConnectionErrors( connectionId ) ) { - if ( logger != null ) { - logger.error("Call to pushPacket failed on 
port " + name + " connection " + connectionId ); - } - } - } - } - } - } else { - try { - port.pushPacket(odata,tstamp,true,streamID); - this.updateStats(connectionId); - } catch(Exception e) { - if ( this.reportConnectionErrors( connectionId ) ) { - if ( logger != null ) { - logger.error("Call to pushPacket failed on port " + name + " connection " + connectionId ); - } - - } - } - } - } - } - } - this.stats.remove(connectionId); - this.active = (this.outConnections.size() != 0); - - // Remove connectionId from any sets in the currentSRIs.connections values - for(Map.Entry entry : this.currentSRIs.entrySet()) { - entry.getValue().connections.remove(connectionId); - } - - if ( logger != null ) { - logger.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); - for(Map.Entry entry: this.currentSRIs.entrySet()) { - logger.trace("bulkio.OutPort updated currentSRIs key=" + entry.getKey() + ", value.sri=" + entry.getValue().sri + ", value.connections=" + entry.getValue().connections); - } - } - } - - if ( callback != null ) { - callback.disconnect(connectionId); - } - - if ( logger != null ) { - logger.trace("bulkio.OutPort disconnectPort EXIT (port=" + name +")" ); - } + protected dataFileOperations narrow(org.omg.CORBA.Object obj) { + return BULKIO.dataFileHelper.narrow(obj); } - public String getRepid() { - return BULKIO.dataFileHelper.id(); + protected void sendPacket(dataFileOperations port, String data, PrecisionUTCTime time, boolean endOfStream, String streamID) { + port.pushPacket(data, time, endOfStream, streamID); } - } - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutFloatPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutFloatPort.java deleted file mode 100644 index 39e7aa52b..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutFloatPort.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. 
- * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataFloatOperations; - -/** - * - */ -public class OutFloatPort extends OutDataPort { - - public OutFloatPort(String portName) { - this(portName, null, null); - } - - public OutFloatPort(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutFloatPort(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new FloatSize()); - if ( this.logger != null ) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - - } - - protected dataFloatOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataFloatHelper.narrow(obj); - } - - protected void sendPacket(dataFloatOperations port, float[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected float[] copyOfRange(float[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(float[] array) { - return array.length; - } - - protected float[] emptyArray() { - return new float[0]; - } - - public String 
getRepid() { - return BULKIO.dataFloatHelper.id(); - } -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutInt16Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutInt16Port.java deleted file mode 100644 index 255b7c7c1..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutInt16Port.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataShortOperations; - -/** - * - */ -public class OutInt16Port extends OutDataPort { - - public OutInt16Port(String portName) { - this(portName, null, null); - } - - public OutInt16Port(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutInt16Port(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new Int16Size()); - if (this.logger != null) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - - } - - protected dataShortOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataShortHelper.narrow(obj); - } - - protected void sendPacket(dataShortOperations port, short[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected short[] copyOfRange(short[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(short[] array) { - return array.length; - } - - protected short[] emptyArray() { - return new short[0]; - } - - public String getRepid() { - return BULKIO.dataShortHelper.id(); - } -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutInt32Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutInt32Port.java deleted file mode 100644 index 85b17a06f..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutInt32Port.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataLongOperations; - -/** - * - */ -public class OutInt32Port extends OutDataPort { - - public OutInt32Port(String portName) { - this(portName, null, null); - } - - public OutInt32Port(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutInt32Port(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new Int32Size()); - if (this.logger != null) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - } - - protected dataLongOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataLongHelper.narrow(obj); - } - - protected void sendPacket(dataLongOperations port, int[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected int[] copyOfRange(int[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(int[] array) { - return array.length; - } - - protected int[] emptyArray() { - return new int[0]; - } - - public String getRepid() { - return BULKIO.dataLongHelper.id(); - } -} diff --git 
a/bulkioInterfaces/libsrc/java/src/bulkio/OutInt64Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutInt64Port.java deleted file mode 100644 index b771ff496..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutInt64Port.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataLongLongOperations; - -/** - * - */ -public class OutInt64Port extends OutDataPort { - - public OutInt64Port(String portName) { - this(portName, null, null); - } - - public OutInt64Port(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutInt64Port(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new Int64Size()); - if (this.logger != null) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - } - - protected dataLongLongOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataLongLongHelper.narrow(obj); - } - - protected void sendPacket(dataLongLongOperations port, long[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected long[] copyOfRange(long[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(long[] array) { - return array.length; - } - - protected long[] emptyArray() { - return new long[0]; - } - - public String getRepid() { - return BULKIO.dataLongLongHelper.id(); - } -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutInt8Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutInt8Port.java deleted file mode 100644 index 7e2a9c2ed..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutInt8Port.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataCharOperations; - -/** - * - */ -public class OutInt8Port extends OutDataPort { - - public OutInt8Port(String portName) { - this(portName, null, null); - } - - public OutInt8Port(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutInt8Port(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new Int8Size()); - if (this.logger != null) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - } - - protected dataCharOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataCharHelper.narrow(obj); - } - - protected void sendPacket(dataCharOperations port, char[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected char[] copyOfRange(char[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(char[] array) { - return array.length; - } - - protected char[] emptyArray() { - return new char[0]; - } - - public String getRepid() { - return BULKIO.dataCharHelper.id(); - } -} - diff --git 
a/bulkioInterfaces/libsrc/java/src/bulkio/OutLongLongPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutLongLongPort.java deleted file mode 100644 index 48d173ef7..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutLongLongPort.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class OutLongLongPort extends OutInt64Port { - - /** - * @generated - */ - public OutLongLongPort(String portName) - { - super(portName ); - } - - public OutLongLongPort(String portName, Logger logger) - { - super(portName, logger ); - } - - - public OutLongLongPort(String portName, Logger logger, ConnectionEventListener eventCB ) - { - super(portName, logger, eventCB ); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutLongPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutLongPort.java deleted file mode 100644 index 4dbbc547b..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutLongPort.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is protected by Copyright. 
Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class OutLongPort extends OutInt32Port { - - /** - * @generated - */ - public OutLongPort(String portName) - { - super(portName ); - } - - public OutLongPort(String portName, Logger logger) - { - super(portName, logger ); - } - - - public OutLongPort(String portName, Logger logger, ConnectionEventListener eventCB ) - { - super(portName, logger, eventCB ); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutOctetPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutOctetPort.java deleted file mode 100644 index e9633599f..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutOctetPort.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class OutOctetPort extends OutUInt8Port { - - /** - * @generated - */ - public OutOctetPort(String portName) - { - super(portName ); - } - - public OutOctetPort(String portName, Logger logger) - { - super(portName, logger ); - } - - - public OutOctetPort(String portName, Logger logger, ConnectionEventListener eventCB ) - { - super(portName, logger, eventCB ); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutPort.java.template b/bulkioInterfaces/libsrc/java/src/bulkio/OutPort.java.template new file mode 100644 index 000000000..e28d3580e --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/OutPort.java.template @@ -0,0 +1,65 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from OutPort.java.template. + * Do not modify directly. + */ +package bulkio; + +import org.apache.log4j.Logger; + +import BULKIO.PrecisionUTCTime; +import BULKIO.@idl@Operations; + +/** + * BulkIO output port implementation for @idl@. + */ +public class Out@name@Port extends ChunkingOutPort<@idl@Operations,@type@[]> { + + public Out@name@Port(String portName) { + this(portName, null, null); + } + + public Out@name@Port(String portName, Logger logger) { + this(portName, logger, null); + } + + public Out@name@Port(String portName, Logger logger, ConnectionEventListener eventCB) { + super(portName, logger, eventCB, new @name@DataHelper()); + if (this.logger != null) { + this.logger.debug("bulkio.OutPort CTOR port: " + portName); + } + + } + + protected @idl@Operations narrow(final org.omg.CORBA.Object obj) { + return BULKIO.jni.@idl@Helper.narrow(obj); + } + + protected void sendPacket(@idl@Operations port, @type@[] data, PrecisionUTCTime time, + boolean endOfStream, String streamID) { + port.pushPacket(data, time, endOfStream, streamID); + } + + public String getRepid() { + return BULKIO.@idl@Helper.id(); + } +} + diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutPortBase.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutPortBase.java index 3fda0f8e2..f3055a801 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutPortBase.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/OutPortBase.java @@ -28,14 +28,16 @@ import org.apache.log4j.Logger; import ExtendedCF.UsesConnection; +import 
ExtendedCF.TransportInfo; +import ExtendedCF.ConnectionStatus; +import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; import BULKIO.PortUsageType; import BULKIO.StreamSRI; import BULKIO.UsesPortStatistics; -import org.ossie.component.PortBase; - -public abstract class OutPortBase extends BULKIO.UsesPortStatisticsProviderPOA implements org.ossie.component.PortBase { +public abstract class OutPortBase extends BULKIO.UsesPortStatisticsProviderPOA implements PortBase { /** * Name within the component */ @@ -66,6 +68,8 @@ public abstract class OutPortBase extends BULKIO.UsesPortStatisticsProviderPO */ protected Logger logger; + public RHLogger _portLog = null; + /** * Event listener when connect/disconnet events happen */ @@ -76,6 +80,11 @@ public abstract class OutPortBase extends BULKIO.UsesPortStatisticsProviderPO */ protected final Map currentSRIs = new HashMap(); + /** + * Table of port names, connection IDs and stream IDs for connection-based routing + */ + protected List filterTable = null; + protected OutPortBase(String portName) { this(portName, null, null); } @@ -120,6 +129,11 @@ public void setLogger(Logger newlogger){ } } + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + /** * Sets the listener to receive connect and disconnect notifications. 
*/ @@ -132,6 +146,7 @@ public void setConnectionEventListener(ConnectionEventListener newListener){ /** * @deprecated */ + @Deprecated public HashMap getPorts() { return new HashMap(); } @@ -186,8 +201,8 @@ public boolean reportConnectionErrors( String cid, String msg ) { boolean retval=reportConnectionErrors(cid ); if ( retval ) { - if ( logger != null ) { - logger.error(msg); + if ( _portLog != null ) { + _portLog.error(msg); } } return retval; @@ -230,13 +245,55 @@ public StreamSRI[] activeSRIs() return sriList.toArray(new StreamSRI[0]); } - public String getRepid() - { - return "IDL:CORBA/Object:1.0"; - } + public String getRepid() + { + return "IDL:CORBA/Object:1.0"; + } - public String getDirection() - { - return "Uses"; - } + public String getDirection() + { + return CF.PortSet.DIRECTION_USES; + } + + public void updateConnectionFilter(List filterTable) { + this.filterTable = filterTable; + } + + protected boolean isStreamRoutedToConnection(final String streamID, final String connectionID) + { + // Is this port listed in the filter table? + boolean portListed = false; + + // Check the filter table for this stream/connection pair. + for (connection_descriptor_struct filter : bulkio.utils.emptyIfNull(this.filterTable)) { + // Ignore filters for other ports + if (!this.name.equals(filter.port_name.getValue())) { + continue; + } + // Filtering is in effect for this port + portListed = true; + + if (connectionID.equals(filter.connection_id.getValue()) && + streamID.equals(filter.stream_id.getValue())) { + if (_portLog != null) { + _portLog.trace("OutPort FilterMatch port:" + this.name + " connection:" + connectionID + + " streamID:" + streamID); + } + return true; + } + } + + // If the port was not listed and we made it to here, there is no + // filter in effect, so send the packet or SRI; otherwise, it was + // listed and there is no route. 
+ if (!portListed) { + if (_portLog != null) { + _portLog.trace("OutPort NO Filter port:" + this.name + " connection:" + connectionID + + " streamID:" + streamID); + } + return true; + } else { + return false; + } + } } diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutSDDSPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutSDDSPort.java index 5bf970505..23a0c3702 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutSDDSPort.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/OutSDDSPort.java @@ -30,6 +30,7 @@ import java.util.Set; import java.util.HashSet; import org.ossie.component.QueryableUsesPort; +import org.ossie.component.RHLogger; import org.apache.log4j.Logger; import CF.DataType; import CF.PropertiesHelper; @@ -43,7 +44,6 @@ import BULKIO.SDDSStreamDefinition; import bulkio.linkStatistics; -import bulkio.Int8Size; import bulkio.connection_descriptor_struct; import bulkio.SriMapStruct; import bulkio.sdds.SDDSStreamContainer; @@ -66,8 +66,6 @@ public class OutSDDSPort extends OutPortBase { */ protected SDDSStreamContainer streamContainer; - protected List filterTable = null; - public OutSDDSPort(String portName ){ this( portName, null, null ); } @@ -84,7 +82,6 @@ public OutSDDSPort(String portName, Logger logger, ConnectionEventListener eventCB ) { super(portName, logger, eventCB); - this.filterTable = null; this.streamContainer = new SDDSStreamContainer(this); this.userId = new String("defaultUserId"); if ( this.logger == null ) { @@ -104,6 +101,12 @@ public void setLogger( Logger newlogger ){ } } + public void setLogger(RHLogger logger) + { + this._portLog = logger; + this.streamContainer.setLogger(logger.getL4Logger()); + } + /** * pushSRI * description: send out SRI describing the data payload @@ -132,14 +135,14 @@ public void setLogger( Logger newlogger ){ public void pushSRI(StreamSRI header, PrecisionUTCTime time) { - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI ENTER (port=" + name +")" ); + if ( _portLog != null ) { + 
_portLog.trace("bulkio.OutPort pushSRI ENTER (port=" + name +")" ); } // Header cannot be null if (header == null) { - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); } return; } @@ -167,16 +170,16 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) if (ftPtr.port_name.getValue().equals(this.name)) { portListed = true; } - if ( logger != null ) { - logger.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + + if ( _portLog != null ) { + _portLog.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + " streamID:" + header.streamID ); } if ( (ftPtr.port_name.getValue().equals(this.name)) && (ftPtr.connection_id.getValue().equals(p.getKey())) && (ftPtr.stream_id.getValue().equals(header.streamID))) { try { - if ( logger != null ) { - logger.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + + if ( _portLog != null ) { + _portLog.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + " streamID:" + header.streamID ); } p.getValue().pushSRI(header, time); @@ -185,8 +188,8 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) this.updateStats(p.getKey()); } catch(Exception e) { if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); + if ( _portLog != null ) { + _portLog.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); } } } @@ -198,8 +201,8 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) if (!portListed ) { for (Entry p : this.outConnections.entrySet()) { try { - if ( logger != null ) { - logger.trace( "pushSRI - NO Filter port:" + this.name + " connection:" + p.getKey() + + if ( _portLog != null ) { + _portLog.trace( "pushSRI - NO Filter port:" + 
this.name + " connection:" + p.getKey() + " streamID:" + header.streamID ); } p.getValue().pushSRI(header, time); @@ -208,8 +211,8 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) this.updateStats(p.getKey()); } catch(Exception e) { if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); + if ( _portLog != null ) { + _portLog.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); } } } @@ -220,14 +223,14 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) } // don't want to process while command information is coming in - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); } return; } public void updateConnectionFilter(List _filterTable) { - this.filterTable = _filterTable; + super.updateConnectionFilter(_filterTable); //1. loop over fitlerTable // A. 
ignore other port names @@ -254,8 +257,8 @@ public void updateConnectionFilter(List _filterTab hasPortEntry = true; dataSDDSOperations connectedPort = this.outConnections.get(ftPtr.connection_id.getValue()); if (connectedPort == null){ - if ( logger != null ) { - logger.debug("bulkio.OutPort updateConnectionFilter() did not find connected port for connection_id " + ftPtr.connection_id.getValue()); + if ( _portLog != null ) { + _portLog.debug("bulkio.OutPort updateConnectionFilter() did not find connected port for connection_id " + ftPtr.connection_id.getValue()); } continue; } @@ -283,16 +286,16 @@ public void updateConnectionFilter(List _filterTab try{ foundStream.updateAttachments(entry.getValue().toArray(new SDDSStreamAttachment[0])); }catch (AttachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() AttachError on updateAttachments() for streamId " + streamId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() AttachError on updateAttachments() for streamId " + streamId); } }catch (DetachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() DetachError on updateAttachments() for streamId " + streamId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() DetachError on updateAttachments() for streamId " + streamId); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() StreamInputError on updateAttachments() for streamId " + streamId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() StreamInputError on updateAttachments() for streamId " + streamId); } } } @@ -306,16 +309,16 @@ public void updateConnectionFilter(List _filterTab if(stream != null){ try{ stream.detachAll(); - if ( logger != null ) { - logger.debug("bulkio::OutPort updateConnectionFilter() calling detachAll() for streamId " + entry.getKey()); + if ( 
_portLog != null ) { + _portLog.debug("bulkio::OutPort updateConnectionFilter() calling detachAll() for streamId " + entry.getKey()); } }catch (DetachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() DetachError on detachAll() for streamId " + entry.getKey()); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() DetachError on detachAll() for streamId " + entry.getKey()); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() StreamInputError on detachAll() for streamId " + entry.getKey()); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() StreamInputError on detachAll() for streamId " + entry.getKey()); } } } @@ -326,20 +329,20 @@ public void updateConnectionFilter(List _filterTab for (Entry p : this.outConnections.entrySet()) { try{ this.streamContainer.addConnectionToAllStreams(p.getKey(),p.getValue()); - if ( logger != null ) { - logger.debug("bulkio::OutPort updateConnectionFilter() calling addConnectionToAllStreams for connection " + p.getKey()); + if ( _portLog != null ) { + _portLog.debug("bulkio::OutPort updateConnectionFilter() calling addConnectionToAllStreams for connection " + p.getKey()); } }catch (AttachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() AttachError on updateAttachments() for all streams"); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() AttachError on updateAttachments() for all streams"); } }catch (DetachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() DetachError on updateAttachments() for all streams"); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() DetachError on updateAttachments() for all streams"); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort 
updateConnectionFilter() StreamInputError on updateAttachments() for all streams"); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() StreamInputError on updateAttachments() for all streams"); } } } @@ -360,7 +363,6 @@ public void updateSRIForAllConnections() { String streamId; Set streamConnIds = new HashSet(); Set currentSRIConnIds = new HashSet(); - Iterator connIdIter; // Iterate through all registered streams for (SDDSStream s: this.streamContainer.getStreams()){ @@ -381,8 +383,8 @@ public void updateSRIForAllConnections() { // Grab the port dataSDDSOperations connectedPort = this.outConnections.get(connId); if (connectedPort == null) { - if ( logger != null ) { - logger.debug("updateSRIForAllConnections() Unable to find connected port with connectionId: " + connId); + if ( _portLog != null ) { + _portLog.debug("updateSRIForAllConnections() Unable to find connected port with connectionId: " + connId); } continue; } @@ -401,8 +403,8 @@ public void updateSRIForAllConnections() { public void connectPort(final org.omg.CORBA.Object connection, final String connectionId) throws CF.PortPackage.InvalidPort, CF.PortPackage.OccupiedPort { - if ( logger != null ) { - logger.trace("bulkio.OutPort connectPort ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort connectPort ENTER (port=" + name +")" ); } synchronized (this.updatingPortsLock) { @@ -410,14 +412,14 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn try { port = BULKIO.jni.dataSDDSHelper.narrow(connection); } catch (final Exception ex) { - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " PORT NARROW FAILED"); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " PORT NARROW FAILED"); } throw new CF.PortPackage.InvalidPort((short)1, "Invalid port for connection '" + connectionId + "'"); } this.outConnections.put(connectionId, port); 
this.active = true; - this.stats.put(connectionId, new linkStatistics( this.name, new Int8Size() ) ); + this.stats.put(connectionId, new linkStatistics(this.name, 1)); boolean portListed = false; @@ -431,16 +433,16 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn try{ this.streamContainer.addConnectionToStream(connectionId, port,ftPtr.stream_id.getValue()); }catch (AttachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() AttachError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() AttachError for connectionId " + connectionId); } }catch (DetachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() DetachError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() DetachError for connectionId " + connectionId); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() StreamInputError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() StreamInputError for connectionId " + connectionId); } } } @@ -449,22 +451,22 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn try{ this.streamContainer.addConnectionToAllStreams(connectionId,port); }catch (AttachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() AttachError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() AttachError for connectionId " + connectionId); } }catch (DetachError e){ - 
if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() DetachError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() DetachError for connectionId " + connectionId); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() StreamInputError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() StreamInputError for connectionId " + connectionId); } } } - if ( logger != null ) { - logger.debug("bulkio::OutPort CONNECT PORT: " + name + " CONNECTION '" + connectionId + "'"); + if ( _portLog != null ) { + _portLog.debug("bulkio::OutPort CONNECT PORT: " + name + " CONNECTION '" + connectionId + "'"); } } this.streamContainer.printState("After connectPort"); @@ -476,8 +478,8 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn */ public void disconnectPort(String connectionId) { - if ( logger != null ) { - logger.trace("bulkio.OutPort disconnectPort ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort disconnectPort ENTER (port=" + name +")" ); } synchronized (this.updatingPortsLock) { try { @@ -492,8 +494,8 @@ public void disconnectPort(String connectionId) { this.stats.remove(connectionId); this.active = (this.outConnections.size() != 0); - if ( logger != null ) { - logger.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); } // Remove connectionId from any sets in the currentSRIs.connections values @@ -501,10 +503,10 @@ public void disconnectPort(String connectionId) { 
entry.getValue().connections.remove(connectionId); } - if ( logger != null ) { - logger.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); for(Map.Entry entry: this.currentSRIs.entrySet()) { - logger.trace("bulkio.OutPort updated currentSRIs key=" + entry.getKey() + ", value.sri=" + entry.getValue().sri + ", value.connections=" + entry.getValue().connections); + _portLog.trace("bulkio.OutPort updated currentSRIs key=" + entry.getKey() + ", value.sri=" + entry.getValue().sri + ", value.connections=" + entry.getValue().connections); } } } @@ -513,8 +515,8 @@ public void disconnectPort(String connectionId) { callback.disconnect(connectionId); } - if ( logger != null ) { - logger.trace("bulkio.OutPort disconnectPort EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort disconnectPort EXIT (port=" + name +")" ); } } @@ -622,8 +624,8 @@ public boolean updateStream(final BULKIO.SDDSStreamDefinition streamDef) throws */ public boolean addStream(final BULKIO.SDDSStreamDefinition streamDef) throws AttachError, StreamInputError, DetachError { - if ( logger != null ) { - logger.trace("bulkio.OutPort addStream ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort addStream ENTER (port=" + name +")" ); } String attachId = null; @@ -674,11 +676,11 @@ public boolean addStream(final BULKIO.SDDSStreamDefinition streamDef) throws Att } String[] attachIds = stream.getAttachIds(); - if ( logger != null ) { + if ( _portLog != null ) { for(String str: attachIds){ - logger.trace("SDDS PORT: addStream() ATTACHMENT COMPLETED ATTACH ID:" + str + " NAME(userid):" + stream.getName() ); + _portLog.trace("SDDS PORT: addStream() ATTACHMENT COMPLETED ATTACH ID:" + str + " NAME(userid):" + stream.getName() ); } - logger.trace("bulkio.OutPort addStream() EXIT (port=" + 
name +")" ); + _portLog.trace("bulkio.OutPort addStream() EXIT (port=" + name +")" ); } this.streamContainer.printState("After addStream"); return true; @@ -698,8 +700,8 @@ public void removeStream(String streamId) throws AttachError, StreamInputError, */ public void detach(String attachId, String connectionId) throws DetachError, StreamInputError { - if ( logger != null ) { - logger.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); } synchronized (this.updatingPortsLock) { @@ -714,24 +716,24 @@ public void detach(String attachId, String connectionId) throws DetachError, Str this.streamContainer.detach(); } } - if ( logger != null ) { - logger.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); } this.streamContainer.printState("After detach"); } public void detach(String attachId) throws DetachError, StreamInputError { - if ( logger != null ) { - logger.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); } if(attachId != null){ this.streamContainer.detachByAttachId(attachId); } - if ( logger != null ) { - logger.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); } this.streamContainer.printState("After detach"); } diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutShortPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutShortPort.java deleted file mode 100644 index c01a60fb5..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutShortPort.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. 
- * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class OutShortPort extends OutInt16Port { - - /** - * @generated - */ - public OutShortPort(String portName) - { - super(portName ); - } - - public OutShortPort(String portName, Logger logger) - { - super(portName, logger ); - } - - - public OutShortPort(String portName, Logger logger, ConnectionEventListener eventCB ) - { - super(portName, logger, eventCB ); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt16Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt16Port.java deleted file mode 100644 index 8510136c7..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt16Port.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataUshortOperations; - -/** - * - */ -public class OutUInt16Port extends OutDataPort { - - public OutUInt16Port(String portName) { - this(portName, null, null); - } - - public OutUInt16Port(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutUInt16Port(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new UInt16Size()); - if (this.logger != null) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - } - - protected dataUshortOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataUshortHelper.narrow(obj); - } - - protected void sendPacket(dataUshortOperations port, short[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected short[] copyOfRange(short[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(short[] array) { - return array.length; - } - - protected short[] emptyArray() { - return new short[0]; - } - - public String getRepid() { - return BULKIO.dataUshortHelper.id(); - } -} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt32Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt32Port.java deleted file mode 100644 index 3cc007d2f..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt32Port.java +++ /dev/null @@ -1,72 +0,0 
@@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataUlongOperations; - -/** - * - */ -public class OutUInt32Port extends OutDataPort { - - public OutUInt32Port(String portName) { - this(portName, null, null); - } - - public OutUInt32Port(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutUInt32Port(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new UInt32Size()); - if (this.logger != null) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - } - - protected dataUlongOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataUlongHelper.narrow(obj); - } - - protected void sendPacket(dataUlongOperations port, int[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected int[] copyOfRange(int[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(int[] 
array) { - return array.length; - } - - protected int[] emptyArray() { - return new int[0]; - } - - public String getRepid() { - return BULKIO.dataUlongHelper.id(); - } -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt64Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt64Port.java deleted file mode 100644 index 16d4da67d..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt64Port.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataUlongLongOperations; - -/** - * - */ -public class OutUInt64Port extends OutDataPort { - - public OutUInt64Port(String portName) { - this(portName, null, null); - } - - public OutUInt64Port(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutUInt64Port(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new UInt64Size()); - if (this.logger != null) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - } - - protected dataUlongLongOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataUlongLongHelper.narrow(obj); - } - - protected void sendPacket(dataUlongLongOperations port, long[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected long[] copyOfRange(long[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(long[] array) { - return array.length; - } - - protected long[] emptyArray() { - return new long[0]; - } - - public String getRepid() { - return BULKIO.dataUlongLongHelper.id(); - } -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt8Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt8Port.java deleted file mode 100644 index b8510d3b4..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutUInt8Port.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; - -import java.util.Arrays; -import org.apache.log4j.Logger; -import BULKIO.PrecisionUTCTime; -import BULKIO.dataOctetOperations; - -/** - * - */ -public class OutUInt8Port extends OutDataPort { - - public OutUInt8Port(String portName) { - this(portName, null, null); - } - - public OutUInt8Port(String portName, Logger logger) { - this(portName, logger, null); - } - - public OutUInt8Port(String portName, Logger logger, ConnectionEventListener eventCB) { - super(portName, logger, eventCB, new UInt8Size()); - if (this.logger != null) { - this.logger.debug("bulkio.OutPort CTOR port: " + portName); - } - } - - protected dataOctetOperations narrow(final org.omg.CORBA.Object obj) { - return BULKIO.jni.dataOctetHelper.narrow(obj); - } - - protected void sendPacket(dataOctetOperations port, byte[] data, PrecisionUTCTime time, - boolean endOfStream, String streamID) { - port.pushPacket(data, time, endOfStream, streamID); - } - - protected byte[] copyOfRange(byte[] array, int start, int end) { - return Arrays.copyOfRange(array, start, end); - } - - protected int arraySize(byte[] array) { - return array.length; - } - - protected byte[] emptyArray() { - return new byte[0]; - } - - public String getRepid() { - return BULKIO.dataOctetHelper.id(); - } -} - diff --git 
a/bulkioInterfaces/libsrc/java/src/bulkio/OutULongLongPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutULongLongPort.java deleted file mode 100644 index 5df7a7686..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutULongLongPort.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class OutULongLongPort extends OutUInt64Port { - - /** - * @generated - */ - public OutULongLongPort(String portName) - { - super(portName ); - } - - public OutULongLongPort(String portName, Logger logger) - { - super(portName, logger ); - } - - - public OutULongLongPort(String portName, Logger logger, ConnectionEventListener eventCB ) - { - super(portName, logger, eventCB ); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutULongPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutULongPort.java deleted file mode 100644 index 31a2f2ea9..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutULongPort.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * This file is protected by Copyright. 
Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class OutULongPort extends OutUInt32Port { - - public OutULongPort(String portName) - { - super(portName ); - } - - public OutULongPort(String portName, Logger logger) - { - super(portName, logger ); - } - - - public OutULongPort(String portName, Logger logger, ConnectionEventListener eventCB ) - { - super(portName, logger, eventCB ); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutUShortPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutUShortPort.java deleted file mode 100644 index 1cdd353a0..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutUShortPort.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -package bulkio; -import org.apache.log4j.Logger; - -/** - * - */ -public class OutUShortPort extends OutUInt16Port { - - /** - * @generated - */ - public OutUShortPort(String portName) - { - super(portName ); - } - - public OutUShortPort(String portName, Logger logger) - { - super(portName, logger ); - } - - - public OutUShortPort(String portName, Logger logger, ConnectionEventListener eventCB ) - { - super(portName, logger, eventCB ); - } - -} - diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutVITA49Port.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutVITA49Port.java index 29c12b845..440b80c9a 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutVITA49Port.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/OutVITA49Port.java @@ -30,6 +30,7 @@ import java.util.Set; import java.util.HashSet; import org.ossie.component.QueryableUsesPort; +import org.ossie.component.RHLogger; import org.apache.log4j.Logger; import CF.DataType; import CF.PropertiesHelper; @@ -43,7 +44,6 @@ import BULKIO.VITA49StreamDefinition; import bulkio.linkStatistics; -import bulkio.Int8Size; import bulkio.connection_descriptor_struct; import bulkio.SriMapStruct; import bulkio.vita49.VITA49StreamContainer; @@ -66,8 +66,6 @@ public class OutVITA49Port extends OutPortBase { */ protected 
VITA49StreamContainer streamContainer; - protected List filterTable = null; - public OutVITA49Port(String portName ){ this( portName, null, null ); } @@ -104,6 +102,12 @@ public void setLogger( Logger newlogger ){ } } + public void setLogger(RHLogger logger) + { + this._portLog = logger; + this.streamContainer.setLogger(logger.getL4Logger()); + } + /** * pushSRI * description: send out SRI describing the data payload @@ -132,14 +136,14 @@ public void setLogger( Logger newlogger ){ public void pushSRI(StreamSRI header, PrecisionUTCTime time) { - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort pushSRI ENTER (port=" + name +")" ); } // Header cannot be null if (header == null) { - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); } return; } @@ -167,16 +171,16 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) if (ftPtr.port_name.getValue().equals(this.name)) { portListed = true; } - if ( logger != null ) { - logger.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + + if ( _portLog != null ) { + _portLog.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + " streamID:" + header.streamID ); } if ( (ftPtr.port_name.getValue().equals(this.name)) && (ftPtr.connection_id.getValue().equals(p.getKey())) && (ftPtr.stream_id.getValue().equals(header.streamID))) { try { - if ( logger != null ) { - logger.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + + if ( _portLog != null ) { + _portLog.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + " streamID:" + header.streamID ); } p.getValue().pushSRI(header, time); @@ -185,8 +189,8 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) 
this.updateStats(p.getKey()); } catch(Exception e) { if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); + if ( _portLog != null ) { + _portLog.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); } } } @@ -198,8 +202,8 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) if (!portListed ) { for (Entry p : this.outConnections.entrySet()) { try { - if ( logger != null ) { - logger.trace( "pushSRI - NO Filter port:" + this.name + " connection:" + p.getKey() + + if ( _portLog != null ) { + _portLog.trace( "pushSRI - NO Filter port:" + this.name + " connection:" + p.getKey() + " streamID:" + header.streamID ); } p.getValue().pushSRI(header, time); @@ -208,8 +212,8 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) this.updateStats(p.getKey()); } catch(Exception e) { if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); + if ( _portLog != null ) { + _portLog.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); } } } @@ -220,14 +224,14 @@ public void pushSRI(StreamSRI header, PrecisionUTCTime time) } // don't want to process while command information is coming in - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); } return; } public void updateConnectionFilter(List _filterTable) { - this.filterTable = _filterTable; + super.updateConnectionFilter(_filterTable); //1. loop over fitlerTable // A. 
ignore other port names @@ -254,8 +258,8 @@ public void updateConnectionFilter(List _filterTab hasPortEntry = true; dataVITA49Operations connectedPort = this.outConnections.get(ftPtr.connection_id.getValue()); if (connectedPort == null){ - if ( logger != null ) { - logger.debug("bulkio.OutPort updateConnectionFilter() did not find connected port for connection_id " + ftPtr.connection_id +")" ); + if ( _portLog != null ) { + _portLog.debug("bulkio.OutPort updateConnectionFilter() did not find connected port for connection_id " + ftPtr.connection_id +")" ); } continue; } @@ -283,16 +287,16 @@ public void updateConnectionFilter(List _filterTab try{ foundStream.updateAttachments(entry.getValue().toArray(new VITA49StreamAttachment[0])); }catch (AttachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() AttachError on updateAttachments() for streamId " + streamId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() AttachError on updateAttachments() for streamId " + streamId); } }catch (DetachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() DetachError on updateAttachments() for streamId " + streamId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() DetachError on updateAttachments() for streamId " + streamId); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() StreamInputError on updateAttachments() for streamId " + streamId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() StreamInputError on updateAttachments() for streamId " + streamId); } } } @@ -306,16 +310,16 @@ public void updateConnectionFilter(List _filterTab if(stream != null){ try{ stream.detachAll(); - if ( logger != null ) { - logger.debug("bulkio::OutPort updateConnectionFilter() calling detachAll() for streamId " + entry.getKey()); + if ( _portLog 
!= null ) { + _portLog.debug("bulkio::OutPort updateConnectionFilter() calling detachAll() for streamId " + entry.getKey()); } }catch (DetachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() DetachError on detachAll() for streamId " + entry.getKey()); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() DetachError on detachAll() for streamId " + entry.getKey()); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() StreamInputError on detachAll() for streamId " + entry.getKey()); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() StreamInputError on detachAll() for streamId " + entry.getKey()); } } } @@ -326,20 +330,20 @@ public void updateConnectionFilter(List _filterTab for (Entry p : this.outConnections.entrySet()) { try{ this.streamContainer.addConnectionToAllStreams(p.getKey(),p.getValue()); - if ( logger != null ) { - logger.debug("bulkio::OutPort updateConnectionFilter() calling addConnectionToAllStreams for connection " + p.getKey()); + if ( _portLog != null ) { + _portLog.debug("bulkio::OutPort updateConnectionFilter() calling addConnectionToAllStreams for connection " + p.getKey()); } }catch (AttachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() AttachError on updateAttachments() for all streams"); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() AttachError on updateAttachments() for all streams"); } }catch (DetachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort updateConnectionFilter() DetachError on updateAttachments() for all streams"); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() DetachError on updateAttachments() for all streams"); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort 
updateConnectionFilter() StreamInputError on updateAttachments() for all streams"); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort updateConnectionFilter() StreamInputError on updateAttachments() for all streams"); } } } @@ -360,7 +364,6 @@ public void updateSRIForAllConnections() { String streamId; Set streamConnIds = new HashSet(); Set currentSRIConnIds = new HashSet(); - Iterator connIdIter; // Iterate through all registered streams for (VITA49Stream s: this.streamContainer.getStreams()){ @@ -381,8 +384,8 @@ public void updateSRIForAllConnections() { // Grab the port dataVITA49Operations connectedPort = this.outConnections.get(connId); if (connectedPort == null) { - if ( logger != null ) { - logger.debug("updateSRIForAllConnections() Unable to find connected port with connectionId: " + connId); + if ( _portLog != null ) { + _portLog.debug("updateSRIForAllConnections() Unable to find connected port with connectionId: " + connId); } continue; } @@ -401,8 +404,8 @@ public void updateSRIForAllConnections() { public void connectPort(final org.omg.CORBA.Object connection, final String connectionId) throws CF.PortPackage.InvalidPort, CF.PortPackage.OccupiedPort { - if ( logger != null ) { - logger.trace("bulkio.OutPort connectPort ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort connectPort ENTER (port=" + name +")" ); } synchronized (this.updatingPortsLock) { @@ -410,14 +413,14 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn try { port = BULKIO.jni.dataVITA49Helper.narrow(connection); } catch (final Exception ex) { - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " PORT NARROW FAILED"); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " PORT NARROW FAILED"); } throw new CF.PortPackage.InvalidPort((short)1, "Invalid port for connection '" + connectionId + "'"); } this.outConnections.put(connectionId, port); 
this.active = true; - this.stats.put(connectionId, new linkStatistics( this.name, new Int8Size() ) ); + this.stats.put(connectionId, new linkStatistics(this.name, 1)); boolean portListed = false; @@ -431,16 +434,16 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn try{ this.streamContainer.addConnectionToStream(connectionId, port,ftPtr.stream_id.getValue()); }catch (AttachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() AttachError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() AttachError for connectionId " + connectionId); } }catch (DetachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() DetachError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() DetachError for connectionId " + connectionId); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() StreamInputError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToStream() StreamInputError for connectionId " + connectionId); } } } @@ -449,22 +452,22 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn try{ this.streamContainer.addConnectionToAllStreams(connectionId,port); }catch (AttachError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() AttachError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() AttachError for connectionId " + connectionId); } }catch (DetachError e){ - 
if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() DetachError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() DetachError for connectionId " + connectionId); } }catch (StreamInputError e){ - if ( logger != null ) { - logger.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() StreamInputError for connectionId " + connectionId); + if ( _portLog != null ) { + _portLog.error("bulkio::OutPort CONNECT PORT: " + name + " addConnectionToAllStreams() StreamInputError for connectionId " + connectionId); } } } - if ( logger != null ) { - logger.debug("bulkio::OutPort CONNECT PORT: " + name + " CONNECTION '" + connectionId + "'"); + if ( _portLog != null ) { + _portLog.debug("bulkio::OutPort CONNECT PORT: " + name + " CONNECTION '" + connectionId + "'"); } } this.streamContainer.printState("After connectPort"); @@ -476,8 +479,8 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn */ public void disconnectPort(String connectionId) { - if ( logger != null ) { - logger.trace("bulkio.OutPort disconnectPort ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort disconnectPort ENTER (port=" + name +")" ); } synchronized (this.updatingPortsLock) { try { @@ -492,8 +495,8 @@ public void disconnectPort(String connectionId) { this.stats.remove(connectionId); this.active = (this.outConnections.size() != 0); - if ( logger != null ) { - logger.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); } // Remove connectionId from any sets in the currentSRIs.connections values @@ -501,10 +504,10 @@ public void disconnectPort(String connectionId) { 
entry.getValue().connections.remove(connectionId); } - if ( logger != null ) { - logger.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); for(Map.Entry entry: this.currentSRIs.entrySet()) { - logger.trace("bulkio.OutPort updated currentSRIs key=" + entry.getKey() + ", value.sri=" + entry.getValue().sri + ", value.connections=" + entry.getValue().connections); + _portLog.trace("bulkio.OutPort updated currentSRIs key=" + entry.getKey() + ", value.sri=" + entry.getValue().sri + ", value.connections=" + entry.getValue().connections); } } } @@ -513,8 +516,8 @@ public void disconnectPort(String connectionId) { callback.disconnect(connectionId); } - if ( logger != null ) { - logger.trace("bulkio.OutPort disconnectPort EXIT (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort disconnectPort EXIT (port=" + name +")" ); } } @@ -622,8 +625,8 @@ public boolean updateStream(final BULKIO.VITA49StreamDefinition streamDef) throw */ public boolean addStream(final BULKIO.VITA49StreamDefinition streamDef) throws AttachError, StreamInputError, DetachError { - if ( logger != null ) { - logger.trace("bulkio.OutPort addStream ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort addStream ENTER (port=" + name +")" ); } String attachId = null; @@ -673,11 +676,11 @@ public boolean addStream(final BULKIO.VITA49StreamDefinition streamDef) throws A } String[] attachIds = stream.getAttachIds(); - if ( logger != null ) { + if ( _portLog != null ) { for(String str: attachIds){ - logger.trace("VITA49 PORT: addStream() ATTACHMENT COMPLETED ATTACH ID:" + str + " NAME(userid):" + stream.getName() ); + _portLog.trace("VITA49 PORT: addStream() ATTACHMENT COMPLETED ATTACH ID:" + str + " NAME(userid):" + stream.getName() ); } - logger.trace("bulkio.OutPort addStream() EXIT 
(port=" + name +")" ); + _portLog.trace("bulkio.OutPort addStream() EXIT (port=" + name +")" ); } this.streamContainer.printState("After addStream"); return true; @@ -697,8 +700,8 @@ public void removeStream(String streamId) throws AttachError, StreamInputError, */ public void detach(String attachId, String connectionId) throws DetachError, StreamInputError { - if ( logger != null ) { - logger.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); } synchronized (this.updatingPortsLock) { @@ -713,24 +716,24 @@ public void detach(String attachId, String connectionId) throws DetachError, Str this.streamContainer.detach(); } } - if ( logger != null ) { - logger.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); } this.streamContainer.printState("After detach"); } public void detach(String attachId) throws DetachError, StreamInputError { - if ( logger != null ) { - logger.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); } if(attachId != null){ this.streamContainer.detachByAttachId(attachId); } - if ( logger != null ) { - logger.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); + if ( _portLog != null ) { + _portLog.trace("bulkio.OutPort detach ENTER (port=" + name +")" ); } this.streamContainer.printState("After detach"); } diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/OutXMLPort.java b/bulkioInterfaces/libsrc/java/src/bulkio/OutXMLPort.java index b685a0546..ce3fd38ed 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/OutXMLPort.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/OutXMLPort.java @@ -19,386 +19,49 @@ */ package bulkio; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Set; -import 
java.util.List; -import java.util.Map; -import java.util.Map.Entry; import org.apache.log4j.Logger; -import CF.DataType; + import BULKIO.PrecisionUTCTime; import BULKIO.StreamSRI; import BULKIO.dataXMLOperations; -import bulkio.linkStatistics; -import bulkio.Int8Size; -import bulkio.ConnectionEventListener; -import bulkio.connection_descriptor_struct; -import bulkio.SriMapStruct; -import org.ossie.properties.*; - /** - * + * BulkIO output port implementation for dataXML. */ -public class OutXMLPort extends OutPortBase { - - protected List filterTable = null; - +public class OutXMLPort extends OutDataPort { - public OutXMLPort(String portName ){ - this( portName, null, null ); + public OutXMLPort(String portName) { + this(portName, null, null); } - public OutXMLPort(String portName, - Logger logger ) { - this( portName, logger, null ); + public OutXMLPort(String portName, Logger logger) { + this(portName, logger, null); } - /** - * @generated - */ - public OutXMLPort(String portName, - Logger logger, - ConnectionEventListener eventCB ) { - super(portName, logger, eventCB); - filterTable = null; - if ( this.logger != null ) { - this.logger.debug( "bulkio.OutPort CTOR port: " + portName ); - } - } - - /** - * pushSRI - * description: send out SRI describing the data payload - * - * H: structure of type BULKIO.StreamSRI with the SRI for this stream - * hversion - * xstart: start time of the stream - * xdelta: delta between two samples - * xunits: unit types from Platinum specification - * subsize: 0 if the data is one-dimensional - * ystart - * ydelta - * yunits: unit types from Platinum specification - * mode: 0-scalar, 1-complex - * streamID: stream identifier - * sequence keywords: unconstrained sequence of key-value pairs for additional description - * @generated - */ - public void pushSRI(StreamSRI header) - { - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI ENTER (port=" + name +")" ); - } - - // Header cannot be null - if (header == null) { - if 
( logger != null ) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); - } - return; - } - - if (header.streamID == null) { - throw new NullPointerException("SRI streamID cannot be null"); + public OutXMLPort(String portName, Logger logger, ConnectionEventListener eventCB) { + super(portName, logger, eventCB, new XMLDataHelper()); + if (this.logger != null) { + this.logger.debug("bulkio.OutPort CTOR port: " + portName); } - - // Header cannot have null keywords - if (header.keywords == null) header.keywords = new DataType[0]; - - synchronized(this.updatingPortsLock) { // don't want to process while command information is coming in - this.currentSRIs.put(header.streamID, new SriMapStruct(header)); - if (this.active) { - // state if this port is not listed in the filter table... then pushSRI down stream - boolean portListed = false; - - // for each connection - for (Entry p : this.outConnections.entrySet()) { - - // if connection is in the filter table - for (connection_descriptor_struct ftPtr : bulkio.utils.emptyIfNull(this.filterTable) ) { - - // if there is an entry for this port in the filter table....so save that state - if (ftPtr.port_name.getValue().equals(this.name)) { - portListed = true; - } - if ( logger != null ) { - logger.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + - " streamID:" + header.streamID ); - } - if ( (ftPtr.port_name.getValue().equals(this.name)) && - (ftPtr.connection_id.getValue().equals(p.getKey())) && - (ftPtr.stream_id.getValue().equals(header.streamID))) { - try { - if ( logger != null ) { - logger.trace( "pushSRI - FilterMatch port:" + this.name + " connection:" + p.getKey() + - " streamID:" + header.streamID ); - } - p.getValue().pushSRI(header); - //Update entry in currentSRIs - this.currentSRIs.get(header.streamID).connections.add(p.getKey()); - this.updateStats(p.getKey()); - } catch(Exception e) { - if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - 
logger.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); - } - } - } - } - } - } - - // no entry exists for this port in the filter table so all connections get SRI data - if (!portListed ) { - for (Entry p : this.outConnections.entrySet()) { - try { - if ( logger != null ) { - logger.trace( "pushSRI - NO Filter port:" + this.name + " connection:" + p.getKey() + - " streamID:" + header.streamID ); - } - p.getValue().pushSRI(header); - //Update entry in currentSRIs - this.currentSRIs.get(header.streamID).connections.add(p.getKey()); - this.updateStats(p.getKey()); - } catch(Exception e) { - if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushSRI failed on port " + name + " connection " + p.getKey() ); - } - } - } - } - } - - - } - - - } // don't want to process while command information is coming in - - - if ( logger != null ) { - logger.trace("bulkio.OutPort pushSRI EXIT (port=" + name +")" ); - } - return; } - public void updateConnectionFilter(List _filterTable) { - this.filterTable = _filterTable; - }; - - public void pushPacket(String data, PrecisionUTCTime time, boolean endOfStream, String streamID) { - pushPacket( data, endOfStream, streamID ); + public String getRepid() { + return BULKIO.dataXMLHelper.id(); } - /** - * @generated - */ public void pushPacket(String data, boolean endOfStream, String streamID) { - if ( logger != null ) { - logger.trace("bulkio.OutPort pushPacket ENTER (port=" + name +")" ); - } - - if (!this.currentSRIs.containsKey(streamID)) { - StreamSRI header = bulkio.sri.utils.create(); - header.streamID = streamID; - this.pushSRI(header); - } - SriMapStruct sriStruct = this.currentSRIs.get(streamID); - - synchronized(this.updatingPortsLock) { // don't want to process while command information is coming in - String odata = data; - if (this.active) { - boolean portListed = false; - for (Entry p : this.outConnections.entrySet()) { - - for 
(connection_descriptor_struct ftPtr : bulkio.utils.emptyIfNull(this.filterTable) ) { - - if (ftPtr.port_name.getValue().equals(this.name)) { - portListed = true; - } - if ( (ftPtr.port_name.getValue().equals(this.name)) && - (ftPtr.connection_id.getValue().equals(p.getKey())) && - (ftPtr.stream_id.getValue().equals(streamID)) ) { - try { - //If SRI for given streamID has not been pushed to this connection, push it - if (!sriStruct.connections.contains(p.getKey())){ - p.getValue().pushSRI(sriStruct.sri); - sriStruct.connections.add(p.getKey()); - } - p.getValue().pushPacket( odata, endOfStream, streamID); - this.stats.get(p.getKey()).update( odata.length(), (float)0.0, endOfStream, streamID, false); - } catch(Exception e) { - if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushPacket failed on port " + name + " connection " + p.getKey() ); - } - } - } - } - } - } - - if (!portListed ){ - for (Entry p : this.outConnections.entrySet()) { - try { - //If SRI for given streamID has not been pushed to this connection, push it - if (!sriStruct.connections.contains(p.getKey())){ - p.getValue().pushSRI(sriStruct.sri); - sriStruct.connections.add(p.getKey()); - } - p.getValue().pushPacket( odata, endOfStream, streamID); - this.stats.get(p.getKey()).update( odata.length(), (float)0.0, endOfStream, streamID, false); - } catch(Exception e) { - if ( this.reportConnectionErrors( p.getKey() ) ) { - if ( logger != null ) { - logger.error("Call to pushPacket failed on port " + name + " connection " + p.getKey() ); - } - } - } - } - } - } - if ( endOfStream ) { - if ( this.currentSRIs.containsKey(streamID) ) { - this.currentSRIs.remove(streamID); - } - } - - } // don't want to process while command information is coming in - - if ( logger != null ) { - logger.trace("bulkio.OutPort pushPacket EXIT (port=" + name +")" ); - } - return; - - } - - - /** - * @generated - */ - public void connectPort(final org.omg.CORBA.Object connection, 
final String connectionId) throws CF.PortPackage.InvalidPort, CF.PortPackage.OccupiedPort - { - - if ( logger != null ) { - logger.trace("bulkio.OutPort connectPort ENTER (port=" + name +")" ); - } - - synchronized (this.updatingPortsLock) { - final dataXMLOperations port; - try { - port = BULKIO.jni.dataXMLHelper.narrow(connection); - } catch (final Exception ex) { - if ( logger != null ) { - logger.error("bulkio.OutPort CONNECT PORT: " + name + " PORT NARROW FAILED"); - } - throw new CF.PortPackage.InvalidPort((short)1, "Invalid port for connection '" + connectionId + "'"); - } - this.outConnections.put(connectionId, port); - this.active = true; - this.stats.put(connectionId, new linkStatistics( this.name, new Int8Size() ) ); - - if ( logger != null ) { - logger.debug("bulkio.OutPort CONNECT PORT: " + name + " CONNECTION '" + connectionId + "'"); - } - } - - if ( logger != null ) { - logger.trace("bulkio.OutPort connectPort EXIT (port=" + name +")" ); - } - - if ( callback != null ) { - callback.connect(connectionId); - } + // Pass a null timestamp; it will never be referenced in the base class + // and can be safely dropped in sendPacket(). 
+ super.pushPacket(data, null, endOfStream, streamID); } - /** - * @generated - */ - public void disconnectPort(String connectionId) { - if ( logger != null ) { - logger.trace("bulkio.OutPort disconnectPort ENTER (port=" + name +")" ); - } - synchronized (this.updatingPortsLock) { - boolean portListed = false; - for (connection_descriptor_struct ftPtr : bulkio.utils.emptyIfNull(this.filterTable)) { - if (ftPtr.port_name.getValue().equals(this.name)) { - portListed = true; - break; - } - } - dataXMLOperations port = this.outConnections.remove(connectionId); - if (port != null) - { - String odata = ""; - BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.notSet(); - for (Map.Entry entry: this.currentSRIs.entrySet()) { - String streamID = entry.getKey(); - if (entry.getValue().connections.contains(connectionId)) { - if (portListed) { - for (connection_descriptor_struct ftPtr : bulkio.utils.emptyIfNull(this.filterTable) ) { - if ( (ftPtr.port_name.getValue().equals(this.name)) && - (ftPtr.connection_id.getValue().equals(connectionId)) && - (ftPtr.stream_id.getValue().equals(streamID))) { - try { - port.pushPacket(odata,true,streamID); - this.updateStats(connectionId); - } catch(Exception e) { - if ( this.reportConnectionErrors(connectionId) ) { - if ( logger != null ) { - logger.error("Call to pushPacket failed on port " + name + " connection " + connectionId ); - } - } - } - } - } - } else { - try { - port.pushPacket(odata,true,streamID); - this.updateStats(connectionId); - } catch(Exception e) { - if ( this.reportConnectionErrors(connectionId) ) { - if ( logger != null ) { - logger.error("Call to pushPacket failed on port " + name + " connection " + connectionId ); - } - } - } - } - } - } - } - this.stats.remove(connectionId); - this.active = (this.outConnections.size() != 0); - - // Remove connectionId from any sets in the currentSRIs.connections values - for(Map.Entry entry : this.currentSRIs.entrySet()) { - entry.getValue().connections.remove(connectionId); - } - 
- if ( logger != null ) { - logger.trace("bulkio.OutPort DISCONNECT PORT:" + name + " CONNECTION '" + connectionId + "'"); - for(Map.Entry entry: this.currentSRIs.entrySet()) { - logger.trace("bulkio.OutPort updated currentSRIs key=" + entry.getKey() + ", value.sri=" + entry.getValue().sri + ", value.connections=" + entry.getValue().connections); - } - } - } - - if ( callback != null ) { - callback.disconnect(connectionId); - } - - if ( logger != null ) { - logger.trace("bulkio.OutPort disconnectPort EXIT (port=" + name +")" ); - } + protected dataXMLOperations narrow(org.omg.CORBA.Object obj) { + return BULKIO.dataXMLHelper.narrow(obj); } - public String getRepid() { - return BULKIO.dataXMLHelper.id(); + protected void sendPacket(dataXMLOperations port, String data, PrecisionUTCTime time, boolean endOfStream, String streamID) { + port.pushPacket(data, endOfStream, streamID); } } diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/SizeOf.java b/bulkioInterfaces/libsrc/java/src/bulkio/SizeOf.java index e9e41c34d..971bf7d2d 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/SizeOf.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/SizeOf.java @@ -20,118 +20,9 @@ package bulkio; - +@Deprecated public class SizeOf { - public int sizeof() { return 0; } } - - -class FloatSize extends SizeOf< Float > { - - public FloatSize() {}; - public < T > int sizeof() { - return bytes(); - }; - - public static int bytes() { - return 4; - } -} - -class DoubleSize extends SizeOf< Double > { - - public DoubleSize() {}; - public < T > int sizeof() { - return bytes(); - }; - - public static int bytes() { - return 8; - } -} - -class Int8Size extends SizeOf< Byte > { - - public Int8Size() {}; - public < T > int sizeof() { - return bytes(); - }; - - public static int bytes() { - return 1; - } -} - -class CharSize extends SizeOf< Character > { - public CharSize() {}; - public < T > int sizeof() { - return bytes(); - }; - - public static int bytes() { - return 1; - } -} - -class UInt8Size 
extends Int8Size { - public UInt8Size() {}; -} - - -class OctetSize extends Int8Size { - public OctetSize() {}; -} - - -class Int16Size extends SizeOf< Short > { - - public Int16Size() {}; - public < T > int sizeof() { - return bytes(); - }; - - public static int bytes() { - return 2; - } -} - - -class UInt16Size extends Int16Size { - public UInt16Size() { }; -} - - -class Int32Size extends SizeOf< Integer > { - - public Int32Size() {}; - public < T > int sizeof() { - return bytes(); - }; - - public static int bytes() { - return 4; - } -} - -class UInt32Size extends Int32Size { - public UInt32Size() { }; -} - -class Int64Size extends SizeOf< Long > { - - public Int64Size() {}; - public < T > int sizeof() { - return bytes(); - }; - - public static int bytes() { - return 8; - } -} - -class UInt64Size extends Int64Size { - public UInt64Size() { }; -} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/XMLDataHelper.java b/bulkioInterfaces/libsrc/java/src/bulkio/XMLDataHelper.java new file mode 100644 index 000000000..e09830c51 --- /dev/null +++ b/bulkioInterfaces/libsrc/java/src/bulkio/XMLDataHelper.java @@ -0,0 +1,43 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. 
If not, see http://www.gnu.org/licenses/. + */ +package bulkio; + +class XMLDataHelper implements DataHelper { + public int bitSize() { + return 8; + } + + public int arraySize(String data) { + return data.length(); + } + + public boolean isEmpty(String data) + { + return data.isEmpty(); + } + + public String emptyArray() { + return ""; + } + + public String slice(String data, int start, int end) { + return data.substring(start, end); + } +} diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/linkStatistics.java b/bulkioInterfaces/libsrc/java/src/bulkio/linkStatistics.java index 1cc776d68..0744ff33a 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/linkStatistics.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/linkStatistics.java @@ -22,12 +22,14 @@ import java.util.ArrayList; import java.util.List; -import java.util.Iterator; + import org.omg.CORBA.TCKind; -import org.ossie.properties.AnyUtils; + import CF.DataType; +import org.ossie.properties.AnyUtils; + import BULKIO.PortStatistics; -import bulkio.SizeOf; + /** * @generated @@ -62,12 +64,9 @@ class statPoint { /** @generated */ protected String portName; protected int connection_errors; - - /** - * @generated - */ - public linkStatistics(String portName, SizeOf dataum ) { - this.sizeof = dataum.sizeof(); + + public linkStatistics(String portName, int sizeof) { + this.sizeof = sizeof; this.enabled = true; this.bitSize = this.sizeof * 8.0; this.connection_errors=0; @@ -91,6 +90,11 @@ public linkStatistics(String portName, SizeOf dataum ) { } } + @Deprecated + public linkStatistics(String portName, SizeOf dataum) { + this(portName, dataum.sizeof()); + } + public List< String > getActiveStreamIDs() { return this.activeStreamIDs; } @@ -174,7 +178,7 @@ public PortStatistics retrieve() { this.runningStats.timeSinceLastCall = (float)(secs - front_sec); this.runningStats.bitsPerSecond = (float)((totalData * this.bitSize) / totalTime); this.runningStats.elementsPerSecond = (float)(totalData / totalTime); - 
this.runningStats.averageQueueDepth = (float)(queueSize / receivedSize); + this.runningStats.averageQueueDepth = queueSize / receivedSize; this.runningStats.callsPerSecond = (float)((receivedSize - 1) / totalTime); this.runningStats.streamIDs = this.activeStreamIDs.toArray(new String[0]); if (flushTime != 0.0) { diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/queueSemaphore.java b/bulkioInterfaces/libsrc/java/src/bulkio/queueSemaphore.java deleted file mode 100644 index 7e338c812..000000000 --- a/bulkioInterfaces/libsrc/java/src/bulkio/queueSemaphore.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -package bulkio; - -import java.util.concurrent.locks.*; - -class queueSemaphore -{ - private int maxValue; - private int currValue; - private Lock mutex = new ReentrantLock(); - private Condition condition = mutex.newCondition(); - - public queueSemaphore( int initialMaxValue) { - currValue=0; - maxValue = initialMaxValue; - } - - public void release() { - currValue=0; - condition.signalAll(); - } - - public void setMaxValue( int newMaxValue) { - mutex.lock(); - try { - maxValue = newMaxValue; - } - finally { - mutex.unlock(); - } - } - - public int getMaxValue() { - mutex.lock(); - try { - return maxValue; - } - finally { - mutex.unlock(); - } - } - - public void setCurrValue( int newValue) { - mutex.lock(); - try { - if (newValue < maxValue) { - int oldValue = currValue; - currValue = newValue; - - if (oldValue > newValue) { - condition.signal(); - } - } - - } - finally { - mutex.unlock(); - } - } - - public void incr() { - mutex.lock(); - try { - while (currValue >= maxValue) { - try { - condition.await(); - }catch( InterruptedException e) {}; - - } - ++currValue; - } - finally { - mutex.unlock(); - } - } - - public void decr() { - mutex.lock(); - try { - if (currValue > 0) { - --currValue; - } - condition.signal(); - } - finally { - mutex.unlock(); - } - } - -}; diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/sdds/SDDSStream.java b/bulkioInterfaces/libsrc/java/src/bulkio/sdds/SDDSStream.java index f4bee5f70..1817f7b8b 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/sdds/SDDSStream.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/sdds/SDDSStream.java @@ -300,7 +300,7 @@ public void updateAttachments(SDDSStreamAttachment[] expectedAttachments) throws String existingConnectionId = nextAttachment.getConnectionId(); boolean detachConnection = true; - Iterator expectedConnIdIter = expectedConnectionIds.iterator(); + Iterator expectedConnIdIter = expectedConnectionIds.iterator(); while (expectedConnIdIter.hasNext()){ if 
(existingConnectionId.equals(expectedConnIdIter.next())){ detachConnection = false; diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/sri/DefaultComparator.java b/bulkioInterfaces/libsrc/java/src/bulkio/sri/DefaultComparator.java index 4c5cc1934..0b01b9f3d 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/sri/DefaultComparator.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/sri/DefaultComparator.java @@ -20,54 +20,16 @@ package bulkio.sri; -import org.ossie.properties.AnyUtils; import BULKIO.StreamSRI; public class DefaultComparator implements bulkio.sri.Comparator { - public boolean compare(StreamSRI SRI_1, StreamSRI SRI_2){ - - if ( SRI_1 == null || SRI_2 == null ) + public boolean compare(StreamSRI SRI_1, StreamSRI SRI_2) + { + if ((SRI_1 == null) || (SRI_2 == null)) { return false; - if (SRI_1.hversion != SRI_2.hversion) - return false; - if (SRI_1.xstart != SRI_2.xstart) - return false; - if (SRI_1.xdelta != SRI_2.xdelta) - return false; - if (SRI_1.xunits != SRI_2.xunits) - return false; - if (SRI_1.subsize != SRI_2.subsize) - return false; - if (SRI_1.ystart != SRI_2.ystart) - return false; - if (SRI_1.ydelta != SRI_2.ydelta) - return false; - if (SRI_1.yunits != SRI_2.yunits) - return false; - if (SRI_1.mode != SRI_2.mode) - return false; - if (SRI_1.blocking != SRI_2.blocking) - return false; - if (!SRI_1.streamID.equals(SRI_2.streamID)) - return false; - if (SRI_1.keywords == null || SRI_2.keywords == null ) - return false; - if (SRI_1.keywords.length != SRI_2.keywords.length) - return false; - String action = "eq"; - for (int i=0; i < SRI_1.keywords.length; i++) { - if (!SRI_1.keywords[i].id.equals(SRI_2.keywords[i].id)) { - return false; - } - if (!SRI_1.keywords[i].value.type().equivalent(SRI_2.keywords[i].value.type())) { - return false; - } - if (!AnyUtils.compareAnys(SRI_1.keywords[i].value, SRI_2.keywords[i].value, action)) { - return false; - } } - return true; + return utils.compare(SRI_1, SRI_2); } } diff --git 
a/bulkioInterfaces/libsrc/java/src/bulkio/sri/utils.java b/bulkioInterfaces/libsrc/java/src/bulkio/sri/utils.java index 8eb82d179..de0fd3aca 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/sri/utils.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/sri/utils.java @@ -19,16 +19,52 @@ */ package bulkio.sri; -import java.lang.System; -import BULKIO.StreamSRI; +import org.ossie.properties.AnyUtils; import CF.DataType; -public class utils { +import BULKIO.StreamSRI; + +public class utils { + + /** + * Bit flags for SRI fields. + */ + public static final int NONE = 0; + public static final int HVERSION = (1<<0); + public static final int XSTART = (1<<1); + public static final int XDELTA = (1<<2); + public static final int XUNITS = (1<<3); + public static final int SUBSIZE = (1<<4); + public static final int YSTART = (1<<5); + public static final int YDELTA = (1<<6); + public static final int YUNITS = (1<<7); + public static final int MODE = (1<<8); + public static final int STREAMID = (1<<9); + public static final int BLOCKING = (1<<10); + public static final int KEYWORDS = (1<<11); public static StreamSRI create() { return create("defaultSRI", 1.0, (short)1, false ); } + public static StreamSRI create(String sid) + { + // Default sample rate is 1 + return create(sid, 1.0); + } + + public static StreamSRI create(String sid, double srate) + { + // Default xunits is BULKIO::UNITS_TIME + return create(sid, srate, BULKIO.UNITS_TIME.value); + } + + public static StreamSRI create(String sid, double srate, short xunits) + { + // Default is non-blocking + return create(sid, srate, xunits, false); + } + public static StreamSRI create( String sid, double srate, short xunits, @@ -55,4 +91,95 @@ public static StreamSRI create( String sid, return tsri; } + + public static boolean compare(BULKIO.StreamSRI sriA, BULKIO.StreamSRI sriB) + { + if (sriA.hversion != sriB.hversion) { + return false; + } else if (sriA.xstart != sriB.xstart) { + return false; + } else if (sriA.xdelta != 
sriB.xdelta) { + return false; + } else if (sriA.xunits != sriB.xunits) { + return false; + } else if (sriA.subsize != sriB.subsize) { + return false; + } else if (sriA.ystart != sriB.ystart) { + return false; + } else if (sriA.ydelta != sriB.ydelta) { + return false; + } else if (sriA.yunits != sriB.yunits) { + return false; + } else if (sriA.mode != sriB.mode) { + return false; + } else if (sriA.streamID != sriB.streamID) { + return false; + } else if (sriA.blocking != sriB.blocking) { + return false; + } else if (!_compareKeywords(sriA.keywords, sriB.keywords)) { + return false; + } + return true; + } + + private static boolean _compareKeywords(CF.DataType[] keywordsA, CF.DataType[] keywordsB) + { + if (keywordsA == keywordsB) { + return true; + } else if ((keywordsA == null) || (keywordsB == null)) { + return false; + } else if (keywordsA.length != keywordsB.length) { + return false; + } + for (int index = 0; index < keywordsA.length; index++) { + if (!keywordsA[index].id.equals(keywordsB[index].id)) { + return false; + } else if (!AnyUtils.compareAnys(keywordsA[index].value, keywordsB[index].value, "eq")) { + return false; + } + } + return true; + } + + public static int compareFields(BULKIO.StreamSRI sriA, BULKIO.StreamSRI sriB) + { + int result = NONE; + if (sriA.hversion != sriB.hversion) { + result |= HVERSION; + } + if (sriA.xstart != sriB.xstart) { + result |= XSTART; + } + if (sriA.xdelta != sriB.xdelta) { + result |= XDELTA; + } + if (sriA.xunits != sriB.xunits) { + result |= XUNITS; + } + if (sriA.subsize != sriB.subsize) { + result |= SUBSIZE; + } + if (sriA.ystart != sriB.ystart) { + result |= YSTART; + } + if (sriA.ydelta != sriB.ydelta) { + result |= YDELTA; + } + if (sriA.yunits != sriB.yunits) { + result |= YUNITS; + } + if (sriA.mode != sriB.mode) { + result |= MODE; + } + if (sriA.streamID != sriB.streamID) { + result |= STREAMID; + } + if (sriA.blocking != sriB.blocking) { + result |= BLOCKING; + } + if (!_compareKeywords(sriA.keywords, 
sriB.keywords)) { + result |= KEYWORDS; + } + return result; + } } diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/sriState.java b/bulkioInterfaces/libsrc/java/src/bulkio/sriState.java index daa94fac9..f7e4290f6 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/sriState.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/sriState.java @@ -19,8 +19,6 @@ */ package bulkio; -import CF.DataType; -import BULKIO.PrecisionUTCTime; import BULKIO.StreamSRI; /** diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/utils.java b/bulkioInterfaces/libsrc/java/src/bulkio/utils.java index 898d87891..299c3aa86 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/utils.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/utils.java @@ -21,13 +21,6 @@ package bulkio; import java.util.Collections; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; public class utils { diff --git a/bulkioInterfaces/libsrc/java/src/bulkio/vita49/VITA49Stream.java b/bulkioInterfaces/libsrc/java/src/bulkio/vita49/VITA49Stream.java index e804d939d..b9ecb60d1 100644 --- a/bulkioInterfaces/libsrc/java/src/bulkio/vita49/VITA49Stream.java +++ b/bulkioInterfaces/libsrc/java/src/bulkio/vita49/VITA49Stream.java @@ -299,7 +299,7 @@ public void updateAttachments(VITA49StreamAttachment[] expectedAttachments) thro String existingConnectionId = nextAttachment.getConnectionId(); boolean detachConnection = true; - Iterator expectedConnIdIter = expectedConnectionIds.iterator(); + Iterator expectedConnIdIter = expectedConnectionIds.iterator(); while (expectedConnIdIter.hasNext()){ if (existingConnectionId.equals(expectedConnIdIter.next())){ detachConnection = false; diff --git a/bulkioInterfaces/libsrc/pom.xml b/bulkioInterfaces/libsrc/pom.xml deleted file mode 100644 index a1605f033..000000000 --- a/bulkioInterfaces/libsrc/pom.xml +++ /dev/null @@ -1,99 +0,0 @@ - - 4.0.0 - - 
redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../../pom.xml - - bulkio - bundle - - - ${project.groupId} - ossie - ${project.version} - - - ${project.groupId} - bulkio-interfaces - ${project.version} - - - ${project.groupId} - cf-interfaces - ${project.version} - - - - log4j - log4j - 1.2.15 - - - com.sun.jmx - jmxri - - - com.sun.jdmk - jmxtools - - - javax.jms - jms - - - - - - java/src - - - org.codehaus.gmaven - gmaven-plugin - 1.3 - - - set-main-artifact - package - - execute - - - - project.artifact.setFile(new - File("${project.basedir}/bulkio.jar")) - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifacts - package - - attach-artifact - - - - - ${project.build.directory}/${project.artifactId}-${project.version}.jar - beta - jar - - - - - - - - - - diff --git a/bulkioInterfaces/libsrc/python/__init__.py b/bulkioInterfaces/libsrc/python/__init__.py deleted file mode 100644 index 650b7831e..000000000 --- a/bulkioInterfaces/libsrc/python/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -""" -bulkio - -Is the python interface library data exchange methods between component of the REDHAWK framework. There are 3 main modules -that support this library: - - timestamp : methods used to create simple BULKIO.PrecisionUTCTime object that provide the ability to reference a time stamp - - sri : meta data that further documents the contents of the data stream being passed between components - - input ports : input port (sinks) objects used by REDHAWK SCA components to receive data streams. - - output ports : output port (source) objects used by REDHAWK SCA components to publish data streams. - - - - -""" -# -# Import classes for bulkio python library -# - -# -from statistics import * - -import timestamp - -import sri - -import const - -from input_ports import * - -from output_ports import * diff --git a/bulkioInterfaces/libsrc/python/bulkio/__init__.py b/bulkioInterfaces/libsrc/python/bulkio/__init__.py new file mode 100644 index 000000000..028349eac --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/__init__.py @@ -0,0 +1,53 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# +""" +bulkio + +Is the python interface library data exchange methods between component of the REDHAWK framework. There are 3 main modules +that support this library: + + timestamp : methods used to create simple BULKIO.PrecisionUTCTime object that provide the ability to reference a time stamp + + sri : meta data that further documents the contents of the data stream being passed between components + + input ports : input port (sinks) objects used by REDHAWK SCA components to receive data streams. + + output ports : output port (source) objects used by REDHAWK SCA components to publish data streams. + + + + +""" +# +# Import classes for bulkio python library +# + +# +from statistics import * + +import timestamp + +import sri + +import const + +from input_ports import * + +from output_ports import * diff --git a/bulkioInterfaces/libsrc/python/bulkio/bulkioInterfaces b/bulkioInterfaces/libsrc/python/bulkio/bulkioInterfaces new file mode 120000 index 000000000..04558b3a2 --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/bulkioInterfaces @@ -0,0 +1 @@ +../../../src/python/bulkio/bulkioInterfaces \ No newline at end of file diff --git a/bulkioInterfaces/libsrc/python/bulkio_compat.py b/bulkioInterfaces/libsrc/python/bulkio/bulkio_compat.py similarity index 100% rename from bulkioInterfaces/libsrc/python/bulkio_compat.py rename to bulkioInterfaces/libsrc/python/bulkio/bulkio_compat.py diff --git a/bulkioInterfaces/libsrc/python/const.py b/bulkioInterfaces/libsrc/python/bulkio/const.py similarity index 100% rename from bulkioInterfaces/libsrc/python/const.py rename to bulkioInterfaces/libsrc/python/bulkio/const.py diff --git a/bulkioInterfaces/libsrc/python/bulkio/datablock.py b/bulkioInterfaces/libsrc/python/bulkio/datablock.py new file mode 100644 index 000000000..cb86a3273 --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/datablock.py @@ -0,0 +1,337 @@ +# +# This file is protected by Copyright. 
Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import itertools + +import bulkio.sri + +def _get_drift(begin, end, xdelta): + real = end.time - begin.time + expected = (end.offset - begin.offset) * self._sri.xdelta + return real - expected + +def _interleaved_to_complex(values): + real = itertools.islice(values, 0, len(values), 2) + imag = itertools.islice(values, 1, len(values), 2) + return [complex(re,im) for re, im in zip(real, imag)] + +class SampleTimestamp(object): + """ + Extended time stamp container. + + SampleTimestamp adds additional context to a BULKIO.PrecisionUTCTime time + stamp. When data is read from an sample-oriented input stream, it may span + more than one packet, or its start may not be on a packet boundary. In + these cases, the offset and synthetic attributes allow more sophisticated + handling of time information. + + The offset indicates at which sample the time applies. If the sample data + is complex, the offset should be interpreted in terms of complex samples + (i.e., two real values per index). + + A SampleTimestamp is considered synthetic if it was generated by an input + stream because there was no received time stamp available at that sample + offset. 
This occurs when the prior read did not end on a packet boundary; + only the first time stamp in a DataBlock can be synthetic. + + Attributes: + time: The time at which the referenced sample was created. + offset: The 0-based index of the sample at which time applies. + synthetic: Indicates whether a time was interpolated. + """ + __slots__ = ('time', 'offset', 'synthetic') + def __init__(self, time, offset=0, synthetic=False): + """ + Constructs a SampleTimestamp. + + Args: + time: Time stamp. + offset: Integral sample offset. + synthetic: False if time was received, True if interpolated. + """ + self.time = time + self.offset = offset + self.synthetic = synthetic + +class DataBlock(object): + """ + Container for sample data and stream metadata read from an input stream. + + DataBlock encapsulates the result of a read operation on an input stream. + It contains both data, which varies with the input stream type, and + metadata, including signal-related information (SRI). + + While it is possible to create DataBlocks in user code, they are usually + obtained by reading from an input stream. + + See Also: + InputStream.read() + InputStream.tryread() + """ + __slots__ = ('_sri', '_data', '_sriChangeFlags', '_inputQueueFlushed', '_timestamps') + def __init__(self, sri, data, sriChangeFlags, inputQueueFlushed): + self._sri = sri + self._data = data + self._timestamps = [] + self._sriChangeFlags = sriChangeFlags + self._inputQueueFlushed = inputQueueFlushed + + @property + def sri(self): + """ + BULKIO.StreamSRI: Stream metadata at the time the block was read. + """ + return self._sri + + @property + def buffer(self): + """ + The data read from the stream. + + The data type varies depending on the input stream. + """ + return self._data + + @property + def xdelta(self): + """ + float: The distance between two adjacent samples in the X direction. 
+ + Because the X-axis is commonly in terms of time (that is, sri.xunits is + BULKIO.UNITS_TIME), this is typically the reciprocal of the sample rate. + """ + return self.sri.xdelta + + @property + def sriChanged(self): + """ + bool: Indicates whether the SRI has changed since the last read from + the same stream. + + See Also: + DataBlock.sriChangeFlags + """ + return self.sriChangeFlags != bulkio.sri.NONE + + @property + def sriChangeFlags(self): + """ + int: Bit mask representing which SRI fields have changed since the last + read from the same stream. + + If no SRI change has occurred since the last read, the value is + bulkio.sri.NONE (equal to 0). Otherwise, the value is the bitwise OR of + one or more of the following flags: + * bulkio.sri.HVERSION + * bulkio.sri.XSTART + * bulkio.sri.XDELTA + * bulkio.sri.XUNITS + * bulkio.sri.SUBSIZE + * bulkio.sri.YSTART + * bulkio.sri.YDELTA + * bulkio.sri.YUNITS + * bulkio.sri.MODE + * bulkio.sri.STREAMID + * bulkio.sri.BLOCKING + * bulkio.sri.KEYWORDS + + The HVERSION and STREAMID flags are not set in normal operation. + """ + return self._sriChangeFlags + + @property + def inputQueueFlushed(self): + """ + bool: Indicates whether an input queue flush occurred. + + An input queue flush indicates that the input port was unable to keep + up with incoming packets for non-blocking streams and emptied the queue + to catch up. + + The input port reports a flush once, on the next queued packet. This is + typically reflected in the next DataBlock read from any input stream + associated with the port; however, this does not necessarily mean that + any packets for that stream were discarded. + """ + return self._inputQueueFlushed + + def getStartTime(self): + """ + Gets the time stamp for the first sample. + + Returns: + BULKIO.PrecisionUTCTime: The first time stamp. + + Raises: + IndexError: If there are no time stamps. 
+ """ + self._validateTimestamps() + return self._timestamps[0].time + + def addTimestamp(self, timestamp, offset=0, synthetic=False): + self._timestamps.append(SampleTimestamp(timestamp, offset, synthetic)) + + def getTimestamps(self): + """ + Gets the time stamps for the sample data. + + If complex is True, the offsets of the returned time stamps should be + interpreted in terms of complex samples. + + Valid DataBlocks obtained by reading from an input stream are + guaranteed to have at least one time stamp, at offset 0. If the read + spanned more than one packet, each packet's time stamp is included with + the packet's respective offset from the first sample. + + When the DataBlock is read from an input stream, only the first time + stamp may be synthetic. This occurs when the prior read did not consume + a full packet worth of data. In this case, the input stream linearly + interpolates the time stamp based on the stream's xdelta value. + + Returns: + list(SampleTimestamp): The time stamps for the sample data. + """ + return self._timestamps + + def getNetTimeDrift(self): + """ + Calculates the difference between the expected and actual value of the + last time stamp. + + If this DataBlock contains more than one time stamp, this method + compares the last time stamp to a linearly interpolated value based on + the initial time stamp, the StreamSRI xdelta, and the sample offset. + This difference gives a rough estimate of the deviation between the + nominal and actual sample rates over the sample period. + + Note: + If the SRI X-axis is not in units of time, this value has no + meaning. + + Returns: + float: Difference, in seconds, between expected and actual value. + """ + self._validateTimestamps() + return _get_drift(self._timestamps[0], self._timestamps[-1], self.xdelta) + + def getMaxTimeDrift(self): + """ + Calculates the largest difference between expected and actual time + stamps in the block. 
+ + If this DataBlock contains more than one time stamp, this method + compares each time stamp to its linearly interpolated equivalent time + stamp, based on the initial time stamp, the StreamSRI xdelta, and the + sample offset. The greatest deviation is reported; this difference + gives a rough indication of how severely the actual sample rate + deviates from the nominal sample rate on a packet-to-packet basis. + + Note: + If the SRI X-axis is not in units of time, this value has no + meaning. + + Returns: + float: Difference, in seconds, between expected and actual value. + """ + self._validateTimestamps() + max_drift = 0.0 + for index in xrange(1, len(self._timestamps)): + drift = _get_drift(self._timestamps[index-1], self._timestamps[index], self.xdelta) + if abs(drift) > abs(max): + max_drift = drift + return max_drift + + def _validateTimestamps(self): + if not self._timestamps: + raise Exception('block contains no timestamps') + elif self._timestamps[0].offset != 0: + raise Exception('no timestamp at offset 0') + + +class SampleDataBlock(DataBlock): + """ + Extended container for sample data types. + + SampleDataBlock provides additional methods for accessing the stored data + as either real or complex samples. + + Real vs. Complex Samples: + Because BulkIO streams support both real and complex sample data, blocks + store data internally as an array of real samples, and provide methods that + allow the user to interpret the data as either real or complex. When the + complex mode changes, this is typically indicated with the corresponding + SRI change flag (see sriChangeFlags). On a per-block basis, the complex + attribute indicates whether the sample data is intended to be handled as + real or complex: + + if block.complex: + for value in block.cxdata: + ... + else: + for value in block.data: + ... + """ + @property + def data(self): + """ + list: Sample data interpreted as Python numbers. + + The type of each element depends on the input stream. 
Integer types + return int or long values, while floating point types return float + values. + + To intepret the data as complex samples, use cxdata. + """ + return self._data + + @property + def size(self): + """ + int: The size of the data in terms of real samples. + """ + return len(self._data) + + @property + def complex(self): + """ + bool: Indicates whether data should be interpreted as complex samples. + + The sample data is considered complex if sri.mode is non-zero. + + If the data is complex, the offsets for the time stamps returned by + getTimestamps() are in terms of complex samples. + """ + return self.sri.mode != 0 + + @property + def cxdata(self): + """ + list(complex): Sample data interpreted as Python complex values. + + To interpret the data as real samples, use data. + """ + return _interleaved_to_complex(self.data) + + @property + def cxsize(self): + """ + int: The size of the data in terms of complex samples. + """ + return self.size / 2 diff --git a/bulkioInterfaces/libsrc/python/bulkio/input_ports.py b/bulkioInterfaces/libsrc/python/bulkio/input_ports.py new file mode 100644 index 000000000..8e90d32fc --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/input_ports.py @@ -0,0 +1,949 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + + +import threading +import collections +import copy +import time +import struct + +from ossie.utils import uuid +from ossie.cf import CF +from ossie.utils.notify import notification +from ossie.utils.log4py import logging +from redhawk.bitbuffer import bitbuffer + +from bulkio.statistics import InStats +import bulkio.sri +import bulkio.timestamp +from bulkio.const import BLOCKING, NON_BLOCKING +from bulkio.input_streams import InputStream, BufferedInputStream +from bulkio.datablock import DataBlock +from bulkio.bulkioInterfaces import BULKIO, BULKIO__POA + +class InPort(object): + DATA_BUFFER=0 + TIME_STAMP=1 + END_OF_STREAM=2 + STREAM_ID=3 + SRI=4 + SRI_CHG=5 + QUEUE_FLUSH=6 + + # Backwards-compatible DataTransfer type can still be unpacked like a tuple + # but also supports named fields + DataTransfer = collections.namedtuple('DataTransfer', 'dataBuffer T EOS streamID SRI sriChanged inputQueueFlushed') + + class Packet(object): + __slots__ = ('buffer', 'T', 'EOS', 'SRI', 'sriChanged', 'inputQueueFlushed') + def __init__(self, data, T, EOS, SRI, sriChanged, inputQueueFlushed): + self.buffer = data + self.T = T + self.EOS = EOS + self.SRI = SRI + self.sriChanged = sriChanged + self.inputQueueFlushed = inputQueueFlushed + + @property + def streamID(self): + return self.SRI.streamID + + def __init__(self, name, bits, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize): + self.name = name + self._portLog = logger + self.queue = collections.deque() + self._maxSize = maxsize + self._breakBlock = False + self.stats = InStats(name, bits=bits) + self.blocking = False + self.sri_cmp = sriCompare + self.newSriCallback = newSriCallback + self.sriChangeCallback = sriChangeCallback + self.sriDict = {} # key=streamID, value=StreamSRI + + self._dataBufferLock = threading.Lock() + self._dataAvailable = 
threading.Condition(self._dataBufferLock) + self._queueAvailable = threading.Condition(self._dataBufferLock) + + # Backwards-compatibility + self.port_lock = self._dataBufferLock + + # Synchronizes access to the SRIs + self._sriUpdateLock = threading.Lock() + + # Streams that are currently active (map of streamID to stream objects) + self._streams = {} + self._streamsMutex = threading.Lock() + + # Streams that have the same stream ID as an active stream, when an + # end-of-stream has been queued but not yet read (each entry in the map + # is a list of stream objects) + self._pendingStreams = {} + + if self._portLog is None: + self._portLog = logging.getLogger("redhawk.bulkio.input."+name) + + _cmpMsg = "DEFAULT" + _newSriMsg = "EMPTY" + _sriChangeMsg = "EMPTY" + if sriCompare != bulkio.sri.compare: + _cmpMsg = "USER_DEFINED" + if newSriCallback: + _newSriMsg = "USER_DEFINED" + if sriChangeCallback: + _sriChangeMsg = "USER_DEFINED" + + if self._portLog: + self._portLog.debug( "bulkio::InPort CTOR port:" + str(name) + + " Blocking/MaxInputQueueSize " + str(self.blocking) + "/" + str(maxsize) + + " SriCompare/NewSriCallback/SriChangeCallback " + _cmpMsg + "/" + _newSriMsg + "/" + _sriChangeMsg ); + + @notification + def streamAdded(self, stream): + """ + A new input stream was received. + + Args: + stream: New input stream. + """ + pass + + def addStreamListener(self, callback): + """ + Registers a callback for new streams. + + When a new input stream is created, `callback` is called with the new + input stream as its argument. + + Args: + callback: Callable object that takes one argument. + """ + self.streamAdded.addListener(callback) + + def removeStreamListener(self, callback): + """ + Unregisters a callback for new streams. + + Args: + callback: Previous registered callable object. 
+ """ + self.streamAdded.removeListener(callback) + + def setNewSriListener(self, newSriCallback): + with self._sriUpdateLock: + self.newSriCallback = newSriCallback + + def setSriChangeListener(self, sriChangeCallback): + with self._sriUpdateLock: + self.sriChangeCallback = sriChangeCallback + + def getLogger(self): + return self._portLog + + def setLogger(self, logger): + self._portLog = logger + + def enableStats(self, enabled): + self.stats.setEnabled(enabled) + + def _get_statistics(self): + with self._dataBufferLock: + return self.stats.retrieve() + + def _get_state(self): + with self._dataBufferLock: + if len(self.queue) == 0: + return BULKIO.IDLE + elif len(self.queue) == self._maxSize: + return BULKIO.BUSY + else: + return BULKIO.ACTIVE + + def _get_activeSRIs(self): + with self._sriUpdateLock: + return [self.sriDict[entry][0] for entry in self.sriDict] + + def getCurrentQueueDepth(self): + with self._dataBufferLock: + return len(self.queue) + + def getMaxQueueDepth(self): + with self._dataBufferLock: + return self._maxSize + + #set to -1 for infinite queue + def setMaxQueueDepth(self, newDepth): + with self._dataBufferLock: + self._maxSize = int(newDepth) + + def unblock(self): + with self._dataBufferLock: + self._breakBlock = False + + def block(self): + # Interrupt packet queue operations + with self._dataBufferLock: + self._breakBlock = True + self._dataAvailable.notifyAll() + self._queueAvailable.notifyAll() + + # Provide standard interface for start/stop + startPort = unblock + stopPort = block + + def pushSRI(self, H): + + if self._portLog: + self._portLog.trace( "bulkio::InPort pushSRI ENTER (port=" + str(self.name) +")" ) + + # If the updated SRI is blocking, ensure port blocking mode is set + if H.blocking: + with self._dataBufferLock: + self.blocking = True + + with self._sriUpdateLock: + if H.streamID not in self.sriDict: + new_stream = True + sri_changed = True + if self._portLog: + self._portLog.debug( "pushSRI PORT:" + str(self.name) + " NEW 
SRI:" + str(H.streamID) ) + if self.newSriCallback: + self.newSriCallback(H) + else: + new_stream = False + sri, sri_changed = self.sriDict[H.streamID] + if self.sri_cmp and not self.sri_cmp(sri, H): + sri_changed = True + if self.sriChangeCallback: + self.sriChangeCallback(H) + + if sri_changed: + self.sriDict[H.streamID] = (copy.deepcopy(H), True) + + if new_stream: + self._createStream(H) + + if self._portLog: + self._portLog.trace( "bulkio::InPort pushSRI EXIT (port=" + str(self.name) +")" ) + + def getPacket(self, timeout=NON_BLOCKING): + if self._portLog: + self._portLog.trace( "bulkio::InPort getPacket ENTER (port=" + str(self.name) +")" ) + + packet = self._nextPacket(timeout) + if not packet: + # Return an empty packet instead of None for backwards + # compatibility + packet = InPort.DataTransfer(None, None, None, None, None, None, None) + else: + packet = InPort.DataTransfer(packet.buffer, packet.T, packet.EOS, packet.streamID, packet.SRI, packet.sriChanged, packet.inputQueueFlushed) + + if self._portLog: + self._portLog.trace( "bulkio::InPort getPacket EXIT (port=" + str(self.name) +")" ) + + return packet + + def getCurrentStream(self, timeout=BLOCKING): + """ + Gets the stream that should be used for the next basic read. + + Args: + timeout: Seconds to wait for a stream; a negative value waits + indefinitely. + + Returns: + InputStream ready for reading on success. + None if timeout expires or port is stopped. + """ + # Prefer a stream that already has buffered data + with self._streamsMutex: + for stream in self._streams.itervalues(): + if stream._hasBufferedData(): + return stream + + # Otherwise, return the stream that owns the next packet on the queue, + # potentially waiting for one to be received + with self._dataBufferLock: + packet = self._peekPacket(timeout) + + if packet: + return self.getStream(packet.streamID) + else: + return None + + def getStream(self, streamID): + """ + Gets the active stream with the given stream ID. 
+ + Args: + streamID: Stream identifier. + + Returns: + Input stream for `streamID` if it exists. + None if no such stream ID exits. + """ + with self._streamsMutex: + return self._streams.get(streamID, None) + + def getStreams(self): + """ + Gets the current set of active streams. + + Returns: + List of input streams. + """ + with self._streamsMutex: + return self._streams.values() + + def pushPacket(self, data, T, EOS, streamID): + self._portLog.trace("pushPacket ENTER") + self._queuePacket(data, T, EOS, streamID) + self._portLog.trace("pushPacket EXIT") + + def _queuePacket(self, data, T, EOS, streamID): + # Discard packets for disabled streams + if not self._acceptPacket(streamID, EOS): + if EOS and self._disableBlockingOnEOS(streamID): + # If this was the only blocking stream, turn off blocking + with self._dataBufferLock: + self.blocking = False; + return + + # Discard empty packets if EOS is not set, as there is no useful data or + # metadata to be had--since T applies to the 1st sample (which does not + # exist), all we have is a stream ID + if not data and not EOS: + return + + if self._maxSize == 0: + return + + new_stream = False + with self._sriUpdateLock: + sri, sri_changed = self.sriDict.get(streamID, (None, True)) + if not sri: + # Unknown stream ID, register a new default SRI following the + # logic in pushSRI + self._portLog.warn("received data for stream '%s' with no SRI", streamID) + new_stream = True + sri = bulkio.sri.create(streamID) + if self.newSriCallback: + self.newSriCallback(sri) + + # Acknowledge SRI change was received; in the unknown stream case, + # this also records it in the map + self.sriDict[streamID] = (sri, False) + + # If a new stream needs to be created for an unrecognized stream ID, do + # it here after the lock is released + if new_stream: + self._createStream(sri) + + with self._dataBufferLock: + queue_flushed = False + + if self.blocking: + while len(self.queue) >= self._maxSize: + self._queueAvailable.wait() + else: + # 
Flush the queue if not using infinite queue (maxSize < 0), + # blocking is not on, and queue is currently full + if len(self.queue) >= self._maxSize and self._maxSize > -1: + queue_flushed = True + self._portLog.debug("bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE=%d)", len(self.queue)) + + # Need to hold the SRI mutex while flushing the queue + # because it may update SRI change state + with self._sriUpdateLock: + self._flushQueue() + + # Update the SRI change flag for this stream, which may + # have been modified during the queue flush + sri, sri_changed = self.sriDict[streamID] + self.sriDict[streamID] = (sri, False) + + self._portLog.trace("bulkio::InPort pushPacket NEW Packet (QUEUE=%d)", len(self.queue)) + self.stats.update(self._packetSize(data), float(len(self.queue))/float(self._maxSize), EOS, streamID, queue_flushed) + packet = InPort.Packet(data, T, EOS, sri, sri_changed, False) + self.queue.append(packet) + + # If a flush occurred, always set the flag on the first packet; + # this may not be the packet that was just inserted if there were + # any EOS packets on the queue + if queue_flushed: + self.queue[0].inputQueueFlushed = True + + # Let one waiting getPacket call know there is a packet available + self._dataAvailable.notify() + + def _flushQueue(self): + # Prerequisite: caller holds self._dataBufferLock and + # self._sriUpdateLock + sri_changed = set() + saved_packets = collections.deque() + for packet in self.queue: + if packet.EOS: + # Remove the SRI change flag for this stream, as further SRI + # changes apply to a different stream; set the SRI change flag + # for the EOS packet if there was one for this stream earlier + # in the queue + if packet.streamID in sri_changed: + packet.sriChanged = True + sri_changed.discard(packet.streamID) + + # Discard data (using a 0-length slice works with any sequence) + # and preserve the EOS packet + packet.buffer = packet.buffer[:0] + packet.inputQueueFlushed = False + saved_packets.append(packet) + elif 
packet.sriChanged: + sri_changed.add(packet.streamID) + + self.queue = saved_packets + + for stream_id in sri_changed: + # It should be safe to assume that an entry exists for the stream + # ID, but just in case, use get instead of operator[] + sri, _ = self.sriDict.get(stream_id, (None, None)) + if sri is not None: + self.sriDict[stream_id] = (sri, True) + + def _acceptPacket(self, streamID, EOS): + # Acquire streamsMutex for the duration of this call to ensure that + # end-of-stream is handled atomically for disabled streams + with self._streamsMutex: + # Find the current stream for the stream ID and check whether it's + # enabled + stream = self._streams.get(streamID, None); + if not stream or stream.enabled: + return True + + # If there's a pending stream, the packet is designated for that + pending_streams = self._pendingStreams.get(streamID, []) + if pending_streams: + return True + + new_stream = None + if EOS: + # Acknowledge the end-of-stream by removing the disabled stream + # before discarding the packet + self._portLog.debug("Removing stream '%s'", streamID); + stream._close(); + del self._streams[streamID] + + if pending_streams: + self._portLog.debug("Moving pending stream '%s' to active", streamID); + new_stream = pending_streams.pop(0) + self._streams[streamID] = new_stream + + # If a pending stream became active, notify listeners + if new_stream: + self.streamAdded(new_stream) + + return False; + + def _peekPacket(self, timeout): + # Requires self._dataBufferLock + to_time = time.time() + timeout + while not self._breakBlock and not self.queue: + if timeout == 0.0: + break + elif timeout > 0: + wait_time = to_time - time.time() + if wait_time <= 0.0: + break + self._dataAvailable.wait(wait_time) + else: + self._dataAvailable.wait() + + if self._breakBlock or not self.queue: + return None + else: + return self.queue[0] + + def _nextPacket(self, timeout, streamID=None): + if self._breakBlock: + return None + + to_time = time.time() + timeout + + with 
self._dataBufferLock: + packet = self._fetchPacket(streamID) + while not packet: + if timeout == 0.0: + return None + elif timeout > 0.0: + wait_time = to_time - time.time() + if wait_time <= 0.0: + return None + self._dataAvailable.wait(wait_time) + else: + self._dataAvailable.wait() + if self._breakBlock: + return None + packet = self._fetchPacket(streamID) + + #LOG_TRACE(logger, "InPort::nextPacket PORT:" << name << " (QUEUE="<< packetQueue.size() << ")"); + self._queueAvailable.notify() + + if packet and packet.EOS and self._disableBlockingOnEOS(packet.streamID): + with self._dataBufferLock: + self.blocking = False + + return packet + + def _fetchPacket(self, streamID): + # Prerequisite: caller holds self._dataBufferLock + if not streamID: + if not self.queue: + return None + return self.queue.popleft() + + for index in xrange(len(self.queue)): + if self.queue[index].streamID == streamID: + packet = self.queue[index] + del self.queue[index] + return packet + return None + + def _disableBlockingOnEOS(self, streamID): + with self._sriUpdateLock: + sri, _ = self.sriDict.pop(streamID, (None, None)) + if sri and sri.blocking: + for hdr, _ in self.sriDict.itervalues(): + if hdr.blocking: + return False + return True + + return False + + def _createStream(self, sri): + with self._streamsMutex: + if sri.streamID in self._streams: + # An active stream has the same stream ID; add this new stream + # to the pending list + self._portLog.debug("Creating pending stream '%s'", sri.streamID) + if not sri.streamID in self._pendingStreams: + self._pendingStreams[sri.streamID] = [] + self._pendingStreams[sri.streamID].append(sri) + stream = None + else: + # New stream + self._portLog.debug("Creating new stream '%s'", sri.streamID) + stream = self._streamType(sri, self) + self._streams[sri.streamID] = stream + + # Notify stream listeners (without the mutex held) + if stream: + self.streamAdded(stream) + + def _removeStream(self, streamID): + self._portLog.debug("Removing stream 
'%s'", streamID) + + new_stream = None + with self._streamsMutex: + # Remove the current stream, and if there's a pending stream with + # the same stream ID, move it to the active list + self._streams.pop(streamID, None); + pending_streams = self._pendingStreams.get(streamID, []) + if pending_streams: + self._portLog.debug("Moving pending stream '%s' active", streamID) + new_stream = pending_streams.pop(0) + self._streams[streamID] = new_stream + + if new_stream: + self.streamAdded(stream); + + def _discardPacketsForStream(self, streamID): + with self._dataBufferLock: + self.queue = [pkt for pkt in self.queue if pkt.streamID != streamID] + + def _streamType(self, sri, port): + return InputStream(sri, port) + + def _packetSize(self, data): + return len(data) + + def _reformat(self, data): + # Default behavior: no data reformatting is required + return data + +class InNumericPort(InPort): + def __init__(self, name, bits, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize): + InPort.__init__(self, name, bits, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + + def _streamType(self, sri, port): + return BufferedInputStream(sri, port) + +class InCharPort(InNumericPort, BULKIO__POA.dataChar): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 8, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + + def _reformat(self, data): + # Unpack the binary string as a list of signed bytes + return list(struct.unpack('%db' % len(data), data)) + +class InOctetPort(InNumericPort, BULKIO__POA.dataOctet): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 8, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + + def _reformat(self, data): + # Unpack the binary string as a list of unsigned bytes + return 
list(struct.unpack('%dB' % len(data), data)) + +class InShortPort(InNumericPort, BULKIO__POA.dataShort): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 16, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + +class InUShortPort(InNumericPort, BULKIO__POA.dataUshort): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 16, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + +class InLongPort(InNumericPort, BULKIO__POA.dataLong): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 32, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + +class InULongPort(InNumericPort, BULKIO__POA.dataUlong): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 32, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + +class InLongLongPort(InNumericPort, BULKIO__POA.dataLongLong): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 64, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + +class InULongLongPort(InNumericPort, BULKIO__POA.dataUlongLong): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 64, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + +class InFloatPort(InNumericPort, BULKIO__POA.dataFloat): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, 
sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 32, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + +class InDoublePort(InNumericPort, BULKIO__POA.dataDouble): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InNumericPort.__init__(self, name, 64, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + +class InBitPort(InPort, BULKIO__POA.dataBit): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100): + InPort.__init__(self, name, 1, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + + def pushPacket(self, data, T, EOS, streamID): + self._portLog.trace("pushPacket ENTER") + if isinstance(data, BULKIO.BitSequence): + data = bitbuffer(bytearray(data.data), data.bits) + self._queuePacket(data, T, EOS, streamID) + self._portLog.trace("pushPacket EXIT") + + def _streamType(self, sri, port): + return BufferedInputStream(sri, port, DataBlock) + +class InFilePort(InPort, BULKIO__POA.dataFile): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): + InPort.__init__(self, name, 8, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + + def _packetSize(self, data): + # For statistics, consider the entire URL a single element + return 1 + + +class InXMLPort(InPort, BULKIO__POA.dataXML): + def __init__(self, name, logger=None, sriCompare=bulkio.sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): + InPort.__init__(self, name, 8, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize) + + def pushPacket(self, xml_string, EOS, streamID): + # Insert a None for the timestamp and use parent implementation + InPort.pushPacket(self, xml_string, None, EOS, streamID) + +class InAttachablePort: + _TYPE_='b' + def __init__(self, name, logger=None, 
attachDetachCallback=None, sriCmp=bulkio.sri.compare, timeCmp=bulkio.timestamp.compare, PortType = _TYPE_, newSriCallback=None, sriChangeCallback=None,interface=None): + self.name = name + self._portLog = logger + self.port_lock = threading.Lock() + self.sri_query_lock = threading.Lock() + self._attachedStreams = {} # key=attach_id, value = (streamDef, userid) + self.stats = InStats(name, PortType ) + self.sriDict = {} # key=streamID, value=(StreamSRI, PrecisionUTCTime) + self.attachDetachCallback = attachDetachCallback + self.newSriCallback = newSriCallback + self.sriChangeCallback = sriChangeCallback + self.sri_cmp = sriCmp + self.time_cmp = timeCmp + self.sriChanged = False + if not interface: + if self._portLog: + self._portLog.error("InAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49") + raise CF.Port.InvalidPort(1, "InAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49") + self.interface=interface # BULKIO port interface (valid options are BULKIO.dataSDDS or BULKIO.dataVITA49) + self.setNewAttachDetachListener(attachDetachCallback) + if self._portLog: + self._portLog.debug("bulkio::InAttachablePort CTOR port:" + str(self.name) + " using interface " + str(self.interface)) + + def setNewAttachDetachListener(self, attachDetachCallback ): + self.port_lock.acquire() + try: + self.attachDetachCallback = attachDetachCallback + + # Set _attach_cb + try: + self._attach_cb = getattr(attachDetachCallback, "attach") + if not callable(self._attach_cb): + self._attach_cb = None + except AttributeError: + self._attach_cb = None + + # Set _detach_cb + try: + self._detach_cb = getattr(attachDetachCallback, "detach") + if not callable(self._detach_cb): + self._detach_cb = None + except AttributeError: + self._detach_cb = None + + finally: + self.port_lock.release() + + def setNewSriListener(self, newSriCallback): + self.port_lock.acquire() + try: + self.newSriCallback = 
newSriCallback + finally: + self.port_lock.release() + + def setSriChangeListener(self, sriChangeCallback): + self.port_lock.acquire() + try: + self.sriChangeCallback = sriChangeCallback + finally: + self.port_lock.release() + + def setBitSize(self, bitSize): + self.stats.setBitSize(bitSize) + + def enableStats(self, enabled): + self.stats.setEnabled(enabled) + + def updateStats(self, elementsReceived, queueSize, streamID): + self.port_lock.acquire() + try: + self.stats.update(elementsReceived, queueSize, streamID) + finally: + self.port_lock.release() + + def _get_statistics(self): + self.port_lock.acquire() + try: + recStat = self.stats.retrieve() + finally: + self.port_lock.release() + return recStat + + def _get_state(self): + self.port_lock.acquire() + try: + numAttachedStreams = len(self._attachedStreams.values()) + finally: + self.port_lock.release() + if numAttachedStreams == 0: + return BULKIO.IDLE + # default behavior is to limit to one connection + elif numAttachedStreams == 1: + return BULKIO.BUSY + else: + return BULKIO.ACTIVE + + def _get_attachedSRIs(self): + sris = [] + self.sri_query_lock.acquire() + try: + for entry in self.sriDict: + # First value of sriDict entry is the StreamSRI object + sris.append(copy.deepcopy(self.sriDict[entry][0])) + finally: + self.sri_query_lock.release() + return sris + + def _get_usageState(self): + self.port_lock.acquire() + try: + numAttachedStreams = len(self._attachedStreams.values()) + finally: + self.port_lock.release() + if numAttachedStreams == 0: + return self.interface.IDLE + # default behavior is to limit to one connection + elif numAttachedStreams == 1: + return self.interface.BUSY + else: + return self.interface.ACTIVE + + def _get_attachedStreams(self): + return [x[0] for x in self._attachedStreams.values()] + + def _get_attachmentIds(self): + return self._attachedStreams.keys() + + def attach(self, streamDef, userid): + + if self._portLog: + self._portLog.trace("bulkio::InAttachablePort attach ENTER 
(port=" + str(self.name) +")" ) + self._portLog.debug("InAttachablePort.attach() - ATTACH REQUEST, STREAM/USER" + str(streamDef) + '/' + str(userid)) + + attachId = None + self.port_lock.acquire() + try: + try: + if self._portLog: + self._portLog.debug("InAttachablePort.attach() - CALLING ATTACH CALLBACK, STREAM/USER" + str(streamDef) + '/' + str(userid) ) + if self._attach_cb != None: + attachId = self._attach_cb(streamDef, userid) + except Exception, e: + if self._portLog: + self._portLog.error("InAttachablePort.attach() - ATTACH CALLBACK EXCEPTION : " + str(e) + " STREAM/USER" + str(streamDef) + '/' + str(userid) ) + raise self.interface.AttachError(str(e)) + + if attachId == None: + attachId = str(uuid.uuid4()) + + self._attachedStreams[attachId] = (streamDef, userid) + + finally: + self.port_lock.release() + + if self._portLog: + self._portLog.debug("InAttachablePort.attach() - ATTACH COMPLETED, ID:" + str(attachId) + " STREAM/USER: " + str(streamDef) + '/' + str(userid)) + self._portLog.trace("bulkio::InAttachablePort attach EXIT (port=" + str(self.name) +")" ) + + return attachId + + def detach(self, attachId): + + if self._portLog: + self._portLog.trace("bulkio::InAttachablePort detach ENTER (port=" + str(self.name) +")" ) + self._portLog.debug("InAttachablePort.detach() - DETACH REQUESTED, ID:" + str(attachId) ) + + self.port_lock.acquire() + try: + if not self._attachedStreams.has_key(attachId): + + if self._portLog: + self._portLog.debug("InAttachablePort.detach() - DETACH UNKNOWN ID:" + str(attachId) ) + + if attachId: + raise self.interface.DetachError("Stream %s not attached" % str(attachId)) + else: + raise self.interface.DetachError("Cannot detach Unkown ID") + + attachedStreamDef, refcnf = self._attachedStreams[attachId] + + # + # Deallocate capacity here if applicable + # + try: + if self._portLog: + self._portLog.debug("InAttachablePort.detach() - CALLING DETACH CALLBACK, ID:" + str(attachId) ) + + if self._detach_cb != None: + 
self._detach_cb(attachId) + except Exception, e: + if self._portLog: + self._portLog.error("InAttachablePort.detach() - DETACH CALLBACK EXCEPTION: " + str(e) ) + raise self.interface.DetachError(str(e)) + + # Remove the attachment from our list + del self._attachedStreams[attachId] + + finally: + self.port_lock.release() + + if self._portLog: + self._portLog.debug("InAttachablePort.detach() - DETACH SUCCESS, ID:" + str(attachId) ) + self._portLog.trace("bulkio::InAttachablePort detach EXIT (port=" + str(self.name) +")" ) + + def getStreamDefinition(self, attachId): + try: + return self._attachedStreams[attachId][0] + except KeyError: + raise self.interface.StreamInputError("Stream %s not attached" % attachId) + + def getUser(self, attachId): + try: + return self._attachedStreams[attachId][1] + except KeyError: + raise self.interface.StreamInputError("Stream %s not attached" % attachId) + + def _get_activeSRIs(self): + self.sri_query_lock.acquire() + try: + activeSRIs = [self.sriDict[entry][0] for entry in self.sriDict] + finally: + self.sri_query_lock.release() + return activeSRIs + + def pushSRI(self, H, T): + + if self._portLog: + self._portLog.trace("bulkio::InAttachablePort pushSRI ENTER (port=" + str(self.name) +")" ) + + self.port_lock.acquire() + try: + if H.streamID not in self.sriDict: + if self.newSriCallback: + self.newSriCallback( H ) + # Disable querying while adding a new SRI + self.sri_query_lock.acquire() + try: + self.sriDict[H.streamID] = (copy.deepcopy(H), copy.deepcopy(T)) + finally: + self.sri_query_lock.release() + else: + cur_H, cur_T = self.sriDict[H.streamID] + s_same = False + if self.sri_cmp: + s_same = self.sri_cmp(cur_H, H) + + t_same = False + if self.time_cmp: + t_same = self.time_cmp(cur_T, T) + + self.sriChanged = ( s_same == False ) or ( t_same == False ) + if self.sriChanged and self.sriChangeCallback: + self.sriChangeCallback( H ) + # Disable querying while adding a new SRI + self.sri_query_lock.acquire() + try: + 
self.sriDict[H.streamID] = (copy.deepcopy(H), copy.deepcopy(T)) + finally: + self.sri_query_lock.release() + + finally: + self.port_lock.release() + + if self._portLog: + self._portLog.trace("bulkio::InAttachablePort pushSRI EXIT (port=" + str(self.name) +")" ) + +class InSDDSPort(BULKIO__POA.dataSDDS,InAttachablePort): + def __init__(self, name, logger=None, attachDetachCallback=None, sriCmp=None, timeCmp=None, PortType = 'b', newSriCallback=None, sriChangeCallback=None ): + InAttachablePort.__init__(self, name, logger, attachDetachCallback, sriCmp, timeCmp, PortType, newSriCallback, sriChangeCallback, interface=BULKIO.dataSDDS) + +class InVITA49Port(BULKIO__POA.dataVITA49,InAttachablePort): + def __init__(self, name, logger=None, attachDetachCallback=None, sriCmp=None, timeCmp=None, PortType = 'b', newSriCallback=None, sriChangeCallback=None ): + InAttachablePort.__init__(self, name, logger, attachDetachCallback, sriCmp, timeCmp, PortType, newSriCallback, sriChangeCallback, interface=BULKIO.dataVITA49) diff --git a/bulkioInterfaces/libsrc/python/bulkio/input_streams.py b/bulkioInterfaces/libsrc/python/bulkio/input_streams.py new file mode 100644 index 000000000..eee45ed77 --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/input_streams.py @@ -0,0 +1,750 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +from bulkio.stream_base import StreamBase +from bulkio.datablock import DataBlock, SampleDataBlock +import bulkio.const + +__all__ = ('InputStream', 'BufferedInputStream') + +EOS_NONE = 0 +EOS_RECEIVED = 1 +EOS_REACHED = 2 +EOS_REPORTED = 3 + +class InputStream(StreamBase): + """ + Basic BulkIO input stream class. + + InputStream encapsulates a single BulkIO stream for reading. It is + associated with the input port that created it, providing a file-like API + on top of the classic BulkIO getPacket model. + + Notionally, a BulkIO stream represents a contiguous data set and its + associated signal-related information (SRI), uniquely identified by a + stream ID, from creation until close. The SRI may vary over time, but the + stream ID is immutable. Only one stream with a given stream ID can be + active at a time. + + Input streams are managed by the input port, and created in response to + the arrival of a new SRI. Valid input streams are obtained by either + querying the port, or registering a callback. + + End-of-Stream: + In normal usage, reading continues until the end of the stream is reached, + at which point all future read operations will fail immediately. When a + read fails, it is incumbent upon the caller to check the stream's + end-of-stream state via eos(). Once the end-of-stream has been + acknowledged, either by an explicit check or with a subsequent failed read, + the stream is removed from the input port. If the input port has another + stream with the same streamID pending, it will become active. + + Although the input port may have received and end-of-stream packet, this + state is not reflected in eos(). 
As with Unix pipes or sockets, the + recommended pattern is to continually read until a failure occurs, handling + the failure as needed. + + See Also: + InPort.getCurrentStream + InPort.getStream + InPort.getStreams + InPort.addStreamListener + """ + def __init__(self, sri, port, blockType=DataBlock): + """ + Create an InputStream. + + Warning: + Input streams are created by the input port. This constructor + should not be called directly. + + See Also: + InPort.getCurrentStream + InPort.getStream + """ + StreamBase.__init__(self, sri) + self._port = port + self._eosState = EOS_NONE + self.__enabled = True + self.__newstream = True + self.__blockType = blockType + + def read(self): + """ + Blocking read of the next packet for this stream. + + The read may fail if: + * End-of-stream has been reached + * The input port is stopped + + Returns: + DataBlock if successful. + None if the read failed. + """ + return self._readPacket(True) + + def tryread(self): + """ + Non-blocking version of read(). + + The read returns immediately whether data is available or not. + + Returns: + DataBlock if successful. + None if no data is available or the read failed. + """ + return self._readPacket(False) + + @property + def enabled(self): + """ + bool: Indicates whether this stream can receive data. + + If a stream is enabled, packets received for its stream ID are queued + in the input port, and the stream may be used for reading. Conversely, + packets for a disabled stream are discarded, and no reading may be + performed. + """ + return self.__enabled + + def enable(self): + """ + Enable this stream for reading data. + + The input port will resume queuing packets for this stream. + """ + # Changing the enabled flag requires holding the port's streamsMutex + # (that controls access to the stream map) to ensure that the change + # is atomic with respect to handling end-of-stream packets. 
Otherwise, + # there is a race condition between the port's IO thread and the + # thread that enables the stream--it could be re-enabled and start + # reading in between the port checking whether to discard the packet + # and closing the stream. Because it is assumed that the same thread + # that calls enable is the one doing the reading, it is not necessary + # to apply mutual exclusion across the entire public stream API, just + # enable/disable. + with self._port._streamsMutex: + self.__enabled = True + + def disable(self): + """ + Disable this stream for reading data. + + The input port will discard any packets that are currently queued for + this stream, and all future packets for this stream will be discarded + upon receipt until an end-of-stream is received. + + Disabling unwanted streams may improve performance and queueing + behavior by reducing the number of queued packets on a port. + """ + # See above re: locking + with self._port._streamsMutex: + self.__enabled = False + + # Unless end-of-stream has been received by the port (meaning any further + # packets with this stream ID are for a different instance), purge any + # packets for this stream from the port's queue + if self._eosState == EOS_NONE: + self._port._discardPacketsForStream(self.streamID); + + self._disable() + + def _disable(self): + # Subclasses may override this method to add additional behavior on + # disable() + pass + + def eos(self): + """ + Checks whether this stream has ended. + + A stream is considered at the end when it has read and consumed all + data up to the end-of-stream marker. Once end-of-stream has been + reached, all read operations will fail immediately, as no more data + will ever be received for this stream. + + The recommended practice is to check @a eos any time a read operation + fails or returns fewer samples than requested. 
When the end-of-stream + is acknowledged, either by checking @a eos or when successive reads + fail due to an end-of-stream, the stream is removed from the input + port. If the input port has another stream with the same streamID + pending, it will become active. + + Returns: + True if this stream has reached the end. + False if the end of stream has not been reached. + """ + return self._checkEos() + + def _checkEos(self): + # Internal method to check for end-of-stream. Subclasses should extend + # or override this method. + + self._reportIfEosReached() + # At this point, if end-of-stream has been reached, the state is + # reported (it gets set above), so the checking for the latter is + # sufficient + return self._eosState == EOS_REPORTED; + + def _hasBufferedData(self): + # For the base class, there is no data to report; however, to nudge + # the check end-of-stream, return true if it has been reached but not + # reported + return self._eosState == EOS_REACHED; + + def _close(self): + # NB: This method is always called by the port with streamsMutex held + + # Consider end-of-stream reported, since the stream has already been + # removed from the port; otherwise, there's nothing to do + self._eosState = EOS_REPORTED; + + def _readPacket(self, blocking): + packet = self._fetchNextPacket(blocking) + if self._eosState == EOS_RECEIVED: + self._eosState = EOS_REACHED + if not packet or (packet.EOS and (len(packet.buffer) == 0)): + self._reportIfEosReached() + return None + + # Turn packet into a data block + sri_flags = self._getSriChangeFlags(packet) + block = self._createBlock(packet.SRI, packet.buffer, sri_flags, packet.inputQueueFlushed) + # Only add a timestamp if one was given (XML does not include one in + # the CORBA interface, but passes a None to adapt to the common port + # implementation) + if packet.T is not None: + block.addTimestamp(packet.T) + + # Update local SRI from packet + self._sri = packet.SRI + return block + + def _fetchNextPacket(self, 
blocking): + # Don't fetch a packet from the port if stream is disabled + if not self.__enabled: + return None + + # Any future packets with this stream ID belong to another InputStream + if self._eosState != EOS_NONE: + return None + + if blocking: + timeout = bulkio.const.BLOCKING + else: + timeout = bulkio.const.NON_BLOCKING + packet = self._port._nextPacket(timeout, self.streamID) + if packet: + # Data conversion (no-op for all types except dataChar/dataByte) + packet.buffer = self._port._reformat(packet.buffer) + if packet.EOS: + self._eosState = EOS_RECEIVED + + return packet + + def _reportIfEosReached(self): + if self._eosState == EOS_REACHED: + # This is the first time end-of-stream has been checked since it + # was reached; remove the stream from the port now, since the + # caller knows that the stream ended + self._port._removeStream(self.streamID); + self._eosState = EOS_REPORTED; + + def _getSriChangeFlags(self, packet): + if self.__newstream: + self.__newstream = False + return -1 + elif packet.sriChanged: + return bulkio.sri.compareFields(self._sri, packet.SRI) + else: + return bulkio.sri.NONE + + def _createBlock(self, *args, **kwargs): + return self.__blockType(*args, **kwargs) + + +class BufferedInputStream(InputStream): + """ + BulkIO input stream class with data buffering. + + BufferedInputStream extends InputStream with additional methods for + data buffering and overlapped reads. + + Data Buffering: + Often, signal processing algorithms prefer to work on regular, fixed-size + blocks of data. However, because the producer is working independently, + data may be received in entirely different packet sizes. For this use case, + the read method accepts an optional size argument that frees the user from + managing their own data buffering. + + To maintain the requested size, partial packets may be buffered, or a read + may span multiple packets. 
Packets are fetched from the input port needed; + however, if an SRI change or input queue flush is encountered, the + operation will stop, therefore, data is only read up to that point. The + next read operation will continue at the beginning of the packet that + contains the new SRI or input queue flush flag. + + Time Stamps: + The data block from a successful read always includes as least one time + stamp, at a sample offset of 0. Because buffered reads may not begin on a + packet boundary, the input stream can interpolate a time stamp based on the + SRI xdelta value and the prior time stamp. When this occurs, the time stamp + will be marked as "synthetic." + + Reads that span multiple packets will contain more than one time stamp. + The time stamp offsets indicate at which sample the time stamp occurs, + taking real or complex samples into account. Only the first time stamp can + be synthetic. + + Overlapped Reads: + Certain classes of signal processing algorithms need to preserve a portion + of the last data set for the next iteration, such as a power spectral + density (PSD) calculation with overlap. The read method supports this mode + of operation by allowing the reader to consume fewer samples than are + read. This can be thought of as a separate read pointer that trails behind + the stream's internal buffer. + + When an overlapped read needs to span multiple packets, but an SRI change, + input queue flush, or end-of-stream is encountered, all of the available + data is returned and consumed, equivalent to read with no consume length + specified. The assumption is that special handling is required due to the + pending change, and it is not possible for the stream to interpret the + relationship between the read size and consume size. + + Non-Blocking Reads: + For each read method, there is a corresponsing tryread method that is + non-blocking. 
If there is not enough data currently available to satisfy + the request, but more data could become available in the future, the + operation will return a null data block immediately. + + End-of-Stream: + The end-of-stream behavior of BufferedInputStream is consistent with + InputStream, with the additional caveat that a read may return fewer + samples than requested if an end-of-stream packet is encountered. + + See Also: + InputStream + """ + def __init__(self, sri, port, blockType=SampleDataBlock): + """ + Create a BufferedInputStream. + + Warning: + Input streams are created by the input port. This constructor + should not be called directly. + + See Also: + InPort.getCurrentStream + InPort.getStream + """ + InputStream.__init__(self, sri, port, blockType) + self.__queue = [] + self.__samplesQueued = 0 + self.__sampleOffset = 0 + self.__pending = None + + def read(self, count=None, consume=None): + """ + Blocking read with optional size and overlap. + + If neither `count` nor `consume` are given, performs a blocking read up + to the next packet boundary. + + When `count` is given without `consume`, performs a blocking read of + `count` samples worth of data. For signal processing operations that + require a fixed input data size, such as fast Fourier transform (FFT), + this simplifies buffer management by offloading it to the stream. This + usually incurs some computational overhead to copy data between + buffers; however, this cost is intrinsic to the algorithm, and the + reduced complexity of implementation avoids common errors. + + When both `count` and `consume` are given, performs a blocking read of + `count` samples worth of data, but only advances the read pointer by + `consume` samples. The remaining `count-consume` samples are buffered + and will be returned on the following read operation. This mode is + designed to support signal processing operations that require + overlapping data sets, such as power spectral density (PSD). 
+ + If the SRI indicates that the data is complex, `count` and `consume` + are interpreted in terms of complex samples. + + If any of the following conditions are encountered while fetching + packets, the returned data block may contain fewer samples than + requested: + * End-of-stream + * SRI change + * Input queue flush + + When this occurs, all of the returned samples are consumed unless + `consume` is 0, as it is assumed that special handling is required. + + Args: + count: Number of samples to read. + consume: Number of samples to advance read pointer. + + Returns: + DataBlock if successful. + None if the read failed. + + Raises: + ValueError: `consume` was specified without `count`. + ValueError: `consume` is larger than `count`. + """ + if count is None: + if consume is not None: + raise ValueError('cannot specify consume without count') + elif consume > count: + raise ValueError('cannot specify consume larger than count') + return InputStream.read(self) + return self._read(count, consume, True) + + def tryread(self, count=None, consume=None): + """ + Non-blocking read with optional size and overlap. + + Non-blocking version of read(), returning None immediately when no data + is available. + + Args: + count: Number of samples to read. + consume: Number of samples to advance read pointer. + + Returns: + DataBlock if successful. + None if no data is available or the read failed. + + Raises: + ValueError: `consume` was specified without `count`. + ValueError: `consume` is larger than `count`. + + See Also: + BufferedInputStream.read() + """ + if count is None: + return InputStream.tryread(self) + return self._read(count, consume, False) + + def skip(self, count): + """ + Discards data. + + Skips the next `count` samples worth of data and blocks until the + requested amount of data is available. If the data is not being used, + this is more computationally efficient than the equivalent call to read + because no buffering is performed. 
+ + If the SRI indicates that the data is complex, `count` and the return + value are in terms of complex samples. + + Skipping behaves like read when fetching packets. If any of the + following conditions are encountered, the returned value may be less + than count: + * End-of-stream + * SRI change + * Input queue flush + * The input port is stopped + + Args: + count: Number of samples to skip. + + Returns: + int: Actual number of samples skipped. + """ + # If the next block of data is complex, double the skip size (which the + # lower-level I/O handles in terms of scalars) so that the right number + # of samples is skipped + sri = self._nextSRI(True) + if not sri: + return 0 + + item_size = 2 if (sri.mode != 0) else 1 + count *= item_size; + + # Queue up packets from the port until we have enough data to satisfy + # the requested read amount + while self.__samplesQueued < count: + if not self._fetchPacket(True): + break + + count = min(count, self.__samplesQueued) + self._consumeData(count) + + # Convert scalars back to samples + return count / item_size + + def _disable(self): + # NB: A lock is not required to modify the internal stream queue state, + # because it should only be accessed by the thread that is reading from + # the stream + + # Clear queued packets and pending packet + self.__queue = [] + self.__sampleOffset = 0 + self.__samplesQueued = 0 + self.__pending = 0 + + def _checkEos(self): + if not self.__queue: + # Try a non-blocking fetch to see if there's an empty end-of-stream + # packet waiting; this helps with the case where the last read + # consumes exactly the remaining data, and the stream will never + # report a ready state again + self._fetchPacket(False) + + return InputStream._checkEos(self) + + def _hasBufferedData(self): + if self.__queue or self.__pending: + # Return true if either there are queued or pending packets + return True + return InputStream._hasBufferedData(self) + + def _readPacket(self, blocking): + if self.__samplesQueued 
== 0: + self._fetchPacket(blocking) + + if self.__samplesQueued == 0: + # It's possible that there are no samples queued because of an + # end-of-stream; if so, report it so that this stream can be + # dissociated from the port + self._reportIfEosReached() + return None + + # Only read up to the end of the first packet in the queue + samples = len(self.__queue[0].buffer) - self.__sampleOffset; + return self._readData(samples, samples) + + def _read(self, count, consume, blocking): + # Consume length not specified, consume entire read + if consume is None: + consume = count + + # Try to get the SRI for the upcoming block of data, fetching it from + # the port's input queue if necessary + sri = self._nextSRI(blocking); + if not sri: + # No SRI retreived implies no data will be retrieved, either due + # to end-of-stream or because it would block + self._reportIfEosReached() + return None + + # If the next block of data is complex, double the read and consume + # size (which the lower-level I/O handles in terms of scalars) so that + # the returned block has the right number of samples + if sri.mode != 0: + count = count * 2 + consume = consume * 2 + + # Queue up packets from the port until we have enough data to satisfy + # the requested read amount + while self.__samplesQueued < count: + if not self._fetchPacket(blocking): + break + + if self.__samplesQueued == 0: + # As above, it's possible that there are no samples due to an end- + # of-stream + self._reportIfEosReached() + return None + + # Only read as many samples as are available (e.g., if a new SRI is + # coming or the stream reached the end) + samples = min(count, self.__samplesQueued); + + # Handle a partial read, which could mean that there's not enough data + # at present (non-blocking), or that the read pointer has reached the + # end of a segment (new SRI, queue flush, end-of-stream) + if samples < count: + # Non-blocking: return None if there's not currently a break in the + # data, under the assumption 
that a future read might return the + # full amount + if (not blocking) and (not self.__pending) and (self._eosState == EOS_NONE): + return None + + # Otherwise, consume all remaining data (when not explicitly + # requested as 0) + if consume != 0: + consume = samples + + return self._readData(samples, consume) + + def _consumeData(self, count): + while count > 0: + data = self.__queue[0].buffer + data_len = len(data) + + available = data_len - self.__sampleOffset + nelem = min(available, count) + + self.__sampleOffset += nelem + self.__samplesQueued -= nelem + count -= nelem + + if self.__sampleOffset >= data_len: + # Read pointer has passed the end of the packed data + self._consumePacket() + self.__sampleOffset = 0 + + def _consumePacket(self): + # Acknowledge any end-of-stream flag and delete the packet + front = self.__queue.pop(0) + if front.EOS: + self._eosState = EOS_REACHED + + # If the queue is empty, move the pending packet onto the queue + if not self.__queue and self.__pending: + self._queuePacket(self.__pending); + self.__pending = 0 + + def _readData(self, count, consume): + # SRI and flags are taken from the front packet + front = self.__queue[0] + + last_offset = self.__sampleOffset + count + if last_offset <= len(front.buffer): + # The requsted sample count can be satisfied from the first packet + time_stamps = [self._getTimestamp(front.SRI, self.__sampleOffset, 0, front.T)] + data = front.buffer[self.__sampleOffset:last_offset] + else: + # We have to span multiple packets to get the data + time_stamps, data = self._mergePacketData(count) + + # Allocate data block and propagate the SRI change and input queue + # flush flags + sri_flags = self._getSriChangeFlags(front) + block = self._createBlock(front.SRI, data, sri_flags, front.inputQueueFlushed) + if front.sriChanged: + # Update the stream metadata + self._sri = front.SRI + + # Add time stamps calculated above + for (ts, offset, synthetic) in time_stamps: + block.addTimestamp(ts, offset, 
synthetic) + + # Clear flags from packet, since they've been reported + front.sriChanged = False; + front.inputQueueFlushed = False; + + # Advance the read pointers + self._consumeData(consume) + + return block + + def _mergePacketData(self, count): + # Assembles data and calculates time stamps spanning several input + # packets + front = self.__queue[0] + + data = type(front.buffer)() + time_stamps = [] + data_offset = 0 + + packet_offset = self.__sampleOffset + for packet in self.__queue: + # Add the timestamp for this pass + time_stamps.append(self._getTimestamp(front.SRI, packet_offset, data_offset, packet.T)) + + # The number of samples copied on this pass may be less than + # the total remaining + available = len(packet.buffer) - packet_offset; + nelem = min(available, count); + + # Append chunk to buffer and advance counters + data += packet.buffer[packet_offset:packet_offset+nelem] + data_offset += nelem + count -= nelem + + # Next chunk (if any) will be from the next packet, starting at + # the beginning + packet_offset = 0 + + # Finished? + if count == 0: + break + + return (time_stamps, data) + + def _getTimestamp(self, sri, inputOffset, outputOffset, time): + # Determine the timestamp of this chunk of data; if this is the first + # chunk, the packet offset (number of samples already read) must be + # accounted for, so adjust the timestamp based on the SRI. Otherwise, + # the adjustment is a noop. 
+ time_offset = inputOffset * sri.xdelta + if sri.mode != 0: + # Complex data; each sample is two values + time_offset /= 2.0 + outputOffset /= 2 + + # If there is a time offset, apply the adjustment and mark the + # timestamp so that the caller knows it was calculated rather than + # received + if time_offset > 0.0: + time = time + time_offset + synthetic = True + else: + synthetic = False + + return (time, outputOffset, synthetic) + + def _nextSRI(self, blocking): + if not self.__queue: + if not self._fetchPacket(blocking): + return None + + return self.__queue[0].SRI + + def _fetchPacket(self, blocking): + if self.__pending: + # Cannot read another packet until non-bridging packet is + # acknowledged + return False + + packet = self._fetchNextPacket(blocking) + if not packet: + return False + + if not self.__queue or self._canBridge(packet): + return self._queuePacket(packet) + else: + self.__pending = packet; + return False + + def _queuePacket(self, packet): + if packet.EOS and not packet.buffer: + # Handle end-of-stream packet with no data (assuming that + # timestamps, SRI changes, and queue flushes are irrelevant at this + # point) + if not self.__queue: + # No queued packets, read pointer has reached end-of-stream + self._eosState = EOS_REACHED; + else: + # Assign the end-of-stream flag to the last packet in the queue + # so that it is handled on read + self.__queue[-1].EOS = True + # Let the caller know that no more sample data is forthcoming + return False + else: + # Add the packet to the queue + self.__samplesQueued += len(packet.buffer) + self.__queue.append(packet); + return True + + def _canBridge(self, packet): + return not (packet.sriChanged or packet.inputQueueFlushed) diff --git a/bulkioInterfaces/libsrc/python/bulkio/output_ports.py b/bulkioInterfaces/libsrc/python/bulkio/output_ports.py new file mode 100644 index 000000000..355777119 --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/output_ports.py @@ -0,0 +1,1220 @@ +# +# This file is 
protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import threading +import copy +import time +import sys +import struct + +from ossie.cf import CF, ExtendedCF +from ossie.cf.CF import Port +from ossie.utils import uuid +from ossie.properties import simple_property +from ossie.utils.log4py import logging +from redhawk.bitbuffer import bitbuffer + +from bulkio.statistics import OutStats +import bulkio.sri +from bulkio import timestamp +from bulkio.bulkioInterfaces import BULKIO, BULKIO__POA +from bulkio.const import MAX_TRANSFER_BYTES +from bulkio.output_streams import * +import traceback + +class connection_descriptor_struct(object): + connection_id = simple_property(id_="connectionTable::connection_id", + name="connection_id", + type_="string") + + stream_id = simple_property(id_="connectionTable::stream_id", + name="stream_id", + type_="string") + + port_name = simple_property(id_="connectionTable::port_name", + name="port_name", + type_="string") + + def __init__(self, connection_id="", stream_id="", port_name=""): + self.connection_id = connection_id + self.stream_id = stream_id + self.port_name = port_name + + def __str__(self): + """Return a string representation of this 
structure""" + d = {} + d["connection_id"] = self.connection_id + d["stream_id"] = self.stream_id + d["port_name"] = self.port_name + return str(d) + + def getId(self): + return "connectionTable::connection_descriptor" + + def isStruct(self): + return True + + def getMembers(self): + return [("connection_id",self.connection_id),("stream_id",self.stream_id),("port_name",self.port_name)] + + +class OutPort(BULKIO__POA.UsesPortStatisticsProvider): + + class SriMapStruct: + def __init__( self, sri=None, connections=None, time=None): + self.sri=sri + self.connections = connections #set of connection ID strings that have received this SRI + self.time=time + + def __init__(self, name, PortTypeClass, PortTransferType, logger=None, dataType=list, bits=0): + # Backwards-compatibility: accept an element type string for use with + # struct.calcsize + if bits == 0: + bits = struct.calcsize(PortTransferType) * 8 + self.name = name + self._portLog = logger + self.PortType = PortTypeClass + self.PortTransferType=PortTransferType + self.outConnections = {} # key=connectionId, value=port + self.stats = OutStats(self.name, bits=bits) + self.port_lock = threading.Lock() + self.sriDict = {} # key=streamID value=SriMapStruct + self.filterTable = [] + + # Data type class + self._dataType = dataType + # Retain noData member for backwards-compatibility + self.noData = dataType() + + # Determine maximum transfer size in advance + self._bitSize = bits + # Multiply by some number < 1 to leave some margin for the CORBA header + self.maxSamplesPerPush = 8 * int(MAX_TRANSFER_BYTES*.9) / self._bitSize + # Retain byte size for backwards-compatibility + self.byteSize = self._bitSize / 8 + + self._streams = {} + + if self._portLog == None: + self._portLog = logging.getLogger("redhawk.bulkio.outport."+name) + + def getLogger(self): + return self._portLog + + def setLogger(self, logger): + self._portLog = logger + + def connectPort(self, connection, connectionId): + if self._portLog: + 
self._portLog.trace('bulkio::OutPort connectPort ENTER ') + + if connection is None: + raise CF.Port.InvalidPort(1, 'Nil object reference') + + # Attempt to check the type of the remote object to reject invalid + # types; note this does not require the lock + repo_id = self.PortType._NP_RepositoryId + try: + valid = connection._is_a(repo_id) + except: + # If _is_a throws an exception, assume the remote object is + # unreachable (probably dead) + raise CF.Port.InvalidPort(1, 'Object unreachable') + + if not valid: + raise CF.Port.InvalidPort(1, 'Object does not support '+repo_id) + + port = connection._narrow(self.PortType) + + # Acquire the state lock before modifying the container + with self.port_lock: + # Prevent duplicate connection IDs + if str(connectionId) in self.outConnections: + raise Port.OccupiedPort() + + self.outConnections[str(connectionId)] = port + self.stats.add(connectionId) + + if self._portLog: + self._portLog.debug('bulkio::OutPort CONNECT PORT:%s CONNECTION:%s', self.name, connectionId) + + if self._portLog: + self._portLog.trace('bulkio::OutPort connectPort EXIT ') + + def disconnectPort(self, connectionId): + if self._portLog: + self._portLog.trace('bulkio::OutPort disconnectPort ENTER ') + + with self.port_lock: + port = self.outConnections.pop(connectionId, None) + if not port: + raise CF.Port.InvalidPort(2, 'No connection '+connectionId) + + for stream_id in self.sriDict.iterkeys(): + if not self._isStreamRoutedToConnection(stream_id, connectionId): + continue + + try: + self._sendPacket(port, self._dataType(), timestamp.notSet(), True, stream_id) + except Exception, e: + if self._portLog: + self._portLog.error("PUSH-PACKET FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connectionId, e) + + self.stats.remove(connectionId) + for key in self.sriDict.keys(): + # if connID exist in set, remove it, otherwise do nothing (that is what discard does) + self.sriDict[key].connections.discard(connectionId) + if self._portLog: + 
self._portLog.debug( "bulkio::OutPort DISCONNECT PORT:%s CONNECTION:%s", self.name, connectionId) + self._portLog.trace( "bulkio::OutPort DISCONNECT PORT:%s updatedSriDict%s", self.name, self.sriDict) + + if self._portLog: + self._portLog.trace('bulkio::OutPort disconnectPort EXIT ') + + def enableStats(self, enabled): + self.stats.setEnabled(enabled) + + def setBitSize(self, bitSize): + self.stats.setBitSize(bitSize) + + def reportConnectionErrors(self, cid): + retval=False + if ( self.stats.connectionErrors(cid, 1) < 11 ): retval=True + return retval + + def _get_connections(self): + currentConnections = [] + self.port_lock.acquire() + try: + for id_, port in self.outConnections.items(): + currentConnections.append(ExtendedCF.UsesConnection(id_, port)) + finally: + self.port_lock.release() + return currentConnections + + def _get_connectionStatus(self): + connectionStatus = [] + with self.port_lock: + for id_, port in self.outConnections.items(): + connectionStatus.append(ExtendedCF.ConnectionStatus(id_, port, True, 'CORBA', [])) + return connectionStatus + + def _get_statistics(self): + self.port_lock.acquire() + try: + recStat = self.stats.retrieve() + finally: + self.port_lock.release() + return recStat + + def _get_state(self): + self.port_lock.acquire() + try: + numberOutgoingConnections = len(self.outConnections) + finally: + self.port_lock.release() + if numberOutgoingConnections == 0: + return BULKIO.IDLE + else: + return BULKIO.ACTIVE + + def _get_activeSRIs(self): + self.port_lock.acquire() + try: + sris = [] + for entry in self.sriDict: + sris.append(copy.deepcopy(self.sriDict[entry].sri)) + finally: + self.port_lock.release() + return sris + + def updateConnectionFilter(self, _filterTable): + self.port_lock.acquire() + try: + if _filterTable == None : + _filterTable = [] + self.filterTable = _filterTable + finally: + self.port_lock.release() + + def pushSRI(self, H): + if self._portLog: + self._portLog.trace('bulkio::OutPort pushSRI ENTER ') + + with 
self.port_lock: + sri = copy.deepcopy(H) + self.sriDict[H.streamID] = OutPort.SriMapStruct(sri=sri, connections=set()) + if not H.streamID in self._streams: + self._streams[H.streamID] = self._createStream(sri) + + for connId, port in self.outConnections.iteritems(): + if not self._isStreamRoutedToConnection(H.streamID, connId): + continue + + try: + port.pushSRI(H) + self.sriDict[H.streamID].connections.add(connId) + except Exception, e: + if self.reportConnectionErrors(connId) : + if self._portLog: + self._portLog.error("PUSH-SRI FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) + + if self._portLog: + self._portLog.trace('bulkio::OutPort pushSRI EXIT ') + + def getStream(self, streamID): + """ + Get the active stream with the given stream ID. + + Args: + streamID: String stream identifier. + + Returns: + Output stream for `streamID` if it exists. + None if no such stream ID exists. + """ + with self.port_lock: + return self._streams.get(streamID, None) + + def getStreams(self): + """ + Gets the current set of active streams. + + Returns: + List of output streams. + """ + with self.port_lock: + return self._streams.values() + + def createStream(self, stream): + """ + Creates a new output stream. + + If `stream` is a string, a new output stream is created with stream ID + `stream` and default values for the SRI. If an output stream with that + stream ID already exists, it is returned unmodified. + + If `stream` is a BULKIO.StreamSRI, a new output stream is created with + the same SRI values as `stream`. If an output stream with the same + stream ID already exists, its SRI is updated to match `stream`. + + Args: + stream: String stream identifier or BULKIO.StreamSRI. + + Returns: + Newly-created or updated output stream. 
+ """ + with self.port_lock: + if isinstance(stream, BULKIO.StreamSRI): + # Try to find an existing stream with the same streamID, and + # update it + sri = stream + stream = self._streams.get(sri.streamID, None) + if stream: + # Update the stream's SRI from the argument + stream.sri = sri + return stream + else: + # Assume we were given a stream ID + stream_id = stream + stream = self._streams.get(stream_id, None) + if stream: + return stream + + # Create an SRI with the given streamID + sri = bulkio.sri.create(stream_id) + + # No existing stream was found, create one + stream = self._createStream(sri) + self._streams[sri.streamID] = stream + return stream + + def _createStream(self, sri): + return OutputStream(sri, self, self._dataType) + + def _pushPacket(self, data, T, EOS, streamID): + # Prerequisite: caller holds self.port_lock + packet_size = self._packetSize(data) + if self._portLog: + self._portLog.trace("_pushPacket() sending packet size=%d time=%s EOS=%s streamID='%s'", + packet_size, T, EOS, streamID) + + for connId, port in self.outConnections.iteritems(): + if not self._isStreamRoutedToConnection(streamID, connId): + continue + try: + if connId not in self.sriDict[streamID].connections and packet_size == 0: + # connection is being closed but no data was ever sent, so ignore + continue + if connId not in self.sriDict[streamID].connections: + port.pushSRI(self.sriDict[streamID].sri) + self.sriDict[streamID].connections.add(connId) + self._sendPacket(port, data, T, EOS, streamID) + self.stats.update(packet_size, 0, EOS, streamID, connId) + except Exception, e: + if self.reportConnectionErrors(connId) : + if self._portLog: + self._portLog.exception("PUSH-PACKET FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) + + if EOS: + if self.sriDict.has_key(streamID): + tmp = self.sriDict.pop(streamID) + + def _sendPacket(self, port, data, T, EOS, streamID): + port.pushPacket(data, T, EOS, streamID) + + def 
_isStreamRoutedToConnection(self, streamID, connectionID): + port_listed = False + for rule in self.filterTable: + if rule.port_name != self.name: + continue + port_listed = True + if rule.stream_id == streamID and rule.connection_id == connectionID: + return True + return not port_listed + + def pushPacket(self, data, T, EOS, streamID): + + if self._portLog: + self._portLog.trace('bulkio::OutPort pushPacket ENTER ') + + if not self.sriDict.has_key(streamID): + sri = BULKIO.StreamSRI(1, 0.0, 1.0, 1, 0, 0.0, 0.0, 0, 0, streamID, False, []) + self.pushSRI(sri) + + with self.port_lock: + self._pushPacket(data, T, EOS, streamID) + if EOS: + del self._streams[streamID] + + if self._portLog: + self._portLog.trace('bulkio::OutPort pushPacket EXIT ') + + def _reformat(self, data): + return data + + def _packetSize(self, data): + return len(data) + +class OutNumericPort(OutPort): + def __init__(self, *args, **kwargs): + elemType = kwargs.pop('elemType', int) + OutPort.__init__(self, *args, **kwargs) + self._elemType = elemType + + def _pushPacket(self, data, T, EOS, streamID): + # If there is no need to break data into smaller packets, skip straight + # to the pushPacket call and return. 
+ elements = len(data) + if elements <= self.maxSamplesPerPush: + return OutPort._pushPacket(self, data, T, EOS, streamID); + + sri = self.sriDict[streamID].sri + + # Quantize the push size (in terms of scalars) to the nearest frame, + # which takes both the complex mode and subsize into account + item_size = 2 if sri.mode else 1 + frame_size = item_size + if sri.subsize > 0: + frame_size *= sri.subsize + max_samples = int(self.maxSamplesPerPush/frame_size) * frame_size + + # Intialize time for the first subpacket + packetTime = T + + # Push sub-packets max_samples at a time + count = len(data) + for start in xrange(0, count, max_samples): + # The end index of the packet may exceed the length of the data; + # the Python slice operator will clamp it to the actual end + end = start + max_samples + + # Send end-of-stream as false for all sub-packets except for the + # last one (when the end of the sub-packet goes past the end of the + # input data), which gets the input EOS. + if end >= count: + packetEOS = EOS + else: + packetEOS = False + + # Push the current slice of the input data + OutPort._pushPacket(self, data[start:end], packetTime, packetEOS, streamID); + + # Synthesize the next packet timestamp + if packetTime.tcstatus == BULKIO.TCS_VALID: + push_size = min(end, count) - start + packetTime = packetTime + (push_size/item_size) * sri.xdelta + + def _createStream(self, sri): + return NumericOutputStream(sri, self, self._dataType, self._elemType) + + +class OutCharPort(OutNumericPort): + TRANSFER_TYPE = 'c' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataChar, OutCharPort.TRANSFER_TYPE, logger, dataType=str, bits=8) + + def _reformat(self, data): + if isinstance(data, basestring): + return data + return struct.pack('%db' % len(data), *data) + +class OutOctetPort(OutNumericPort): + TRANSFER_TYPE = 'B' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataOctet, 
OutOctetPort.TRANSFER_TYPE, logger, dataType=str, bits=8) + + def _reformat(self, data): + if isinstance(data, basestring): + return data + return struct.pack('%dB' % len(data), *data) + +class OutShortPort(OutNumericPort): + TRANSFER_TYPE = 'h' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataShort, OutShortPort.TRANSFER_TYPE, logger, bits=16) + +class OutUShortPort(OutNumericPort): + TRANSFER_TYPE = 'H' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataUshort, OutUShortPort.TRANSFER_TYPE, logger, bits=16) + +class OutLongPort(OutNumericPort): + TRANSFER_TYPE = 'i' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataLong, OutLongPort.TRANSFER_TYPE, logger, bits=32) + +class OutULongPort(OutNumericPort): + TRANSFER_TYPE = 'I' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataUlong, OutULongPort.TRANSFER_TYPE, logger, bits=32) + +class OutLongLongPort(OutNumericPort): + TRANSFER_TYPE = 'q' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataLongLong, OutLongLongPort.TRANSFER_TYPE, logger, bits=64) + +class OutULongLongPort(OutNumericPort): + TRANSFER_TYPE = 'Q' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataUlongLong, OutULongLongPort.TRANSFER_TYPE, logger, bits=64) + +class OutFloatPort(OutNumericPort): + TRANSFER_TYPE = 'f' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataFloat, OutFloatPort.TRANSFER_TYPE, logger, elemType=float, bits=32) + +class OutDoublePort(OutNumericPort): + TRANSFER_TYPE = 'd' + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, name, BULKIO.dataDouble, OutDoublePort.TRANSFER_TYPE, logger, elemType=float, bits=64) + +class OutBitPort(OutNumericPort): + TRANSFER_TYPE = 'B' + + def __init__(self, name, logger=None): + OutNumericPort.__init__(self, 
class StreamAttachment:
    """
    Records a single successful attach() call made on a connected input
    port, tying together the connection ID, the attach ID returned by the
    remote side, the remote input port, and (optionally) the owning Stream.
    """

    def __init__(self, connectionId, attachId, inputPort, inStream=None):
        self.connectionId = connectionId
        self.attachId = attachId
        self.inputPort = inputPort
        self.stream = inStream
        self._sa_logger = None

    def setLogger(self, inLogger):
        # Fix: this method was defined twice, verbatim; the duplicate
        # definition has been removed.
        self._sa_logger = inLogger

    def detach(self):
        """
        Detach from the remote input port.

        Best-effort: failures are reported through the owning port's
        connection-error policy and logged, never raised to the caller.
        """
        p = None
        if self.stream:
            p = self.stream.getPort()
        try:
            self.inputPort.detach(self.attachId)
            if p:
                p.updateStats(self.connectionId)
        except Exception as e:
            if p and p.reportConnectionErrors(self.connectionId):
                if self._sa_logger:
                    self._sa_logger.error("DETACH FAILURE, CONNECTION: %s , EXCEPTION: %s", self.connectionId, str(e))
def detachByAttachIdConnectionId(self, attachId=None, connectionId=None):
    """
    Detach and remove every attachment matching BOTH the given attach ID
    and connection ID.

    Fix: the original body referenced an undefined name `attachId` (it was
    never a parameter), so the method raised NameError whenever any
    attachment existed. The signature now mirrors
    StreamContainer.detachByAttachIdConnectionId(attachId, connectionId);
    the previous single-argument form always crashed, so no working caller
    is affected.
    """
    # Iterate over a copy so removal during iteration is safe.
    for att in list(self.streamAttachments):
        if att.attachId and att.inputPort and att.attachId == attachId and att.connectionId == connectionId:
            att.detach()
            self.streamAttachments.remove(att)
def hasStreams(self):
    """Return True if at least one stream is registered in this container."""
    # Idiom fix: direct comparison replaces the redundant
    # if-len-then-True-else-False branch of the original.
    return len(self.streams) > 0
def getStreamIds(self):
    """Return a list of the stream IDs of all registered streams."""
    # Comprehension form of the original append loop; order is preserved.
    return [stream.streamId for stream in self.streams]
def detachByAttachId(self, attachId=None):
    """
    Detach every attachment whose attach ID matches `attachId`.

    Fix: unlike the sibling methods (detachByConnectionId,
    detachByAttachIdConnectionId, detachAllStreams), the original left
    the detached attachment in the stream's attachment list, so a later
    detach pass would call detach() on it a second time. The stale entry
    is now removed after a successful iteration match.
    """
    for stream in self.streams:
        # Iterate over a copy so removal during iteration is safe.
        for atts in list(stream.streamAttachments):
            if atts.attachId and atts.attachId == attachId and atts.inputPort:
                atts.detach()
                stream.streamAttachments.remove(atts)
def attachmentIds(self, streamId=None):
    """
    Return the attach IDs of current attachments.

    Fix: this method was defined twice — a zero-argument form and a
    one-argument form. In Python the later definition silently shadows
    the earlier one, so calling attachmentIds() with no argument raised
    TypeError. The two are merged behind an optional parameter, keeping
    both historical call forms working.

    Args:
        streamId: If given, restrict the result to the first stream whose
            ID matches (matching the original one-argument behavior);
            otherwise collect attach IDs from all streams.

    Returns:
        List of attach ID strings.
    """
    ids = []
    for stream in self.streamContainer.streams:
        if streamId is not None and stream.streamId != streamId:
            continue
        for atts in stream.streamAttachments:
            ids.append(atts.attachId)
        if streamId is not None:
            # Original one-argument form stopped at the first match.
            break
    return ids
def detach(self, attachId=None, connectionId=None):
    """
    Detach attachments, selected by attach ID and/or connection ID.

    With only `connectionId`, detaches that connection from every stream.
    With only `attachId`, detaches (and removes) the matching attachments.
    With neither, detaches everything and resets the stream container.

    Fixes:
      - `atts.detach(attachId)` called a no-argument method with an
        argument (TypeError); now `atts.detach()`.
      - `stream.streamAttachments.pop(atts)` passed a list element where
        list.pop() requires an integer index (TypeError); now
        `.remove(atts)`.
    """
    if self._portLog:
        self._portLog.trace("bulkio::OutAttachablePort, DETACH ENTER ")

    self.port_lock.acquire()
    try:
        if connectionId:
            for stream in self.streamContainer.streams:
                stream.detachByConnectionId(connectionId)

        if attachId:
            for stream in self.streamContainer.streams:
                # Iterate over a copy so removal during iteration is safe.
                for atts in list(stream.streamAttachments):
                    if atts.attachId == attachId:
                        atts.detach()
                        stream.streamAttachments.remove(atts)

        if not attachId and not connectionId:
            for stream in self.streamContainer.streams:
                for atts in list(stream.streamAttachments):
                    atts.detach()
            self.streamContainer = OutAttachablePort.StreamContainer()
            self.streamContainer.setLogger(self._portLog)
    finally:
        self.port_lock.release()

    if self._portLog:
        self._portLog.trace("bulkio::OutAttachablePort, DETACH EXIT ")
def updateStream(self, streamData):
    """
    Replace an existing stream definition with `streamData` (matched by
    `streamData.id`) by removing and re-adding it.

    Fix: the original acquired port_lock and, when the stream ID was
    unknown, returned False WITHOUT releasing the lock — deadlocking the
    port on the next locked operation. The check/remove section is now
    wrapped in try/finally so the lock is always released. As before,
    the lock is released before delegating to addStream(), which takes
    the lock itself.

    Returns:
        True if the stream existed and was re-added; False otherwise.
    """
    streamId = streamData.id
    self.port_lock.acquire()
    try:
        if not self.streamContainer.hasStreamId(streamId):
            return False
        self.streamContainer.removeStreamByStreamId(streamId)
    finally:
        self.port_lock.release()
    return self.addStream(streamData)
def getStreamDefinition(self, attachId):
    """
    Return the stream definition of every stream holding an attachment
    whose attach ID matches `attachId`.
    """
    # Nested comprehension form of the original double append loop;
    # iteration order (streams, then their attachments) is unchanged.
    return [stream.streamDef
            for stream in self.streamContainer.streams
            for atts in stream.streamAttachments
            if atts.attachId == attachId]
self._portLog.exception("PUSH-SRI (attachable) FAILED, PORT/CONNECTION %s/%s , EXCEPTION: %s ", str(self.name), connId, str(e)) + + if not portListed: + for connId, port in self.outConnections.items(): + try: + if port != None: + port.pushSRI(H, T) + self.sriDict[H.streamID].connections.add(connId) + except Exception, e: + if self.reportConnectionErrors(connId) : + if self._portLog: + self._portLog.exception("PUSH-SRI (attachable) FAILED, PORT/CONNECTION %s/%s , EXCEPTION: %s ", str(self.name), connId, str(e)) + finally: + self.port_lock.release() + + if self._portLog: + self._portLog.trace("bulkio::OutAttachablePort, PUSH-SRI EXIT ") + + def updateConnectionFilter(self, _filterTable): + self.port_lock.acquire() + try: + if _filterTable == None : + _filterTable = [] + self.filterTable = _filterTable + + #1. loop over filterTable + #A. ignore other port_names + #B. create mapping of streamid->connections(attachments) + + hasPortEntry = False + streamsFound = {} + streamAttachments = {} + # Populate streamsFound + knownStreamIds = self.streamContainer.getStreamIds() + for id in knownStreamIds: + streamsFound[id] = False + + # Iterate through each filterTable entry and capture state + for entry in self.filterTable: + if entry.port_name != self.name: + continue + + hasPortEntry = True + if entry.connection_id in self.outConnections.keys(): + connectedPort = self.outConnections.get(entry.connection_id) + else: + if self._portLog: + self._portLog.trace("bulkio::OutAttachablePort, updateConnectionFilter() Unable to find connected port with connectionId: " + entry.connection_id) + continue + + if self.streamContainer.hasStreamId(entry.stream_id): + streamsFound[entry.stream_id] = True + expectedAttachment = OutAttachablePort.StreamAttachment(entry.connection_id, None, connectedPort) + if not streamAttachments.has_key(entry.stream_id): + streamAttachments[entry.stream_id] = [] + streamAttachments[entry.stream_id].append(expectedAttachment) + + for streamId, 
expectedAttachements in streamAttachments.iteritems(): + foundStream = self.streamContainer.findByStreamId(streamId) + if foundStream: + foundStream.updateAttachments(expectedAttachements) + else: + if self._portLog: + self._portLog.warn("bulkio::OutAttachablePort, updateConnectionFilter() Unable to locate stream definition for streamId: " +streamId) + + + if hasPortEntry: + # If there's a valid port entry, we need to detach unmentioned streams + for streamId,found in streamsFound.items(): + if not found: + stream = self.streamContainer.findByStreamId(streamId) + if stream: + stream.detachAll() + else: + # No port entry == All connections on + for connId, port in self.outConnections.items(): + self.streamContainer.addConnectionToAllStreams(connId,port) + + self.updateSRIForAllConnections() + + finally: + self.port_lock.release() + self.streamContainer.printState("After updateFilterTable") + + def updateSRIForAllConnections(self): + # Iterate through stream objects in container + # Check if sriDict has stream entry + # Yes: Check that ALL connections are listed in sriDict entry + # Update currentSRI + # No: PushSRI on all attachment ports + # Update currentSRI + + # Iterate through all registered streams + for stream in self.streamContainer.streams: + streamConnIds = stream.getConnectionIds() + + # Check if sriDict has entry for StreamId + if self.sriDict.has_key(stream.streamId): + sriMap = self.sriDict[stream.streamId] + + # Check if all connections on the streams have pushed SRI + currentSRIConnIds = sriMap.connections + for connId in streamConnIds: + + # If not found, pushSRI and update currentSRIs container + if not connId in currentSRIConnIds: + + # Grab the port + if self.outConnections.has_key(connId): + connectedPort = self.outConnections[connId] + # Push sri and update sriMap + connectedPort.pushSRI(sriMap.sri, sriMap.time) + sriMap.connections.add(connId) + else: + if self._portLog: + self._portLog.debug("updateSRIForAllConnections() Unable to find 
connected port with connectionId: " + connId) + +class OutSDDSPort(OutAttachablePort): + def __init__(self, name, max_attachments=None, logger=None ): + OutAttachablePort.__init__(self, name, max_attachments, logger, interface=BULKIO.dataSDDS) + +class OutVITA49Port(OutAttachablePort): + def __init__(self, name, max_attachments=None, logger=None ): + OutAttachablePort.__init__(self, name, max_attachments, logger, interface=BULKIO.dataVITA49) diff --git a/bulkioInterfaces/libsrc/python/bulkio/output_streams.py b/bulkioInterfaces/libsrc/python/bulkio/output_streams.py new file mode 100644 index 000000000..587659e79 --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/output_streams.py @@ -0,0 +1,519 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import copy + +from ossie.cf import CF +from redhawk.bitbuffer import bitbuffer + +import bulkio +from bulkio.bulkioInterfaces import BULKIO +from bulkio.stream_base import StreamBase + +class OutputStream(StreamBase): + """ + Basic BulkIO output stream class. + + OutputStream encapsulates a single BulkIO stream for writing. 
It is + associated with the output port that created it, providing a file-like API + on top of the classic BulkIO pushPacket model. + + Notionally, a BulkIO stream represents a contiguous data set and its + associated signal-related information (SRI), uniquely identified by a + stream ID, from creation until close. The SRI may vary over time, but the + stream ID is immutable. Only one stream with a given stream ID can be + active at a time. + + OutputStreams help manage the stream lifetime by tying that SRI with an + output port and ensuring that all data is associated with a valid stream. + When the stream is complete, it may be closed, notifying downstream + receivers that no more data is expected. + + OutputStreams must be created via an output port. A stream cannot be + associated with more than one port. + + SRI Changes: + Updates to the stream that modify or replace its SRI are cached locally + until the next write to minimize the number of updates that are published. + When there are pending SRI changes, the OutputStream pushes the updated SRI + first, followed by the data. + + See Also: + OutPort.createStream + OutPort.getStream + """ + def __init__(self, sri, port, dtype=list): + """ + Create an OutputStream. + + Warning: + Output streams are created via an output port. This constructor + should not be called directly. 
+ + See Also: + OutputPort.createStream + OutputPort.getStream + """ + StreamBase.__init__(self, sri) + self._port = port + self._dtype = dtype + self.__sriModified = True + + @StreamBase.sri.setter + def sri(self, sri): + if not isinstance(sri, BULKIO.StreamSRI): + raise TypeError('sri must be a BULKIO.StreamSRI') + self._modifyingStreamMetadata() + # Deep copy to avoid accidental updates to the SRI via the caller's + # reference + sri = copy.deepcopy(sri) + # Preserve stream ID + sri.streamID = self.streamID + self._sri = sri + + @StreamBase.xstart.setter + def xstart(self, xstart): + self._setStreamMetadata('xstart', float(xstart)) + + @StreamBase.xdelta.setter + def xdelta(self, xdelta): + self._setStreamMetadata('xdelta', float(xdelta)) + + @StreamBase.xunits.setter + def xunits(self, xunits): + self._setStreamMetadata('xunits', int(xunits)) + + @StreamBase.subsize.setter + def subsize(self, subsize): + self._setStreamMetadata('subsize', int(subsize)) + + @StreamBase.ystart.setter + def ystart(self, ystart): + self._setStreamMetadata('ystart', float(ystart)) + + @StreamBase.ydelta.setter + def ydelta(self, ydelta): + self._setStreamMetadata('ydelta', float(ydelta)) + + @StreamBase.yunits.setter + def yunits(self, yunits): + self._setStreamMetadata('yunits', int(yunits)) + + @StreamBase.complex.setter + def complex(self, mode): + self._setStreamMetadata('mode', 1 if mode else 0) + + @StreamBase.blocking.setter + def blocking(self, blocking): + self._setStreamMetadata('blocking', 1 if blocking else 0) + + @StreamBase.keywords.setter + def keywords(self, keywords): + self._modifyingStreamMetadata() + # Copy the sequence, but not the values + self._sri.keywords = keywords[:] + + def setKeyword(self, name, value, format=None): + """ + Sets the current value of a keyword in the SRI. + + If the keyword name already exists, its value is updated to value. If + the keyword name does not exist, the new keyword is appended. 
+ + If the optional 'format' argument is given, it must be the name of the + desired CORBA type. Otherwise, the CORBA type is determined based on + the Python type of 'value'. + + Setting a keyword updates the SRI, which will be pushed on the next + write. + + Args: + name: The name of the keyword. + value: The new value. + format: Optional type name. + """ + self._modifyingStreamMetadata() + bulkio.sri.setKeyword(self._sri, name, value, format) + + def eraseKeyword(self, name): + """ + Removes a keyword from the SRI. + + Erases the keyword named 'name' from the SRI keywords. If no keyword + 'name' is found, the keywords are not modified. + + Removing a keyword updates the SRI, which will be pushed on the next + write. + + Args: + name: The name of the keyword. + """ + self._modifyingStreamMetadata() + bulkio.sri.eraseKeyword(self._sri, name) + + def close(self): + """ + Closes this stream. + + Sends an end-of-stream packet. No further operations should be made on + the stream. + """ + data = self._dtype() + self._send(data, bulkio.timestamp.notSet(), True) + + def _setStreamMetadata(self, attr, value): + field = getattr(self._sri, attr) + if field != value: + self._modifyingStreamMetadata() + setattr(self._sri, attr, value) + + def _send(self, data, time, eos): + if self.__sriModified: + self._port.pushSRI(self._sri) + self.__sriModified = False + self._pushPacket(self._port, data, time, eos, self.streamID) + + def _pushPacket(self, port, data, time, eos, streamID): + port.pushPacket(data, time, eos, streamID) + + def _modifyingStreamMetadata(self): + self.__sriModified = True + + +class BufferedOutputStream(OutputStream): + """ + BulkIO output stream class with data buffering. + + BufferedOutputStream can use an internal buffer to queue up multiple + packets worth of data into a single push. By default, buffering is + disabled. + + Data Buffering: + BufferedOutputStreams can combine multiple small chunks of data into a + single packet for reduced I/O overhead. 
Data buffering is enabled by + setting a non-zero buffer size via the setBufferSize() method. The output + stream creates an internal buffer of the requested size; the stream's + complex mode is not taken into account. + + With buffering enabled, each write copies its data into the internal + buffer, up to the maximum of the buffer size. When the internal buffer is + full, a packet is sent via the output port, using the time stamp of the + first buffered sample. After the packet is sent, the internal buffer is + reset to its initial state. If there is any remaining data from the write, + it is copied into a new buffer and a new starting time stamp is + interpolated. + + Time Stamps: + When buffering is enabled, the time stamps provided to the write() methods + may be discarded. Furthermore, when write sizes do not align exactly with + the buffer size, the output time stamp may be interpolated. If precise + time stamps are required, buffering should not be used. + + See Also: + OutputStream + """ + def __init__(self, sri, port, dtype=list): + """ + Create a BufferedOutputStream. + + Warning: + Output streams are created via an output port. This constructor + should not be called directly. + + See Also: + OutputPort.createStream + OutputPort.getStream + """ + OutputStream.__init__(self, sri, port, dtype) + self.__buffer = self._dtype() + self.__bufferSize = 0 + self.__bufferTime = bulkio.timestamp.notSet() + + def write(self, data, time): + """ + Writes data to the stream. + + Data is reformatted as necessary to match the port's requirements. For + example, char or octet ports will pack numeric values into a binary + string. In the case of bit data, string literals will be parsed into a + bitbuffer. + + If buffering is disabled, `data` is sent as a single packet with the + given time stamp. + + When buffering is enabled, `data` is copied into the internal buffer. + If the internal buffer exceeds the configured buffer size, one or more + packets will be sent. 
+ + Args: + data: Sample data to write. + time: Time stamp of first sample. + + See Also: + bufferSize() + setBufferSize() + """ + # Allow the port to reformat the data in its natural format + data = self._port._reformat(data) + + # If buffering is disabled, or the buffer is empty and the input data + # is large enough for a full buffer, send it immediately + if self.__bufferSize == 0 or (not self.__buffer and (len(data) >= self.__bufferSize)): + self._send(data, time, False) + else: + self._doBuffer(data, time) + + def bufferSize(self): + """ + Gets the internal buffer size. + + The buffer size is in terms of real samples, ignoring the complex mode + of the stream. Complex samples count as two real samples for the + purposes of buffering. + + A buffer size of 0 indicates that buffering is disabled. + + Returns: + int: Number of real samples to buffer per push. + """ + return self.__bufferSize + + def setBufferSize(self, samples): + """ + Sets the internal buffer size. + + The internal buffer is flushed if samples is less than the number of + real samples currently buffered. + + A buffer size of 0 disables buffering, flushing any buffered data. + + Args: + samples: Number of real samples to buffer per push. + + Raises: + ValueError: If samples is negative. + """ + size = int(samples) + if size < 0: + raise ValueError('buffer size cannot be negative') + self.__bufferSize = size + + # If the new buffer size is less than (or exactly equal to) the + # currently buffered data size, flush + if self.__bufferSize <= len(self.__buffer): + self.flush() + + def flush(self): + """ + Flushes the internal buffer. + + Any data in the internal buffer is sent to the port to be pushed. + """ + if not self.__buffer: + return + self._flush(False) + + def close(self): + """ + Closes the stream. + + Sends an end-of-stream packet with any remaining buffered data. No + further operations should be made on the stream. 
+ """ + if self.__buffer: + # Add the end-of-stream marker to the buffered data and its + # timestamp + self._flush(True) + else: + OutputStream.close(self) + + def _modifyingStreamMetadata(self): + # Flush any data queued with the old SRI + self.flush() + + # Post-extend base class method + OutputStream._modifyingStreamMetadata(self) + + def _flush(self, eos): + self._send(self.__buffer, self.__bufferTime, eos) + self.__buffer = self._dtype() + + def _doBuffer(self, data, time): + # If this is the first data being queued, use its timestamp for the + # start time of the buffered data + if not self.__buffer: + self.__bufferTime = copy.copy(time) + + # Only buffer up to the currently configured buffer size + count = min(len(data), self.__bufferSize - len(self.__buffer)); + self.__buffer += data[:count] + + # Flush if the buffer is full + if len(self.__buffer) >= self.__bufferSize: + self._flush(False) + + # Handle remaining data + if count < len(data): + next = time + self.xdelta * count + self._doBuffer(data[count:], next) + + +def _unpack_complex(data, dtype): + # Yields alternating real and imaginary elements from a sequence of complex + # values (or real values treated as complex values, where the imaginary + # portion is always 0), converted to a desired data type + for item in data: + yield dtype(item.real) + yield dtype(item.imag) + +def _complex_to_interleaved(data, dtype): + # Turns a sequence of complex values into a list with the real and + # imaginary elements interleaved + return list(_unpack_complex(data, dtype)) + +class NumericOutputStream(BufferedOutputStream): + """ + BulkIO output stream class for numeric data types. + + NumericOutputStream extends BufferedOutputStream to add support for complex + data and data reformatting. + + See Also: + BufferedOutputStream + OutputStream + """ + def __init__(self, sri, port, dtype, elemType): + """ + Create a NumericOutputStream. + + Warning: + Output streams are created via an output port. 
This constructor + should not be called directly. + + See Also: + OutPort.createStream + OutPort.getStream + """ + BufferedOutputStream.__init__(self, sri, port, dtype) + self._elemType = elemType + + def write(self, data, time, interleaved=False): + """ + Writes sample data to the stream. + + If this stream is configured for complex data, `data` is treated as a + list of complex values. The real and imaginary elements are interleaved + into a list of real numbers. + + When `data` is already an interleaved list of real values, setting the + optional `interleaved` keyword argument will skip the complex-to-real + interleaving. + + For char or octet streams, real values are packed into a binary string + after applying complex-to-real conversion (if required). + + Buffering behavior is inherited from BufferedOutputStream.write(). + + Args: + data: Sample data to write. + time: Time stamp of first sample. + interleaved: Indicates whether complex data is already interleaved. + + See Also: + BufferedOutputStream.write() + NumericOutputStream.complex + """ + if not interleaved and self.complex: + data = _complex_to_interleaved(data, self._elemType) + BufferedOutputStream.write(self, data, time) + + +class OutFileStream(OutputStream): + """ + File output stream class. + + See Also: + OutputStream + """ + def __init__(self, sri, port): + """ + Create an OutFileStream. + + Warning: + Output streams are created via an output port. This constructor + should not be called directly. + + See Also: + OutFilePort.createStream + OutFilePort.getStream + """ + OutputStream.__init__(self, sri, port, str) + + def write(self, data, time): + """ + Writes a file URI to the stream. + + The URI is sent as a single packet with the given time stamp. File + streams do not support buffering. + + Args: + data: The file URI to write. + time: Time stamp of file URI. + """ + self._send(data, time, False) + + +class OutXMLStream(OutputStream): + """ + XML output stream class. 
+ + See Also: + OutputStream + """ + def __init__(self, sri, port): + """ + Create an OutXMLStream. + + Warning: + Output streams are created via an output port. This constructor + should not be called directly. + + See Also: + OutXMLPort.createStream + OutXMLPort.getStream + """ + OutputStream.__init__(self, sri, port, str) + + def write(self, data): + """ + Writes XML data to the stream. + + The XML document `data` is sent as a single packet. XML streams do not + support time stamps or buffering. + + Args: + data: An XML string. + """ + # Add a "null" timestamp to adapt to the base class method + self._send(data, None, False) + + def _pushPacket(self, port, data, time, eos, streamID): + # Drop the time stamp from the base class + port.pushPacket(data, eos, streamID) diff --git a/bulkioInterfaces/libsrc/python/bulkio/sandbox/__init__.py b/bulkioInterfaces/libsrc/python/bulkio/sandbox/__init__.py new file mode 100644 index 000000000..75538aa48 --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/sandbox/__init__.py @@ -0,0 +1,24 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +from streamsource import StreamSource +from streamsink import StreamSink + +__all__ = ('StreamSource', 'StreamSink') diff --git a/bulkioInterfaces/libsrc/python/bulkio/sandbox/streamsink.py b/bulkioInterfaces/libsrc/python/bulkio/sandbox/streamsink.py new file mode 100644 index 000000000..1660c133b --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/sandbox/streamsink.py @@ -0,0 +1,358 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import collections +import operator +import threading +import time + +from ossie.utils.sandbox.helper import SandboxHelper + +from bulkio.bulkioInterfaces import BULKIO +from bulkio.input_ports import * + +_PORT_MAP = { + 'char' : (InCharPort, BULKIO.dataChar), + 'octet': (InOctetPort, BULKIO.dataOctet), + 'short' : (InShortPort, BULKIO.dataShort), + 'ushort' : (InUShortPort, BULKIO.dataUshort), + 'long' : (InLongPort, BULKIO.dataLong), + 'ulong' : (InULongPort, BULKIO.dataUlong), + 'longlong' : (InLongLongPort, BULKIO.dataLongLong), + 'ulonglong' : (InULongLongPort, BULKIO.dataUlongLong), + 'float' : (InFloatPort, BULKIO.dataFloat), + 'double' : (InDoublePort, BULKIO.dataDouble), + 'bit' : (InBitPort, BULKIO.dataBit), + 'file' : (InFilePort, BULKIO.dataFile), + 'xml' : (InXMLPort, BULKIO.dataXML) +} + +SRI = collections.namedtuple('SRI', 'offset sri') +TimeStamp = collections.namedtuple('TimeStamp', 'offset time') + +class StreamData(object): + def __init__(self, sris, data, timestamps, eos): + self.sris = sris + self.data = data + self.timestamps = timestamps + self.eos = eos + + @property + def streamID(self): + return self.sri.streamID + + @property + def sri(self): + offset, sri = self.sris[0] + return sri + +class StreamContainer(object): + """ + Internal class to cache data read from a stream. 
+ """ + def __init__(self, sri): + self.sri = sri + self.blocks = [] + self.eos = False + + @property + def streamID(self): + return self.sri.streamID + + def append(self, block): + self.blocks.append(block) + + def ready(self): + if self.eos: + return True + else: + return bool(self.blocks) + + def get(self): + # Combine blocks' data and metadata into a single data object + data = StreamData([], self._createEmpty(), [], self.eos) + framed = False + for block in self.blocks: + offset = len(data.data) + block_data = self._getBlockData(block) + + # Check for framed data mode + if block.sri.subsize > 0: + # If the data wasn't framed before, turn it into a list + if not framed: + if data.data: + # There was some data, put it in a list + data.data = [data.data] + else: + # Replace empty data with a list (this is mostly for + # the benefit of bitbuffer) + data.data = [] + framed = True + + # Reframe the block's data to match the framing + block_data = self._reframeData(block_data, block.sri.subsize) + data.data += block_data + + if block.sriChanged or not data.sris: + data.sris.append(SRI(offset, block.sri)) + for ts in block.getTimestamps(): + data.timestamps.append(TimeStamp(ts.offset + offset, ts.time)) + + # In the event that there were no blocks (which can only happen in the + # case of an end-of-stream with no data), add the saved SRI + if not data.sris: + data.sris = [SRI(0, self.sri)] + + return data + + def _createEmpty(self): + return [] + + def _reframeData(self, data, frameSize): + return [data[pos:pos+frameSize] for pos in xrange(0, len(data), frameSize)] + + def _getBlockSize(self, block): + if block.complex: + return block.cxsize + else: + return block.size + + def _getBlockData(self, block): + if block.complex: + return block.cxdata + else: + return block.data + +class StringStreamContainer(StreamContainer): + """ + Internal class for storing data from string-type streams (XML, File). 
+ """ + def __init__(self, sri): + StreamContainer.__init__(self, sri) + + def _getBlockSize(self, block): + return 1 + + def _getBlockData(self, block): + return [block.buffer] + +class BitStreamContainer(StreamContainer): + """ + Internal class for storing data from packed bit streams. + """ + def __init__(self, sri): + StreamContainer.__init__(self, sri) + + def _createEmpty(self): + return bitbuffer() + + def _getBlockSize(self, block): + return len(block.buffer) + + def _getBlockData(self, block): + return block.buffer + +class StreamSink(SandboxHelper): + """ + Sandbox helper for reading BulkIO stream data. + + StreamSink provides a simplified interface for reading data from BulkIO + streams. It can support any BulkIO data type (with the exception of SDDS + and VITA49), but only one type is supported per instance. + + Unlike DataSink, reading from StreamSink returns data from only one stream + at a time. This avoids problems caused by accidental data interleaving. + + If more control over stream reading is desired, the `port` attribute + provides access to the underlying BulkIO port. The port is an instance of + the same class used in Python components, and supports the full stream API. + + See Also: + StreamSource + """ + def __init__(self, format=None): + """ + Creates a new StreamSink. + + Args: + format: BulkIO port type to support (e.g., "float"). If not + given, all BulkIO port types except SDDS and VITA49 are + supported. 
+ """ + SandboxHelper.__init__(self) + + if format: + formats = [format] + else: + formats = _PORT_MAP.keys() + for format in formats: + clazz, helper = _PORT_MAP[format] + if format == 'bit': + cache = BitStreamContainer + elif format in ('xml', 'file'): + cache = StringStreamContainer + else: + cache = StreamContainer + self._addProvidesPort(format+'In', helper._NP_RepositoryId, clazz, {'cache':cache}) + + self._cachedStreams = {} + self._cacheClass = StreamContainer + + def _portCreated(self, port, portDict): + self._cacheClass = portDict['cache'] + + def streamIDs(self): + """ + Gets the stream IDs for the active streams. + + Returns: + list(str): The currently active stream IDs. + """ + return [sri.streamID for sri in self.activeSRIs()] + + def activeSRIs(self): + """ + Gets the SRIs for the active streams. + + Returns: + list(BULKIO.StreamSRI): The currently active SRIs. + """ + sri_list = [c.sri for c in self._cachedStreams.itervalues()] + if self._port: + for stream in self._port.getStreams(): + if stream.streamID not in self._cachedStreams: + sri_list.append(stream.sri) + return sri_list + + @property + def port(self): + """ + The BulkIO input port in use by this helper. + + The port is created when a connection is made from this helper to + another object in the sandbox. If no connection exists, the port is + None. + """ + return self._port + + def read(self, timeout=-1.0, streamID=None, eos=False): + """ + Reads stream data. + + Reading attempts to return as much data from a single stream as + possible. If the read succeeds, returns a StreamData object with the + following fields: + * streamID - Identifier of the stream from which data was read. + * sri - SRI in effect at the start of the read. + * sris - List of (offset, StreamSRI) tuples that describe the + data. The offset gives the element in `data` at which + the SRI applies (e.g., data[0]). + * data - The data read from the stream, formatted as necessary. 
+ * timestamps - List of (offset, PrecisionUTCTime) tuples that give + time information for data. The offset gives the + element in `data` at which the time stamp applies + (e.g., data[0]). + * eos - True if the stream has ended, False if still active. + + If the SRI indicates that the data is framed (that is, `sri.subsize` is + non-zero), StreamSink will turn the data into a list of frames. For + example, with float data and a frame size of 4, the `data` field will + be a list of 4-element lists, where each item is a float. + + Args: + timeout: Maximum time, in seconds, to wait for data to become + available. A negative time waits indefinitely (this is + the default). + streamID: Identifier of stream to read from (default is first + available). + eos: If True, wait up to `timeout` for a stream to receive an + EOS (default is False). This may be combined with the + `streamID` argument to wait for a specific stream. + + Returns: + StreamData object on success. + None if timeout elapsed or helper was stopped and no data was + available. 
+ """ + if eos: + condition = lambda x: x.eos + else: + condition = lambda x: x.ready() + + container = self._read(timeout, streamID, condition) + if not container: + return None + # The read consumes all cached data for the stream, so we can discard + # the cache object + self._removeStreamCache(container) + return container.get() + + def _read(self, timeout, streamID, condition): + if timeout >= 0.0: + end = time.time() + timeout + else: + end = None + + while self.started: + # Fetch as much data as possible without blocking + while self._fetchData(): + pass + + for container in self._cachedStreams.itervalues(): + if streamID and container.streamID != streamID: + continue + if condition(container): + return container + + # Sleep to allow more data to come in + wait_time = 0.1 + if end is not None: + now = time.time() + if now >= end: + break + wait_time = min(wait_time, end - now) + time.sleep(wait_time) + + return None + + def _fetchData(self): + # Use a polling loop instead of waiting in getCurrentStream so that the + # operation can be interrupted by ^C + stream = self._port.getCurrentStream(0.0) + if not stream: + return False + + container = self._getStreamCache(stream) + block = stream.tryread() + if block: + container.append(block) + elif stream.eos(): + container.eos = True + return True + + def _getStreamCache(self, stream): + container = self._cachedStreams.get(stream.streamID, None) + if not container: + container = self._cacheClass(stream.sri) + self._cachedStreams[stream.streamID] = container + return container + + def _removeStreamCache(self, container): + del self._cachedStreams[container.streamID] diff --git a/bulkioInterfaces/libsrc/python/bulkio/sandbox/streamsource.py b/bulkioInterfaces/libsrc/python/bulkio/sandbox/streamsource.py new file mode 100644 index 000000000..92e23e77d --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/sandbox/streamsource.py @@ -0,0 +1,252 @@ +# +# This file is protected by Copyright. 
Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +from ossie.utils.docstring import inherit_doc +from ossie.utils.sandbox.helper import SandboxHelper + +from redhawk.bitbuffer import bitbuffer + +import bulkio +from bulkio.bulkioInterfaces import BULKIO +from bulkio.output_ports import * +from bulkio.output_streams import OutputStream, OutXMLStream + +_PORT_MAP = { + 'char' : (OutCharPort, BULKIO.dataChar), + 'octet': (OutOctetPort, BULKIO.dataOctet), + 'short' : (OutShortPort, BULKIO.dataShort), + 'ushort' : (OutUShortPort, BULKIO.dataUshort), + 'long' : (OutLongPort, BULKIO.dataLong), + 'ulong' : (OutULongPort, BULKIO.dataUlong), + 'longlong' : (OutLongLongPort, BULKIO.dataLongLong), + 'ulonglong' : (OutULongLongPort, BULKIO.dataUlongLong), + 'float' : (OutFloatPort, BULKIO.dataFloat), + 'double' : (OutDoublePort, BULKIO.dataDouble), + 'bit' : (OutBitPort, BULKIO.dataBit), + 'file' : (OutFilePort, BULKIO.dataFile), + 'xml' : (OutXMLPort, BULKIO.dataXML) +} + +def proxy(attr): + """ + Wrapper class to handle passthrough for getting and setting OutputStream + attributes in StreamSource. 
+ """ + class property_proxy(object): + __doc__ = attr.__doc__ + def __init__(self, attr): + self.__attr = attr + + def __get__(self, obj, cls=None): + if obj is None: + return self + if obj._stream is not None: + return self.__attr.__get__(obj._stream, cls) + return self.__attr.__get__(obj, cls) + + def __set__(self, obj, value): + if obj._stream is not None: + self.__attr.__set__(obj._stream, value) + self.__attr.__set__(obj, value) + + return property_proxy(attr) + +class StreamSource(SandboxHelper): + """ + Sandbox helper for writing BulkIO stream data. + + StreamSource provides a simplified interface to write a single BulkIO + stream to one or more components. It can support any BulkIO data type (with + the exception of SDDS and VITA49), but only one type is supported per + instance. + + If more than one stream is desired, create one StreamSource per stream ID, + or use the `port` attribute to get direct access to the underlying BulkIO + port. The port is an instance of the same class used in Python components, + and supports the full stream API. + + See Also: + StreamSink + """ + def __init__(self, streamID=None, format=None): + """ + Creates a new StreamSource. + + Args: + streamID: The unique identifier for this stream. If not given, the + helper's instance name is used (e.g., "StreamSource_1"). + format: BulkIO port type to support (e.g., "float"). If not + given, all BulkIO port types except SDDS and VITA49 are + supported. + """ + SandboxHelper.__init__(self) + self._streamID = streamID + self._stream = None + + if format: + formats = [format] + else: + formats = _PORT_MAP.keys() + for format in formats: + clazz, helper = _PORT_MAP[format] + self._addUsesPort(format+'Out', helper._NP_RepositoryId, clazz) + + def _initializeHelper(self): + if not self._streamID: + self._streamID = self._instanceName + self._sri = bulkio.sri.create(self._streamID) + + def write(self, data, time=None, interleaved=False): + """ + Writes data to the stream. 
+ + Args: + data: Data to write. + time: Optional time stamp for first sample of `data`. If not + given, the current time is used. Ignored when the data + type is XML. + interleaved: Indicates whether complex data is already interleaved. + """ + if not self._port: + # Not connected to anything + return + + # Get the stream via the attribute, which will create it if it does not + # already exist + stream = self.stream + + # Turn framed input data into a 1-dimensional sequence, but only if the + # stream is configured for it + if stream.subsize > 0: + data = self._unframeData(data) + + args = [data] + kwargs = {} + if not isinstance(stream, OutXMLStream): + if time is None: + time = bulkio.timestamp.now() + kwargs['time'] = time + if interleaved: + kwargs['interleaved'] = True + stream.write(*args, **kwargs) + + def close(self): + """ + Closes the stream, sending an end-of-stream packet. + """ + if self._stream: + self._stream.close() + self._stream = None + + @property + def streamID(self): + """ + The unique identifier of this stream. + """ + return self._sri.streamID + + @property + def port(self): + """ + The BulkIO output port in use by this helper. + + The port is created when a connection is made from this helper to + another object in the sandbox. If no connection exists, the port is + None. + """ + return self._port + + @property + def stream(self): + """ + The BulkIO stream managed by this helper. + + If no connection from this helper has been made yet, the stream is + None. 
+ + See Also: + StreamSource.port + """ + if self._port and not self._stream: + # Port has to exist before creating the stream + self.log.debug("Creating stream '%s'", self.streamID) + self._stream = self._port.createStream(self._sri) + return self._stream + + def _unframeData(self, data): + if not data: + # Assume empty sequence, nothing to do + return data + elif isinstance(data[0], bitbuffer): + # Sequence of bitbuffers, compact down to a single bitbuffer + return sum(data, bitbuffer()) + elif isinstance(data[0], (list, tuple)): + # Sequence of sequences, probably numeric data, compact into a + # single list + return sum(data, []) + else: + # Something else (probably numbers), just pass through + return data + + def _setStreamMetadata(self, attr, value): + # This method is required by the proxied setters from the output stream + # class; it just needs to update the local copy of the SRI + setattr(self._sri, attr, value) + + def _modifyingStreamMetadata(self): + # This method is required by the proxied setter for 'sri'; it doesn't + # have to actually do anything + pass + + # Stream properties are defined as proxies to provide consistent behavior. + # If a stream is already created, it writes through to the stream, but also + # updates its local SRI. If no stream has been created, the local SRI is + # modified so that when a stream is created, it gets the latest SRI. 
+ sri = proxy(OutputStream.sri) + xstart = proxy(OutputStream.xstart) + xdelta = proxy(OutputStream.xdelta) + xunits = proxy(OutputStream.xunits) + subsize = proxy(OutputStream.subsize) + ystart = proxy(OutputStream.ystart) + ydelta = proxy(OutputStream.ydelta) + yunits = proxy(OutputStream.yunits) + blocking = proxy(OutputStream.blocking) + complex = proxy(OutputStream.complex) + keywords = proxy(OutputStream.keywords) + + @inherit_doc(bulkio.sri.hasKeyword) + def hasKeyword(self, name): + return bulkio.sri.hasKeyword(self._sri, name) + + @inherit_doc(bulkio.sri.getKeyword) + def getKeyword(self, name): + return bulkio.sri.getKeyword(self._sri, name) + + @inherit_doc(bulkio.sri.setKeyword) + def setKeyword(self, name, value, format=None): + if self._stream: + self._stream.setKeyword(name, value, format) + bulkio.sri.setKeyword(self._sri, name, value, format) + + @inherit_doc(bulkio.sri.eraseKeyword) + def eraseKeyword(self, name): + if self._stream: + self._stream.eraseKeyword(name) + bulkio.sri.eraseKeyword(self._sri, name) diff --git a/bulkioInterfaces/libsrc/python/bulkio/sri.py b/bulkioInterfaces/libsrc/python/bulkio/sri.py new file mode 100644 index 000000000..9d42e47b2 --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/sri.py @@ -0,0 +1,182 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import omniORB.any + +from ossie.cf import CF +import ossie.properties + +from bulkio.bulkioInterfaces import BULKIO + +# Bit flags for SRI fields +NONE = 0 +HVERSION = (1<<0) +XSTART = (1<<1) +XDELTA = (1<<2) +XUNITS = (1<<3) +SUBSIZE = (1<<4) +YSTART = (1<<5) +YDELTA = (1<<6) +YUNITS = (1<<7) +MODE = (1<<8) +STREAMID = (1<<9) +BLOCKING = (1<<10) +KEYWORDS = (1<<11) + +def _compareKeywords(keywordsA, keywordsB): + if len(keywordsA) != len(keywordsB): + return False + for keyA, keyB in zip(keywordsA, keywordsB): + if keyA.value._t != keyB.value._t: + return False + if keyA.value._v != keyB.value._v: + return False + return True + +def compare(sriA, sriB): + """ + Will compare two BULKIO.StreamSRI objects and return True + if they are both equal, and false otherwise + """ + if not sriA or not sriB: + return False + + if sriA.hversion != sriB.hversion: + return False + if sriA.xstart != sriB.xstart: + return False + if sriA.xdelta != sriB.xdelta: + return False + if sriA.xunits != sriB.xunits: + return False + if sriA.subsize != sriB.subsize: + return False + if sriA.ystart != sriB.ystart: + return False + if sriA.ydelta != sriB.ydelta: + return False + if sriA.yunits != sriB.yunits: + return False + if sriA.mode != sriB.mode: + return False + if sriA.streamID != sriB.streamID: + return False + if sriA.blocking != sriB.blocking: + return False + if len(sriA.keywords) != len(sriB.keywords): + return False + for keyA, keyB in zip(sriA.keywords, sriB.keywords): + if keyA.value._t != keyB.value._t: + return False + if keyA.value._v != keyB.value._v: + return False + return True + +def compareFields(sriA, sriB): + """ + Field-by-field comparison of two SRIs, returning a combination of bit flags + indicating the fields that are different. 
+ """ + result = NONE + if sriA.hversion != sriB.hversion: + result = result | HVERSION + if sriA.xstart != sriB.xstart: + result = result | XSTART + if sriA.xdelta != sriB.xdelta: + result = result | XDELTA + if sriA.xunits != sriB.xunits: + result = result | XUNITS + if sriA.subsize != sriB.subsize: + result = result | SUBSIZE + if sriA.ystart != sriB.ystart: + result = result | YSTART + if sriA.ydelta != sriB.ydelta: + result = result | YDELTA + if sriA.yunits != sriB.yunits: + result = result | YUNITS + if sriA.mode != sriB.mode: + result = result | MODE + if sriA.streamID != sriB.streamID: + result = result | STREAMID + if sriA.blocking != sriB.blocking: + result = result | BLOCKING + if not _compareKeywords(sriA.keywords, sriB.keywords): + result = result | KEYWORDS + return result + +def create( sid='defStream', srate=1.0, xunits=1 ): + return BULKIO.StreamSRI(hversion=1, xstart=0.0, xdelta=1.0/srate, + xunits=xunits, subsize=0, ystart=0.0, ydelta=0.0, + yunits=0, mode=0, streamID=sid, blocking=False, keywords=[]) + +def hasKeyword(sri, name): + """ + Checks for the presence of a keyword in the SRI. + """ + for dt in sri.keywords: + if dt.id == name: + return True + return False + +def getKeyword(sri, name): + """ + Gets the current value of a keyword in the SRI. + + Allows for easy lookup of keyword values in the SRI. To avoid exceptions on + missing keywords, the presence of a keyword can be checked with + hasKeyword(). + """ + for dt in sri.keywords: + if dt.id == name: + return omniORB.any.from_any(dt.value) + raise KeyError(name) + +def setKeyword(sri, name, value, format=None): + """ + Sets the current value of a keyword in the SRI. + + If the keyword "name" already exists, its value is updated to "value". If + the keyword "name" does not exist, the new keyword is appended. + + If the optional 'format' argument is given, it must be the name of the + desired CORBA type. Otherwise, the CORBA type is determined based on the + Python type of 'value'. 
+ """ + if format is None: + value = omniORB.any.to_any(value) + else: + value = ossie.properties.to_tc_value(value, format) + + for dt in sri.keywords: + if dt.id == name: + dt.value = value + return + sri.keywords.append(CF.DataType(name, value)) + +def eraseKeyword(sri, name): + """ + Erases the first instance of the keyword "name" from the SRI keywords. If + no keyword "name" is found, the keywords are not modified. + """ + for index in xrange(len(sri.keywords)): + if sri.keywords[index].id == name: + del sri.keywords[index] + return + diff --git a/bulkioInterfaces/libsrc/python/statistics.py b/bulkioInterfaces/libsrc/python/bulkio/statistics.py similarity index 82% rename from bulkioInterfaces/libsrc/python/statistics.py rename to bulkioInterfaces/libsrc/python/bulkio/statistics.py index 698cce6cc..377803e82 100644 --- a/bulkioInterfaces/libsrc/python/statistics.py +++ b/bulkioInterfaces/libsrc/python/bulkio/statistics.py @@ -32,14 +32,18 @@ def __init__(self): self.queueSize = 0.0 self.secs = 0.0 - def __init__(self, name, element_type ): + def __init__(self, name, element_type='', bits=0): + # Backwards-compatibility: accept an element type string for use with + # struct.calcsize + if bits == 0: + bits = struct.calcsize(element_type) * 8 self.enabled = True self.flushTime = None self.historyWindow = 10 self.receivedStatistics = [] self.name = name self.receivedStatistics_idx = 0 - self.bitSize = struct.calcsize(element_type) * 8 + self.bitSize = bits self.activeStreamIDs = [] for i in range(self.historyWindow): self.receivedStatistics.append(self.statPoint()) @@ -123,9 +127,13 @@ def __init__(self): self.secs = 0.0 self.streamID = "" - def __init__(self, name, element_type ): + def __init__(self, name, element_type='', bits=0): + # Backwards-compatibility: accept an element type string for use with + # struct.calcsize + if not bits: + bits = struct.calcsize(element_type) * 8 self.enabled = True - self.bitSize = struct.calcsize(element_type) * 8 + 
self.bitSize = bits self.historyWindow = 10 self.receivedStatistics = {} self.name = name @@ -144,6 +152,11 @@ def connectionErrors(self, connection_id, n ): self.connection_errors.setdefault(connection_id,0) self.connection_errors[connection_id] = self.connection_errors[connection_id]+n return self.connection_errors[connection_id] + + def add(self, connectionId): + self.receivedStatistics[connectionId] = [self.statPoint() for xx in xrange(self.historyWindow)] + self.receivedStatistics_idx[connectionId] = 0 + self.connection_errors[connectionId] = 0 def remove(self, connectionId): if self.receivedStatistics.has_key(connectionId): @@ -157,24 +170,14 @@ def update(self, elementsReceived, queueSize, EOS, streamID, connectionId): if not self.enabled: return - if self.receivedStatistics.has_key(connectionId): - self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].elements = elementsReceived - self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].queueSize = queueSize - self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].secs = time.time() - self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].streamID = streamID - self.receivedStatistics_idx[connectionId] += 1 - self.receivedStatistics_idx[connectionId] = self.receivedStatistics_idx[connectionId]%self.historyWindow - else: - self.receivedStatistics[connectionId] = [] - self.receivedStatistics_idx[connectionId] = 0 - for i in range(self.historyWindow): - self.receivedStatistics[connectionId].append(self.statPoint()) - self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].elements = elementsReceived - self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].queueSize = queueSize - self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].secs = time.time() - 
self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].streamID = streamID - self.receivedStatistics_idx[connectionId] += 1 - self.receivedStatistics_idx[connectionId] = self.receivedStatistics_idx[connectionId] % self.historyWindow + if not self.receivedStatistics.has_key(connectionId): + self.add(connectionId) + self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].elements = elementsReceived + self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].queueSize = queueSize + self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].secs = time.time() + self.receivedStatistics[connectionId][self.receivedStatistics_idx[connectionId]].streamID = streamID + self.receivedStatistics_idx[connectionId] += 1 + self.receivedStatistics_idx[connectionId] = self.receivedStatistics_idx[connectionId]%self.historyWindow def retrieve(self): if not self.enabled: diff --git a/bulkioInterfaces/libsrc/python/bulkio/stream_base.py b/bulkioInterfaces/libsrc/python/bulkio/stream_base.py new file mode 100644 index 000000000..1e64cab29 --- /dev/null +++ b/bulkioInterfaces/libsrc/python/bulkio/stream_base.py @@ -0,0 +1,199 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import bulkio.sri + +class StreamBase(object): + """ + Base class for input and output streams. + + StreamBase encapsulates a single BulkIO stream. It implements the basic + common API for input and output streams, providing attributes for StreamSRI + fields. + """ + def __init__(self, sri): + self._sri = sri + + @property + def streamID(self): + """ + str: The stream's unique identifier. + + The stream ID is immutable and cannot be changed. + """ + return self._sri.streamID + + @property + def sri(self): + """ + BULKIO.StreamSRI: The current stream metadata. + """ + return self._sri + + @property + def xstart(self): + """ + float: Starting coordinate of the first sample in the X direction. + + For contiguous data, this is the start of the stream in terms of + xunits. For framed data, this specifies the starting abscissa value, in + terms of xunits, associated with the first element in + """ + return self._sri.xstart + + @property + def xdelta(self): + """ + float: The distance between two adjacent samples in the X direction. + + Because the X-axis is commonly in terms of time (that is, sri.xunits is + BULKIO.UNITS_TIME), this is typically the reciprocal of the sample + rate. + + For framed data, this is the interval between consecutive samples in a + frame. + """ + return self._sri.xdelta + + @property + def xunits(self): + """ + int: The unit code for the xstart and xdelta values. + + Axis units are specified using constants in the BULKIO package. For + contiguous data, the X-axis is commonly in terms of time, + BULKIO.UNITS_TIME. For framed data, the X-axis is often in terms of + frequency, BULKIO.UNITS_FREQUENCY. + """ + return self._sri.xunits + + @property + def subsize(self): + """ + int: The length of a row for framed data, or 0 if the data is + contiguous. 
+ + A subsize of 0 indicates that the data is contiguous; this is the + default setting. For contiguous data, only the X-axis fields are + applicable. + + A non-zero subsize indicates that the data is framed, with each row + having a length of subsize. For framed data, both the X-axis and Y-axis + fields are applicable. + """ + return self._sri.subsize + + @property + def ystart(self): + """ + float: Starting coordinate of the first frame in the Y direction. + + This specifies the start of the stream in terms of yunits. + + Note: + Y-axis fields are only applicable when subsize is non-zero. + """ + return self._sri.ystart + + @property + def ydelta(self): + """ + float: The distance between two adjacent frames in the Y direction. + + This specifies the interval between frames in terms of yunits. + + Note: + Y-axis fields are only applicable when subsize is non-zero. + """ + return self._sri.ydelta + + @property + def yunits(self): + """ + int: The unit code for the ystart and ydelta values. + + Axis units are specified using constants in the BULKIO package. + + Note: + Y-axis fields are only applicable when subsize is non-zero. + """ + return self._sri.yunits + + @property + def complex(self): + """ + bool: The complex mode of this stream. + + A stream is considered complex if sri.mode is non-zero. + """ + return self._sri.mode != 0 + + @property + def blocking(self): + """ + bool: The blocking mode of this stream. + """ + return self._sri.blocking + + @property + def keywords(self): + """ + list: User-defined keywords. + + See Also: + hasKeyword() + getKeyword() + """ + return self._sri.keywords + + def hasKeyword(self, name): + """ + Checks for the presence of a keyword in the SRI. + + Args: + name: The name of the keyword. + + Returns: + True: If keyword `name` exists. + False: If keyword `name` does not exist. 
+ + See Also: + bulkio.sri.hasKeyword() + """ + return bulkio.sri.hasKeyword(self._sri, name) + + def getKeyword(self, name): + """ + Gets the current value of a keyword in the SRI. + + Allows for easy lookup of keyword values in the SRI. To avoid + exceptions on missing keywords, the presence of a keyword can be + checked with hasKeyword(). + + Returns: + Value of keyword `name` as a Python object. + + Raises: + KeyError: If keyword `name` does not exist. + + See Also: + bulkio.sri.getKeyword() + """ + return bulkio.sri.getKeyword(self._sri, name) diff --git a/bulkioInterfaces/libsrc/python/timestamp.py b/bulkioInterfaces/libsrc/python/bulkio/timestamp.py similarity index 100% rename from bulkioInterfaces/libsrc/python/timestamp.py rename to bulkioInterfaces/libsrc/python/bulkio/timestamp.py diff --git a/bulkioInterfaces/libsrc/python/input_ports.py b/bulkioInterfaces/libsrc/python/input_ports.py deleted file mode 100644 index c9a845393..000000000 --- a/bulkioInterfaces/libsrc/python/input_ports.py +++ /dev/null @@ -1,665 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# - - -import threading -import collections -import copy -import time -import logging -from ossie.utils import uuid -from bulkio.statistics import InStats -from bulkio import sri -from bulkio import timestamp -from bulkio import const -from ossie.cf.CF import Port - -from bulkio.bulkioInterfaces import BULKIO, BULKIO__POA #@UnusedImport - - -class InPort: - DATA_BUFFER=0 - TIME_STAMP=1 - END_OF_STREAM=2 - STREAM_ID=3 - SRI=4 - SRI_CHG=5 - QUEUE_FLUSH=6 - _TYPE_ = 'c' - - # Backwards-compatible DataTransfer type can still be unpacked like a tuple - # but also supports named fields - DataTransfer = collections.namedtuple('DataTransfer', 'dataBuffer T EOS streamID SRI sriChanged inputQueueFlushed') - - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100, PortTransferType=_TYPE_ ): - self.name = name - self.logger = logger - self.queue = collections.deque() - self._maxSize = maxsize - self.port_lock = threading.Lock() - self._not_full = threading.Condition(self.port_lock) - self._not_empty = threading.Condition(self.port_lock) - self._breakBlock = False - self.stats = InStats(name, PortTransferType) - self.blocking = False - self.sri_cmp = sriCompare - self.newSriCallback = newSriCallback - self.sriChangeCallback = sriChangeCallback - self.sriDict = {} # key=streamID, value=StreamSRI - - if logger==None: - self.logger = logging.getLogger("redhawk.bulkio.input."+name) - - _cmpMsg = "DEFAULT" - _newSriMsg = "EMPTY" - _sriChangeMsg = "EMPTY" - if sriCompare != sri.compare: - _cmpMsg = "USER_DEFINED" - if newSriCallback: - _newSriMsg = "USER_DEFINED" - if sriChangeCallback: - _sriChangeMsg = "USER_DEFINED" - - if self.logger: - self.logger.debug( "bulkio::InPort CTOR port:" + str(name) + - " Blocking/MaxInputQueueSize " + str(self.blocking) + "/" + str(maxsize) + - " SriCompare/NewSriCallback/SriChangeCallback " + _cmpMsg + "/" + _newSriMsg + "/" + _sriChangeMsg ); - - def setNewSriListener(self, 
newSriCallback): - self.port_lock.acquire() - try: - self.newSriCallback = newSriCallback - finally: - self.port_lock.release() - - def setSriChangeListener(self, sriChangeCallback): - self.port_lock.acquire() - try: - self.sriChangeCallback = sriChangeCallback - finally: - self.port_lock.release() - - def enableStats(self, enabled): - self.stats.setEnabled(enabled) - - def _get_statistics(self): - self.port_lock.acquire() - try: - return self.stats.retrieve() - finally: - self.port_lock.release() - - def _get_state(self): - self.port_lock.acquire() - try: - if len(self.queue) == 0: - return BULKIO.IDLE - elif len(self.queue) == self._maxSize: - return BULKIO.BUSY - else: - return BULKIO.ACTIVE - finally: - self.port_lock.release() - - def _get_activeSRIs(self): - self.port_lock.acquire() - try: - return [self.sriDict[entry][0] for entry in self.sriDict] - finally: - self.port_lock.release() - - def getCurrentQueueDepth(self): - self.port_lock.acquire() - try: - return len(self.queue) - finally: - self.port_lock.release() - - def getMaxQueueDepth(self): - self.port_lock.acquire() - try: - return self._maxSize - finally: - self.port_lock.release() - - #set to -1 for infinite queue - def setMaxQueueDepth(self, newDepth): - self.port_lock.acquire() - try: - self._maxSize = int(newDepth) - finally: - self.port_lock.release() - - def unblock(self): - self.port_lock.acquire() - try: - self._breakBlock = False - finally: - self.port_lock.release() - - def block(self): - self.port_lock.acquire() - try: - self._breakBlock = True - self._not_empty.notifyAll() - finally: - self.port_lock.release() - - # Provide standard interface for start/stop - startPort = unblock - stopPort = block - - def pushSRI(self, H): - - if self.logger: - self.logger.trace( "bulkio::InPort pushSRI ENTER (port=" + str(self.name) +")" ) - - self.port_lock.acquire() - try: - if H.streamID not in self.sriDict: - sriChanged = True - if self.logger: - self.logger.debug( "pushSRI PORT:" + str(self.name) + 
" NEW SRI:" + str(H.streamID) ) - if self.newSriCallback: - self.newSriCallback( H ) - self.sriDict[H.streamID] = (copy.deepcopy(H), True) - if H.blocking: - self.blocking = True - else: - sri, sriChanged = self.sriDict[H.streamID] - if self.sri_cmp: - if not self.sri_cmp(sri, H): - self.sriDict[H.streamID] = (copy.deepcopy(H), True) - if H.blocking: - self.blocking = True - if self.sriChangeCallback: - self.sriChangeCallback( H ) - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace( "bulkio::InPort pushSRI EXIT (port=" + str(self.name) +")" ) - - - def pushPacket(self, data, T, EOS, streamID): - - if self.logger: - self.logger.trace( "bulkio::InPort pushPacket ENTER (port=" + str(self.name) +")" ) - - self.port_lock.acquire() - try: - if self._maxSize == 0: - if self.logger: - self.logger.trace( "bulkio::InPort pushPacket EXIT (port=" + str(self.name) +")" ) - return - if self.sriDict.has_key(streamID): - sri, sriChanged = self.sriDict[streamID] - if EOS: - self.sriDict[streamID] = (sri, True) - else: - self.sriDict[streamID] = (sri, False) - else: - # Create a default SRI for the stream ID - if self.logger: - self.logger.warn("bulkio::InPort pushPacket received data for stream '%s' with no SRI", streamID) - sri = BULKIO.StreamSRI(1, 0.0, 1.0, 1, 0, 0.0, 0.0, 0, 0, streamID, False, []) - if self.newSriCallback: - self.newSriCallback(sri) - self.sriDict[streamID] = (sri, False) - sriChanged = True - - queueFlushed = False - flagEOS = False - if self.blocking: - while len(self.queue) >= self._maxSize: - self._not_full.wait() - else: - # Flush the queue if not using infinite queue (maxSize == -1), blocking is not on, and - # current length of queue is >= maxSize - if len(self.queue) >= self._maxSize and self._maxSize > -1: - queueFlushed = True - if self.logger: - self.logger.debug("bulkio::InPort pushPacket PURGE INPUT QUEUE (SIZE=%d)", len(self.queue)) - - foundSRIChanged = False - for data, T, EOS, streamID, sri, sriChangeHappened, 
inputQueueFlushed in self.queue: - if foundSRIChanged and flagEOS: - break - if sriChangeHappened: - sriChanged = True - foundSRIChanged = True - if EOS: - flagEOS = True - self.queue.clear() - - if flagEOS: - EOS = True - packet = (data, T, EOS, streamID, copy.deepcopy(sri), sriChanged, queueFlushed) - self.stats.update(self._packetSize(data), float(len(self.queue))/float(self._maxSize), EOS, streamID, queueFlushed) - if self.logger: - self.logger.trace("bulkio::InPort pushPacket NEW Packet (QUEUE=%d)", len(self.queue)) - self.queue.append(packet) - - # Let one waiting getPacket call know there is a packet available - self._not_empty.notify() - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace( "bulkio::InPort pushPacket EXIT (port=" + str(self.name) +")" ) - - def getPacket(self, timeout=const.NON_BLOCKING): - - if self.logger: - self.logger.trace( "bulkio::InPort getPacket ENTER (port=" + str(self.name) +")" ) - - self.port_lock.acquire() - try: - if timeout < 0.0: - while not self.queue and not self._breakBlock: - self._not_empty.wait() - elif timeout > 0.0: - # Determine the absolute time at which the timeout expires - end = time.time() + timeout - while not self.queue and not self._breakBlock: - # Calculate remaining timeout - remain = end - time.time() - if remain <= 0.0: - break - self._not_empty.wait(remain) - - if not self.queue: - return InPort.DataTransfer(None, None, None, None, None, None, None) - - data, T, EOS, streamID, sri, sriChanged, inputQueueFlushed = self.queue.popleft() - - # Let one waiting pushPacket call know there is space available - self._not_full.notify() - - if EOS: - if self.sriDict.has_key(streamID): - (a,b) = self.sriDict.pop(streamID) - if sri.blocking: - stillBlock = False - for _sri, _sriChanged in self.sriDict.values(): - if _sri.blocking: - stillBlock = True - break - if not stillBlock: - self.blocking = False - return InPort.DataTransfer(data, T, EOS, streamID, sri, sriChanged, inputQueueFlushed) - 
finally: - self.port_lock.release() - - if self.logger: - self.logger.trace( "bulkio::InPort getPacket EXIT (port=" + str(self.name) +")" ) - - def _packetSize(self, data): - return len(data) - - -class InCharPort(InPort, BULKIO__POA.dataChar): - _TYPE_ = 'c' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InCharPort._TYPE_ ) - -class InOctetPort(InPort, BULKIO__POA.dataOctet): - _TYPE_ = 'B' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InOctetPort._TYPE_ ) - -class InShortPort(InPort, BULKIO__POA.dataShort): - _TYPE_ = 'h' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InShortPort._TYPE_ ) - -class InUShortPort(InPort, BULKIO__POA.dataUshort): - _TYPE_ = 'H' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InUShortPort._TYPE_ ) - -class InLongPort(InPort, BULKIO__POA.dataLong): - _TYPE_ = 'i' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InLongPort._TYPE_ ) - -class InULongPort(InPort, BULKIO__POA.dataUlong): - _TYPE_ = 'I' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, 
sriChangeCallback, maxsize, InULongPort._TYPE_ ) - -class InLongLongPort(InPort, BULKIO__POA.dataLongLong): - _TYPE_ = 'q' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InLongLongPort._TYPE_ ) - - -class InULongLongPort(InPort, BULKIO__POA.dataUlongLong): - _TYPE_ = 'Q' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InULongLongPort._TYPE_ ) - - -class InFloatPort(InPort, BULKIO__POA.dataFloat): - _TYPE_ = 'f' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InFloatPort._TYPE_ ) - - -class InDoublePort(InPort, BULKIO__POA.dataDouble): - _TYPE_ = 'd' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InDoublePort._TYPE_ ) - - -class InFilePort(InPort, BULKIO__POA.dataFile): - _TYPE_ = 'd' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, maxsize, InFilePort._TYPE_ ) - - def _packetSize(self, data): - # For statistics, consider the entire URL a single element - return 1 - - -class InXMLPort(InPort, BULKIO__POA.dataXML): - _TYPE_ = 'd' - def __init__(self, name, logger=None, sriCompare=sri.compare, newSriCallback=None, sriChangeCallback=None, maxsize=100 ): - InPort.__init__(self, name, logger, sriCompare, newSriCallback, sriChangeCallback, 
maxsize, InXMLPort._TYPE_ ) - - def pushPacket(self, xml_string, EOS, streamID): - # Insert a None for the timestamp and use parent implementation - InPort.pushPacket(self, xml_string, None, EOS, streamID) - -class InAttachablePort: - _TYPE_='b' - def __init__(self, name, logger=None, attachDetachCallback=None, sriCmp=sri.compare, timeCmp=timestamp.compare, PortType = _TYPE_, newSriCallback=None, sriChangeCallback=None,interface=None): - self.name = name - self.logger = logger - self.port_lock = threading.Lock() - self.sri_query_lock = threading.Lock() - self._attachedStreams = {} # key=attach_id, value = (streamDef, userid) - self.stats = InStats(name, PortType ) - self.sriDict = {} # key=streamID, value=(StreamSRI, PrecisionUTCTime) - self.attachDetachCallback = attachDetachCallback - self.newSriCallback = newSriCallback - self.sriChangeCallback = sriChangeCallback - self.sri_cmp = sriCmp - self.time_cmp = timeCmp - self.sriChanged = False - if not interface: - if self.logger: - self.logger.error("InAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49") - raise Port.InvalidPort(1, "InAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49") - self.interface=interface # BULKIO port interface (valid options are BULKIO.dataSDDS or BULKIO.dataVITA49) - self.setNewAttachDetachListener(attachDetachCallback) - if self.logger: - self.logger.debug("bulkio::InAttachablePort CTOR port:" + str(self.name) + " using interface " + str(self.interface)) - - def setNewAttachDetachListener(self, attachDetachCallback ): - self.port_lock.acquire() - try: - self.attachDetachCallback = attachDetachCallback - - # Set _attach_cb - try: - self._attach_cb = getattr(attachDetachCallback, "attach") - if not callable(self._attach_cb): - self._attach_cb = None - except AttributeError: - self._attach_cb = None - - # Set _detach_cb - try: - self._detach_cb = getattr(attachDetachCallback, "detach") - if 
not callable(self._detach_cb): - self._detach_cb = None - except AttributeError: - self._detach_cb = None - - finally: - self.port_lock.release() - - def setNewSriListener(self, newSriCallback): - self.port_lock.acquire() - try: - self.newSriCallback = newSriCallback - finally: - self.port_lock.release() - - def setSriChangeListener(self, sriChangeCallback): - self.port_lock.acquire() - try: - self.sriChangeCallback = sriChangeCallback - finally: - self.port_lock.release() - - def setBitSize(self, bitSize): - self.stats.setBitSize(bitSize) - - def enableStats(self, enabled): - self.stats.setEnabled(enabled) - - def updateStats(self, elementsReceived, queueSize, streamID): - self.port_lock.acquire() - try: - self.stats.update(elementsReceived, queueSize, streamID) - finally: - self.port_lock.release() - - def _get_statistics(self): - self.port_lock.acquire() - try: - recStat = self.stats.retrieve() - finally: - self.port_lock.release() - return recStat - - def _get_state(self): - self.port_lock.acquire() - try: - numAttachedStreams = len(self._attachedStreams.values()) - finally: - self.port_lock.release() - if numAttachedStreams == 0: - return BULKIO.IDLE - # default behavior is to limit to one connection - elif numAttachedStreams == 1: - return BULKIO.BUSY - else: - return BULKIO.ACTIVE - - def _get_attachedSRIs(self): - sris = [] - self.sri_query_lock.acquire() - try: - for entry in self.sriDict: - # First value of sriDict entry is the StreamSRI object - sris.append(copy.deepcopy(self.sriDict[entry][0])) - finally: - self.sri_query_lock.release() - return sris - - def _get_usageState(self): - self.port_lock.acquire() - try: - numAttachedStreams = len(self._attachedStreams.values()) - finally: - self.port_lock.release() - if numAttachedStreams == 0: - return self.interface.IDLE - # default behavior is to limit to one connection - elif numAttachedStreams == 1: - return self.interface.BUSY - else: - return self.interface.ACTIVE - - def _get_attachedStreams(self): - 
return [x[0] for x in self._attachedStreams.values()] - - def _get_attachmentIds(self): - return self._attachedStreams.keys() - - def attach(self, streamDef, userid): - - if self.logger: - self.logger.trace("bulkio::InAttachablePort attach ENTER (port=" + str(self.name) +")" ) - self.logger.debug("InAttachablePort.attach() - ATTACH REQUEST, STREAM/USER" + str(streamDef) + '/' + str(userid)) - - attachId = None - self.port_lock.acquire() - try: - try: - if self.logger: - self.logger.debug("InAttachablePort.attach() - CALLING ATTACH CALLBACK, STREAM/USER" + str(streamDef) + '/' + str(userid) ) - if self._attach_cb != None: - attachId = self._attach_cb(streamDef, userid) - except Exception, e: - if self.logger: - self.logger.error("InAttachablePort.attach() - ATTACH CALLBACK EXCEPTION : " + str(e) + " STREAM/USER" + str(streamDef) + '/' + str(userid) ) - raise self.interface.AttachError(str(e)) - - if attachId == None: - attachId = str(uuid.uuid4()) - - self._attachedStreams[attachId] = (streamDef, userid) - - finally: - self.port_lock.release() - - if self.logger: - self.logger.debug("InAttachablePort.attach() - ATTACH COMPLETED, ID:" + str(attachId) + " STREAM/USER: " + str(streamDef) + '/' + str(userid)) - self.logger.trace("bulkio::InAttachablePort attach EXIT (port=" + str(self.name) +")" ) - - return attachId - - def detach(self, attachId): - - if self.logger: - self.logger.trace("bulkio::InAttachablePort detach ENTER (port=" + str(self.name) +")" ) - self.logger.debug("InAttachablePort.detach() - DETACH REQUESTED, ID:" + str(attachId) ) - - self.port_lock.acquire() - try: - if not self._attachedStreams.has_key(attachId): - - if self.logger: - self.logger.debug("InAttachablePort.detach() - DETACH UNKNOWN ID:" + str(attachId) ) - - if attachId: - raise self.interface.DetachError("Stream %s not attached" % str(attachId)) - else: - raise self.interface.DetachError("Cannot detach Unkown ID") - - attachedStreamDef, refcnf = self._attachedStreams[attachId] - - # - # 
Deallocate capacity here if applicable - # - try: - if self.logger: - self.logger.debug("InAttachablePort.detach() - CALLING DETACH CALLBACK, ID:" + str(attachId) ) - - if self._detach_cb != None: - self._detach_cb(attachId) - except Exception, e: - if self.logger: - self.logger.error("InAttachablePort.detach() - DETACH CALLBACK EXCEPTION: " + str(e) ) - raise self.interface.DetachError(str(e)) - - # Remove the attachment from our list - del self._attachedStreams[attachId] - - finally: - self.port_lock.release() - - if self.logger: - self.logger.debug("InAttachablePort.detach() - DETACH SUCCESS, ID:" + str(attachId) ) - self.logger.trace("bulkio::InAttachablePort detach EXIT (port=" + str(self.name) +")" ) - - def getStreamDefinition(self, attachId): - try: - return self._attachedStreams[attachId][0] - except KeyError: - raise self.interface.StreamInputError("Stream %s not attached" % attachId) - - def getUser(self, attachId): - try: - return self._attachedStreams[attachId][1] - except KeyError: - raise self.interface.StreamInputError("Stream %s not attached" % attachId) - - def _get_activeSRIs(self): - self.sri_query_lock.acquire() - try: - activeSRIs = [self.sriDict[entry][0] for entry in self.sriDict] - finally: - self.sri_query_lock.release() - return activeSRIs - - def pushSRI(self, H, T): - - if self.logger: - self.logger.trace("bulkio::InAttachablePort pushSRI ENTER (port=" + str(self.name) +")" ) - - self.port_lock.acquire() - try: - if H.streamID not in self.sriDict: - if self.newSriCallback: - self.newSriCallback( H ) - # Disable querying while adding a new SRI - self.sri_query_lock.acquire() - try: - self.sriDict[H.streamID] = (copy.deepcopy(H), copy.deepcopy(T)) - finally: - self.sri_query_lock.release() - else: - cur_H, cur_T = self.sriDict[H.streamID] - s_same = False - if self.sri_cmp: - s_same = self.sri_cmp(cur_H, H) - - t_same = False - if self.time_cmp: - t_same = self.time_cmp(cur_T, T) - - self.sriChanged = ( s_same == False ) or ( t_same == 
False ) - if self.sriChanged and self.sriChangeCallback: - self.sriChangeCallback( H ) - # Disable querying while adding a new SRI - self.sri_query_lock.acquire() - try: - self.sriDict[H.streamID] = (copy.deepcopy(H), copy.deepcopy(T)) - finally: - self.sri_query_lock.release() - - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace("bulkio::InAttachablePort pushSRI EXIT (port=" + str(self.name) +")" ) - -class InSDDSPort(BULKIO__POA.dataSDDS,InAttachablePort): - def __init__(self, name, logger=None, attachDetachCallback=None, sriCmp=None, timeCmp=None, PortType = 'b', newSriCallback=None, sriChangeCallback=None ): - InAttachablePort.__init__(self, name, logger, attachDetachCallback, sriCmp, timeCmp, PortType, newSriCallback, sriChangeCallback, interface=BULKIO.dataSDDS) - -class InVITA49Port(BULKIO__POA.dataVITA49,InAttachablePort): - def __init__(self, name, logger=None, attachDetachCallback=None, sriCmp=None, timeCmp=None, PortType = 'b', newSriCallback=None, sriChangeCallback=None ): - InAttachablePort.__init__(self, name, logger, attachDetachCallback, sriCmp, timeCmp, PortType, newSriCallback, sriChangeCallback, interface=BULKIO.dataVITA49) diff --git a/bulkioInterfaces/libsrc/python/output_ports.py b/bulkioInterfaces/libsrc/python/output_ports.py deleted file mode 100644 index a1b1454bd..000000000 --- a/bulkioInterfaces/libsrc/python/output_ports.py +++ /dev/null @@ -1,1246 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. 
-# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -import threading -import copy -import time -import sys -import struct - -from ossie.cf import ExtendedCF -from ossie.cf.CF import Port -from ossie.utils import uuid -from ossie.properties import simple_property -import logging -from bulkio.statistics import OutStats -from bulkio import sri -from bulkio import timestamp -from bulkio.bulkioInterfaces import BULKIO, BULKIO__POA -from bulkio.const import MAX_TRANSFER_BYTES -import traceback - -class connection_descriptor_struct(object): - connection_id = simple_property(id_="connectionTable::connection_id", - name="connection_id", - type_="string") - - stream_id = simple_property(id_="connectionTable::stream_id", - name="stream_id", - type_="string") - - port_name = simple_property(id_="connectionTable::port_name", - name="port_name", - type_="string") - - def __init__(self, connection_id="", stream_id="", port_name=""): - self.connection_id = connection_id - self.stream_id = stream_id - self.port_name = port_name - - def __str__(self): - """Return a string representation of this structure""" - d = {} - d["connection_id"] = self.connection_id - d["stream_id"] = self.stream_id - d["port_name"] = self.port_name - return str(d) - - def getId(self): - return "connectionTable::connection_descriptor" - - def isStruct(self): - return True - - def getMembers(self): - return [("connection_id",self.connection_id),("stream_id",self.stream_id),("port_name",self.port_name)] - - -class OutPort (BULKIO__POA.UsesPortStatisticsProvider ): - - class SriMapStruct: - def __init__( self, sri=None, connections=None, 
time=None): - self.sri=sri - self.connections = connections #set of connection ID strings that have received this SRI - self.time=time - - TRANSFER_TYPE='c' - def __init__(self, name, PortTypeClass, PortTransferType=TRANSFER_TYPE, logger=None, noData=None ): - self.name = name - self.logger = logger - self.PortType = PortTypeClass - self.PortTransferType=PortTransferType - self.outConnections = {} # key=connectionId, value=port - self.stats = OutStats(self.name, PortTransferType ) - self.port_lock = threading.Lock() - self.sriDict = {} # key=streamID value=SriMapStruct - self.filterTable = [] - if noData==None: - self.noData = [] - else: - self.noData = noData - - # Determine maximum transfer size in advance - self.byteSize = 1 - if self.PortTransferType: - self.byteSize = struct.calcsize(PortTransferType) - # Multiply by some number < 1 to leave some margin for the CORBA header - self.maxSamplesPerPush = int(MAX_TRANSFER_BYTES*.9)/self.byteSize - # Make sure maxSamplesPerPush is even so that complex data case is handled properly - if self.maxSamplesPerPush%2 != 0: - self.maxSamplesPerPush = self.maxSamplesPerPush - 1 - - if self.logger == None: - self.logger = logging.getLogger("redhawk.bulkio.outport."+name) - if self.logger: - self.logger.debug('bulkio::OutPort CTOR port:' + str(self.name)) - - - def connectPort(self, connection, connectionId): - - if self.logger: - self.logger.trace('bulkio::OutPort connectPort ENTER ') - - self.port_lock.acquire() - try: - try: - port = connection._narrow(self.PortType) - if port == None: - raise Port.InvalidPort(1, "Invalid Port for Connection ID:" + str(connectionId)) - self.outConnections[str(connectionId)] = port - - if self.logger: - self.logger.debug('bulkio::OutPort CONNECT PORT:' + str(self.name) + ' CONNECTION:' + str(connectionId)) - - except: - if self.logger: - self.logger.error('bulkio::OutPort CONNECT PORT:' + str(self.name) + ' PORT FAILED NARROW') - raise Port.InvalidPort(1, "Invalid Port for Connection ID:" + 
str(connectionId)) - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace('bulkio::OutPort connectPort EXIT ') - - def disconnectPort(self, connectionId): - if self.logger: - self.logger.trace('bulkio::OutPort disconnectPort ENTER ') - if not self.outConnections.has_key(connectionId): - if self.logger: - self.logger.debug("bulkio::OutPort disconnectPort() - connectionId " + str(connectionId) + " is not contained in list of outConnections") - return - self.port_lock.acquire() - try: - connId = str(connectionId) - portListed = False - for filt in self.filterTable: - if filt.port_name == self.name: - portList = True - break - for streamid in self.sriDict.keys(): - sid = str(streamid) - empty_timestamp = timestamp.notSet() - if portListed: - for filt in self.filterTable: - if self.name == filt.port_name and sid == filt.stream_id and connId == filt.connection_name: - try: - self.outConnections[connId].pushPacket(self.noData, empty_timestamp, True, sid) - except Exception, e: - if self.logger: - self.logger.error("PUSH-PACKET FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - else: - try: - self.outConnections[connId].pushPacket(self.noData, empty_timestamp, True, sid) - except Exception, e: - if self.logger: - self.logger.error("PUSH-PACKET FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - - - self.outConnections.pop(connId, None) - self.stats.remove(connectionId) - for key in self.sriDict.keys(): - # if connID exist in set, remove it, otherwise do nothing (that is what discard does) - self.sriDict[key].connections.discard(connId) - if self.logger: - self.logger.debug( "bulkio::OutPort DISCONNECT PORT:" + str(self.name) + " CONNECTION:" + str(connId)) - self.logger.trace( "bulkio::OutPort DISCONNECT PORT:" + str(self.name) + " updated sriDict" + str(self.sriDict)) - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace('bulkio::OutPort disconnectPort EXIT ') - - def 
enableStats(self, enabled): - self.stats.setEnabled(enabled) - - def setBitSize(self, bitSize): - self.stats.setBitSize(bitSize) - - - def reportConnectionErrors(self, cid): - retval=False - if ( self.stats.connectionErrors(cid, 1) < 11 ): retval=True - return retval - - def _get_connections(self): - currentConnections = [] - self.port_lock.acquire() - try: - for id_, port in self.outConnections.items(): - currentConnections.append(ExtendedCF.UsesConnection(id_, port)) - finally: - self.port_lock.release() - return currentConnections - - def _get_statistics(self): - self.port_lock.acquire() - try: - recStat = self.stats.retrieve() - finally: - self.port_lock.release() - return recStat - - def _get_state(self): - self.port_lock.acquire() - try: - numberOutgoingConnections = len(self.outConnections) - finally: - self.port_lock.release() - if numberOutgoingConnections == 0: - return BULKIO.IDLE - else: - return BULKIO.ACTIVE - - def _get_activeSRIs(self): - self.port_lock.acquire() - try: - sris = [] - for entry in self.sriDict: - sris.append(copy.deepcopy(self.sriDict[entry].sri)) - finally: - self.port_lock.release() - return sris - - def updateConnectionFilter(self, _filterTable): - self.port_lock.acquire() - try: - if _filterTable == None : - _filterTable = [] - self.filterTable = _filterTable - finally: - self.port_lock.release() - - def pushSRI(self, H): - if self.logger: - self.logger.trace('bulkio::OutPort pushSRI ENTER ') - self.port_lock.acquire() - try: - self.sriDict[H.streamID] = OutPort.SriMapStruct(sri=copy.deepcopy(H), connections=set()) - portListed = False - for connId, port in self.outConnections.items(): - for ftPtr in self.filterTable: - - # check if port was listed in connection filter table - if ftPtr.port_name == self.name: - portListed = True - - if (ftPtr.port_name == self.name) and (ftPtr.connection_id == connId) and (ftPtr.stream_id == H.streamID): - try: - if port != None: - port.pushSRI(H) - 
self.sriDict[H.streamID].connections.add(connId) - except Exception, e: - if self.reportConnectionErrors(connId) : - if self.logger: - self.logger.error("PUSH-SRI FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - - if not portListed: - for connId, port in self.outConnections.items(): - try: - if port != None: - port.pushSRI(H) - self.sriDict[H.streamID].connections.add(connId) - except Exception, e: - if self.reportConnectionErrors(connId) : - if self.logger: - self.logger.error("PUSH-SRI FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - - finally: - self.port_lock.release() - if self.logger: - self.logger.trace('bulkio::OutPort pushSRI EXIT ') - - def _pushOversizedPacket(self, data, T, EOS, streamID): - # If there is no need to break data into smaller packets, skip straight - # to the pushPacket call and return. - subsize = 0 - if self.sriDict.has_key(streamID): - subsize = self.sriDict[streamID].sri.subsize - if subsize != 0: - if self.maxSamplesPerPush%subsize != 0: - self.maxSamplesPerPush = int(MAX_TRANSFER_BYTES*.9)/self.byteSize - while (self.maxSamplesPerPush%subsize != 0): - self.maxSamplesPerPush -= self.maxSamplesPerPush%subsize - # Make sure maxSamplesPerPush is even so that complex data case is handled properly - if self.maxSamplesPerPush%2 != 0: - self.maxSamplesPerPush -= 1 - - if len(data) <= self.maxSamplesPerPush: - self._pushPacket(data, T, EOS, streamID); - return - - # Determine xdelta for this streamID to be used for time increment for subpackets - xdelta = 0.0 - if self.sriDict.has_key(streamID): - xdelta = self.sriDict[streamID].sri.xdelta - - # Intialize time for the first subpacket - packetTime = T - - # Push sub-packets maxSamplesPerPush at a time - for start in xrange(0, len(data), self.maxSamplesPerPush): - # The end index of the packet may exceed the length of the data; - # the Python slice operator will clamp it to the actual end - end = start + self.maxSamplesPerPush - - # Send 
end-of-stream as false for all sub-packets except for the - # last one (when the end of the sub-packet goes past the end of the - # input data), which gets the input EOS. - if end >= len(data): - packetEOS = EOS - else: - packetEOS = False - - # Push the current slice of the input data - if self.logger: - self.logger.trace("_pushOversizedPacket() calling pushPacket with pushSize " + str(len(data[start:end])) + " and packetTime twsec: " + str(packetTime.twsec) + " tfsec: " + str(packetTime.tfsec)) - self._pushPacket(data[start:end], packetTime, packetEOS, streamID); - data_xfer_len = len(data[start:end]) - if self.sriDict.has_key(streamID): - if self.sriDict[streamID].sri.mode == 1: - data_xfer_len = data_xfer_len / 2 - packetTime = timestamp.addSampleOffset(packetTime, data_xfer_len, xdelta) - - def _pushPacket(self, data, T, EOS, streamID): - - portListed = False - for connId, port in self.outConnections.items(): - for ftPtr in self.filterTable: - - if ftPtr.port_name == self.name : - portListed = True - - if (ftPtr.port_name == self.name) and (ftPtr.connection_id == connId) and (ftPtr.stream_id == streamID): - try: - if port != None: - if connId not in self.sriDict[streamID].connections: - port.pushSRI(self.sriDict[streamID].sri) - self.sriDict[streamID].connections.add(connId) - port.pushPacket(data, T, EOS, streamID) - self.stats.update(len(data), 0, EOS, streamID, connId) - except Exception, e: - if self.reportConnectionErrors(connId) : - if self.logger: - self.logger.error("PUSH-PACKET FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - - if not portListed: - for connId, port in self.outConnections.items(): - try: - if port != None: - if connId not in self.sriDict[streamID].connections: - port.pushSRI(self.sriDict[streamID].sri) - self.sriDict[streamID].connections.add(connId) - port.pushPacket(data, T, EOS, streamID) - self.stats.update(len(data), 0, EOS, streamID, connId) - except Exception, e: - if 
self.reportConnectionErrors(connId) : - if self.logger: - self.logger.error("PUSH-PACKET FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - if EOS==True: - if self.sriDict.has_key(streamID): - tmp = self.sriDict.pop(streamID) - - def pushPacket(self, data, T, EOS, streamID): - - if self.logger: - self.logger.trace('bulkio::OutPort pushPacket ENTER ') - - if not self.sriDict.has_key(streamID): - sri = BULKIO.StreamSRI(1, 0.0, 1.0, 1, 0, 0.0, 0.0, 0, 0, streamID, False, []) - self.pushSRI(sri) - - self.port_lock.acquire() - try: - self._pushOversizedPacket(data, T, EOS, streamID) - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace('bulkio::OutPort pushPacket EXIT ') - - -class OutCharPort(OutPort): - TRANSFER_TYPE = 'c' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataChar, OutCharPort.TRANSFER_TYPE , logger, noData='' ) - -class OutOctetPort(OutPort): - TRANSFER_TYPE = 'B' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataOctet, OutOctetPort.TRANSFER_TYPE , logger, noData='') - -class OutShortPort(OutPort): - TRANSFER_TYPE = 'h' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataShort, OutShortPort.TRANSFER_TYPE , logger ) - -class OutUShortPort(OutPort): - TRANSFER_TYPE = 'H' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataUshort, OutUShortPort.TRANSFER_TYPE , logger ) - -class OutLongPort(OutPort): - TRANSFER_TYPE = 'i' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataLong, OutLongPort.TRANSFER_TYPE , logger ) - -class OutULongPort(OutPort): - TRANSFER_TYPE = 'I' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataUlong, OutULongPort.TRANSFER_TYPE , logger ) - -class OutLongLongPort(OutPort): - TRANSFER_TYPE = 'q' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataLongLong, 
OutLongLongPort.TRANSFER_TYPE , logger ) - -class OutULongLongPort(OutPort): - TRANSFER_TYPE = 'Q' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataUlongLong, OutULongLongPort.TRANSFER_TYPE , logger ) - -class OutFloatPort(OutPort): - TRANSFER_TYPE = 'f' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataFloat, OutFloatPort.TRANSFER_TYPE , logger ) - -class OutDoublePort(OutPort): - TRANSFER_TYPE = 'd' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataDouble, OutDoublePort.TRANSFER_TYPE , logger ) - -class OutFilePort(OutPort): - TRANSFER_TYPE = 'c' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataFile, OutFilePort.TRANSFER_TYPE , logger, noData='' ) - - def pushPacket(self, URL, T, EOS, streamID): - - if self.logger: - self.logger.trace('bulkio::OutFilePort pushPacket ENTER ') - - if not self.sriDict.has_key(streamID): - sri = BULKIO.StreamSRI(1, 0.0, 1.0, 1, 0, 0.0, 0.0, 0, 0, streamID, False, []) - self.pushSRI(sri) - - self.port_lock.acquire() - - try: - portListed = False - for connId, port in self.outConnections.items(): - for ftPtr in self.filterTable: - if ftPtr.port_name == self.name : - portListed = True - if (ftPtr.port_name == self.name) and (ftPtr.connection_id == connId) and (ftPtr.stream_id == streamID): - try: - if port != None: - port.pushPacket(URL, T, EOS, streamID) - self.stats.update(1, 0, EOS, streamID, connId) - except Exception, e : - if self.reportConnectionErrors(connId) : - if self.logger: - self.logger.error("PUSH-PACKET (file port) FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - - if not portListed: - for connId, port in self.outConnections.items(): - try: - if port != None: - port.pushPacket(URL, T, EOS, streamID) - self.stats.update(1, 0, EOS, streamID, connId) - except Exception, e: - if self.reportConnectionErrors(connId) : - if self.logger : - 
self.logger.error("PUSH-PACKET (file port) FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - if EOS==True: - if self.sriDict.has_key(streamID): - tmp = self.sriDict.pop(streamID) - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace('bulkio::OutFilePort pushPacket EXIT ') - -class OutXMLPort(OutPort): - TRANSFER_TYPE = 'c' - def __init__(self, name, logger=None ): - OutPort.__init__(self, name, BULKIO.dataXML, OutXMLPort.TRANSFER_TYPE , logger, noData='' ) - - def pushPacket(self, xml_string, EOS, streamID): - - if self.logger: - self.logger.trace('bulkio::OutXMLPort pushPacket ENTER ') - - if not self.sriDict.has_key(streamID): - sri = BULKIO.StreamSRI(1, 0.0, 1.0, 1, 0, 0.0, 0.0, 0, 0, streamID, False, []) - self.pushSRI(sri) - - self.port_lock.acquire() - - try: - portListed = False - for connId, port in self.outConnections.items(): - for ftPtr in self.filterTable: - if ftPtr.port_name == self.name : - portList = True - if (ftPtr.port_name == self.name) and (ftPtr.connection_id == connId) and (ftPtr.stream_id == streamID): - try: - if port != None: - port.pushPacket(xml_string, EOS, streamID) - self.stats.update(len(xml_string), 0, EOS, streamID, connId) - except Exception: - if self.reportConnectionErrors(connId) : - if self.logger : - self.logger.error("PUSH-PACKET (xml port) FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - if not portListed: - for connId, port in self.outConnections.items(): - try: - if port != None: - port.pushPacket(xml_string, EOS, streamID) - self.stats.update(len(xml_string), 0, EOS, streamID, connId) - except Exception: - if self.reportConnectionErrors(connId) : - if self.logger : - self.logger.error("PUSH-PACKET (xml port) FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - if EOS==True: - if self.sriDict.has_key(streamID): - tmp = self.sriDict.pop(streamID) - finally: - self.port_lock.release() - - if self.logger: - 
self.logger.trace('bulkio::OutXMLPort pushPacket EXIT ') - - def disconnectPort(self, connectionId): - if self.logger: - self.logger.trace('bulkio::OutXMLPort disconnectPort ENTER ') - if not self.outConnections.has_key(connectionId): - if self.logger: - self.logger.warn("bulkio::OutXMLPort disconnectPort() - connectionId " + str(connectionId) + " is not contained in list of outConnections") - return - self.port_lock.acquire() - try: - connId = str(connectionId) - portListed = False - for filt in self.filterTable: - if filt.port_name == self.name: - portList = True - break - for streamid in self.sriDict.keys(): - sid = str(streamid) - if portListed: - for filt in self.filterTable: - if self.name == filt.port_name and sid == filt.stream_id and connId == filt.connection_name: - try: - self.outConnections[connId].pushPacket(self.noData, True, sid) - except Exception, e: - if self.logger: - self.logger.error("PUSH-PACKET FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - else: - try: - self.outConnections[connId].pushPacket(self.noData, True, sid) - except Exception, e: - if self.logger: - self.logger.error("PUSH-PACKET FAILED, PORT/CONNECTION: %s/%s , EXCEPTION: %s", self.name, connId, str(e)) - - self.outConnections.pop(connId, None) - for key,value in self.sriDict.items(): - # if connID exist in set, remove it, otherwise do nothing (that is what discard does) - self.sriDict[key].connections.discard(connId) - if self.logger: - self.logger.debug( "bulkio::OutXMLPort DISCONNECT PORT:" + str(self.name) + " CONNECTION:" + str(connId) ) - self.logger.trace( "bulkio::OutXMLPort DISCONNECT PORT:" + str(self.name) + " updated sriDict" + str(self.sriDict) ) - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace('bulkio::OutXMLPort disconnectPort EXIT ') - -class OutAttachablePort(OutPort): - class StreamAttachment: - def __init__(self, connectionId, attachId, inputPort, inStream=None): - self.connectionId=connectionId - 
self.attachId=attachId - self.inputPort=inputPort - self.stream=inStream - self.logger=None - - def setLogger(self, inLogger ): - self.logger= inLogger - - def setLogger(self, inLogger ): - self.logger= inLogger - - def detach(self): - p = None - if self.stream: - p = self.stream.getPort() - try: - self.inputPort.detach(self.attachId) - if p : p.updateStats(self.connectionId) - except Exception, e: - if p and p.reportConnectionErrors(self.connectionId) : - if self.logger: - self.logger.error("DETACH FAILURE, CONNECTION: %s , EXCEPTION: %s", self.connectionId, str(e)) - - class Stream: - def __init__(self, streamDef, name, streamId=None, streamAttachments=[], sri=None, time=None, port=None): - self.streamDef=streamDef - self.name = name - self.streamId=streamId - self.streamAttachments=streamAttachments[:] - self.sri=sri - self.time=time - self.port = port - self.logger=None - - def detachAll(self): - for att in list(self.streamAttachments): - att.detach() - self.streamAttachments.remove(att) - - def detachByConnectionId(self, connectionId): - for att in list(self.streamAttachments): - if att.connectionId == connectionId and att.inputPort and att.attachId: - att.detach() - self.streamAttachments.remove(att) - - def detachByAttachId(self, attachId): - for att in list(self.streamAttachments): - if att.attachId and att.inputPort and att.attachId == attachId: - att.detach() - self.streamAttachments.remove(att) - - def detachByAttachIdConnectionId(self, connectionId): - for att in list(self.streamAttachments): - if att.attachId and att.inputPort and att.attachId == attachId and att.connectionId == connectionId: - att.detach() - self.streamAttachments.remove(att) - - def createNewAttachment(self,connectionId, port): - newAttachment = OutAttachablePort.StreamAttachment(connectionId=connectionId, attachId=None, inputPort=port, inStream=self) - newAttachment.setLogger(self.logger) - try: - newAttachment.attachId = port.attach(self.streamDef, self.name) - 
self.streamAttachments.append(newAttachment) - except Exception, e: - if self.logger: - self.logger.trace( "ATTACH FAILURE, CONNECTION/STREAM %s/%s , EXCEPTION: %s" , connectionId, self.streamDef.id, str(e)) - raise - - def hasConnectionId(self, connectionId): - for att in list(self.streamAttachments): - if att.connectionId == connectionId: - return True - return False - - def getPort(self): - return self.port - - def setPort(self, inPort): - self.port = inPort - - def setLogger(self, inlogger): - self.logger=inlogger - for att in self.streamAttachments: - att.setLogger(inlogger) - - def getConnectionIds(self): - connectionIds = [] - for att in list(self.streamAttachments): - connectionIds.append(att.connectionId) - return connectionIds - - def updateAttachments(self, expectedAttachments): - expectedConnectionIds = [] - # Add new attachments that do not already exist - for att in expectedAttachments: - if not self.hasConnectionId(att.connectionId): - self.createNewAttachment(att.connectionId, att.inputPort) - expectedConnectionIds.append(att.connectionId) - - # Iterate through attachments and compare to expected connectionIds - connectionsToRemove = [] - for att in self.streamAttachments: - existingConnectionId = att.connectionId - detachConnection = True - for connId in expectedConnectionIds: - if existingConnectionId == connId: - detachConnection = False - break - if detachConnection == True: - # Store off and apply detach outside of this loop - # Removing now will mess up iterator - connectionsToRemove.append(existingConnectionId) - - for connId in connectionsToRemove: - self.detachByConnectionId(connId) - - def detachAll(self): - for att in list(self.streamAttachments): - att.detach() - self.streamAttachments.remove(att) - - - class StreamContainer: - def __init__(self, streams=None): - if streams == None: - self.streams = [] - else: - self.streams = streams - self.logger = None - - def printState(self, title): - if self.logger: - self.logger.debug(title) - for 
stream in self.streams: - self.printBlock("Stream", stream.streamId,0) - for att in stream.streamAttachments: - self.printBlock("Attachment",att.attachId,1) - if self.logger: - self.logger.debug("") - - def printBlock(self, title, id, indents): - indent = "" - for ii in range(indents): - indent += " " - line = "---------------" - - if self.logger: - self.logger.debug(indent + " |" + line) - self.logger.debug(indent + " |" + str(title)) - self.logger.debug(indent + " | '" + str(id) + "'") - self.logger.debug(indent + " |" + line) - - def hasStreams(self): - if len(self.streams) > 0: - return True - else: - return False - - def hasStreamId(self, streamId): - for stream in self.streams: - if stream.streamId == streamId: - return True - return False - - def getStreamIds(self): - streamIds = [] - for stream in self.streams: - streamIds.append(stream.streamId) - return streamIds - - def addConnectionToAllStreams(self, connectionId, port): - for stream in self.streams: - if not stream.hasConnectionId(connectionId): - stream.createNewAttachment(connectionId, port) - - def addConnectionToStream(self, connectionId, port, streamId): - for stream in self.streams: - if stream.streamId == streamId: - if not stream.hasConnectionId(connectionId): - stream.createNewAttachment(connectionId, port) - - def updateSRIForAllStreams(self, currentSRIs): - for stream in self.streams: - if currentSRIs.has_key(stream.streamId): - stream.sri = currentSRIs[stream.streamId].sri - stream.time = currentSRIs[stream.streamId].time - - def updateStreamSRI(self, streamId, sri): - for stream in self.streams: - if stream.streamId == streamId: - stream.sri = sri - - def updateStreamTime(self, streamId, time): - for stream in self.streams: - if stream.streamId == streamId: - stream.time = time - - def updateStreamSRIAndTime(self, streamId, sri, time): - for stream in self.streams: - if stream.streamId == streamId: - stream.sri = sri - stream.time = time - - def addStream(self, stream): - 
self.streams.append(stream) - - def removeStreamByStreamId(self, streamId): - for s in list(self.streams): - if s.streamId == streamId: - s.detachAll() - self.streams.remove(s) - - def findByStreamId(self, streamId): - for s in self.streams: - if s.streamId == streamId: - return s - return None - - def detachByAttachIdConnectionId(self, attachId=None, connectionId=None): - for stream in self.streams: - for atts in list(stream.streamAttachments): - if atts.connectionId == connectionId and atts.inputPort and atts.attachId and atts.attachId == attachId: - atts.detach() - stream.streamAttachments.remove(atts) - - def detachAllStreams(self): - for stream in self.streams: - for atts in list(stream.streamAttachments): - if atts.inputPort and atts.attachId: - atts.detach() - stream.streamAttachments.remove(atts) - - def detachByConnectionId(self, connectionId=None): - for stream in self.streams: - for atts in list(stream.streamAttachments): - if atts.connectionId == connectionId and atts.inputPort and atts.attachId: - atts.detach() - stream.streamAttachments.remove(atts) - - def detachByAttachId(self, attachId=None): - for stream in self.streams: - for atts in list(stream.streamAttachments): - if atts.attachId and atts.attachId == attachId and atts.inputPort: - atts.detach() - - def findStreamAttachmentsByAttachId(self, attachId): - attachList = [] - for stream in self.streams: - for att in stream.streamAttachments: - if att.attachId == attachId: - attachList.append(att) - return attachList - - def setLogger(self, inlogger): - self.logger = inlogger - for stream in self.streams: - stream.setLogger(inlogger) - - - TRANSFER_TYPE = 'c' - def __init__(self, name, max_attachments=None, logger=None, interface=None ): - OutPort.__init__(self, name, interface, OutAttachablePort.TRANSFER_TYPE , logger ) - self.max_attachments = max_attachments - self.streamContainer = OutAttachablePort.StreamContainer() - self.sriDict = {} # key=streamID value=SriMapStruct - self.filterTable = [] - 
if not interface: - if self.logger: - self.logger.error("OutAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49") - raise Port.InvalidPort(1, "OutAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49") - self.interface=interface # BULKIO port interface (valid options are BULKIO.dataSDDS or BULKIO.dataVITA49) - self.setLogger(self.logger) - - def setLogger(self, logger): - self.logger = logger; - self.streamContainer.setLogger(logger) - - def _get_state(self): - self.port_lock.acquire() - try: - numberAttachedStreams = len(self._attachedStreams.values()) - finally: - self.port_lock.release() - if numberAttachedStreams == 0: - return BULKIO.IDLE - else: - return BULKIO.ACTIVE - - def _get_attachedSRIs(self): - return self._get_activeSRIs() - - def attachedStreams(self): - streams = [] - for stream in self.streamContainer.streams: - streams.append(stream.streamDef) - return streams - - def attachmentIds(self): - ids = [] - for stream in self.streamContainer.streams: - for atts in stream.streamAttachments: - ids.append(atts.attachId) - return ids - - def attachmentIds(self,streamId): - ids = [] - for stream in self.streamContainer.streams: - if stream.streamId == streamId: - for atts in stream.streamAttachments: - ids.append(atts.attachId) - break - return ids - - def connectPort(self, connection, connectionId): - OutPort.connectPort( self, connection, connectionId ) - self.port_lock.acquire() - try: - try: - portListed = False - port = self.outConnections[str(connectionId)] - - if self.logger: - self.logger.trace("bulkio::OutAttachablePort connectPort(), Filter Table %s" % self.filterTable) - for ftPtr in self.filterTable: - # check if port was listed in connection filter table - if ftPtr.port_name == self.name: - portListed = True - - if (ftPtr.port_name == self.name) and (ftPtr.connection_id == connectionId): - desiredStreamId = ftPtr.stream_id - 
self.streamContainer.addConnectionToStream(connectionId,port,desiredStreamId) - - if not portListed: - self.streamContainer.addConnectionToAllStreams(connectionId,port) - - self.updateSRIForAllConnections() - except Exception, e: - if self.logger: - self.logger.error("CONNECTION FAILED, CONNECTION %s , EXCEPTION: %s" , connectionId, str(e)) - raise Port.InvalidPort(1, "Invalid Port for Connection ID:" + str(connectionId) ) - finally: - self.port_lock.release() - self.streamContainer.printState("After connectPort") - - def disconnectPort(self, connectionId): - self.port_lock.acquire() - try: - try: - self.streamContainer.detachByConnectionId(connectionId) - except Exception, e: - if self.logger: - self.logger.error("Unable to detach from stream before disconnecting port, Connection: %s , Exception: %s", str(connectionId), str(e)) - - if not self.outConnections.has_key(connectionId): - if self.logger: - self.logger.warn("bulkio::OutAttachablePort disconnectPort() - connectionId " + str(connectionId) + " is not contained in list of outConnections") - else: - self.outConnections.pop(connectionId, None) - for key in self.sriDict.keys(): - # if connID exist in set, remove it, otherwise do nothing (that is what discard does) - self.sriDict[key].connections.discard(connectionId) - if self.logger: - self.logger.debug( "bulkio::OutAttachablePort DISCONNECT PORT:" + str(self.name) + " CONNECTION:" + str(connectionId)) - self.logger.trace( "bulkio::OutAttachablePort DISCONNECT PORT:" + str(self.name) + " updated sriDict" + str(self.sriDict)) - finally: - self.port_lock.release() - self.streamContainer.printState("After disconnectPort") - - def detach(self, attachId=None, connectionId=None): - if self.logger: - self.logger.trace("bulkio::OutAttachablePort, DETACH ENTER ") - - self.port_lock.acquire() - try: - if connectionId: - for stream in self.streamContainer.streams: - stream.detachByConnectionId(connectionId) - - if attachId: - for stream in self.streamContainer.streams: - 
for atts in list(stream.streamAttachments): - if atts.attachId == attachId: - atts.detach(attachId) - stream.streamAttachments.pop(atts) - - if not attachId and not connectionId: - for stream in self.streamContainer.streams: - for atts in list(stream.streamAttachments): - atts.detach() - self.streamContainer = OutAttachablePort.StreamContainer() - self.streamContainer.setLogger(self.logger) - - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace("bulkio::OutAttachablePort, DETACH EXIT ") - - def attach(self, streamData, name): - # Eventually deprecate attach() method for output port - self.streamContainer.removeStreamByStreamId(streamData.id) - self.addStream(streamData) - return "" - - def updateStream(self, streamData): - self.port_lock.acquire() - streamId = streamData.id - if (not self.streamContainer.hasStreamId(streamId)): - return False; - - self.streamContainer.removeStreamByStreamId(streamId) - self.port_lock.release() - return self.addStream(streamData) - - - def addStream(self, streamData): - if self.logger: - self.logger.trace("bulkio::OutAttachablePort, addStream ENTER ") - - ids = [] - self.port_lock.acquire() - try: - if self.streamContainer.hasStreamId(streamData.id): - return False; - - stream = OutAttachablePort.Stream(streamDef=streamData, name="", streamId=streamData.id) - stream.setLogger(self.logger) - - - portListed = False - for connId, port in self.outConnections.items(): - for ftPtr in self.filterTable: - - # check if port was listed in connection filter table - if ftPtr.port_name == self.name: - portListed = True - - if (ftPtr.port_name == self.name) and (ftPtr.connection_id == connId) and (ftPtr.stream_id == stream.streamId): - try: - if self.sriDict.has_key(stream.streamId): - sriMap = self.sriDict[stream.streamId] - stream.sri = sriMap.sri - stream.time = sriMap.time - stream.createNewAttachment(connId,port) - except Exception, e: - if self.reportConnectionErrors(connId) : - if self.logger: - 
self.logger.error("ATTACH FAILED, PORT/CONNECTION %s/%s , EXCEPTION: %s" , str(self.name), str(connId), str(e)) - - if not portListed: - if self.sriDict.has_key(stream.streamId): - sriMap = self.sriDict[stream.streamId] - stream.sri = sriMap.sri - stream.time = sriMap.time - for connId,port in self.outConnections.items(): - try: - stream.createNewAttachment(connId,port) - except Exception, e: - if self.reportConnectionErrors(connId) : - if self.logger: - self.logger.error("ATTACH FAILED, PORT/CONNECTION %s/%s , EXCEPTION: %s" , str(self.name), str(connId), str(e)) - - self.streamContainer.addStream(stream) - - finally: - self.port_lock.release() - - for atts in stream.streamAttachments: - ids.append(atts.attachId) - if self.logger: - self.logger.debug("bulkio.OutAttachablePort addStream() PORT, ATTACH COMPLETED ID " + str(atts.attachId) + " CONNECTION ID:" + str(atts.connectionId)) - - if self.logger: - self.logger.trace("bulkio::OutAttachablePort, addStream EXIT ") - - self.streamContainer.printState("After addStream") - return True - - def removeStream(self, streamId): - self.streamContainer.removeStreamByStreamId(streamId) - self.streamContainer.printState("After removeStream") - - def getStreamDefinition(self, attachId): - streamDefList = [] - for stream in self.streamContainer.streams: - for atts in stream.streamAttachments: - if atts.attachId == attachId: - streamDefList.append(stream.streamDef) - return streamDefList - - def getUser(self, attachId): - nameList = [] - for stream in self.streamContainer.streams: - for atts in stream.streamAttachments: - if atts.attachId == attachId: - nameList.append(stream.name) - return nameList - - def pushSRI(self, H, T): - if self.logger: - self.logger.trace("bulkio::OutAttachablePort, PUSH-SRI ENTER ") - - self.port_lock.acquire() - try: - sri = copy.deepcopy(H) - sriTime = copy.deepcopy(T) - self.sriDict[H.streamID] = OutPort.SriMapStruct(sri=sri, connections=set(), time=sriTime) - portListed = False - 
self.streamContainer.updateStreamSRIAndTime(H.streamID, sri, sriTime) - - for connId, port in self.outConnections.items(): - for ftPtr in self.filterTable: - - # check if port was listed in connection filter table - if ftPtr.port_name == self.name: - portListed = True - - if (ftPtr.port_name == self.name) and (ftPtr.connection_id == connId) and (ftPtr.stream_id == H.streamID): - try: - if port != None: - port.pushSRI(H, T) - self.sriDict[H.streamID].connections.add(connId) - except Exception, e: - if self.reportConnectionErrors(connId) : - if self.logger: - self.logger.error("PUSH-SRI (attachable) FAILED, PORT/CONNECTION %s/%s , EXCEPTION: %s ", str(self.name), connId, str(e)) - - if not portListed: - for connId, port in self.outConnections.items(): - try: - if port != None: - port.pushSRI(H, T) - self.sriDict[H.streamID].connections.add(connId) - except Exception, e: - if self.reportConnectionErrors(connId) : - if self.logger: - self.logger.error("PUSH-SRI (attachable) FAILED, PORT/CONNECTION %s/%s , EXCEPTION: %s ", str(self.name), connId, str(e)) - finally: - self.port_lock.release() - - if self.logger: - self.logger.trace("bulkio::OutAttachablePort, PUSH-SRI EXIT ") - - def updateConnectionFilter(self, _filterTable): - self.port_lock.acquire() - try: - if _filterTable == None : - _filterTable = [] - self.filterTable = _filterTable - - #1. loop over filterTable - #A. ignore other port_names - #B. 
create mapping of streamid->connections(attachments) - - hasPortEntry = False - streamsFound = {} - streamAttachments = {} - # Populate streamsFound - knownStreamIds = self.streamContainer.getStreamIds() - for id in knownStreamIds: - streamsFound[id] = False - - # Iterate through each filterTable entry and capture state - for entry in self.filterTable: - if entry.port_name != self.name: - continue - - hasPortEntry = True - if entry.connection_id in self.outConnections.keys(): - connectedPort = self.outConnections.get(entry.connection_id) - else: - if self.logger: - self.logger.trace("bulkio::OutAttachablePort, updateConnectionFilter() Unable to find connected port with connectionId: " + entry.connection_id) - continue - - if self.streamContainer.hasStreamId(entry.stream_id): - streamsFound[entry.stream_id] = True - expectedAttachment = OutAttachablePort.StreamAttachment(entry.connection_id, None, connectedPort) - if not streamAttachments.has_key(entry.stream_id): - streamAttachments[entry.stream_id] = [] - streamAttachments[entry.stream_id].append(expectedAttachment) - - for streamId, expectedAttachements in streamAttachments.iteritems(): - foundStream = self.streamContainer.findByStreamId(streamId) - if foundStream: - foundStream.updateAttachments(expectedAttachements) - else: - if self.logger: - self.logger.warn("bulkio::OutAttachablePort, updateConnectionFilter() Unable to locate stream definition for streamId: " +streamId) - - - if hasPortEntry: - # If there's a valid port entry, we need to detach unmentioned streams - for streamId,found in streamsFound.items(): - if not found: - stream = self.streamContainer.findByStreamId(streamId) - if stream: - stream.detachAll() - else: - # No port entry == All connections on - for connId, port in self.outConnections.items(): - self.streamContainer.addConnectionToAllStreams(connId,port) - - self.updateSRIForAllConnections() - - finally: - self.port_lock.release() - self.streamContainer.printState("After updateFilterTable") 
- - def updateSRIForAllConnections(self): - # Iterate through stream objects in container - # Check if sriDict has stream entry - # Yes: Check that ALL connections are listed in sriDict entry - # Update currentSRI - # No: PushSRI on all attachment ports - # Update currentSRI - - # Iterate through all registered streams - for stream in self.streamContainer.streams: - streamConnIds = stream.getConnectionIds() - - # Check if sriDict has entry for StreamId - if self.sriDict.has_key(stream.streamId): - sriMap = self.sriDict[stream.streamId] - - # Check if all connections on the streams have pushed SRI - currentSRIConnIds = sriMap.connections - for connId in streamConnIds: - - # If not found, pushSRI and update currentSRIs container - if not connId in currentSRIConnIds: - - # Grab the port - if self.outConnections.has_key(connId): - connectedPort = self.outConnections[connId] - # Push sri and update sriMap - connectedPort.pushSRI(sriMap.sri, sriMap.time) - sriMap.connections.add(connId) - else: - if self.logger: - self.logger.debug("updateSRIForAllConnections() Unable to find connected port with connectionId: " + connId) - -class OutSDDSPort(OutAttachablePort): - def __init__(self, name, max_attachments=None, logger=None ): - OutAttachablePort.__init__(self, name, max_attachments, logger, interface=BULKIO.dataSDDS) - -class OutVITA49Port(OutAttachablePort): - def __init__(self, name, max_attachments=None, logger=None ): - OutAttachablePort.__init__(self, name, max_attachments, logger, interface=BULKIO.dataVITA49) diff --git a/bulkioInterfaces/libsrc/python/sri.py b/bulkioInterfaces/libsrc/python/sri.py deleted file mode 100644 index d43c18ec8..000000000 --- a/bulkioInterfaces/libsrc/python/sri.py +++ /dev/null @@ -1,69 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. 
-# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -try: - from bulkio.bulkioInterfaces import BULKIO, BULKIO__POA -except: - pass - -def compare(sriA, sriB): - """ - Will compare two BULKIO.StreamSRI objects and return True - if they are both equal, and false otherwise - """ - if not sriA or not sriB: - return False - - if sriA.hversion != sriB.hversion: - return False - if sriA.xstart != sriB.xstart: - return False - if sriA.xdelta != sriB.xdelta: - return False - if sriA.xunits != sriB.xunits: - return False - if sriA.subsize != sriB.subsize: - return False - if sriA.ystart != sriB.ystart: - return False - if sriA.ydelta != sriB.ydelta: - return False - if sriA.yunits != sriB.yunits: - return False - if sriA.mode != sriB.mode: - return False - if sriA.streamID != sriB.streamID: - return False - if sriA.blocking != sriB.blocking: - return False - if len(sriA.keywords) != len(sriB.keywords): - return False - for keyA, keyB in zip(sriA.keywords, sriB.keywords): - if keyA.value._t != keyB.value._t: - return False - if keyA.value._v != keyB.value._v: - return False - return True - -def create( sid='defStream', srate=1.0, xunits=1 ): - return BULKIO.StreamSRI(hversion=1, xstart=0.0, xdelta=1.0/srate, - xunits=xunits, subsize=0, ystart=0.0, ydelta=0.0, - yunits=0, mode=0, streamID=sid, blocking=False, keywords=[]) - diff --git 
a/bulkioInterfaces/libsrc/setup.py b/bulkioInterfaces/libsrc/setup.py index 706751d75..6de272e02 100644 --- a/bulkioInterfaces/libsrc/setup.py +++ b/bulkioInterfaces/libsrc/setup.py @@ -1,4 +1,4 @@ - +#!/usr/bin/env python # # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. @@ -18,23 +18,15 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # -#!/usr/bin/env python - -from distutils.core import setup -import os - -packages = ['bulkio' ] -# Allow the version to be replaced by the ant build script; but, if nothing -# replaces it (i.e. a developer does a command-line build), use 1.X.X -version='__VERSION__' -if version.find('__') == 0: - version = '2.0.9' +from setuptools import setup -setup( - name='bulkio', - version=version, - description='Python Classes for REDHAWK BULKIO Interfaces', - packages=packages, - package_dir = { 'bulkio' : 'python' } - ) +setup(name='bulkio', + version='2.2.1', + description='Python Classes for REDHAWK BULKIO Interfaces', + packages=['bulkio', + 'bulkio.sandbox'], + package_dir={ '' : 'python' }, + entry_points={'redhawk.sandbox.helpers':['StreamSink=bulkio.sandbox:StreamSink', + 'StreamSource=bulkio.sandbox:StreamSource']} + ) diff --git a/bulkioInterfaces/libsrc/testing/.gitignore b/bulkioInterfaces/libsrc/testing/.gitignore index 4dfd0928a..6e322c583 100644 --- a/bulkioInterfaces/libsrc/testing/.gitignore +++ b/bulkioInterfaces/libsrc/testing/.gitignore @@ -5,6 +5,8 @@ components/Oversized_framedata/java/bin components/TestLargePush/cpp/TestLargePush components/TestLargePush/java/bin/ components/multiout_attachable/cpp/multiout_attachable +components/snk_slow/cpp/snk_slow.so +components/src/cpp/src components/sri_changed_cpp/cpp/sri_changed_cpp devices/dev_snk/java/bin/ devices/dev_src/java/bin/ diff --git a/bulkioInterfaces/libsrc/testing/Makefile.am 
b/bulkioInterfaces/libsrc/testing/Makefile.am new file mode 100644 index 000000000..82268ee15 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/Makefile.am @@ -0,0 +1,34 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +SUBDIRS = components/CPP_Ports/cpp components/sri_changed_cpp/cpp +SUBDIRS += components/TestLargePush/cpp +SUBDIRS += components/multiout_attachable/cpp +SUBDIRS += components/Oversized_framedata/cpp +SUBDIRS += components/src/cpp +SUBDIRS += components/snk_slow/cpp +if HAVE_JAVASUPPORT +SUBDIRS += components/TestLargePush/java +SUBDIRS += components/Java_Ports/java +SUBDIRS += components/multiout_attachable/java +SUBDIRS += components/Oversized_framedata/java +SUBDIRS += devices/dev_src/java devices/dev_snk/java +endif +SUBDIRS += tests/cpp diff --git a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/CPP_Ports.spec b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/CPP_Ports.spec deleted file mode 100644 index 3d1cf56ad..000000000 --- a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/CPP_Ports.spec +++ /dev/null @@ -1,102 +0,0 @@ -# -# This file is protected by Copyright. 
Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: CPP_Ports -Summary: Component %{name} -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Components -Source: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-root - -Requires: redhawk >= 1.8 -BuildRequires: redhawk >= 1.8 -BuildRequires: autoconf automake libtool - -# Interface requirements -Requires: bulkioInterfaces -BuildRequires: bulkioInterfaces - -# C++ requirements -Requires: libomniORB4.1 -Requires: boost >= 1.41 -Requires: apache-log4cxx >= 0.10 -BuildRequires: boost-devel >= 1.41 -BuildRequires: libomniORB4.1-devel -BuildRequires: apache-log4cxx-devel >= 0.10 - -# Java requirements -Requires: java 
-BuildRequires: jdk - -# Python requirements -Requires: python omniORBpy -BuildRequires: libomniORBpy3-devel -BuildRequires: python-devel >= 2.3 - -%description -Component %{name} - -%prep -%setup - -%build -# Implementation cpp -pushd cpp -./reconf -%define _bindir %{_prefix}/dom/components/CPP_Ports/cpp -%configure -make -popd - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation cpp -pushd cpp -%define _bindir %{_prefix}/dom/components/CPP_Ports/cpp -make install DESTDIR=$RPM_BUILD_ROOT -popd - -%clean -rm -rf $RPM_BUILD_ROOT - -%files -%defattr(-,redhawk,redhawk) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/CPP_Ports.scd.xml -%{_prefix}/dom/components/%{name}/CPP_Ports.prf.xml -%{_prefix}/dom/components/%{name}/CPP_Ports.spd.xml -%{_prefix}/dom/components/%{name}/cpp -%{_prefix}/dom/components/%{name}/python -%{_prefix}/dom/components/%{name}/java -%{_prefix}/dom/components/%{name}/ - diff --git a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/build.sh b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/build.sh deleted file mode 100755 index 690b18e57..000000000 --- a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/build.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -if [ "$1" == "rpm" ]; then - # A very simplistic RPM build scenario - if [ -e CPP_Ports.spec ]; then - mydir=`dirname $0` - tmpdir=`mktemp -d` - cp -r ${mydir} ${tmpdir}/CPP_Ports-1.0.0 - tar czf ${tmpdir}/CPP_Ports-1.0.0.tar.gz --exclude=".svn" -C ${tmpdir} CPP_Ports-1.0.0 - rpmbuild -ta ${tmpdir}/CPP_Ports-1.0.0.tar.gz - rm -rf $tmpdir - else - echo "Missing RPM spec file in" `pwd` - exit 1 - fi -else - for impl in java2 ; do - pushd $impl &> /dev/null - if [ -e build.sh ]; then - ./build.sh $* - else - echo "No build.sh found for $impl" - fi - popd &> /dev/null - done -fi diff --git a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/.md5sums b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/.md5sums new file mode 100644 index 000000000..7b040aa22 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/.md5sums @@ -0,0 +1 @@ +e44b9b9cef8e221832f111c9a4b7c718 Makefile.am diff --git a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/CPP_Ports.cpp b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/CPP_Ports.cpp index 3d7210e37..724dd1926 100644 --- a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/CPP_Ports.cpp +++ b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/CPP_Ports.cpp @@ -193,7 +193,7 @@ void CPP_Ports_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::Sys ************************************************************************************************/ template < typename IPT, typename OPT > -void DoPort( IPT *iport, OPT *oport, const char *tname , rh_logger::LoggerPtr logger ) { +int DoPort( IPT *iport, OPT *oport, const char *tname , rh_logger::LoggerPtr logger ) { typename IPT::dataTransfer *p1 = iport->getPacket( bulkio::Const::NON_BLOCKING ); @@ -205,48 +205,18 @@ void DoPort( IPT *iport, OPT *oport, const char *tname 
, rh_logger::LoggerPtr l if ( p1->sriChanged ) { oport->pushSRI( p1->SRI ); } - //typename OPT::TransportSequence odata; - //std::copy( p1->dataBuffer.begin(), p1->dataBuffer.end(), std::back_inserter(odata) ); - //oport->pushPacket( odata, p1->T, p1->EOS, p1->streamID ); oport->pushPacket( p1->dataBuffer, p1->T, p1->EOS, p1->streamID ); delete p1; } + return 1; } else { RH_TRACE(logger, "CPP_PORTS::SVC_FUN TYPE:" << tname << " NO DATA...." ) ; + return 0; } } - -template <> -void DoPort< bulkio::InCharPort, bulkio::OutCharPort >( bulkio::InCharPort *iport, bulkio::OutCharPort *oport, const char *tname, rh_logger::LoggerPtr logger ) { - - bulkio::InCharPort::dataTransfer *p1 = iport->getPacket( bulkio::Const::NON_BLOCKING ); - - if ( p1 ) { - RH_DEBUG(logger, "CPP_PORTS::SVC_FUN TYPE:" << tname << " DATALEN:" << p1->dataBuffer.size() ); - if ( oport ) { - - if ( p1->sriChanged ) { - oport->pushSRI( p1->SRI ); - } - - std::vector< bulkio::OutCharPort::NativeType > d; - int dlen = p1->dataBuffer.size(); - d.resize( dlen ); - std::copy( &p1->dataBuffer[0], &(p1->dataBuffer[0])+dlen, &(d[0]) ); - oport->pushPacket( d, p1->T, p1->EOS, p1->streamID ); - delete p1; - } - } - else { - RH_TRACE(logger, "CPP_PORTS::SVC_FUN TYPE:" << tname << " NO DATA...." ) ; - } -} - - -template <> -void DoPort< bulkio::InFilePort, bulkio::OutFilePort >( bulkio::InFilePort *iport, bulkio::OutFilePort *oport, const char *tname, rh_logger::LoggerPtr logger ) { +int DoPort( bulkio::InFilePort *iport, bulkio::OutFilePort *oport, const char *tname, rh_logger::LoggerPtr logger ) { bulkio::InFilePort::dataTransfer *p1 = iport->getPacket( bulkio::Const::NON_BLOCKING ); @@ -266,15 +236,16 @@ void DoPort< bulkio::InFilePort, bulkio::OutFilePort >( bulkio::InFilePort *ipo oport->pushPacket( d.c_str(), p1->T, p1->EOS, p1->streamID ); delete p1; } + return 1; } else { RH_TRACE(logger, "CPP_PORTS::SVC_FUN TYPE:" << tname << " NO DATA...." 
) ; + return 0; } } -template <> -void DoPort< bulkio::InXMLPort, bulkio::OutXMLPort >( bulkio::InXMLPort *iport, bulkio::OutXMLPort *oport, const char *tname, rh_logger::LoggerPtr logger ) { +int DoPort( bulkio::InXMLPort *iport, bulkio::OutXMLPort *oport, const char *tname, rh_logger::LoggerPtr logger ) { bulkio::InXMLPort::dataTransfer *p1 = iport->getPacket( bulkio::Const::NON_BLOCKING ); @@ -294,9 +265,11 @@ void DoPort< bulkio::InXMLPort, bulkio::OutXMLPort >( bulkio::InXMLPort *iport, oport->pushPacket( d.c_str(), p1->EOS, p1->streamID ); delete p1; } + return 1; } else { RH_TRACE(logger, "CPP_PORTS::SVC_FUN TYPE:" << tname << " NO DATA...." ) ; + return 0; } } @@ -304,27 +277,31 @@ void DoPort< bulkio::InXMLPort, bulkio::OutXMLPort >( bulkio::InXMLPort *iport, int CPP_Ports_i::serviceFunction() { LOG_DEBUG(CPP_Ports_i, "serviceFunction() example log message"); - - DoPort( dataFloatIn, dataFloatOut, "FLOAT", CPP_Ports_i::__logger); - DoPort( dataDoubleIn, dataDoubleOut, "DOUBLE", CPP_Ports_i::__logger); - DoPort( dataCharIn, dataCharOut, "CHAR", CPP_Ports_i::__logger); - DoPort( dataOctetIn, dataOctetOut, "OCTET", CPP_Ports_i::__logger); - DoPort( dataShortIn, dataShortOut, "SHORT", CPP_Ports_i::__logger); - DoPort( dataUShortIn, dataUShortOut, "USHORT", CPP_Ports_i::__logger); - DoPort( dataLongIn, dataLongOut, "LONG", CPP_Ports_i::__logger); - DoPort( dataULongIn, dataULongOut, "ULONG", CPP_Ports_i::__logger); + int serviced = 0; + serviced += DoPort( dataFloatIn, dataFloatOut, "FLOAT", CPP_Ports_i::__logger); + serviced += DoPort( dataDoubleIn, dataDoubleOut, "DOUBLE", CPP_Ports_i::__logger); + serviced += DoPort( dataCharIn, dataCharOut, "CHAR", CPP_Ports_i::__logger); + serviced += DoPort( dataOctetIn, dataOctetOut, "OCTET", CPP_Ports_i::__logger); + serviced += DoPort( dataShortIn, dataShortOut, "SHORT", CPP_Ports_i::__logger); + serviced += DoPort( dataUShortIn, dataUShortOut, "USHORT", CPP_Ports_i::__logger); - DoPort( dataLongLongIn, 
dataLongLongOut, "LONGLONG", CPP_Ports_i::__logger); - DoPort( dataULongLongIn, dataULongLongOut, "ULONGLONG", CPP_Ports_i::__logger); + serviced += DoPort( dataLongIn, dataLongOut, "LONG", CPP_Ports_i::__logger); + serviced += DoPort( dataULongIn, dataULongOut, "ULONG", CPP_Ports_i::__logger); + serviced += DoPort( dataLongLongIn, dataLongLongOut, "LONGLONG", CPP_Ports_i::__logger); + serviced += DoPort( dataULongLongIn, dataULongLongOut, "ULONGLONG", CPP_Ports_i::__logger); - DoPort( dataFileIn, dataFileOut, "URL", CPP_Ports_i::__logger); - DoPort( dataXMLIn, dataXMLOut, "XML", CPP_Ports_i::__logger); + + serviced += DoPort( dataFileIn, dataFileOut, "URL", CPP_Ports_i::__logger); + serviced += DoPort( dataXMLIn, dataXMLOut, "XML", CPP_Ports_i::__logger); LOG_DEBUG(CPP_Ports_i, "CPP_Ports:SVC_FUNC END" ); boost::this_thread::sleep( boost::posix_time::milliseconds(2)); - return NOOP; - + if (serviced) { + return NORMAL; + } else { + return NOOP; + } } diff --git a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/Makefile.am b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/Makefile.am index c5048173e..1908f43e9 100644 --- a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/Makefile.am @@ -18,17 +18,16 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# ossieName = CPP_Ports -bindir = $(prefix)/dom/components/CPP_Ports/cpp/ +bindir = $(prefix)/dom/components/CPP_Ports/cpp bin_PROGRAMS = CPP_Ports -xmldir = $(prefix)/dom/components/CPP_Ports/ -dist_xml_DATA = ../CPP_Ports.prf.xml ../CPP_Ports.scd.xml ../CPP_Ports.spd.xml - -# this is use to build against local bulkio interface and library and not installed version -bulkio_top=../../../../../ -bulkio_libsrc_top=$(bulkio_top)/libsrc +xmldir = $(prefix)/dom/components/CPP_Ports +dist_xml_DATA = ../CPP_Ports.scd.xml ../CPP_Ports.prf.xml ../CPP_Ports.spd.xml +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects distclean-local: + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 @@ -41,12 +40,13 @@ distclean-local: rm -f missing rm -rf .deps + # Sources, libraries and library directories are auto-included from a file # generated by the REDHAWK IDE. You can remove/modify the following lines if # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide CPP_Ports_SOURCES = $(redhawk_SOURCES_auto) -##CPP_Ports_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) -I$(bulkio_libsrc_top)/cpp -I$(bulkio_top)/src/cpp -I$(bulkio_top)/src/cpp/ossie $(BOOST_CPPFLAGS) $(RH_DEPS_CFLAGS) $(redhawk_INCLUDES_auto) -CPP_Ports_CXXFLAGS = -Wall -I$(bulkio_libsrc_top)/cpp -I$(bulkio_top)/src/cpp -I$(bulkio_top)/src/cpp/ossie $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(RH_DEPS_CFLAGS) $(redhawk_INCLUDES_auto) -CPP_Ports_LDADD = -L$(bulkio_libsrc_top)/.libs -L$(bulkio_top)/.libs $(BIO_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) -CPP_Ports_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) +CPP_Ports_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +CPP_Ports_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +CPP_Ports_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) + diff --git a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/build.sh b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/build.sh deleted file mode 100755 index 50619f198..000000000 --- a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/build.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. 
-# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -configure='configure' -makefile_in='Makefile.in' -config_ac='configure.ac' -make_am='Makefile.am' -makefile='Makefile' - -if [ "$1" == 'clean' ]; then - make clean -else - # Checks if build is newer than makefile (based on modification time) - if [[ ! -e $configure || ! -e $makefile_in || $config_ac -nt $makefile || $make_am -nt $makefile ]]; then - ./reconf - ./configure - fi - make - exit 0 -fi diff --git a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/configure.ac b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/configure.ac deleted file mode 100644 index db7b4c73a..000000000 --- a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/configure.ac +++ /dev/null @@ -1,44 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. 
If not, see http://www.gnu.org/licenses/. - */ -AC_INIT(CPP_Ports, 1.0.0) -AM_INIT_AUTOMAKE(nostdinc) - -AC_PROG_CC -AC_PROG_CXX -AC_PROG_INSTALL - -AC_CORBA_ORB -OSSIE_CHECK_OSSIE -OSSIE_SDRROOT_AS_PREFIX - -# Dependencies -export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig" -PKG_CHECK_MODULES([PROJECTDEPS], [ossie >= 1.8 omniORB4 >= 4.0.0]) -OSSIE_ENABLE_LOG4CXX -AX_BOOST_BASE([1.41]) -AX_BOOST_THREAD -AX_BOOST_SYSTEM -#CHECK_VECTOR_IMPL - -export PKG_CONFIG_PATH="../../../..:../../..":$PKG_CONFIG_PATH -PKG_CHECK_MODULES([BIO], [bulkio >= 1.10]) - -AC_CONFIG_FILES(Makefile) -AC_OUTPUT diff --git a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/reconf b/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/reconf deleted file mode 100755 index c17e1c744..000000000 --- a/bulkioInterfaces/libsrc/testing/components/CPP_Ports/cpp/reconf +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# - -rm -f config.cache - -# Setup the libtool stuff -if [ -e /usr/local/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/local/share/aclocal/libtool.m4 aclocal.d/acinclude.m4 -elif [ -e /usr/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/share/aclocal/libtool.m4 acinclude.m4 -fi -libtoolize --force --automake - -# Search in expected locations for the OSSIE acincludes -if [ -n ${OSSIEHOME} ] && [ -d ${OSSIEHOME}/share/aclocal/ossie ]; then - OSSIE_AC_INCLUDE=${OSSIEHOME}/share/aclocal/ossie -else - echo "Error: Cannot find the OSSIE aclocal files. This is not expected!" -fi - -if [ -n ${OSSIE_AC_INCLUDE} ]; then - aclocal -I ${OSSIE_AC_INCLUDE} -else - aclocal -fi - -autoconf -automake --foreign --add-missing \ No newline at end of file diff --git a/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/.md5sums b/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/.md5sums new file mode 100644 index 000000000..f05ec4e24 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/.md5sums @@ -0,0 +1 @@ +79c5784ed8f81f67fa0a0074dd737db0 Makefile.am diff --git a/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/Makefile.am b/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/Makefile.am index 3b5b4f2be..aebe9d40e 100644 --- a/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/Makefile.am @@ -17,17 +17,22 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. 
# +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + +Java_Ports_jar_CLASSPATH = $(SOFTPKG_CLASSPATH):$(REDHAWK_CLASSPATH):$(BULKIO_CLASSPATH) Java_Ports.jar$(EXEEXT): $(Java_Ports_jar_SOURCES) - @mkdir -p bin - $(JAVAC) -cp $(OSSIE_HOME)/lib/CFInterfaces.jar:$(OSSIE_HOME)/lib/log4j-1.2.15.jar:$(OSSIE_HOME)/lib/ossie.jar:$(bulkio_libsrc_top)/bulkio.jar:$(bulkio_top)/BULKIOInterfaces.jar -d bin $(Java_Ports_jar_SOURCES) + mkdir -p bin + $(JAVAC) -cp $(Java_Ports_jar_CLASSPATH) -g -d bin $(Java_Ports_jar_SOURCES) $(JAR) cf ./Java_Ports.jar -C bin . - + $(JAR) uf ./Java_Ports.jar -C src . clean-local: rm -rf bin distclean-local: + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 @@ -45,14 +50,8 @@ bindir = $(prefix)/dom/components/Java_Ports/java/ bin_PROGRAMS = Java_Ports.jar Java_Ports_jar_SOURCES := $(shell find ./src -name "*.java") -## -## This is used to build against local bulkio interface library and not installed version -## -bulkio_top=../../../../.. 
-bulkio_libsrc_top=$(bulkio_top)/libsrc - xmldir = $(prefix)/dom/components/Java_Ports/ -dist_xml_DATA = ../Java_Ports.prf.xml ../Java_Ports.scd.xml ../Java_Ports.spd.xml +dist_xml_DATA = ../Java_Ports.scd.xml ../Java_Ports.prf.xml ../Java_Ports.spd.xml domdir = $(prefix)/dom/components/Java_Ports/java/ dist_dom_SCRIPTS = startJava.sh diff --git a/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/src/Java_Ports/java/Java_Ports.java b/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/src/Java_Ports/java/Java_Ports.java index 51ac98d37..325c2eaef 100644 --- a/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/src/Java_Ports/java/Java_Ports.java +++ b/bulkioInterfaces/libsrc/testing/components/Java_Ports/java/src/Java_Ports/java/Java_Ports.java @@ -321,22 +321,23 @@ public void run() //begin-user-code // Process data here try { - - SF(port_dataFloatIn,port_dataFloatOut, "FLOAT" ); - SF(port_dataDoubleIn,port_dataDoubleOut, "DOUBLE"); - SF(port_dataCharIn,port_dataCharOut, "CHAR"); - SF(port_dataOctetIn,port_dataOctetOut, "OCTET"); - SF(port_dataShortIn,port_dataShortOut, "SHORT"); - SF(port_dataUShortIn,port_dataUShortOut, "USHORT"); - SF(port_dataLongIn,port_dataLongOut, "LONG"); - SF(port_dataULongIn,port_dataULongOut, "ULONG"); - SF(port_dataLongLongIn,port_dataLongLongOut, "LONGLONG"); - SF(port_dataULongLongIn,port_dataULongLongOut, "ULONGLONG"); - SF(port_dataFileIn,port_dataFileOut, "FILE"); - SF(port_dataXMLIn,port_dataXMLOut, "XML"); - - logger.debug("run() example log message"); - Thread.sleep(1000); + int serviced = 0; + serviced += SF(port_dataFloatIn,port_dataFloatOut, "FLOAT" ); + serviced += SF(port_dataDoubleIn,port_dataDoubleOut, "DOUBLE"); + serviced += SF(port_dataCharIn,port_dataCharOut, "CHAR"); + serviced += SF(port_dataOctetIn,port_dataOctetOut, "OCTET"); + serviced += SF(port_dataShortIn,port_dataShortOut, "SHORT"); + serviced += SF(port_dataUShortIn,port_dataUShortOut, "USHORT"); + serviced += 
SF(port_dataLongIn,port_dataLongOut, "LONG"); + serviced += SF(port_dataULongIn,port_dataULongOut, "ULONG"); + serviced += SF(port_dataLongLongIn,port_dataLongLongOut, "LONGLONG"); + serviced += SF(port_dataULongLongIn,port_dataULongLongOut, "ULONGLONG"); + serviced += SF(port_dataFileIn,port_dataFileOut, "FILE"); + serviced += SF(port_dataXMLIn,port_dataXMLOut, "XML"); + + if (serviced == 0) { + Thread.sleep(1000); + } } catch (InterruptedException e) { break; } @@ -349,7 +350,7 @@ public void run() } - void SF( bulkio.InFloatPort inPort, bulkio.OutFloatPort outPort, String portType ) { + int SF( bulkio.InFloatPort inPort, bulkio.OutFloatPort outPort, String portType ) { bulkio.InFloatPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); @@ -361,10 +362,12 @@ void SF( bulkio.InFloatPort inPort, bulkio.OutFloatPort outPort, String portType logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); + return 1; } + return 0; } - void SF( bulkio.InDoublePort inPort, bulkio.OutDoublePort outPort, String portType ) { + int SF( bulkio.InDoublePort inPort, bulkio.OutDoublePort outPort, String portType ) { bulkio.InDoublePort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); @@ -376,13 +379,14 @@ void SF( bulkio.InDoublePort inPort, bulkio.OutDoublePort outPort, String portTy logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); + return 1; } - + return 0; } - public void SF( bulkio.InInt8Port inPort, bulkio.OutInt8Port outPort, String portType ) { + public int SF( bulkio.InCharPort inPort, bulkio.OutCharPort outPort, String portType ) { - bulkio.InInt8Port.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); + bulkio.InCharPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); if ( pkt != null ) { if (pkt.sriChanged ) { @@ -392,13 +396,14 @@ public void SF( 
bulkio.InInt8Port inPort, bulkio.OutInt8Port outPort, String por logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InInt16Port inPort, bulkio.OutInt16Port outPort, String portType ) { + public int SF( bulkio.InShortPort inPort, bulkio.OutShortPort outPort, String portType ) { - bulkio.InInt16Port.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); + bulkio.InShortPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); if ( pkt != null ) { if (pkt.sriChanged ) { @@ -408,14 +413,15 @@ public void SF( bulkio.InInt16Port inPort, bulkio.OutInt16Port outPort, String p logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InInt32Port inPort, bulkio.OutInt32Port outPort, String portType ) { + public int SF( bulkio.InLongPort inPort, bulkio.OutLongPort outPort, String portType ) { - bulkio.InInt32Port.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); + bulkio.InLongPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); if ( pkt != null ) { if (pkt.sriChanged ) { @@ -425,13 +431,14 @@ public void SF( bulkio.InInt32Port inPort, bulkio.OutInt32Port outPort, String p logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InInt64Port inPort, bulkio.OutInt64Port outPort, String portType ) { + public int SF( bulkio.InLongLongPort inPort, bulkio.OutLongLongPort outPort, String portType ) { - bulkio.InInt64Port.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); + bulkio.InLongLongPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); if ( pkt != null ) { if (pkt.sriChanged ) { 
@@ -441,15 +448,16 @@ public void SF( bulkio.InInt64Port inPort, bulkio.OutInt64Port outPort, String p logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InUInt8Port inPort, bulkio.OutUInt8Port outPort, String portType ) { + public int SF( bulkio.InOctetPort inPort, bulkio.OutOctetPort outPort, String portType ) { - bulkio.InUInt8Port.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); + bulkio.InOctetPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); if ( pkt != null ) { if (pkt.sriChanged ) { @@ -459,13 +467,14 @@ public void SF( bulkio.InUInt8Port inPort, bulkio.OutUInt8Port outPort, String p logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InUInt16Port inPort, bulkio.OutUInt16Port outPort, String portType ) { + public int SF( bulkio.InUShortPort inPort, bulkio.OutUShortPort outPort, String portType ) { - bulkio.InUInt16Port.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); + bulkio.InUShortPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); if ( pkt != null ) { if (pkt.sriChanged ) { @@ -475,14 +484,15 @@ public void SF( bulkio.InUInt16Port inPort, bulkio.OutUInt16Port outPort, String logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InUInt32Port inPort, bulkio.OutUInt32Port outPort, String portType ) { + public int SF( bulkio.InULongPort inPort, bulkio.OutULongPort outPort, String portType ) { - bulkio.InUInt32Port.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); + bulkio.InULongPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); if 
( pkt != null ) { if (pkt.sriChanged ) { @@ -492,13 +502,14 @@ public void SF( bulkio.InUInt32Port inPort, bulkio.OutUInt32Port outPort, String logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InUInt64Port inPort, bulkio.OutUInt64Port outPort, String portType ) { + public int SF( bulkio.InULongLongPort inPort, bulkio.OutULongLongPort outPort, String portType ) { - bulkio.InUInt64Port.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); + bulkio.InULongLongPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); if ( pkt != null ) { if (pkt.sriChanged ) { @@ -508,14 +519,15 @@ public void SF( bulkio.InUInt64Port inPort, bulkio.OutUInt64Port outPort, String logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InFilePort inPort, bulkio.OutFilePort outPort, String portType ) { + public int SF( bulkio.InFilePort inPort, bulkio.OutFilePort outPort, String portType ) { bulkio.InFilePort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); @@ -527,11 +539,12 @@ public void SF( bulkio.InFilePort inPort, bulkio.OutFilePort outPort, String por logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length() ); outPort.pushPacket( pkt.dataBuffer, pkt.T, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InXMLPort inPort, bulkio.OutXMLPort outPort, String portType ) { + public int SF( bulkio.InXMLPort inPort, bulkio.OutXMLPort outPort, String portType ) { bulkio.InXMLPort.Packet pkt = inPort.getPacket( bulkio.Const.NON_BLOCKING ); @@ -543,12 +556,13 @@ public void SF( bulkio.InXMLPort inPort, bulkio.OutXMLPort outPort, String portT logger.debug( "SF TYPE:" + portType + " DATALEN:" + pkt.dataBuffer.length() 
); outPort.pushPacket( pkt.dataBuffer, pkt.EOS, pkt.streamID ); - } - + return 1; + } + return 0; } - public void SF( bulkio.InSDDSPort inPort, bulkio.OutSDDSPort outPort, String portType ) { - + public int SF( bulkio.InSDDSPort inPort, bulkio.OutSDDSPort outPort, String portType ) { + return 0; } diff --git a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/.md5sums b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/.md5sums new file mode 100644 index 000000000..dd3adbd1c --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/.md5sums @@ -0,0 +1 @@ +ba80f42e76c8861c9423b8ed602dce02 Makefile.am diff --git a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/Makefile.am b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/Makefile.am index 46b484dc9..fb9724690 100644 --- a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/Makefile.am @@ -18,15 +18,16 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # ossieName = Oversized_framedata -bindir = $(prefix)/dom/components/Oversized_framedata/cpp/ +bindir = $(prefix)/dom/components/Oversized_framedata/cpp bin_PROGRAMS = Oversized_framedata -xmldir = $(prefix)/dom/components/Oversized_framedata/ +xmldir = $(prefix)/dom/components/Oversized_framedata dist_xml_DATA = ../Oversized_framedata.scd.xml ../Oversized_framedata.prf.xml ../Oversized_framedata.spd.xml ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie - +AUTOMAKE_OPTIONS = subdir-objects distclean-local: + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 @@ -45,7 +46,7 @@ distclean-local: # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide Oversized_framedata_SOURCES = $(redhawk_SOURCES_auto) -Oversized_framedata_LDADD = $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) -Oversized_framedata_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +Oversized_framedata_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +Oversized_framedata_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) Oversized_framedata_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/configure.ac b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/configure.ac deleted file mode 100644 index 3c45e494d..000000000 --- a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/configure.ac +++ /dev/null @@ -1,45 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -AC_INIT(Oversized_framedata, 1.0.0) -AM_INIT_AUTOMAKE([nostdinc foreign]) - -AC_PROG_CC -AC_PROG_CXX -AC_PROG_INSTALL - -AC_CORBA_ORB -OSSIE_CHECK_OSSIE -OSSIE_SDRROOT_AS_PREFIX - -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) - -# Dependencies -export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig" -PKG_CHECK_MODULES([PROJECTDEPS], [ossie >= 1.10 omniORB4 >= 4.1.0 ]) -PKG_CHECK_MODULES([INTERFACEDEPS], [bulkio >= 1.10]) -OSSIE_ENABLE_LOG4CXX -AX_BOOST_BASE([1.41]) -AX_BOOST_SYSTEM -AX_BOOST_THREAD -AX_BOOST_REGEX - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT - diff --git a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/reconf b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/reconf deleted file mode 100755 index ece304047..000000000 --- a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/reconf +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# - -rm -f config.cache -autoreconf -i diff --git a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/java/.md5sums b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/java/.md5sums new file mode 100644 index 000000000..536519357 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/java/.md5sums @@ -0,0 +1 @@ +59a728f0968103426e50f8a81120ee62 Makefile.am diff --git a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/java/Makefile.am b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/java/Makefile.am index 77eabbec4..c84a476b2 100644 --- a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/java/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/java/Makefile.am @@ -17,17 +17,22 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # -Oversized_framedata_jar_CLASSPATH = $(CLASSPATH_SOFTPKG_DEP)$(OSSIE_HOME)/lib/CFInterfaces.jar:$(OSSIE_HOME)/lib/log4j-1.2.15.jar:$(OSSIE_HOME)/lib/ossie.jar:$(OSSIE_HOME)/lib/bulkio.jar:$(OSSIE_HOME)/lib/BULKIOInterfaces.jar +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + +Oversized_framedata_jar_CLASSPATH = $(SOFTPKG_CLASSPATH):$(REDHAWK_CLASSPATH):$(BULKIO_CLASSPATH) Oversized_framedata.jar$(EXEEXT): $(Oversized_framedata_jar_SOURCES) mkdir -p bin - $(JAVAC) -cp $(Oversized_framedata_jar_CLASSPATH) -d bin $(Oversized_framedata_jar_SOURCES) + $(JAVAC) -cp $(Oversized_framedata_jar_CLASSPATH) -g -d bin $(Oversized_framedata_jar_SOURCES) $(JAR) cf ./Oversized_framedata.jar -C bin . + $(JAR) uf ./Oversized_framedata.jar -C src . 
clean-local: rm -rf bin distclean-local: + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 diff --git a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/tests/test_Oversized_framedata.py b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/tests/test_Oversized_framedata.py index 949880ad6..7cc20f82e 100755 --- a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/tests/test_Oversized_framedata.py +++ b/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/tests/test_Oversized_framedata.py @@ -30,9 +30,7 @@ class ResourceTests(ossie.utils.testing.ScaComponentTestCase): def testConsistentSize(self): ####################################################################### # Launch the resource with the default execparams - execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False) - execparams = dict([(x.id, any.from_any(x.value)) for x in execparams]) - self.launch(execparams) + self.launch() snk=sb.DataSink() snk.start() @@ -42,7 +40,7 @@ def testConsistentSize(self): # Make sure start and stop can be called without throwing exceptions self.comp.start() - (retval, timestamps) = snk._sink.retrieveData(20000000) + (retval, timestamps) = snk._sink.retrieveData(19999744) self.assertEquals(timestamps[1][0]%1024,0) ####################################################################### diff --git a/bulkioInterfaces/libsrc/testing/components/Python_Ports/python/Python_Ports.py b/bulkioInterfaces/libsrc/testing/components/Python_Ports/python/Python_Ports.py index 8c8f1f0e6..74aee5f44 100755 --- a/bulkioInterfaces/libsrc/testing/components/Python_Ports/python/Python_Ports.py +++ b/bulkioInterfaces/libsrc/testing/components/Python_Ports/python/Python_Ports.py @@ -60,6 +60,10 @@ def DoPort(self, inPort, outPort, pname ): self._log.debug( "SF TYPE:" + pname + " DATALEN:" + str(len(p1[inPort.DATA_BUFFER])) ) #print "SF TYPE:" + pname + " DATALEN:" + 
str(len(p1[inPort.DATA_BUFFER])) outPort.pushPacket( p1[0], p1[1], p1[2], p1[3] ) + return 1 + + # No packets processed + return 0 def process(self): """ @@ -129,30 +133,27 @@ def process(self): return NORMAL """ - - # TODO fill in your code here - self._log.debug("process() example log message") - #LOG_DEBUG(Python_Ports_i, "serviceFunction() example log message"); - - self.DoPort( self.port_dataFloatIn, self.port_dataFloatOut, "FLOAT"); - self.DoPort( self.port_dataDoubleIn, self.port_dataDoubleOut, "DOUBLE"); - self.DoPort( self.port_dataCharIn, self.port_dataCharOut, "CHAR"); - self.DoPort( self.port_dataOctetIn, self.port_dataOctetOut, "OCTET"); - self.DoPort( self.port_dataShortIn, self.port_dataShortOut, "SHORT"); - self.DoPort( self.port_dataUShortIn, self.port_dataUShortOut, "USHORT"); - self.DoPort( self.port_dataLongIn, self.port_dataLongOut, "LONG"); - self.DoPort( self.port_dataULongIn, self.port_dataULongOut, "ULONG"); - self.DoPort( self.port_dataLongLongIn, self.port_dataLongLongOut, "LONGLONG"); - self.DoPort( self.port_dataULongLongIn, self.port_dataULongLongOut, "ULONGLONG"); - self.DoPort( self.port_dataFileIn, self.port_dataFileOut, "URL"); - self.DoPort( self.port_dataXMLIn, self.port_dataXMLOut, "XML"); + serviced = 0 + serviced += self.DoPort( self.port_dataFloatIn, self.port_dataFloatOut, "FLOAT"); + serviced += self.DoPort( self.port_dataDoubleIn, self.port_dataDoubleOut, "DOUBLE"); + serviced += self.DoPort( self.port_dataCharIn, self.port_dataCharOut, "CHAR"); + serviced += self.DoPort( self.port_dataOctetIn, self.port_dataOctetOut, "OCTET"); + serviced += self.DoPort( self.port_dataShortIn, self.port_dataShortOut, "SHORT"); + serviced += self.DoPort( self.port_dataUShortIn, self.port_dataUShortOut, "USHORT"); + serviced += self.DoPort( self.port_dataLongIn, self.port_dataLongOut, "LONG"); + serviced += self.DoPort( self.port_dataULongIn, self.port_dataULongOut, "ULONG"); + serviced += self.DoPort( self.port_dataLongLongIn, 
self.port_dataLongLongOut, "LONGLONG"); + serviced += self.DoPort( self.port_dataULongLongIn, self.port_dataULongLongOut, "ULONGLONG"); + serviced += self.DoPort( self.port_dataFileIn, self.port_dataFileOut, "URL"); + serviced += self.DoPort( self.port_dataXMLIn, self.port_dataXMLOut, "XML"); self._log.debug( "--TestRCV::SVC_FUNC END" ) - time.sleep(.5); - return NORMAL - + if serviced > 0: + return NORMAL + else: + return NOOP diff --git a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/.md5sums b/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/.md5sums new file mode 100644 index 000000000..569c0c562 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/.md5sums @@ -0,0 +1 @@ +396c1d1970d0a4e4a57991df37e1a198 Makefile.am diff --git a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/Makefile.am b/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/Makefile.am index d0462899d..ef71f1178 100644 --- a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/Makefile.am @@ -18,17 +18,16 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# ossieName = TestLargePush -bindir = $(prefix)/dom/components/TestLargePush/cpp/ +bindir = $(prefix)/dom/components/TestLargePush/cpp bin_PROGRAMS = TestLargePush -xmldir = $(prefix)/dom/components/TestLargePush/ +xmldir = $(prefix)/dom/components/TestLargePush dist_xml_DATA = ../TestLargePush.scd.xml ../TestLargePush.prf.xml ../TestLargePush.spd.xml - -# this is use to build against local bulkio interface and library and not installed version -bulkio_top=../../../../../ -bulkio_libsrc_top=$(bulkio_top)/libsrc +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects distclean-local: + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 @@ -47,8 +46,7 @@ distclean-local: # you wish to manually control these options. include $(srcdir)/Makefile.am.ide TestLargePush_SOURCES = $(redhawk_SOURCES_auto) -TestLargePush_LDADD = -L$(bulkio_libsrc_top)/.libs -L$(bulkio_top)/.libs $(BIO_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) -#TestLargePush_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -TestLargePush_CXXFLAGS = -Wall -I$(bulkio_libsrc_top)/cpp -I$(bulkio_top)/src/cpp $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -TestLargePush_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) +TestLargePush_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +TestLargePush_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +TestLargePush_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/build.sh b/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/build.sh deleted file mode 100755 index c6089ee81..000000000 --- 
a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/build.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -if [ "$1" = "clean" ]; then - make clean -else - # Checks if build is newer than makefile (based on modification time) - if [ ! -e configure ] || [ ! -e Makefile ] || [ configure.ac -nt Makefile ] || [ Makefile.am -nt Makefile ]; then - ./reconf - ./configure - fi - make -j - exit 0 -fi diff --git a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/configure.ac b/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/configure.ac deleted file mode 100644 index fd2ec8bde..000000000 --- a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/configure.ac +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -AC_INIT(TestLargePush, 1.0.0) -AM_INIT_AUTOMAKE(nostdinc) - - -AC_PROG_CC -AC_PROG_CXX -AC_PROG_INSTALL - -AC_CORBA_ORB -OSSIE_CHECK_OSSIE -OSSIE_SDRROOT_AS_PREFIX - -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) - -# Dependencies -export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig" -PKG_CHECK_MODULES([PROJECTDEPS], [ossie >= 1.9 omniORB4 >= 4.1.0 ]) -OSSIE_ENABLE_LOG4CXX -AX_BOOST_BASE([1.41]) -AX_BOOST_THREAD - -AX_BOOST_SYSTEM - -export PKG_CONFIG_PATH="../../../..:../../..":$PKG_CONFIG_PATH -PKG_CHECK_MODULES([BIO], [bulkio >= 1.10]) - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT - diff --git a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/reconf b/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/reconf deleted file mode 100755 index ba319ec54..000000000 --- a/bulkioInterfaces/libsrc/testing/components/TestLargePush/cpp/reconf +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. 
-# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -rm -f config.cache - -# Setup the libtool stuff -if [ -e /usr/local/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/local/share/aclocal/libtool.m4 aclocal.d/acinclude.m4 -elif [ -e /usr/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/share/aclocal/libtool.m4 acinclude.m4 -fi -libtoolize --force --automake - -# Search in expected locations for the OSSIE acincludes -if [ -n ${OSSIEHOME} ] && [ -d ${OSSIEHOME}/share/aclocal/ossie ]; then - OSSIE_AC_INCLUDE=${OSSIEHOME}/share/aclocal/ossie -else - echo "Error: Cannot find the OSSIE aclocal files. This is not expected!" 
-fi - -if [ -n ${OSSIE_AC_INCLUDE} ]; then - aclocal -I ${OSSIE_AC_INCLUDE} -else - aclocal -fi - -autoconf -automake --foreign --add-missing - diff --git a/bulkioInterfaces/libsrc/testing/components/TestLargePush/java/.md5sums b/bulkioInterfaces/libsrc/testing/components/TestLargePush/java/.md5sums new file mode 100644 index 000000000..b79be2cf2 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/TestLargePush/java/.md5sums @@ -0,0 +1 @@ +e7e48351847aee365b95df73f16cac70 Makefile.am diff --git a/bulkioInterfaces/libsrc/testing/components/TestLargePush/java/Makefile.am b/bulkioInterfaces/libsrc/testing/components/TestLargePush/java/Makefile.am index 45921db8b..911e1a896 100644 --- a/bulkioInterfaces/libsrc/testing/components/TestLargePush/java/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/TestLargePush/java/Makefile.am @@ -17,15 +17,22 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + +TestLargePush_jar_CLASSPATH = $(SOFTPKG_CLASSPATH):$(REDHAWK_CLASSPATH):$(BULKIO_CLASSPATH) + TestLargePush.jar$(EXEEXT): $(TestLargePush_jar_SOURCES) mkdir -p bin - $(JAVAC) -cp $(CLASSPATH_SOFTPKG_DEP)$(OSSIE_HOME)/lib/CFInterfaces.jar:$(OSSIE_HOME)/lib/log4j-1.2.15.jar:$(OSSIE_HOME)/lib/ossie.jar:$(bulkio_top)/BULKIOInterfaces.jar:$(bulkio_libsrc_top)/bulkio.jar -d bin $(TestLargePush_jar_SOURCES) + $(JAVAC) -cp $(TestLargePush_jar_CLASSPATH) -g -d bin $(TestLargePush_jar_SOURCES) $(JAR) cf ./TestLargePush.jar -C bin . + $(JAR) uf ./TestLargePush.jar -C src . 
clean-local: rm -rf bin distclean-local: + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 @@ -43,12 +50,6 @@ bindir = $(prefix)/dom/components/TestLargePush/java/ bin_PROGRAMS = TestLargePush.jar TestLargePush_jar_SOURCES := $(shell find ./src -name "*.java") -## -## This is used to build against local bulkio interface library and not installed version -## -bulkio_top=../../../../.. -bulkio_libsrc_top=$(bulkio_top)/libsrc - xmldir = $(prefix)/dom/components/TestLargePush/ dist_xml_DATA = ../TestLargePush.scd.xml ../TestLargePush.prf.xml ../TestLargePush.spd.xml diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/.md5sums b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/.md5sums index c66941dc1..90d946248 100644 --- a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/.md5sums +++ b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/.md5sums @@ -1,7 +1,7 @@ 3e90cd586ef55340f720fa01c394f810 main.cpp c99df9fa4ab0cd042eac42fcee6441c3 reconf 93debcdba56b6826a722d6572c43802a configure.ac -310ea7ad0ede382044a0cc94aa9aa24a Makefile.am +b3244a77701ca4fd7a6d4e08c0901e6d Makefile.am ccfa2e64e7cf8f57a2bc5056e90f0297 struct_props.h 1f8c0483ef3c90c0c9cd0f1402679791 Makefile.am.ide bcb90affbdcff3322084421ffb5edb92 multiout_attachable_base.cpp diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/Makefile.am b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/Makefile.am index b1514928e..0b9d53e4a 100644 --- a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/Makefile.am @@ -18,17 +18,16 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# ossieName = multiout_attachable -bindir = $(prefix)/dom/components/multiout_attachable/cpp/ +bindir = $(prefix)/dom/components/multiout_attachable/cpp bin_PROGRAMS = multiout_attachable -xmldir = $(prefix)/dom/components/multiout_attachable/ +xmldir = $(prefix)/dom/components/multiout_attachable dist_xml_DATA = ../multiout_attachable.scd.xml ../multiout_attachable.prf.xml ../multiout_attachable.spd.xml - -# this is use to build against local bulkio interface and library and not installed version -bulkio_top=../../../../../ -bulkio_libsrc_top=$(bulkio_top)/libsrc +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects distclean-local: + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 @@ -47,8 +46,7 @@ distclean-local: # you wish to manually control these options. include $(srcdir)/Makefile.am.ide multiout_attachable_SOURCES = $(redhawk_SOURCES_auto) -multiout_attachable_LDADD = -L$(bulkio_libsrc_top)/.libs -L$(bulkio_top)/.libs $(BIO_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) -#multiout_attachable_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -multiout_attachable_CXXFLAGS = -Wall -I$(bulkio_libsrc_top)/cpp -I$(bulkio_top)/src/cpp $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +multiout_attachable_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +multiout_attachable_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) multiout_attachable_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/build.sh 
b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/build.sh deleted file mode 100755 index c6089ee81..000000000 --- a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/build.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -if [ "$1" = "clean" ]; then - make clean -else - # Checks if build is newer than makefile (based on modification time) - if [ ! -e configure ] || [ ! -e Makefile ] || [ configure.ac -nt Makefile ] || [ Makefile.am -nt Makefile ]; then - ./reconf - ./configure - fi - make -j - exit 0 -fi diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/configure.ac b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/configure.ac deleted file mode 100644 index 37a61cf4f..000000000 --- a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/configure.ac +++ /dev/null @@ -1,47 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -AC_INIT(multiout_attachable, 1.0.0) -AM_INIT_AUTOMAKE(nostdinc) - -AC_PROG_CC -AC_PROG_CXX -AC_PROG_INSTALL - -AC_CORBA_ORB -OSSIE_CHECK_OSSIE -OSSIE_SDRROOT_AS_PREFIX - -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) - -# Dependencies -export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig" -PKG_CHECK_MODULES([PROJECTDEPS], [ossie >= 1.10 omniORB4 >= 4.1.0 ]) -OSSIE_ENABLE_LOG4CXX -AX_BOOST_BASE([1.41]) -AX_BOOST_SYSTEM -AX_BOOST_THREAD -AX_BOOST_REGEX - -export PKG_CONFIG_PATH="../../../..:../../..":$PKG_CONFIG_PATH -PKG_CHECK_MODULES([BIO], [bulkio >= 1.10]) - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT - diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/reconf b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/reconf deleted file mode 100755 index ba319ec54..000000000 --- a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/cpp/reconf +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. 
-# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -rm -f config.cache - -# Setup the libtool stuff -if [ -e /usr/local/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/local/share/aclocal/libtool.m4 aclocal.d/acinclude.m4 -elif [ -e /usr/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/share/aclocal/libtool.m4 acinclude.m4 -fi -libtoolize --force --automake - -# Search in expected locations for the OSSIE acincludes -if [ -n ${OSSIEHOME} ] && [ -d ${OSSIEHOME}/share/aclocal/ossie ]; then - OSSIE_AC_INCLUDE=${OSSIEHOME}/share/aclocal/ossie -else - echo "Error: Cannot find the OSSIE aclocal files. This is not expected!" 
-fi - -if [ -n ${OSSIE_AC_INCLUDE} ]; then - aclocal -I ${OSSIE_AC_INCLUDE} -else - aclocal -fi - -autoconf -automake --foreign --add-missing - diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/.md5sums b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/.md5sums index 28e74c938..25538c424 100644 --- a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/.md5sums +++ b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/.md5sums @@ -3,4 +3,4 @@ c99df9fa4ab0cd042eac42fcee6441c3 reconf 9ec485b3eae2b94286c8b7a91eb6b645 configure.ac d6ed4ba3d1efb79c37961c7de007975b startJava.sh 20646b5bde62d405d38d41c3c15e95b7 src/multiout_attachable/java/multiout_attachable.java -8c165edc10e1cafc735199598bc76847 Makefile.am +956df491c9d35608c1e9c5732df5a16b Makefile.am diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/Makefile.am b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/Makefile.am index 872d778d1..ffd5715a9 100644 --- a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/Makefile.am @@ -17,17 +17,22 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. 
# -multiout_attachable_jar_CLASSPATH = $(CLASSPATH_SOFTPKG_DEP)$(OSSIE_HOME)/lib/CFInterfaces.jar:$(OSSIE_HOME)/lib/log4j-1.2.15.jar:$(OSSIE_HOME)/lib/ossie.jar:$(bulkio_libsrc_top)/bulkio.jar:$(bulkio_top)/BULKIOInterfaces.jar +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + +multiout_attachable_jar_CLASSPATH = $(SOFTPKG_CLASSPATH):$(REDHAWK_CLASSPATH):$(BULKIO_CLASSPATH) multiout_attachable.jar$(EXEEXT): $(multiout_attachable_jar_SOURCES) mkdir -p bin - $(JAVAC) -cp $(multiout_attachable_jar_CLASSPATH) -d bin $(multiout_attachable_jar_SOURCES) + $(JAVAC) -cp $(multiout_attachable_jar_CLASSPATH) -g -d bin $(multiout_attachable_jar_SOURCES) $(JAR) cf ./multiout_attachable.jar -C bin . + $(JAR) uf ./multiout_attachable.jar -C src . clean-local: rm -rf bin distclean-local: + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 @@ -45,12 +50,6 @@ bindir = $(prefix)/dom/components/multiout_attachable/java/ bin_PROGRAMS = multiout_attachable.jar multiout_attachable_jar_SOURCES := $(shell find ./src -name "*.java") -## -## This is used to build against local bulkio interface library and not installed version -## -bulkio_top=../../../../.. -bulkio_libsrc_top=$(bulkio_top)/libsrc - xmldir = $(prefix)/dom/components/multiout_attachable/ dist_xml_DATA = ../multiout_attachable.scd.xml ../multiout_attachable.prf.xml ../multiout_attachable.spd.xml diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/configure.ac b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/configure.ac deleted file mode 100644 index 510f5bccb..000000000 --- a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/configure.ac +++ /dev/null @@ -1,49 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -AC_INIT(multiout_attachable, 1.0.0) -AM_INIT_AUTOMAKE(nostdinc) - -AC_CORBA_ORB -OSSIE_CHECK_OSSIE -OSSIE_SDRROOT_AS_PREFIX - -export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig" -CLASSPATH_SOFTPKG_DEP="" -AC_SUBST(CLASSPATH_SOFTPKG_DEP) -PKG_CHECK_MODULES([OSSIE], [ossie >= 1.10]) - -AC_CHECK_PROG([IDLJ], [idlj], [idlj], [AC_MSG_ERROR([cannot find idlj program])]) -AC_CHECK_PROG([JAVAC], [javac], [javac], [AC_MSG_ERROR([cannot find Java compiler])]) -AC_CHECK_PROG([JAR], [jar], [jar], [AC_MSG_ERROR([cannot find jar program])]) - -AC_PATH_PROG(JAVAC, javac, [not found], [${JAVA_HOME}/bin]) -AC_PATH_PROG(JAVAH, javah, [not found], [${JAVA_HOME}/bin]) -AC_PATH_PROG(JAVA, java, [not found], [${JAVA_HOME}/bin]) -AC_PATH_PROG(JAR, jar, [not found], [${JAVA_HOME}/bin]) -AC_SUBST(JAVAC) -AC_SUBST(JAVAH) -AC_SUBST(JAR) -AC_SUBST(JAVA) - -export PKG_CONFIG_PATH="../../../..:../../..":$PKG_CONFIG_PATH -PKG_CHECK_MODULES([BIO], [bulkio >= 1.10]) - -AC_CONFIG_FILES(Makefile) -AC_OUTPUT diff --git a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/reconf b/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/reconf deleted file mode 100755 index ba319ec54..000000000 --- 
a/bulkioInterfaces/libsrc/testing/components/multiout_attachable/java/reconf +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -rm -f config.cache - -# Setup the libtool stuff -if [ -e /usr/local/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/local/share/aclocal/libtool.m4 aclocal.d/acinclude.m4 -elif [ -e /usr/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/share/aclocal/libtool.m4 acinclude.m4 -fi -libtoolize --force --automake - -# Search in expected locations for the OSSIE acincludes -if [ -n ${OSSIEHOME} ] && [ -d ${OSSIEHOME}/share/aclocal/ossie ]; then - OSSIE_AC_INCLUDE=${OSSIEHOME}/share/aclocal/ossie -else - echo "Error: Cannot find the OSSIE aclocal files. This is not expected!" 
-fi - -if [ -n ${OSSIE_AC_INCLUDE} ]; then - aclocal -I ${OSSIE_AC_INCLUDE} -else - aclocal -fi - -autoconf -automake --foreign --add-missing - diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/Makefile.am b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/Makefile.am new file mode 100644 index 000000000..9fa7b5dd5 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/Makefile.am @@ -0,0 +1,45 @@ +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + +ossieName = snk_slow +libdir = $(prefix)/dom/components/snk_slow/cpp +lib_LTLIBRARIES = snk_slow.la + +xmldir = $(prefix)/dom/components/snk_slow +dist_xml_DATA = ../snk_slow.scd.xml ../snk_slow.prf.xml ../snk_slow.spd.xml + +.PHONY: convenience-link clean-convenience-link + +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : snk_slow.la + @ln -fs .libs/snk_slow.so + +clean-convenience-link: + @rm -f snk_slow.so + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. 
+include $(srcdir)/Makefile.am.ide +snk_slow_la_SOURCES = $(redhawk_SOURCES_auto) +snk_slow_la_LIBADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +snk_slow_la_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +snk_slow_la_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) + diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/Makefile.am.ide b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/Makefile.am.ide new file mode 100644 index 000000000..355344fdf --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/Makefile.am.ide @@ -0,0 +1,10 @@ +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! +# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. 
Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = main.cpp +redhawk_SOURCES_auto += snk_slow.cpp +redhawk_SOURCES_auto += snk_slow.h +redhawk_SOURCES_auto += snk_slow_base.cpp +redhawk_SOURCES_auto += snk_slow_base.h diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/main.cpp b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/main.cpp new file mode 100644 index 000000000..e2e125686 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/main.cpp @@ -0,0 +1,11 @@ +#include +#include "ossie/ossieSupport.h" + +#include "snk_slow.h" +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new snk_slow_i(uuid.c_str(), identifier.c_str()); + } +} + diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow.cpp b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow.cpp new file mode 100644 index 000000000..f9f761de3 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow.cpp @@ -0,0 +1,253 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "snk_slow.h" + +PREPARE_LOGGING(snk_slow_i) + +snk_slow_i::snk_slow_i(const char *uuid, const char *label) : + snk_slow_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. 
+ +} + +snk_slow_i::~snk_slow_i() +{ +} + +void snk_slow_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. 
+ + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. + + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output 
stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void snk_slow_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &snk_slow_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. 
+ + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (snk_slow_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &snk_slow_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). 
The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to snk_slow.cpp + snk_slow_i::snk_slow_i(const char *uuid, const char *label) : + snk_slow_base(uuid, label) + { + addPropertyListener(scaleValue, this, &snk_slow_i::scaleChanged); + addPropertyListener(status, this, &snk_slow_i::statusChanged); + } + + void snk_slow_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(snk_slow_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void snk_slow_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(snk_slow_i, "status changed"); + } + + //Add to snk_slow.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int snk_slow_i::serviceFunction() +{ + bulkio::InFloatStream inputStream = this->dataFloat->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + bulkio::FloatDataBlock block = inputStream.read(); + return NOOP; +} + diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow.h b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow.h new file mode 100644 index 000000000..f4f9b0ef4 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow.h @@ -0,0 +1,18 @@ +#ifndef SNK_SLOW_I_IMPL_H +#define SNK_SLOW_I_IMPL_H + +#include "snk_slow_base.h" + +class snk_slow_i : public snk_slow_base +{ + ENABLE_LOGGING + public: + snk_slow_i(const char *uuid, const char *label); + ~snk_slow_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // 
SNK_SLOW_I_IMPL_H diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow_base.cpp b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow_base.cpp new file mode 100644 index 000000000..995fb71d3 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow_base.cpp @@ -0,0 +1,65 @@ +#include "snk_slow_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +snk_slow_base::snk_slow_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); + + dataFloat = new bulkio::InFloatPort("dataFloat"); + addPort("dataFloat", dataFloat); +} + +snk_slow_base::~snk_slow_base() +{ + dataFloat->_remove_ref(); + dataFloat = 0; +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. 
+*******************************************************************************************/ +void snk_slow_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void snk_slow_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void snk_slow_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void snk_slow_base::loadProperties() +{ +} + + diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow_base.h b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow_base.h new file mode 100644 index 000000000..0945fd9e5 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/cpp/snk_slow_base.h @@ -0,0 +1,32 @@ +#ifndef SNK_SLOW_BASE_IMPL_BASE_H +#define SNK_SLOW_BASE_IMPL_BASE_H + +#include +#include +#include + +#include + +class snk_slow_base : public Component, protected ThreadedComponent +{ + public: + snk_slow_base(const char *uuid, const char *label); + ~snk_slow_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + + // Ports + /// Port: dataFloat + bulkio::InFloatPort *dataFloat; + + private: +}; +#endif // SNK_SLOW_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/svc_error_cpp.prf.xml 
b/bulkioInterfaces/libsrc/testing/components/snk_slow/snk_slow.prf.xml similarity index 100% rename from redhawk/src/testing/sdr/dom/components/svc_error_cpp/svc_error_cpp.prf.xml rename to bulkioInterfaces/libsrc/testing/components/snk_slow/snk_slow.prf.xml diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/snk_slow.scd.xml b/bulkioInterfaces/libsrc/testing/components/snk_slow/snk_slow.scd.xml new file mode 100644 index 000000000..897f913be --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/snk_slow.scd.xml @@ -0,0 +1,53 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/snk_slow.spd.xml b/bulkioInterfaces/libsrc/testing/components/snk_slow/snk_slow.spd.xml new file mode 100644 index 000000000..63a211cc5 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/snk_slow.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/snk_slow.so + + + + + + + + + diff --git a/bulkioInterfaces/libsrc/testing/components/snk_slow/tests/test_snk_slow.py b/bulkioInterfaces/libsrc/testing/components/snk_slow/tests/test_snk_slow.py new file mode 100755 index 000000000..508f510d7 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/snk_slow/tests/test_snk_slow.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +import ossie.utils.testing +from ossie.utils import sb + +class ComponentTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the component. 
+ SPD_FILE = '../snk_slow.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a component using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the component, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl) + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def testBasicBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + self.comp.start() + self.comp.stop() + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/bulkioInterfaces/libsrc/testing/components/src/cpp/Makefile.am b/bulkioInterfaces/libsrc/testing/components/src/cpp/Makefile.am new file mode 100644 index 000000000..ae441b604 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/cpp/Makefile.am @@ -0,0 +1,49 @@ +# +# This file is part of REDHAWK bulkioInterfaces. 
+# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +ossieName = src +bindir = $(prefix)/dom/components/src/cpp +bin_PROGRAMS = src + +xmldir = $(prefix)/dom/components/src +dist_xml_DATA = ../src.scd.xml ../src.prf.xml ../src.spd.xml +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. 
+include $(srcdir)/Makefile.am.ide +src_SOURCES = $(redhawk_SOURCES_auto) +src_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +src_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +src_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) + diff --git a/bulkioInterfaces/libsrc/testing/components/src/cpp/Makefile.am.ide b/bulkioInterfaces/libsrc/testing/components/src/cpp/Makefile.am.ide new file mode 100644 index 000000000..4056deef8 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/cpp/Makefile.am.ide @@ -0,0 +1,10 @@ +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! +# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = main.cpp +redhawk_SOURCES_auto += src.cpp +redhawk_SOURCES_auto += src.h +redhawk_SOURCES_auto += src_base.cpp +redhawk_SOURCES_auto += src_base.h diff --git a/bulkioInterfaces/libsrc/testing/components/src/cpp/main.cpp b/bulkioInterfaces/libsrc/testing/components/src/cpp/main.cpp new file mode 100644 index 000000000..83ecfe617 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/cpp/main.cpp @@ -0,0 +1,11 @@ +#include +#include "ossie/ossieSupport.h" + +#include "src.h" +int main(int argc, char* argv[]) +{ + src_i* src_servant; + Component::start_component(src_servant, argc, argv); + return 0; +} + diff --git a/bulkioInterfaces/libsrc/testing/components/src/cpp/src.cpp b/bulkioInterfaces/libsrc/testing/components/src/cpp/src.cpp new file mode 100644 index 000000000..9e5919fae --- /dev/null +++ 
b/bulkioInterfaces/libsrc/testing/components/src/cpp/src.cpp @@ -0,0 +1,253 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "src.h" + +PREPARE_LOGGING(src_i) + +src_i::src_i(const char *uuid, const char *label) : + src_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + +} + +src_i::~src_i() +{ +} + +void src_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ + stream = dataFloat->createStream("hello"); + stream.xdelta(0.4); + stream.blocking(true); +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. 
+ + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) and string-based (dataString, dataXML and + dataFile) do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. 
+ + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + // The component class must have an output stream member; add to + // src.h: + // bulkio::OutFloatStream outputStream; + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + short* inputData = block.data(); + std::vector outputData; + outputData.resize(block.size()); + for (size_t index = 0; index < block.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // If there is no output stream open, create one + if (!outputStream) { + outputStream = dataFloat_out->createStream(block.sri()); + } else if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Write to the output stream + outputStream.write(outputData, block.getTimestamps()); + + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. 
Data blocks + provide functions that return the correct interpretation of the data + buffer and number of complex elements: + + if (block.complex()) { + std::complex* data = block.cxdata(); + for (size_t index = 0; index < block.cxsize(); ++index) { + data[index] = std::abs(data[index]); + } + outputStream.write(data, block.cxsize(), bulkio::time::utils::now()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void src_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &src_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (src_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &src_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to src.cpp + src_i::src_i(const char *uuid, const char *label) : + src_base(uuid, label) + { + addPropertyListener(scaleValue, this, &src_i::scaleChanged); + addPropertyListener(status, this, &src_i::statusChanged); + } + + void src_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(src_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void src_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(src_i, "status changed"); + } + + //Add to src.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int src_i::serviceFunction() +{ + redhawk::buffer data(10000); + stream.write(data, bulkio::time::utils::now()); + + return NORMAL; +} + diff --git a/bulkioInterfaces/libsrc/testing/components/src/cpp/src.h b/bulkioInterfaces/libsrc/testing/components/src/cpp/src.h new file mode 100644 index 000000000..dc70e5199 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/cpp/src.h @@ -0,0 +1,19 @@ +#ifndef SRC_I_IMPL_H +#define SRC_I_IMPL_H + +#include "src_base.h" + +class src_i : public src_base +{ + ENABLE_LOGGING + public: + src_i(const char *uuid, const char *label); + ~src_i(); + + void constructor(); + + int serviceFunction(); + bulkio::OutFloatStream stream; +}; + +#endif // SRC_I_IMPL_H diff --git a/bulkioInterfaces/libsrc/testing/components/src/cpp/src_base.cpp b/bulkioInterfaces/libsrc/testing/components/src/cpp/src_base.cpp new file mode 100644 index 000000000..b7e3cf24e --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/cpp/src_base.cpp @@ -0,0 +1,65 @@ +#include "src_base.h" + 
+/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +src_base::src_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); + + dataFloat = new bulkio::OutFloatPort("dataFloat"); + addPort("dataFloat", dataFloat); +} + +src_base::~src_base() +{ + dataFloat->_remove_ref(); + dataFloat = 0; +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. +*******************************************************************************************/ +void src_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void src_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void src_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void src_base::loadProperties() +{ +} + + diff --git a/bulkioInterfaces/libsrc/testing/components/src/cpp/src_base.h b/bulkioInterfaces/libsrc/testing/components/src/cpp/src_base.h new file mode 100644 index 
000000000..cbdfad230 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/cpp/src_base.h @@ -0,0 +1,32 @@ +#ifndef SRC_BASE_IMPL_BASE_H +#define SRC_BASE_IMPL_BASE_H + +#include +#include +#include + +#include + +class src_base : public Component, protected ThreadedComponent +{ + public: + src_base(const char *uuid, const char *label); + ~src_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + + // Ports + /// Port: dataFloat + bulkio::OutFloatPort *dataFloat; + + private: +}; +#endif // SRC_BASE_IMPL_BASE_H diff --git a/bulkioInterfaces/libsrc/testing/components/src/src.prf.xml b/bulkioInterfaces/libsrc/testing/components/src/src.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/src.prf.xml @@ -0,0 +1,3 @@ + + + diff --git a/bulkioInterfaces/libsrc/testing/components/src/src.scd.xml b/bulkioInterfaces/libsrc/testing/components/src/src.scd.xml new file mode 100644 index 000000000..a4abbbd6f --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/src.scd.xml @@ -0,0 +1,53 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/bulkioInterfaces/libsrc/testing/components/src/src.spd.xml b/bulkioInterfaces/libsrc/testing/components/src/src.spd.xml new file mode 100644 index 000000000..946f1de02 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/src.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + cpp/src + + + + + + + + + diff --git a/bulkioInterfaces/libsrc/testing/components/src/tests/test_src.py b/bulkioInterfaces/libsrc/testing/components/src/tests/test_src.py new file mode 100755 index 000000000..0e4d60f40 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/components/src/tests/test_src.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python + +import ossie.utils.testing +from ossie.utils import sb +import time + +class ComponentTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the component. + SPD_FILE = '../src.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a component using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. 
Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the component, using the selected implementation + self.comp = sb.launch('../../src/src.spd.xml') + self.snk = sb.launch('../../snk_slow/snk_slow.spd.xml') + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def testSrcSnkBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + self.comp.connect(self.snk) + self.comp.start() + self.snk.start() + time.sleep(1) + try: + self.comp.stop() + except: + pass + self.snk.stop() + self.comp.releaseObject() + self.snk.releaseObject() + + def testSnkSrcBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + self.comp.connect(self.snk) + self.comp.start() + self.snk.start() + time.sleep(1) + self.snk.stop() + try: + self.comp.stop() + except: + pass + self.comp.releaseObject() + self.snk.releaseObject() + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/.md5sums b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/.md5sums index 2fc8aa61f..54782304a 100644 --- a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/.md5sums +++ b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/.md5sums @@ -1,2 +1,2 @@ ad203121cc82e53a70d18fe05fb10cad sri_changed_cpp.spec -a9b4faa48de105491e9e2ec3e98c4e31 build.sh +78dbd56164ba142955871befa3fd1aca build.sh diff --git a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/.md5sums b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/.md5sums index 2aa9c15b9..2d005319c 100644 --- 
a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/.md5sums +++ b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/.md5sums @@ -4,7 +4,7 @@ c99df9fa4ab0cd042eac42fcee6441c3 reconf ae264440c7ce05f34ca3e62f86a41bee sri_changed_cpp_base.h be32db27e87a9c3e8d69629e4369789b sri_changed_cpp_base.cpp c3d08a91dc24cfede3ddf8b9035e1af9 configure.ac -0524e1037a2e8935e4923dfd9d588e8e Makefile.am +7974ec73e836992ee425ecece625d439 Makefile.am e1748f0f729341e18286dea4e4065da3 Makefile.am.ide 8ef82849c233a496d7bd4ae800a2195c sri_changed_cpp.h 0d1975802982b41325f73129696f8a63 build.sh diff --git a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/Makefile.am b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/Makefile.am index b9b33720c..3183a8acd 100644 --- a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/Makefile.am @@ -18,19 +18,16 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # ossieName = sri_changed_cpp -bindir = $(prefix)/dom/components/sri_changed_cpp/cpp/ +bindir = $(prefix)/dom/components/sri_changed_cpp/cpp bin_PROGRAMS = sri_changed_cpp -xmldir = $(prefix)/dom/components/sri_changed_cpp/ +xmldir = $(prefix)/dom/components/sri_changed_cpp dist_xml_DATA = ../sri_changed_cpp.scd.xml ../sri_changed_cpp.prf.xml ../sri_changed_cpp.spd.xml ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie - -# this is use to build against local bulkio interface and library and not installed version -bulkio_top=../../../../../ -bulkio_libsrc_top=$(bulkio_top)/libsrc +AUTOMAKE_OPTIONS = subdir-objects distclean-local: - rm -f m4/* + rm -rf m4 rm -f config.* rm -rf autom4te.cache rm -f acinclude.m4 @@ -49,8 +46,7 @@ distclean-local: # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide sri_changed_cpp_SOURCES = $(redhawk_SOURCES_auto) -sri_changed_cpp_LDADD = -L$(bulkio_libsrc_top)/.libs -L$(bulkio_top)/.libs $(BIO_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) -#sri_changed_cpp_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -sri_changed_cpp_CXXFLAGS = -Wall -I$(bulkio_libsrc_top)/cpp -I$(bulkio_top)/src/cpp -I$(bulkio_top)/src/cpp/ossie $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(RH_DEPS_CFLAGS) $(redhawk_INCLUDES_auto) +sri_changed_cpp_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +sri_changed_cpp_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) sri_changed_cpp_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/build.sh b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/build.sh deleted file mode 100755 index 74e64498a..000000000 --- a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/build.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -# Create the Makefile if necessary -if [ ! -e Makefile ]; then - ./reconf - ./configure -fi - -make -j $* - diff --git a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/configure.ac b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/configure.ac deleted file mode 100644 index 7faedd0ec..000000000 --- a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/configure.ac +++ /dev/null @@ -1,47 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -AC_INIT(sri_changed_cpp, 1.0.0) -AM_INIT_AUTOMAKE([nostdinc foreign]) - -AC_PROG_CC -AC_PROG_CXX -AC_PROG_INSTALL - -AC_CORBA_ORB -OSSIE_CHECK_OSSIE -OSSIE_SDRROOT_AS_PREFIX - -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) - -# Dependencies -export PKG_CONFIG_PATH="$PKG_CONFIG_PATH:/usr/local/lib/pkgconfig" -PKG_CHECK_MODULES([PROJECTDEPS], [ossie >= 1.10 omniORB4 >= 4.1.0 ]) -OSSIE_ENABLE_LOG4CXX -AX_BOOST_BASE([1.41]) -AX_BOOST_SYSTEM -AX_BOOST_THREAD -AX_BOOST_REGEX - -export PKG_CONFIG_PATH="../../../..:../../..":$PKG_CONFIG_PATH -PKG_CHECK_MODULES([INTERFACEDEPS], [bulkio >= 1.10]) - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT - diff --git a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/reconf b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/reconf deleted file mode 100755 index ece304047..000000000 --- a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/reconf +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# - -rm -f config.cache -autoreconf -i diff --git a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/sri_changed_cpp.spec b/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/sri_changed_cpp.spec deleted file mode 100644 index b0a4d9446..000000000 --- a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/sri_changed_cpp.spec +++ /dev/null @@ -1,87 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: sri_changed_cpp -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.10 -Requires: redhawk >= 1.10 - -# Interface requirements -BuildRequires: bulkioInterfaces >= 1.10 -Requires: bulkioInterfaces >= 1.10 - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation cpp -pushd cpp -./reconf -%define _bindir %{_prefix}/dom/components/sri_changed_cpp/cpp -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation cpp -pushd cpp -%define _bindir %{_prefix}/dom/components/sri_changed_cpp/cpp -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/sri_changed_cpp.scd.xml -%{_prefix}/dom/components/%{name}/sri_changed_cpp.prf.xml -%{_prefix}/dom/components/%{name}/sri_changed_cpp.spd.xml -%{_prefix}/dom/components/%{name}/cpp - diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/.md5sums b/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/.md5sums new file mode 100644 index 000000000..874074737 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/.md5sums @@ -0,0 +1 @@ +3d284eeeddf792b62e329376caeee0df Makefile.am 
diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/Makefile.am b/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/Makefile.am index f9269d097..bf1cc60d6 100644 --- a/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/Makefile.am @@ -20,7 +20,7 @@ ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie AUTOMAKE_OPTIONS = subdir-objects -dev_snk_jar_CLASSPATH = $(SOFTPKG_CLASSPATH):$(REDHAWK_CLASSPATH):$(OSSIE_HOME)/lib/bulkio.jar:$(OSSIE_HOME)/lib/BULKIOInterfaces.jar +dev_snk_jar_CLASSPATH = $(SOFTPKG_CLASSPATH):$(REDHAWK_CLASSPATH):$(BULKIO_CLASSPATH) dev_snk.jar$(EXEEXT): $(dev_snk_jar_SOURCES) mkdir -p bin diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/configure.ac b/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/configure.ac deleted file mode 100644 index 81c87e020..000000000 --- a/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/configure.ac +++ /dev/null @@ -1,37 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -AC_INIT(dev_snk, 1.0.0) -AM_INIT_AUTOMAKE([nostdinc foreign]) -AC_CONFIG_MACRO_DIR([m4]) - -OSSIE_CHECK_OSSIE -OSSIE_SDRROOT_AS_PREFIX - -PKG_CHECK_MODULES([OSSIE], [ossie >= 2.0]) - -RH_JAVA_HOME -RH_PROG_JAVAC([1.6]) -RH_PROG_JAR - -RH_PKG_CLASSPATH([REDHAWK], [ossie]) -PKG_CHECK_MODULES([INTERFACEDEPS], [bulkio >= 2.0]) - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/reconf b/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/reconf deleted file mode 100755 index 03a46fa0f..000000000 --- a/bulkioInterfaces/libsrc/testing/devices/dev_snk/java/reconf +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -rm -f config.cache -[ -d m4 ] || mkdir m4 -autoreconf -i - diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_src/build.sh b/bulkioInterfaces/libsrc/testing/devices/dev_src/build.sh deleted file mode 100755 index 46ebbef8c..000000000 --- a/bulkioInterfaces/libsrc/testing/devices/dev_src/build.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -# -# This file is protected by Copyright. 
Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -if [ "$1" = "rpm" ]; then - # A very simplistic RPM build scenario - if [ -e dev_src.spec ]; then - mydir=`dirname $0` - tmpdir=`mktemp -d` - cp -r ${mydir} ${tmpdir}/dev_src-1.0.0 - tar czf ${tmpdir}/dev_src-1.0.0.tar.gz --exclude=".svn" -C ${tmpdir} dev_src-1.0.0 - rpmbuild -ta ${tmpdir}/dev_src-1.0.0.tar.gz - rm -rf $tmpdir - else - echo "Missing RPM spec file in" `pwd` - exit 1 - fi -else - for impl in java ; do - if [ ! 
-d "$impl" ]; then - echo "Directory '$impl' does not exist...continuing" - continue - fi - cd $impl - if [ -e build.sh ]; then - if [ $# == 1 ]; then - if [ $1 == 'clean' ]; then - rm -f Makefile - rm -f config.* - ./build.sh distclean - else - ./build.sh $* - fi - else - ./build.sh $* - fi - elif [ -e Makefile ] && [ Makefile.am -ot Makefile ]; then - make $* - elif [ -e reconf ]; then - ./reconf && ./configure && make $* - else - echo "No build.sh found for $impl" - fi - cd - - done -fi diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_src/java/.md5sums b/bulkioInterfaces/libsrc/testing/devices/dev_src/java/.md5sums new file mode 100644 index 000000000..98780807a --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/devices/dev_src/java/.md5sums @@ -0,0 +1 @@ +0245698762351f18b66f77d7e1330660 Makefile.am diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_src/java/Makefile.am b/bulkioInterfaces/libsrc/testing/devices/dev_src/java/Makefile.am index a2d463ed5..60cb36526 100644 --- a/bulkioInterfaces/libsrc/testing/devices/dev_src/java/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/devices/dev_src/java/Makefile.am @@ -20,7 +20,7 @@ ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie AUTOMAKE_OPTIONS = subdir-objects -dev_src_jar_CLASSPATH = $(SOFTPKG_CLASSPATH):$(REDHAWK_CLASSPATH):$(OSSIE_HOME)/lib/bulkio.jar:$(OSSIE_HOME)/lib/BULKIOInterfaces.jar +dev_src_jar_CLASSPATH = $(SOFTPKG_CLASSPATH):$(REDHAWK_CLASSPATH):$(BULKIO_CLASSPATH) dev_src.jar$(EXEEXT): $(dev_src_jar_SOURCES) mkdir -p bin diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_src/java/configure.ac b/bulkioInterfaces/libsrc/testing/devices/dev_src/java/configure.ac deleted file mode 100644 index 9cfc7ce23..000000000 --- a/bulkioInterfaces/libsrc/testing/devices/dev_src/java/configure.ac +++ /dev/null @@ -1,37 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. 
-# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -AC_INIT(dev_src, 1.0.0) -AM_INIT_AUTOMAKE([nostdinc foreign]) -AC_CONFIG_MACRO_DIR([m4]) - -OSSIE_CHECK_OSSIE -OSSIE_SDRROOT_AS_PREFIX - -PKG_CHECK_MODULES([OSSIE], [ossie >= 2.0]) - -RH_JAVA_HOME -RH_PROG_JAVAC([1.6]) -RH_PROG_JAR - -RH_PKG_CLASSPATH([REDHAWK], [ossie]) -PKG_CHECK_MODULES([INTERFACEDEPS], [bulkio >= 2.0]) - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_src/java/reconf b/bulkioInterfaces/libsrc/testing/devices/dev_src/java/reconf deleted file mode 100755 index 03a46fa0f..000000000 --- a/bulkioInterfaces/libsrc/testing/devices/dev_src/java/reconf +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. 
-# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# - -rm -f config.cache -[ -d m4 ] || mkdir m4 -autoreconf -i - diff --git a/bulkioInterfaces/libsrc/testing/devices/dev_src/tests/test_dev_src.py b/bulkioInterfaces/libsrc/testing/devices/dev_src/tests/test_dev_src.py index 4b6821f6a..482143e53 100644 --- a/bulkioInterfaces/libsrc/testing/devices/dev_src/tests/test_dev_src.py +++ b/bulkioInterfaces/libsrc/testing/devices/dev_src/tests/test_dev_src.py @@ -57,7 +57,11 @@ def testRHBasicBehavior(self): ####################################################################### # Make sure start and stop can be called without throwing exceptions self.comp.connect(self.comp_snk) - self.assertTrue(self.comp.ports[0]._get_connections()[0].port._is_equivalent(self.comp_snk.ports[0].ref)) + # Explicitly get the CORBA port references for comparison because the + # IDL may not be installed, throwing exceptions on comp.ports + src = self.comp.getPort('dataFloat_out') + sink = self.comp_snk.getPort('dataFloat_in') + self.assertTrue(src._get_connections()[0].port._is_equivalent(sink)) if __name__ == "__main__": ossie.utils.testing.main("../dev_src.spd.xml") # By default tests all implementations diff --git a/bulkioInterfaces/libsrc/testing/tests/.gitignore b/bulkioInterfaces/libsrc/testing/tests/.gitignore new file mode 100644 index 000000000..79da69a41 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/.gitignore @@ -0,0 +1,2 @@ +TEST-*.xml +cppunit-results.xml diff --git a/bulkioInterfaces/libsrc/testing/tests/base_ports.py b/bulkioInterfaces/libsrc/testing/tests/base_ports.py index 688b5af28..dea6f3251 100644 --- 
a/bulkioInterfaces/libsrc/testing/tests/base_ports.py +++ b/bulkioInterfaces/libsrc/testing/tests/base_ports.py @@ -30,12 +30,7 @@ # Add the local search paths to find local IDL files from ossie.utils import model -from ossie.utils.idllib import IDLLibrary -model._idllib = IDLLibrary() model._idllib.addSearchPath('../../../idl') -model._idllib.addSearchPath('/usr/local/redhawk/core/share/idl') -if 'OSSIEHOME' in _os.environ: - model._idllib.addSearchPath(_os.path.join(_os.environ['OSSIEHOME'], 'share/idl')) def str_to_class(s): if s in globals() and isinstance(globals()[s], types.ClassType): @@ -102,16 +97,10 @@ class BaseVectorPort(unittest.TestCase): 'Xml' : [ 'dataXMLIn', 'dataXMLOut', 'xmlIn' ] } - def __init__( - self, - methodName='runTest', - ptype='Int8', - cname=None, - srcData=None, - cmpData=None, - bio_in_module=bulkio.InCharPort, - bio_out_module=bulkio.OutCharPort ): - unittest.TestCase.__init__(self, methodName) + def __init__(self, ptype, cname, *args, **kwargs): + srcData = kwargs.pop('srcData', None) + cmpData = kwargs.pop('cmpData', None) + unittest.TestCase.__init__(self, *args, **kwargs) self.c_dir = 'components' self.c_name = cname self.ptype = ptype @@ -122,8 +111,6 @@ def __init__( self.srcData = srcData self.cmpData = cmpData self.ctx = dict().fromkeys(BaseVectorPort.KEYS) - self.bio_in_module = bio_in_module - self.bio_out_module = bio_out_module def getPortFlow(self, ptype='Int8' ): return BaseVectorPort.PORT_FLOW[ptype] @@ -262,87 +249,6 @@ def test_inport_using_component(self): iport.pushSRI(sri) - def test_inport_python_api(self): - ## - ## test bulkio base class standalone - ## - bio = self.bio_in_module("xxx") - - ps = bio._get_statistics() - self.assertNotEqual(ps,None,"Cannot get Port Statistics") - - s = bio._get_state() - self.assertNotEqual(s,None,"Cannot get Port State") - self.assertEqual(s,IDLE,"Invalid Port State") - - streams = bio._get_activeSRIs() - self.assertNotEqual(streams,None,"Cannot get Streams List") - - qed = 
bio.getMaxQueueDepth() - self.assertEqual(qed,100,"Get Stream Depth Failed") - - bio.setMaxQueueDepth(22) - qed = bio.getMaxQueueDepth() - self.assertEqual(qed,22,"Set/Get Stream Depth Failed") - - ts = bulkio.timestamp.now() - sri = bulkio.sri.create() - sri.streamID = "test_port_api" - bio.pushSRI(sri) - - data=range(50) - bio.pushPacket(data, ts, False, "test_port_api") - - # result of getPacket - # DATA_BUFFER=0 - # TIME_STAMP=1 - # END_OF_STREAM=2 - # STREAM_ID=3 - # SRI=4 - # SRI_CHG=5 - # QUEUE_FLUSH=6 - ## this is missing in python - ##pkt = bio.getPacket(bulkio.const.NON_BLOCKING) - pkt = bio.getPacket() - self.assertNotEqual(pkt,None,"pushPacket .. getPacket Failed") - self.assertNotEqual(pkt[bulkio.InPort.DATA_BUFFER],None,"pushPacket .. getPacket Failed") - self.assertNotEqual(pkt[bulkio.InPort.TIME_STAMP],None,"pushPacket .. getPacket Failed") - self.assertNotEqual(pkt[bulkio.InPort.END_OF_STREAM],None,"pushPacket .. getPacket Failed") - self.assertNotEqual(pkt[bulkio.InPort.STREAM_ID],None,"pushPacket .. getPacket Failed") - self.assertNotEqual(pkt[bulkio.InPort.SRI],None,"pushPacket .. getPacket Failed") - self.assertNotEqual(pkt[bulkio.InPort.SRI_CHG],None,"pushPacket .. getPacket Failed") - self.assertNotEqual(pkt[bulkio.InPort.QUEUE_FLUSH],None,"pushPacket .. getPacket Failed") - - pkt = bio.getPacket() - self.assertNotEqual(pkt,None,"Second getPacket should be Empty") - self.assertEqual(pkt[bulkio.InPort.DATA_BUFFER],None,"pushPacket .. getPacket Failed") - self.assertEqual(pkt[bulkio.InPort.TIME_STAMP],None,"pushPacket .. getPacket Failed") - self.assertEqual(pkt[bulkio.InPort.END_OF_STREAM],None,"pushPacket .. getPacket Failed") - self.assertEqual(pkt[bulkio.InPort.STREAM_ID],None,"pushPacket .. getPacket Failed") - self.assertEqual(pkt[bulkio.InPort.SRI],None,"pushPacket .. getPacket Failed") - self.assertEqual(pkt[bulkio.InPort.SRI_CHG],None,"pushPacket .. 
getPacket Failed") - self.assertEqual(pkt[bulkio.InPort.QUEUE_FLUSH],None,"pushPacket .. getPacket Failed") - - sri.streamID = "test_port_api" - sri.mode = 1 - bio.pushSRI(sri) - data=range(50) - bio.pushPacket(data, ts, True, "test_port_api") - pkt = bio.getPacket() - self.assertNotEqual(pkt,None,"pushPacket... getPacket FAILED") - self.assertNotEqual(pkt[bulkio.InPort.DATA_BUFFER],None,"EOS: pushPacket .. getPacket Failed") - self.assertEqual(pkt[bulkio.InPort.END_OF_STREAM],True,"EOS: pushPacket .. getPacket EOS TEST Failed") - self.assertEqual(pkt[bulkio.InPort.SRI].mode,1,"EOS: pushPacket .. getPacket COMPLEX MODE Failed") - - pkt = bio.getPacket() - self.assertEqual(pkt[bulkio.InPort.DATA_BUFFER],None,"pushPacket .. getPacket EOS Failed") - self.assertEqual(pkt[bulkio.InPort.TIME_STAMP],None,"pushPacket .. getPacket EOS Failed") - self.assertEqual(pkt[bulkio.InPort.END_OF_STREAM],None,"pushPacket .. getPacket EOS Failed") - self.assertEqual(pkt[bulkio.InPort.STREAM_ID],None,"pushPacket .. getPacket EOS Failed") - self.assertEqual(pkt[bulkio.InPort.SRI],None,"pushPacket .. getPacket EOS Failed") - self.assertEqual(pkt[bulkio.InPort.SRI_CHG],None,"pushPacket .. getPacket EOS Failed") - self.assertEqual(pkt[bulkio.InPort.QUEUE_FLUSH],None,"pushPacket .. 
getPacket EOS Failed") - def test_outport_using_component(self): c_spd_xml = test_dir + self.c_dir + '/' + self.c_name + '/' + self.c_name + '.spd.xml' print "Test Component:" + c_spd_xml @@ -369,90 +275,3 @@ def test_outport_using_component(self): cl = oport._get_connections() self.assertNotEqual(cl,None,"Cannot get Connections List") self.assertEqual(len(cl),0,"Incorrect Connections List Length") - - ## - ## Create bulkio base class port object - ## - bio = self.bio_out_module("xxx") - cl = bio._get_connections() - self.assertNotEqual(cl,None,"Cannot get Connections List") - self.assertEqual(len(cl),0,"Incorrect Connections List Length") - - ts = bulkio.timestamp.now() - sri = bulkio.sri.create() - sri.streamID = "test_port_api" - bio.pushSRI(sri) - - data=range(50) - bio.pushPacket(data, ts, False, "test_port_api") - bio.pushPacket(data, ts, True, "test_port_api") - bio.pushPacket(data, ts, False, "unknown_port_api") - - ps = bio._get_statistics() - self.assertNotEqual(ps,None,"Cannot get Port Statistics") - - cnt = len(bio.sriDict) - self.assertEqual(cnt,1,"SRI list should be 1") - - bio.enableStats(False) - - - - def test_outport_python_api(self): - ## - ## Create bulkio base class port object - ## - bio = self.bio_out_module("xxx") - cl = bio._get_connections() - self.assertNotEqual(cl,None,"Cannot get Connections List") - self.assertEqual(len(cl),0,"Incorrect Connections List Length") - - connectionName="testing-connection-list" - dsink=sb.DataSink() - inport=dsink.getPort(self.sink_inport) - bio.connectPort(inport, connectionName ) - - - cl = bio._get_connections() - self.assertNotEqual(cl,None,"Cannot get Connections List") - self.assertEqual(len(cl),1,"Incorrect Connections List Length") - - bio.disconnectPort(connectionName) - bio.disconnectPort(connectionName) - - cl = bio._get_connections() - self.assertNotEqual(cl,None,"Cannot get Connections List") - self.assertEqual(len(cl),0,"Incorrect Connections List Length") - - ts = bulkio.timestamp.now() - sri 
= bulkio.sri.create() - sri.streamID = "test_port_api" - bio.pushSRI(sri) - - data=range(50) - bio.pushPacket(data, ts, False, "test_port_api") - bio.pushPacket(data, ts, True, "test_port_api") - bio.pushPacket(data, ts, False, "unknown_port_api") - - ps = bio._get_statistics() - self.assertNotEqual(ps,None,"Cannot get Port Statistics") - - cnt = len(bio.sriDict) - self.assertEqual(cnt,1,"SRI list should be 1") - - bio.enableStats(False) - - # repeating connect/disconnect to test ticket #1996 - connectionName="testing-connection-list" - dsink=sb.DataSink() - inport=dsink.getPort(self.sink_inport) - bio.connectPort(inport, connectionName ) - - cl = bio._get_connections() - self.assertNotEqual(cl,None,"Cannot get Connections List") - self.assertEqual(len(cl),1,"Incorrect Connections List Length") - - bio.disconnectPort(connectionName) - bio.disconnectPort(connectionName) - - diff --git a/bulkioInterfaces/libsrc/testing/tests/buildtests b/bulkioInterfaces/libsrc/testing/tests/buildtests index 0588870f1..1f849361d 100755 --- a/bulkioInterfaces/libsrc/testing/tests/buildtests +++ b/bulkioInterfaces/libsrc/testing/tests/buildtests @@ -1,64 +1,5 @@ # # Build supporting components for bulkio test framework # - -bulkio_top=../../../ -bulkio_libsrc_top=$bulkio_top/libsrc -export LD_LIBRARY_PATH=$bulkio_libsrc_top/.libs:$bulkio_top/.libs:${LD_LIBRARY_PATH} -export PYTHONPATH=$bulkio_libsrc_top/build/lib:${PYTHONPATH} - -cd ../components/CPP_Ports/cpp -./reconf; ./configure; make -j -cd - - -cd ../components/sri_changed_cpp/cpp -./reconf; ./configure; make -j -cd - - -cd ../components/Java_Ports/java -./reconf; ./configure; make -cd - - -cd ../components/Python_Ports/python -./reconf; ./configure; make -cd - - -cd ../components/TestLargePush/cpp -./reconf; ./configure; make -j -cd - - -cd ../components/TestLargePush/java -./reconf; ./configure; make -cd - - -cd ../components/TestLargePush/python -./reconf; ./configure; make -cd - - -cd ../components/multiout_attachable/cpp 
-./reconf; ./configure; make -j -cd - - -cd ../components/multiout_attachable/java -./reconf; ./configure; make -cd - - -cd ../components/Oversized_framedata/cpp -./reconf; ./configure; make -j -cd - - -cd ../components/Oversized_framedata/java -./reconf; ./configure; make -cd - - -cd ../devices/dev_src/java -./reconf; ./configure; make -cd - - -cd ../devices/dev_snk/java -./reconf; ./configure; make -cd - - -cd cpp -./reconf; ./configure; -cd - +echo >&2 "$(basename $0) is deprecated; use 'make' in testing directory" +(cd .. && make -j) diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio.cpp deleted file mode 100644 index d52374bb9..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio.cpp +++ /dev/null @@ -1,62 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include "log4cxx/logger.h" -#include "log4cxx/basicconfigurator.h" -#include "log4cxx/helpers/exception.h" -using namespace std; - - -int main(int argc, char* argv[]) -{ - - // Set up a simple configuration that logs on the console. - log4cxx::BasicConfigurator::configure(); - - // Get the top level suite from the registry - CppUnit::Test *suite = CppUnit::TestFactoryRegistry::getRegistry().makeTest(); - - // Create the event manager and test controller - CppUnit::TestResult controller; - // Add a listener that collects test result - CppUnit::TestResultCollector result; - controller.addListener ( &result ); - CppUnit::TextUi::TestRunner *runner = new CppUnit::TextUi::TestRunner; - - ofstream xmlout ( "../cppunit-results.xml" ); - CppUnit::XmlOutputter xmlOutputter ( &result, xmlout ); - CppUnit::CompilerOutputter compilerOutputter ( &result, std::cerr ); - - // Run the tests. - runner->addTest( suite ); - runner->run( controller ); - xmlOutputter.write(); - compilerOutputter.write(); - - // Return error code 1 if the one of test failed. - return result.wasSuccessful() ? 0 : 1; -} - diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_Helper_Fixture.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_Helper_Fixture.cpp deleted file mode 100644 index 8f796373f..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_Helper_Fixture.cpp +++ /dev/null @@ -1,255 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. 
- * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#include "Bulkio_Helper_Fixture.h" -#include "bulkio.h" - -// Registers the fixture into the 'registry' -CPPUNIT_TEST_SUITE_REGISTRATION( Bulkio_Helper_Fixture ); - - -void -Bulkio_Helper_Fixture::setUp() -{ -} - - -void -Bulkio_Helper_Fixture::tearDown() -{ -} - - -void -Bulkio_Helper_Fixture::test_sri_create() -{ - BULKIO::StreamSRI sri = bulkio::sri::create(); -} - - -void -Bulkio_Helper_Fixture::test_sri_compare() -{ - BULKIO::StreamSRI A = bulkio::sri::create(); - BULKIO::StreamSRI B = bulkio::sri::create(); - BULKIO::StreamSRI C = bulkio::sri::create(); - - C.streamID = std::string("No Match").c_str(); - - CPPUNIT_ASSERT( bulkio::sri::DefaultComparator(A , B) == true ); - CPPUNIT_ASSERT( bulkio::sri::DefaultComparator(A , C) == false ); - -} - -void -Bulkio_Helper_Fixture::test_time_now() -{ - BULKIO::PrecisionUTCTime T = bulkio::time::utils::now(); -} - -void -Bulkio_Helper_Fixture::test_time_create() -{ - const double wsec = 100.0; - const double fsec = 0.125; - BULKIO::PrecisionUTCTime T = bulkio::time::utils::create(100.0, 0.125); - - CPPUNIT_ASSERT( T.twsec == wsec ); - CPPUNIT_ASSERT( T.tfsec == fsec ); -} - -void -Bulkio_Helper_Fixture::test_time_compare() -{ - BULKIO::PrecisionUTCTime t1 = bulkio::time::utils::create(100.0, 0.5); - BULKIO::PrecisionUTCTime t2 = bulkio::time::utils::create(100.0, 0.5); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Identical times did not compare equal", t1, t2); - CPPUNIT_ASSERT_MESSAGE("Identical times did not compare as >=", t1 >= t2); - CPPUNIT_ASSERT_MESSAGE("Identical times did not compare as <=", t2 <= 
t1); - CPPUNIT_ASSERT_MESSAGE("Identical times compared as >", !(t1 > t2)); - CPPUNIT_ASSERT_MESSAGE("Identical times compared as <", !(t1 < t2)); - - // Only fractional seconds differ - t1 = bulkio::time::utils::create(100.0, 0.5); - t2 = bulkio::time::utils::create(100.0, 0.25); - CPPUNIT_ASSERT_MESSAGE("Different times did not compare !=", t1 != t2); - CPPUNIT_ASSERT_MESSAGE("Time with larger fractional did not compare >", t1 > t2); - CPPUNIT_ASSERT_MESSAGE("Time with smaller fractional did not compare <", t2 < t1); - - // Only whole seconds differ - t1 = bulkio::time::utils::create(100.0, 0.75); - t2 = bulkio::time::utils::create(101.0, 0.75); - CPPUNIT_ASSERT_MESSAGE("Different times did not compare !=", t1 != t2); - CPPUNIT_ASSERT_MESSAGE("Time with smaller whole did not compare <=", t1 <= t2); - CPPUNIT_ASSERT_MESSAGE("Time with larger whole did not compare >=", t2 >= t1); - - // Whole seconds differ, but fractional seconds have the opposite ordering (which has no effect) - t1 = bulkio::time::utils::create(100.0, 0.75); - t2 = bulkio::time::utils::create(5000.0, 0.25); - CPPUNIT_ASSERT_MESSAGE("Different times compared equal", !(t1 == t2)); - CPPUNIT_ASSERT_MESSAGE("Time with smaller whole and larger fractional did not compare >", t1 < t2); - CPPUNIT_ASSERT_MESSAGE("Time with larger whole and smaller fractional did not compare <", t2 > t1); -} - -void -Bulkio_Helper_Fixture::test_time_normalize() -{ - // NOTE: All tests use fractional portions that are exact binary fractions to - // avoid potential roundoff issues - - // Already normalized, no change - BULKIO::PrecisionUTCTime time = bulkio::time::utils::create(100.0, 0.5); - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Already normalized time", bulkio::time::utils::create(100.0, 0.5), time); - - // Whole seconds has fractional portion, should be moved to fractional seconds - time.twsec = 100.25; - time.tfsec = 0.25; - bulkio::time::utils::normalize(time); - 
CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing whole", bulkio::time::utils::create(100.0, 0.5), time); - - // Whole seconds has fractional portion, should be moved to fractional seconds - // leading to carry - time.twsec = 100.75; - time.tfsec = 0.75; - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing whole with carry", bulkio::time::utils::create(101.0, 0.5), time); - - // Fractional seconds contains whole portion, should be moved to whole seconds - time.twsec = 100.0; - time.tfsec = 2.5; - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing fractional", bulkio::time::utils::create(102.0, 0.5), time); - - // Both parts require normalization; fractional portion of whole seconds adds an - // additional carry - time.twsec = 100.75; - time.tfsec = 2.75; - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing both", bulkio::time::utils::create(103.0, 0.5), time); - - // Negative fractional value should borrow - time.twsec = 100.0; - time.tfsec = -0.25; - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing negative fractional", bulkio::time::utils::create(99.0, 0.75), time); - - // Negative fractional value with magnitude greater than one - time.twsec = 100.0; - time.tfsec = -3.125; - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing negative fractional > 1", bulkio::time::utils::create(96.0, 0.875), time); - - // Fractional portion of whole seconds greater than negative fractional seconds - time.twsec = 100.5; - time.tfsec = -.125; - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing both with negative fractional", bulkio::time::utils::create(100.0, 0.375), time); - - // Negative fractional seconds greater than fractional portion of whole seconds - time.twsec = 100.125; - time.tfsec = -.5; - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing both with borrow", 
bulkio::time::utils::create(99.0, 0.625), time); - - // Negative fractional seconds have whole portion, but seconds whole seconds have - // fractional portion with larger magnitude than remaining fractional seconds - time.twsec = 100.75; - time.tfsec = -2.5; - bulkio::time::utils::normalize(time); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing both with negative fractional > 1", bulkio::time::utils::create(98.0, 0.25), time); -} - -void -Bulkio_Helper_Fixture::test_time_operators() -{ - // NOTE: All tests use fractional portions that are exact binary fractions to - // avoid potential roundoff issues - - // Test that copy works as expected - const BULKIO::PrecisionUTCTime reference = bulkio::time::utils::create(100.0, 0.5); - BULKIO::PrecisionUTCTime t1 = reference; - CPPUNIT_ASSERT_EQUAL_MESSAGE("Copy returned different values", reference, t1); - - // Add a positive offset - BULKIO::PrecisionUTCTime result = t1 + 1.75; - BULKIO::PrecisionUTCTime expected = bulkio::time::utils::create(102.0, 0.25); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Original value modified", reference, t1); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Add positive offset", expected, result); - - // Add a negative offset (i.e., subtract) - result = t1 + -1.75; - expected = bulkio::time::utils::create(98.0, 0.75); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Original value modified", reference, t1); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Add negative offset", expected, result); - - // Increment by positive offset - t1 += 2.25; - expected = bulkio::time::utils::create(102.0, 0.75); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Increment by positive offset", expected, t1); - - // Increment by negative offset (i.e., decrement) - t1 += -3.875; - expected = bulkio::time::utils::create(98.0, 0.875); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Increment by negative offset", expected, t1); - - // Reset to reference time and subtract a positive offset - t1 = reference; - result = t1 - 1.25; - expected = bulkio::time::utils::create(99.0, 0.25); - 
CPPUNIT_ASSERT_EQUAL_MESSAGE("Original value modified", reference, t1); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Subtract positive offset", expected, result); - - // Subtract a negative offset (i.e., add) - result = t1 - -4.875; - expected = bulkio::time::utils::create(105.0, 0.375); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Original value modified", reference, t1); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Subtract negative offset", expected, result); - - // Decrement by positive offset - t1 -= 2.75; - expected = bulkio::time::utils::create(97.0, 0.75); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Decrement by positive offset", expected, t1); - - // Decrement by negative offset (i.e., increment) - t1 -= -3.375; - expected = bulkio::time::utils::create(101.0, 0.125); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Decrement by negative offset", expected, t1); - - // Difference, both positive and negative (exact binary fractions used to allow - // exact comparison) - t1 = reference + 8.875; - CPPUNIT_ASSERT_EQUAL_MESSAGE("Positive time difference", t1 - reference, 8.875); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Negative time difference", reference - t1, -8.875); -} - -void -Bulkio_Helper_Fixture::test_time_string() -{ - // Test the default epoch (Unix time) - BULKIO::PrecisionUTCTime time = bulkio::time::utils::create(0.0, 0.0); - std::ostringstream oss; - oss << time; - CPPUNIT_ASSERT_EQUAL_MESSAGE("Epoch", std::string("1970:01:01::00:00:00.000000"), oss.str()); - - // Use a recent time with rounding at the microsecond level - oss.str(""); - oss << bulkio::time::utils::create(1451933967.0, 0.2893569); - CPPUNIT_ASSERT_EQUAL_MESSAGE("Reference", std::string("2016:01:04::18:59:27.289357"), oss.str()); -} diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_Helper_Fixture.h b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_Helper_Fixture.h deleted file mode 100644 index 067851479..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_Helper_Fixture.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * This file is protected 
by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef BULKIO_HELPER_FIXTURE_H -#define BULKIO_HELPER_FIXTURE_H - -#include - -class Bulkio_Helper_Fixture : public CppUnit::TestFixture -{ - CPPUNIT_TEST_SUITE( Bulkio_Helper_Fixture ); - CPPUNIT_TEST( test_sri_create ); - CPPUNIT_TEST( test_sri_compare ); - CPPUNIT_TEST( test_time_now ); - CPPUNIT_TEST( test_time_create ); - CPPUNIT_TEST( test_time_compare ); - CPPUNIT_TEST( test_time_normalize ); - CPPUNIT_TEST( test_time_operators ); - CPPUNIT_TEST( test_time_string ); - CPPUNIT_TEST_SUITE_END(); - -public: - void setUp(); - void tearDown(); - - void test_sri_create(); - void test_sri_compare(); - - void test_time_now(); - void test_time_create(); - void test_time_compare(); - void test_time_normalize(); - void test_time_operators(); - void test_time_string(); -}; - -#endif // BULKIO_HELPER_FIXTURE_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_InPort_Fixture.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_InPort_Fixture.cpp deleted file mode 100644 index 8f9242a48..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_InPort_Fixture.cpp +++ /dev/null @@ -1,774 +0,0 @@ -/* - * This 
file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#include - -#include "Bulkio_InPort_Fixture.h" -#include "bulkio.h" - -// Registers the fixture into the 'registry' -CPPUNIT_TEST_SUITE_REGISTRATION( Bulkio_InPort_Fixture ); - -class SriListener { -public: - SriListener() : - sri_(), - sriChanged_(false) - { - } - - void updateSRI(BULKIO::StreamSRI& sri) - { - sri_ = sri; - sriChanged_ = true; - } - - void reset() - { - sriChanged_ = false; - } - - bool sriChanged() - { - return sriChanged_; - } - -private: - BULKIO::StreamSRI sri_; - bool sriChanged_; -}; - - -class MyFloatPort : public bulkio::InFloatPort { - -public: - - MyFloatPort( std::string pname, bulkio::LOGGER_PTR logger ) : - bulkio::InFloatPort( pname, logger ) {}; - - // - // over ride default behavior for pushPacket and pushSRI - // - void pushPacket(const bulkio::InFloatPort::PortSequenceType & data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) { - stats->update(10, (float)workQueue.size()/(float)queueSem->getMaxValue(), EOS, streamID, false); - queueSem->setCurrValue(workQueue.size()); - bulkio::InFloatPort::pushPacket( data, T, EOS, streamID ); - } - - 
void pushSRI(const BULKIO::StreamSRI& H) { - queueSem->setCurrValue(workQueue.size()); - bulkio::InFloatPort::pushSRI(H); - } -}; - - -void -Bulkio_InPort_Fixture::setUp() -{ - logger =rh_logger::Logger::getLogger("BulkioInPort"); - logger->setLevel( rh_logger::Level::getInfo()); -} - - -void -Bulkio_InPort_Fixture::tearDown() -{ -} - -template< typename T> -void Bulkio_InPort_Fixture::test_port_api( T *port ) { - - RH_DEBUG(logger, "Running tests port:" << port->getName() ); - - BULKIO::PortStatistics *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - BULKIO::StreamSRISequence *streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - delete streams; - - int tmp = port->getMaxQueueDepth(); - CPPUNIT_ASSERT( tmp == 100 ); - - tmp = port->getCurrentQueueDepth(); - CPPUNIT_ASSERT( tmp == 0 ); - - port->setMaxQueueDepth(22); - tmp = port->getMaxQueueDepth(); - CPPUNIT_ASSERT( tmp == 22 ); - - // check that port queue is empty - typename T::dataTransfer *pkt = port->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT( pkt == NULL ); - - BULKIO::StreamSRI sri; - sri = bulkio::sri::create(); - sri.streamID = "test_port_api"; - port->pushSRI( sri ); - - streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - delete streams; - - typename T::PortSequenceType v; - BULKIO::PrecisionUTCTime TS; - port->pushPacket( v, TS, false, "test_port_api" ); - - // grab off packet - pkt = port->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT( pkt != NULL ); - CPPUNIT_ASSERT( pkt->EOS == 0 ) ; - CPPUNIT_ASSERT( pkt->SRI.mode == 0 ) ; - delete pkt; - - sri.mode = 1; - port->pushSRI(sri); - - streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - delete streams; - - port->pushPacket( v, TS, false, "test_port_api" ); - - // grab off 
packet - pkt = port->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT( pkt != NULL ); - CPPUNIT_ASSERT( pkt->EOS == 0 ) ; - CPPUNIT_ASSERT( pkt->SRI.mode == 1 ) ; - delete pkt; - - // test for EOS.. - port->pushPacket( v, TS, true, "test_port_api" ); - - // grab off packet - pkt = port->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT( pkt != NULL ); - CPPUNIT_ASSERT( pkt->EOS == 1 ) ; - CPPUNIT_ASSERT( pkt->SRI.mode == 1 ) ; - delete pkt; - - port->enableStats( false ); - - port->block(); - - port->unblock(); - - test_sri_change(port); -} - -template<> -void Bulkio_InPort_Fixture::test_port_api( bulkio::InFilePort *port ) { - - BULKIO::PortStatistics *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - BULKIO::StreamSRISequence *streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - delete streams; - - int tmp = port->getMaxQueueDepth(); - CPPUNIT_ASSERT( tmp == 100 ); - - tmp = port->getCurrentQueueDepth(); - CPPUNIT_ASSERT( tmp == 0 ); - - port->setMaxQueueDepth(22); - tmp = port->getMaxQueueDepth(); - CPPUNIT_ASSERT( tmp == 22 ); - - // check that port queue is empty - bulkio::InFilePort::dataTransfer *pkt = port->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT( pkt == NULL ); - - BULKIO::StreamSRI sri; - port->pushSRI( sri ); - - streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - delete streams; - - bulkio::InFilePort::PortSequenceType v = new bulkio::InFilePort::TransportType[1]; - BULKIO::PrecisionUTCTime TS; - port->pushPacket( v, TS, false, "test_port_api" ); - - // grab off packet - pkt = port->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT( pkt != NULL ); - delete pkt; - - port->enableStats( false ); - - port->block(); - - port->unblock(); - - test_sri_change(port); -} - - -template<> -void 
Bulkio_InPort_Fixture::test_port_api( bulkio::InXMLPort *port ) { - - BULKIO::PortStatistics *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - BULKIO::StreamSRISequence *streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - delete streams; - - int tmp = port->getMaxQueueDepth(); - CPPUNIT_ASSERT( tmp == 100 ); - - tmp = port->getCurrentQueueDepth(); - CPPUNIT_ASSERT( tmp == 0 ); - - port->setMaxQueueDepth(22); - tmp = port->getMaxQueueDepth(); - CPPUNIT_ASSERT( tmp == 22 ); - - // check that port queue is empty - bulkio::InXMLPort::dataTransfer *pkt = port->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT( pkt == NULL ); - - BULKIO::StreamSRI sri; - port->pushSRI( sri ); - - streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - delete streams; - - bulkio::InXMLPort::PortSequenceType v = new bulkio::InXMLPort::TransportType[1]; - BULKIO::PrecisionUTCTime TS; - port->pushPacket( v, TS, false, "test_port_api" ); - - // grab off packet - pkt = port->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT( pkt != NULL ); - delete pkt; - - port->enableStats( false ); - - port->block(); - - port->unblock(); - - test_sri_change(port); -} - -template< typename T> -void Bulkio_InPort_Fixture::test_sri_change( T *port ) { - typename T::PortSequenceType v; - BULKIO::PrecisionUTCTime TS; - - // Push data without an SRI to check that the sriChanged flag is still set - // and the SRI callback gets called - boost::scoped_ptr packet; - SriListener listener; - port->setNewStreamListener(&listener, &SriListener::updateSRI); - port->pushPacket(v, TS, false, "invalid_stream"); - packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); - CPPUNIT_ASSERT(packet); - CPPUNIT_ASSERT(packet->sriChanged == true); - CPPUNIT_ASSERT(listener.sriChanged() == true); - - // Push again to the 
same stream ID; sriChanged should now be false and the - // SRI callback should not get called - listener.reset(); - port->pushPacket(v, TS, false, "invalid_stream"); - packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); - CPPUNIT_ASSERT(packet); - CPPUNIT_ASSERT(packet->sriChanged == false); - CPPUNIT_ASSERT(listener.sriChanged() == false); -} - -template -void Bulkio_InPort_Fixture::test_stream_disable(T* port) -{ - typedef typename T::PortSequenceType PortSequenceType; - typedef typename T::StreamType StreamType; - typedef typename StreamType::DataBlockType DataBlockType; - - // Remove any existing stream listener - port->setNewStreamListener((bulkio::SriListener*) 0); - - // Create a new stream and push some data to it - BULKIO::StreamSRI sri = bulkio::sri::create("test_stream_disable"); - port->pushSRI(sri); - PortSequenceType data; - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - - // Get the input stream and read the first packet - StreamType stream = port->getStream("test_stream_disable"); - CPPUNIT_ASSERT_EQUAL(!stream, false); - - DataBlockType block = stream.read(); - CPPUNIT_ASSERT_EQUAL(!block, false); - - // Push a couple more packets, but only read part of the first - int current_depth = port->getCurrentQueueDepth(); - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - - // Read half of the first packet - block = stream.read(512); - - // Disable the stream - stream.disable(); - CPPUNIT_ASSERT(!stream.enabled()); - CPPUNIT_ASSERT(!stream.ready()); - CPPUNIT_ASSERT_EQUAL((size_t) 0, stream.samplesAvailable()); - CPPUNIT_ASSERT_EQUAL(current_depth, port->getCurrentQueueDepth()); - - // Push a couple more packets; they should get dropped - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - data.length(1024); - 
port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - CPPUNIT_ASSERT_EQUAL(current_depth, port->getCurrentQueueDepth()); - - // Push an end-of-stream packet - port->pushPacket(data, bulkio::time::utils::notSet(), true, sri.streamID); - - // Re-enable the stream - stream.enable(); - block = stream.read(); - CPPUNIT_ASSERT(!block); - CPPUNIT_ASSERT(stream.eos()); -} - - -template -void Bulkio_InPort_Fixture::test_stream_sri_changed(T* port) -{ - typedef typename T::PortSequenceType PortSequenceType; - typedef typename T::StreamType StreamType; - typedef typename StreamType::DataBlockType DataBlockType; - - // Create a new stream and push some data to it - BULKIO::StreamSRI sri = bulkio::sri::create("test_stream_sri_changed"); - // push sri , data seqeunce - port->pushSRI(sri); - PortSequenceType data; - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - - // Get the input stream and read the first packet - StreamType stream = port->getStream("test_stream_sri_changed"); - CPPUNIT_ASSERT_EQUAL(!stream, false); - - DataBlockType block = stream.read(); - CPPUNIT_ASSERT_EQUAL(!block, false); - - CPPUNIT_ASSERT_EQUAL(true, block.sriChanged()); - - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - block = stream.read(); - CPPUNIT_ASSERT_EQUAL(!block, false); - CPPUNIT_ASSERT_EQUAL(false, block.sriChanged()); - - // push sri , data seqeunce - sri.mode = 1; - port->pushSRI(sri); - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - - block = stream.read(); - CPPUNIT_ASSERT_EQUAL(true, block.sriChanged()); - int srichangedflags = block.sriChangeFlags(); - bool modeset = srichangedflags == bulkio::sri::MODE; - CPPUNIT_ASSERT_EQUAL(true, modeset); - - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); - block = stream.read(); - CPPUNIT_ASSERT_EQUAL(!block, false); - 
CPPUNIT_ASSERT_EQUAL(false, block.sriChanged()); - - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), true, sri.streamID); - block = stream.read(); - CPPUNIT_ASSERT_EQUAL(!block, false); - CPPUNIT_ASSERT_EQUAL(false, block.sriChanged()); - CPPUNIT_ASSERT_EQUAL(true, stream.eos()); - -} - - - - -template<> -void Bulkio_InPort_Fixture::test_port_api( bulkio::InSDDSPort *port ) { - - BULKIO::PortStatistics *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - BULKIO::StreamSRISequence *streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - delete streams; - - BULKIO::StreamSRI sri; - BULKIO::PrecisionUTCTime TS; - port->pushSRI( sri, TS ); - - streams = port->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - delete streams; - - - BULKIO::SDDSStreamDefinition sdef; - sdef.id = "test_sdds_id"; - sdef.dataFormat = BULKIO::SDDS_SB; - sdef.multicastAddress = "1.1.1.1"; - sdef.vlan = 1234; - sdef.port = 5678; - char *aid = port->attach( sdef, "test_sdds_port_api" ); - CPPUNIT_ASSERT( aid != NULL ); - - BULKIO::SDDSStreamSequence *sss = port->attachedStreams(); - CPPUNIT_ASSERT( sss != NULL ); - CPPUNIT_ASSERT( sss->length() == 1 ); - std::string paddr; - paddr = (*sss)[0].multicastAddress; - //std::cout << "port address " << paddr << std::endl; - - CPPUNIT_ASSERT( strcmp( paddr.c_str(), "1.1.1.1") == 0 ); - delete sss; - - char *uid = port->getUser(aid); - CPPUNIT_ASSERT( uid != NULL ); - //std::cout << "user id " << uid << std::endl; - CPPUNIT_ASSERT( strcmp( uid, "test_sdds_port_api" ) == 0 ); - - - port->detach( aid ); - - sss = port->attachedStreams(); - CPPUNIT_ASSERT( sss != NULL ); - CPPUNIT_ASSERT( sss->length() == 0 ); - delete sss; - - port->enableStats( false ); - - -} - - - - -void -Bulkio_InPort_Fixture::test_create_int8() -{ - bulkio::InInt8Port *port = new 
bulkio::InInt8Port("test_ctor_int8", logger ); - CPPUNIT_ASSERT( port != NULL ); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_InPort_Fixture::test_int8() -{ - bulkio::InInt8Port *port = new bulkio::InInt8Port("test_api_int8", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - test_stream_disable( port ); - test_stream_sri_changed( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - -void -Bulkio_InPort_Fixture::test_create_int16() -{ - bulkio::InInt16Port *port = new bulkio::InInt16Port("test_ctor_int16"); - CPPUNIT_ASSERT( port != NULL ); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_InPort_Fixture::test_int16() -{ - bulkio::InInt16Port *port = new bulkio::InInt16Port("test_api_int16"); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - test_stream_disable( port ); - test_stream_sri_changed( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - -void -Bulkio_InPort_Fixture::test_create_int32() -{ - bulkio::InInt32Port *port = new bulkio::InInt32Port("test_ctor_int32"); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_InPort_Fixture::test_int32() -{ - bulkio::InInt32Port *port = new bulkio::InInt32Port("test_api_int32"); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - test_stream_disable( port ); - test_stream_sri_changed( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - - -void -Bulkio_InPort_Fixture::test_create_int64() -{ - bulkio::InInt64Port *port = new bulkio::InInt64Port("test_ctor_int64"); - CPPUNIT_ASSERT( port != NULL ); -} - - -void -Bulkio_InPort_Fixture::test_int64() -{ - bulkio::InInt64Port *port = new bulkio::InInt64Port("test_api_int64"); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - test_stream_disable( port ); - test_stream_sri_changed( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - - -void -Bulkio_InPort_Fixture::test_create_uint8() -{ - bulkio::InUInt8Port *port = new bulkio::InUInt8Port("test_ctor_uint8"); - CPPUNIT_ASSERT( port != NULL ); -} - 
-void -Bulkio_InPort_Fixture::test_uint8() -{ - bulkio::InUInt8Port *port = new bulkio::InUInt8Port("test_api_uint8"); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - test_stream_disable( port ); - test_stream_sri_changed( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - -void -Bulkio_InPort_Fixture::test_create_uint16() -{ - bulkio::InUInt16Port *port = new bulkio::InUInt16Port("test_ctor_uint16"); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_InPort_Fixture::test_uint16() -{ - bulkio::InUInt16Port *port = new bulkio::InUInt16Port("test_api_uint16"); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - test_stream_disable( port ); - test_stream_sri_changed( port ); - - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - - -void -Bulkio_InPort_Fixture::test_create_uint32() -{ - bulkio::InUInt32Port *port = new bulkio::InUInt32Port("test_ctor_uint32"); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_InPort_Fixture::test_uint32() -{ - bulkio::InUInt32Port *port = new bulkio::InUInt32Port("test_api_uint32"); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - test_stream_disable( port ); - test_stream_sri_changed( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - - -void -Bulkio_InPort_Fixture::test_create_uint64() -{ - bulkio::InUInt64Port *port = new bulkio::InUInt64Port("test_ctor_uint64"); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_InPort_Fixture::test_uint64() -{ - bulkio::InUInt64Port *port = new bulkio::InUInt64Port("test_api_uint64"); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - test_stream_disable( port ); - test_stream_sri_changed( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - - - -void -Bulkio_InPort_Fixture::test_create_float() -{ - bulkio::InFloatPort *port = new bulkio::InFloatPort("test_ctor_float"); - CPPUNIT_ASSERT( port != NULL ); -} - - -void -Bulkio_InPort_Fixture::test_create_double() -{ - bulkio::InDoublePort *port = new bulkio::InDoublePort("test_ctor_float"); - 
CPPUNIT_ASSERT( port != NULL ); -} - - - -void -Bulkio_InPort_Fixture::test_create_file() -{ - bulkio::InFilePort *port = new bulkio::InFilePort("test_ctor_file", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - - -void -Bulkio_InPort_Fixture::test_file() -{ - bulkio::InFilePort *port = new bulkio::InFilePort("test_api_file", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - - -void -Bulkio_InPort_Fixture::test_create_xml() -{ - bulkio::InXMLPort *port = new bulkio::InXMLPort("test_ctor_xml", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - - - -void -Bulkio_InPort_Fixture::test_xml() -{ - bulkio::InXMLPort *port = new bulkio::InXMLPort("test_api_xml", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - - -void -Bulkio_InPort_Fixture::test_create_sdds() -{ - bulkio::InSDDSPort *port = new bulkio::InSDDSPort("test_ctor_sdds", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - - - -void -Bulkio_InPort_Fixture::test_sdds() -{ - bulkio::InSDDSPort *port = new bulkio::InSDDSPort("test_api_sdds", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); - -} - - -void -Bulkio_InPort_Fixture::test_subclass() -{ - bulkio::InFloatPort *port = new MyFloatPort("test_api_subclass", logger ); - - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_InPort_Fixture.h b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_InPort_Fixture.h deleted file mode 100644 index 04eef309a..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_InPort_Fixture.h +++ /dev/null @@ -1,94 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef BULKIO_INPORT_FIXTURE_H -#define BULKIO_INPORT_FIXTURE_H - -#include -#include - -class Bulkio_InPort_Fixture : public CppUnit::TestFixture -{ - CPPUNIT_TEST_SUITE( Bulkio_InPort_Fixture ); - CPPUNIT_TEST( test_create_int8 ); - CPPUNIT_TEST( test_int8 ); - CPPUNIT_TEST( test_create_int16 ); - CPPUNIT_TEST( test_int16 ); - CPPUNIT_TEST( test_create_int32); - CPPUNIT_TEST( test_int32 ); - CPPUNIT_TEST( test_create_int64); - CPPUNIT_TEST( test_int64 ); - CPPUNIT_TEST( test_create_uint8 ); - CPPUNIT_TEST( test_uint8 ); - CPPUNIT_TEST( test_create_uint16 ); - CPPUNIT_TEST( test_uint16 ); - CPPUNIT_TEST( test_create_uint32); - CPPUNIT_TEST( test_uint32 ); - CPPUNIT_TEST( test_create_uint64); - CPPUNIT_TEST( test_uint64 ); - CPPUNIT_TEST( test_create_float ); - CPPUNIT_TEST( test_create_double ); - CPPUNIT_TEST( test_create_file ); - CPPUNIT_TEST( test_file ); - CPPUNIT_TEST( test_create_xml ); - CPPUNIT_TEST( test_xml ); - CPPUNIT_TEST( test_create_sdds ); - CPPUNIT_TEST( test_sdds ); - CPPUNIT_TEST( test_subclass ); - CPPUNIT_TEST_SUITE_END(); - -public: - void setUp(); - void tearDown(); - - void test_create_int8(); - void test_int8(); - void test_create_int16(); - void test_int16(); - void test_create_int32(); - void test_int32(); - void test_create_int64(); - void 
test_int64(); - void test_create_uint8(); - void test_uint8(); - void test_create_uint16(); - void test_uint16(); - void test_create_uint32(); - void test_uint32(); - void test_create_uint64(); - void test_uint64(); - void test_create_float(); - void test_create_double(); - void test_create_file(); - void test_file(); - void test_create_xml(); - void test_xml(); - void test_create_sdds(); - void test_sdds(); - void test_subclass(); - - template < typename T > void test_port_api( T *port ); - template < typename T > void test_sri_change( T *port ); - template < typename T > void test_stream_disable( T *port ); - template < typename T > void test_stream_sri_changed( T *port ); - - rh_logger::LoggerPtr logger; -}; - -#endif // BULKIO_InPort_FIXTURE_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_MultiOut_Port.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_MultiOut_Port.cpp index 674eb7114..f697cfb1d 100644 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_MultiOut_Port.cpp +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_MultiOut_Port.cpp @@ -25,7 +25,6 @@ void Bulkio_MultiOut_Port< OUT_PORT, IN_PORT >::setUp() { logger = rh_logger::Logger::getLogger("Bulkio-MultiOutPort-" + lname ); logger->setLevel( rh_logger::Level::getInfo()); - orb = ossie::corba::CorbaInit(0,NULL); RH_DEBUG(this->logger, "Setup - Multiout Create Ports Table " ); @@ -97,667 +96,9 @@ void Bulkio_MultiOut_Port< OUT_PORT, IN_PORT >::tearDown() ossie::corba::RootPOA()->deactivate_object(ip3_oid); ossie::corba::RootPOA()->deactivate_object(ip4_oid); ossie::corba::RootPOA()->deactivate_object(port_oid); - - RH_DEBUG(this->logger, "TearDown - Shutdown the ORB " ); - //orb->shutdown(1); -} - -// -// test_multiout_sri_filtered() -// -// Test pushing out SRI to a single port and ensure other ports did not receive the SRI data -// - -template < typename OUT_PORT, typename IN_PORT > -void Bulkio_MultiOut_Data_Port< OUT_PORT, IN_PORT >::test_multiout_sri_filtered( ) { - - 
RH_DEBUG(this->logger, "Multiout SRI Filtered - BEGIN " ); - - ExtendedCF::UsesConnectionSequence *clist = this->port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - delete clist; - - RH_DEBUG(this->logger, "Multiout SRI Filtered - Create Connections and Filter list " ); - this->port->connectPort( this->ip1->_this(), "connection_1"); - this->port->connectPort( this->ip2->_this(), "connection_2"); - this->port->connectPort( this->ip3->_this(), "connection_3"); - this->port->connectPort( this->ip4->_this(), "connection_4"); - this->port->updateConnectionFilter( this->desc_list ); - - // - // Push SRI for IP1 - // - - std::string filter_stream_id( "stream-1-1" ); - double srate=11.0; - double xdelta = 1.0/srate; - BULKIO::StreamSRI sri; - BULKIO::PrecisionUTCTime TS = bulkio::time::utils::now(); - typename OUT_PORT::NativeSequenceType v(91); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - BULKIO::StreamSRISequence *streams = this->ip1->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - delete streams; - - streams = this->ip2->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2 SRI was Received, Failed", streams->length() == 0 ); - delete streams; - - streams = this->ip3->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3, SRI was Received, Failed", streams->length() == 0 ); - delete streams; - - streams = this->ip4->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4, SRI was Received, Failed", streams->length() == 0 ); - delete streams; - -} - - - -// -// test_multiout_sri_eos_filtered() -// -// Test pushing out SRI to each port and ensure 
other ports did not receive the SRI data, -// then terminate the data flow for each stream with EOS and then check each ports -// active SRI list is empty -// -template < typename OUT_PORT, typename IN_PORT > -void Bulkio_MultiOut_Data_Port< OUT_PORT, IN_PORT >::test_multiout_sri_eos_filtered( ) { - - RH_DEBUG(this->logger, "Multiout SRI Filtered - BEGIN " ); - - ExtendedCF::UsesConnectionSequence *clist = this->port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - delete clist; - - RH_DEBUG(this->logger, "Multiout SRI Filtered - Create Connections and Filter list " ); - this->port->connectPort( this->ip1->_this(), "connection_1"); - this->port->connectPort( this->ip2->_this(), "connection_2"); - this->port->connectPort( this->ip3->_this(), "connection_3"); - this->port->connectPort( this->ip4->_this(), "connection_4"); - this->port->updateConnectionFilter( this->desc_list ); - - // - // Push SRI for IP1 - // - - std::string filter_stream_id( "stream-1-1" ); - double srate=11.0; - double xdelta = 1.0/srate; - BULKIO::StreamSRI sri; - BULKIO::PrecisionUTCTime TS = bulkio::time::utils::now(); - typename OUT_PORT::NativeSequenceType v(0); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - BULKIO::StreamSRISequence *streams = this->ip1->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - BULKIO::StreamSRI asri; - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - streams = this->ip2->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2 SRI was Received, Failed", streams->length() == 0 ); - delete streams; - - streams = this->ip3->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - 
Port 3, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3, SRI was Received, Failed", streams->length() == 0 ); - delete streams; - - streams = this->ip4->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4, SRI was Received, Failed", streams->length() == 0 ); - delete streams; - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout SRI Filter - sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - streams = this->ip1->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 1 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 1 StreamsLength, Failed", streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-1-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - streams = this->ip2->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-2-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - streams = this->ip3->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3, SRI was Received, Failed", streams->length() == 0 ); - delete streams; - - streams = this->ip4->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4, 
SRI was Received, Failed", streams->length() == 0 ); - delete streams; - - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout SRI Filter - sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - streams = this->ip1->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 1 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 1 StreamsLength, Failed", streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-1-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - streams = this->ip2->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2 StreamsLength, Failed", streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-2-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - - streams = this->ip3->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-3-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - streams = this->ip4->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4, SRI was Received, Failed", streams->length() == 0 ); - delete streams; - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 
1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout SRI Filter - sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - streams = this->ip1->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 1 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 1 StreamsLength, Failed", streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-1-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - streams = this->ip2->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2 StreamsLength, Failed", streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-2-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - streams = this->ip3->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3, Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3 StreamsLength, Failed", streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-3-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - streams = this->ip4->activeSRIs(); - CPPUNIT_ASSERT( streams != NULL ); - CPPUNIT_ASSERT( streams->length() == 1 ); - asri=(*streams)[0]; - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - StreamID Mismatch", strcmp( asri.streamID, "stream-4-1" ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "activeSRIs - SRI Mismatch:", asri.mode == 0 ) ; - delete streams; - - // - // Send EOS downstream and check activeSRIs - // - 
filter_stream_id = "stream-1-1"; - this->port->pushPacket( v, TS, true, filter_stream_id ); - - typename IN_PORT::dataTransfer *pkt; - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING );; - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 1) ; - - filter_stream_id = "stream-2-1"; - this->port->pushPacket( v, TS, true, filter_stream_id ); - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING );; - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 1) ; - - filter_stream_id = "stream-3-1"; - this->port->pushPacket( v, TS, true, filter_stream_id ); - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING );; - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 1) ; - - filter_stream_id = "stream-4-1"; - this->port->pushPacket( v, TS, true, filter_stream_id ); - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING );; - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 1) ; - - streams = this->ip1->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 1 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 1 SRI was Received, Failed", streams->length() == 0 ); - delete streams; - streams = 
this->ip2->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 2 SRI was Received, Failed", streams->length() == 0 ); - delete streams; - streams = this->ip3->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 3 SRI was Received, Failed", streams->length() == 0 ); - delete streams; - streams = this->ip3->activeSRIs(); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4 Stream Failed", streams != NULL ); - CPPUNIT_ASSERT_MESSAGE( "Multiout SRI Filtered - Port 4 SRI was Received, Failed", streams->length() == 0 ); - delete streams; - } -template < typename OUT_PORT, typename IN_PORT > -void Bulkio_MultiOut_Data_Port< OUT_PORT, IN_PORT >::test_multiout_data_filtered( ) { - - RH_DEBUG(this->logger, "Multiout Data Filter - 1 stream id , 4 independent consumers" ); - - RH_DEBUG(this->logger, "Multiout Data Filter - setup connections" ); - this->port->connectPort( this->ip1->_this(), "connection_1"); - this->port->connectPort( this->ip2->_this(), "connection_2"); - this->port->connectPort( this->ip3->_this(), "connection_3"); - this->port->connectPort( this->ip4->_this(), "connection_4"); - - ExtendedCF::UsesConnectionSequence *clist = this->port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - RH_DEBUG(this->logger, "Multiout Data Filter - Check connections:" << clist->length() ); - CPPUNIT_ASSERT( clist->length() == 4 ); - delete clist; - - this->port->updateConnectionFilter( this->desc_list ); - - // - // Test Filter for IP1 - // - - std::string filter_stream_id( "stream-1-1" ); - double srate=11.0; - double xdelta = 1.0/srate; - BULKIO::StreamSRI sri; - BULKIO::PrecisionUTCTime TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout Data Filter - Pushing vector to consumers, sid:" << filter_stream_id ); - sri = 
bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - typename OUT_PORT::NativeSequenceType v(91); - this->port->pushPacket( v, TS, false, filter_stream_id ); - - // check all the consumers to see if they got the correct packet - typename IN_PORT::dataTransfer *pkt ; - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - SRI Mismatch:", pkt->SRI.mode == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - Data Length:", pkt->dataBuffer.size() == 91 ) ; - if ( pkt ) delete pkt; - - // - // make sure others did not get data ip2, ip3, ip4 - // - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // Test Filter for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout Data Filter - Pushing vector to consumers, sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - this->port->pushPacket( v, TS, false, filter_stream_id ); - - // check all the consumers to see if they got the correct packet - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // make 
sure others did not get data ip1, ip3, ip4 - // - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - SRI Mismatch:", pkt->SRI.mode == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - Data Length:", pkt->dataBuffer.size() == 91 ) ; - if ( pkt ) delete pkt; - - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // Test Filter for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout Data Filter - Pushing vector to consumers, sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - this->port->pushPacket( v, TS, false, filter_stream_id ); - - // check all the consumers to see if they got the correct packet - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // make sure others did not get data ip1, ip3, ip4 - // - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - 
CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - SRI Mismatch:", pkt->SRI.mode == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - Data Length:", pkt->dataBuffer.size() == 91 ) ; - if ( pkt ) delete pkt; - - - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // Test Filter for IP4 - // - filter_stream_id = "stream-4-1"; - srate = 44.0; - xdelta = 1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout Data Filter - Pushing vector to consumers, sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - this->port->pushPacket( v, TS, false, filter_stream_id ); - - // check all the consumers to see if they got the correct packet - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // make sure others did not get data ip1, ip3, ip4 - // - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - SRI Mismatch:", pkt->SRI.mode == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - Data Length:", pkt->dataBuffer.size() == 91 ) ; - if ( pkt ) delete pkt; - -} - - -// -// 
test_multiout_data_sri_filtered( ) -// -// Test pushPacket data operations on each port do not affect the other port's state -// -template < typename OUT_PORT, typename IN_PORT > -void Bulkio_MultiOut_Data_Port< OUT_PORT, IN_PORT >::test_multiout_data_sri_filtered( ) { - - RH_DEBUG(this->logger, "Multiout Data/SRI Filter - 1 stream id , 4 independent consumers" ); - - RH_DEBUG(this->logger, "Multiout Data Filter - setup connections" ); - this->port->connectPort( this->ip1->_this(), "connection_1"); - this->port->connectPort( this->ip2->_this(), "connection_2"); - this->port->connectPort( this->ip3->_this(), "connection_3"); - this->port->connectPort( this->ip4->_this(), "connection_4"); - - ExtendedCF::UsesConnectionSequence *clist = this->port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - RH_DEBUG(this->logger, "Multiout Data Filter - Check connections:" << clist->length() ); - CPPUNIT_ASSERT( clist->length() == 4 ); - delete clist; - - this->port->updateConnectionFilter( this->desc_list ); - - // - // Test Filter for IP1 - // - - std::string filter_stream_id( "stream-1-1" ); - double srate=11.0; - double xdelta = 1.0/srate; - BULKIO::StreamSRI sri; - BULKIO::PrecisionUTCTime TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout Data Filter - Pushing vector to consumers, sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - typename OUT_PORT::NativeSequenceType v(91); - this->port->pushPacket( v, TS, false, filter_stream_id ); - - // check all the consumers to see if they got the correct packet - typename IN_PORT::dataTransfer *pkt ; - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING ); - RH_DEBUG(this->logger, "Multiout Data Filter - " << pkt->SRI.streamID << " exp:" << filter_stream_id ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 
); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - SRI Mismatch:", pkt->SRI.mode == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - Data Length:", pkt->dataBuffer.size() == 91 ) ; - if ( pkt ) delete pkt; - - // - // make sure others did not get data ip2, ip3, ip4 - // - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // Test Filter for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout Data Filter - Pushing vector to consumers, sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - this->port->pushPacket( v, TS, false, filter_stream_id ); - - // check all the consumers to see if they got the correct packet - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // make sure others did not get data ip1, ip3, ip4 - // - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING ); - RH_DEBUG(this->logger, "Multiout Data Filter - " << pkt->SRI.streamID << " exp:" << filter_stream_id ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - SRI Mismatch:", 
pkt->SRI.mode == 0 ) ; - CPPUNIT_ASSERT_DOUBLES_EQUAL( xdelta, pkt->SRI.xdelta, 0.01 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - Data Length:", pkt->dataBuffer.size() == 91 ) ; - if ( pkt ) delete pkt; - - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // Test Filter for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout Data Filter - Pushing vector to consumers, sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - this->port->pushPacket( v, TS, false, filter_stream_id ); - - // check all the consumers to see if they got the correct packet - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // make sure others did not get data ip1, ip3, ip4 - // - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING ); - RH_DEBUG(this->logger, "Multiout Data Filter - " << pkt->SRI.streamID << " exp:" << filter_stream_id ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:" , pkt->EOS == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - SRI Mismatch:", pkt->SRI.mode == 0 ) ; - CPPUNIT_ASSERT_DOUBLES_EQUAL( xdelta, pkt->SRI.xdelta, 0.01 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - Data 
Length:", pkt->dataBuffer.size() == 91 ) ; - if ( pkt ) delete pkt; - - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // Test Filter for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - TS = bulkio::time::utils::now(); - RH_DEBUG(this->logger, "Multiout Data Filter - Pushing vector to consumers, sid:" << filter_stream_id ); - sri = bulkio::sri::create( filter_stream_id, srate); - this->port->pushSRI( sri ); - - this->port->pushPacket( v, TS, false, filter_stream_id ); - - // check all the consumers to see if they got the correct packet - pkt = this->ip1->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - // - // make sure others did not get data ip1, ip3, ip4 - // - pkt = this->ip2->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip3->getPacket(bulkio::Const::NON_BLOCKING ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was NOT EMPTY", pkt == NULL ); - if ( pkt ) delete pkt; - - pkt = this->ip4->getPacket(bulkio::Const::NON_BLOCKING ); - RH_DEBUG(this->logger, "Multiout Data Filter - " << pkt->SRI.streamID << " exp:" << filter_stream_id ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - PKT was empty", pkt != NULL ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - StreamID Mismatch", strcmp( pkt->SRI.streamID, filter_stream_id.c_str() ) == 0 ); - CPPUNIT_ASSERT_MESSAGE( "getPacket - EOS Mismatch:", pkt->EOS == 0 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - SRI Mismatch:", pkt->SRI.mode == 0 ) ; - CPPUNIT_ASSERT_DOUBLES_EQUAL( xdelta, pkt->SRI.xdelta, 0.01 ) ; - CPPUNIT_ASSERT_MESSAGE( "getPacket - Data Length:", pkt->dataBuffer.size() == 91 ) ; - if ( pkt ) delete pkt; - -} - // // test_multiout_sri() // @@ -1090,28 +431,9 @@ void 
Bulkio_MultiOut_Attachable_Port< OUT_PORT, IN_PORT, STREAM_DEF >::test_mul // Registers the fixture into the 'registry' // this also worked sans type name in output CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutUInt8 ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutUInt8_Port ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutInt16_Port ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutUInt16_Port ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutInt32_Port ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutUInt32_Port ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutInt64_Port ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutUInt64_Port ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutDouble_Port ); -CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutFloat_Port ); CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutSDDS_Port ); CPPUNIT_TEST_SUITE_REGISTRATION( MultiOutVITA49_Port ); -//template class Bulkio_MultiOut_Data_Port< bulkio::OutCharPort, bulkio::InInt8Port >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutOctetPort, bulkio::InUInt8Port >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutInt16Port, bulkio::InInt16Port >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutUInt16Port, bulkio::InUInt16Port >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutInt32Port, bulkio::InInt32Port >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutUInt32Port, bulkio::InUInt32Port >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutInt64Port, bulkio::InInt64Port >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutUInt64Port, bulkio::InUInt64Port >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutDoublePort, bulkio::InDoublePort >; -template class Bulkio_MultiOut_Data_Port< bulkio::OutFloatPort, bulkio::InFloatPort >; template class Bulkio_MultiOut_Attachable_Port< bulkio::OutSDDSPort, bulkio::InSDDSPort, BULKIO::SDDSStreamDefinition>; template class Bulkio_MultiOut_Attachable_Port< bulkio::OutVITA49Port, bulkio::InVITA49Port, BULKIO::VITA49StreamDefinition >; diff --git 
a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_MultiOut_Port.h b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_MultiOut_Port.h index 33c719352..9ac66aa4f 100644 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_MultiOut_Port.h +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_MultiOut_Port.h @@ -37,8 +37,6 @@ class Bulkio_MultiOut_Port : public CppUnit::TestFixture rh_logger::LoggerPtr logger; - CORBA::ORB_ptr orb; - std::string lname; IN_PORT *ip1; @@ -56,24 +54,6 @@ class Bulkio_MultiOut_Port : public CppUnit::TestFixture }; -template< typename OUT_PORT, typename IN_PORT > -class Bulkio_MultiOut_Data_Port : public Bulkio_MultiOut_Port -{ - CPPUNIT_TEST_SUITE( Bulkio_MultiOut_Data_Port ); - CPPUNIT_TEST( test_multiout_sri_filtered ); - CPPUNIT_TEST( test_multiout_sri_eos_filtered ); - CPPUNIT_TEST( test_multiout_data_filtered ); - CPPUNIT_TEST( test_multiout_data_sri_filtered ); - CPPUNIT_TEST_SUITE_END(); - -public: - virtual void test_multiout_sri_filtered(); - virtual void test_multiout_sri_eos_filtered(); - virtual void test_multiout_data_filtered(); - virtual void test_multiout_data_sri_filtered(); -}; - - template< typename OUT_PORT, typename IN_PORT, typename STREAM_DEF > class Bulkio_MultiOut_Attachable_Port : public Bulkio_MultiOut_Port { @@ -156,46 +136,10 @@ class DefinitionGenerator { }; */ -typedef Bulkio_MultiOut_Data_Port< bulkio::OutOctetPort, bulkio::InUInt8Port > MultiOutUInt8; -typedef Bulkio_MultiOut_Data_Port< bulkio::OutInt16Port, bulkio::InInt16Port > MultiOutInt16; -typedef Bulkio_MultiOut_Data_Port< bulkio::OutUInt16Port, bulkio::InUInt16Port > MultiOutUInt16; -typedef Bulkio_MultiOut_Data_Port< bulkio::OutInt32Port, bulkio::InInt32Port > MultiOutInt32; -typedef Bulkio_MultiOut_Data_Port< bulkio::OutUInt32Port, bulkio::InUInt32Port > MultiOutUInt32; -typedef Bulkio_MultiOut_Data_Port< bulkio::OutInt64Port, bulkio::InInt64Port > MultiOutInt64; -typedef Bulkio_MultiOut_Data_Port< bulkio::OutUInt64Port, bulkio::InUInt64Port > 
MultiOutUInt64; -typedef Bulkio_MultiOut_Data_Port< bulkio::OutDoublePort, bulkio::InDoublePort > MultiOutDouble; -typedef Bulkio_MultiOut_Data_Port< bulkio::OutFloatPort, bulkio::InFloatPort > MultiOutFloat; - typedef Bulkio_MultiOut_Attachable_Port< bulkio::OutSDDSPort, bulkio::InSDDSPort, BULKIO::SDDSStreamDefinition > MultiOutSDDS; typedef Bulkio_MultiOut_Attachable_Port< bulkio::OutVITA49Port, bulkio::InVITA49Port, BULKIO::VITA49StreamDefinition> MultiOutVITA49; -#define DEF_TEST( IP, OP, NAME ) class MultiOut##IP##_Port : public Bulkio_MultiOut_Data_Port< bulkio::Out##OP##Port, bulkio::In##IP##Port > \ -{ \ - CPPUNIT_TEST_SUITE( MultiOut##IP##_Port ); \ - CPPUNIT_TEST( test_multiout_sri_filtered ); \ - CPPUNIT_TEST( test_multiout_sri_eos_filtered ); \ - CPPUNIT_TEST( test_multiout_data_filtered ); \ - CPPUNIT_TEST( test_multiout_data_sri_filtered ); \ - CPPUNIT_TEST_SUITE_END(); \ -public: \ -\ - MultiOut##IP##_Port() : MultiOut##IP (){ \ - this->lname=#NAME; \ - }; \ -}; - -DEF_TEST( UInt8, Octet, UINT8 ); -DEF_TEST( Int16, Int16, INT16 ); -DEF_TEST( UInt16, UInt16, UINT16 ); -DEF_TEST( Int32, Int32, INT32 ); -DEF_TEST( UInt32, UInt32, UINT32 ); -DEF_TEST( Int64, Int64, INT64 ); -DEF_TEST( UInt64, UInt64, UINT64 ); -DEF_TEST( Double, Double, DOUBLE ); -DEF_TEST( Float, Float, FLOAT ); - - #define DEF_ATTACHABLE_TEST( NAME ) class MultiOut##NAME##_Port : public Bulkio_MultiOut_Attachable_Port< bulkio::Out##NAME##Port, bulkio::In##NAME##Port, BULKIO::NAME##StreamDefinition > \ { \ CPPUNIT_TEST_SUITE( MultiOut##NAME##_Port ); \ diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_OutPort_Fixture.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_OutPort_Fixture.cpp deleted file mode 100644 index 1673ab009..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_OutPort_Fixture.cpp +++ /dev/null @@ -1,694 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. 
- * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#include "Bulkio_OutPort_Fixture.h" -#include "bulkio.h" - - -// Registers the fixture into the 'registry' -CPPUNIT_TEST_SUITE_REGISTRATION( Bulkio_OutPort_Fixture ); - - -class MyOutFloatPort : public bulkio::OutFloatPort { - -public: - - MyOutFloatPort( std::string pname, bulkio::LOGGER_PTR logger ) : - bulkio::OutFloatPort( pname, logger ) {}; - - - void pushPacket( bulkio::OutFloatPort::NativeSequenceType & data, BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamID) { - - stats[streamID].update( 1, 1.0, false, "testing" ); - bulkio::OutFloatPort::pushPacket( data, T, EOS, streamID ); - } - -}; - -class NewSriCallback { - -public: - - std::vector sids; - - ~NewSriCallback() {}; - - void newSriCB( const BULKIO::StreamSRI& sri) { - std::string sid(sri.streamID); - sids.push_back( sid ); - } -}; - - -// Global connection/disconnection callbacks -static void port_connected( const char* connectionId ) { - -} - -static void port_disconnected( const char* connectionId ) { - -} - - - -void -Bulkio_OutPort_Fixture::setUp() -{ - logger = rh_logger::Logger::getLogger("BulkioOutPort"); - logger->setLevel( rh_logger::Level::getInfo()); - orb = ossie::corba::CorbaInit(0,NULL); -} - - -void 
-Bulkio_OutPort_Fixture::tearDown() -{ -} - -template< typename T, typename IP > -void Bulkio_OutPort_Fixture::test_port_api( T *port ) { - - RH_DEBUG(logger, "Running tests port:" << port->getName() ); - - ExtendedCF::UsesConnectionSequence *clist = port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - delete clist; - - port->setNewConnectListener(&port_connected); - port->setNewDisconnectListener(&port_disconnected); - - { - CORBA::Object_ptr p = CORBA::Object::_nil(); - // narrowing exception expected here - // set logging level to Fatal to ignore port Error message - // and then set back to Info - logger->setLevel( rh_logger::Level::getFatal()); - CPPUNIT_ASSERT_THROW(port->connectPort( p, "connection_1"), CF::Port::InvalidPort ); - logger->setLevel( rh_logger::Level::getInfo()); - } - - IP *p = new IP("sink_1", logger ); - //PortableServer::ObjectId_var p_oid = ossie::corba::RootPOA()->activate_object(p); - port->connectPort( p->_this(), "connection_1"); - - port->disconnectPort( "connection_1"); - port->disconnectPort( "connection_1"); - //ossie::corba::RootPOA()->deactivate_object(p_oid); - - BULKIO::StreamSRI sri; - port->pushSRI( sri ); - - // Push using sequences - typename T::NativeSequenceType v; - BULKIO::PrecisionUTCTime TS; - port->pushPacket( v, TS, false, "test_port_api" ); - - port->pushPacket( v, TS, true, "test_port_api" ); - - port->pushPacket( v, TS, true, "unknown_stream_id" ); - - // Push using pointers - size_t size = 100; - typename T::TransportType* buff = new typename T::TransportType[size]; - port->pushPacket( buff, size, TS, false, "test_port_api" ); - - port->pushPacket( buff, size, TS, true, "test_port_api" ); - - port->pushPacket( buff, size, TS, true, "unknown_stream_id" ); - delete[] buff; - - BULKIO::UsesPortStatisticsSequence *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - - typename T::ConnectionsList cl 
= port->_getConnections(); - std::string sid="none"; - int cnt= port->getCurrentSRI().count(sid); - CPPUNIT_ASSERT( cnt == 0 ); - - port->enableStats( false ); - - port->setLogger(logger); -} - -template -void Bulkio_OutPort_Fixture::test_port_statistics(OutPort* outPort) { - InPort* in_port(new InPort("sink_1", logger)); - CORBA::Object_var objref = in_port->_this(); - - const std::string connection_id = "test_port_statistics"; - outPort->connectPort(objref, connection_id.c_str()); - - BULKIO::UsesPortStatisticsSequence_var uses_stats = outPort->statistics(); - CPPUNIT_ASSERT_EQUAL((CORBA::ULong)1, uses_stats->length()); - CPPUNIT_ASSERT_EQUAL(connection_id, std::string(uses_stats[0].connectionId)); - - BULKIO::StreamSRI sri = bulkio::sri::create("test_stream"); - outPort->pushSRI(sri); - - typename OutPort::NativeSequenceType data; - data.resize(1024); - BULKIO::PrecisionUTCTime time; - outPort->pushPacket(data, time, false, std::string(sri.streamID)); - - uses_stats = outPort->statistics(); - CPPUNIT_ASSERT_EQUAL((CORBA::ULong)1, uses_stats->length()); - const BULKIO::PortStatistics& stats = uses_stats[0].statistics; - - CPPUNIT_ASSERT(stats.elementsPerSecond > 0.0); - size_t bits_per_element = round(stats.bitsPerSecond / stats.elementsPerSecond); - CPPUNIT_ASSERT_EQUAL(8 * sizeof(typename OutPort::NativeType), bits_per_element); -} - -template< > -void Bulkio_OutPort_Fixture::test_port_api< bulkio::OutCharPort, bulkio::InCharPort >( bulkio::OutCharPort *port ) { - - ExtendedCF::UsesConnectionSequence *clist = port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - delete clist; - - port->setNewConnectListener(&port_connected); - port->setNewDisconnectListener(&port_disconnected); - - bulkio::InCharPort *p = new bulkio::InCharPort("sink_1", logger ); - PortableServer::ObjectId_var p_oid = ossie::corba::RootPOA()->activate_object(p); - port->connectPort( p->_this(), "connection_1"); - - port->disconnectPort( "connection_1"); - port->disconnectPort( 
"connection_1"); - ossie::corba::RootPOA()->deactivate_object(p_oid); - - BULKIO::StreamSRI sri; - port->pushSRI( sri ); - - // Push packets using sequence - std::vector< bulkio::OutCharPort::NativeType > v; - BULKIO::PrecisionUTCTime TS; - port->pushPacket( v, TS, false, "test_port_api" ); - - std::vector< bulkio::Char > v1; - port->pushPacket( v1, TS, false, "test_port_api" ); - - // Push packets using pointers - size_t size = 100; - char* buff = new char[size]; - port->pushPacket( buff, size, TS, false, "test_port_api" ); - delete[] buff; - - bulkio::Int8* buff1 = new bulkio::Int8[size]; - port->pushPacket( buff1, size, TS, false, "test_port_api" ); - delete[] buff1; - - BULKIO::UsesPortStatisticsSequence *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - port->enableStats( false ); - - port->setLogger(logger); -} - - -template< > -void Bulkio_OutPort_Fixture::test_port_api< bulkio::OutFilePort, bulkio::InFilePort >( bulkio::OutFilePort *port ) { - - ExtendedCF::UsesConnectionSequence *clist = port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - delete clist; - - port->setNewConnectListener(&port_connected); - port->setNewDisconnectListener(&port_disconnected); - - bulkio::InFilePort *p = new bulkio::InFilePort("sink_1", logger ); - PortableServer::ObjectId_var p_oid = ossie::corba::RootPOA()->activate_object(p); - port->connectPort( p->_this(), "connection_1"); - - port->disconnectPort( "connection_1"); - port->disconnectPort( "connection_1"); - ossie::corba::RootPOA()->deactivate_object(p_oid); - - BULKIO::StreamSRI sri; - port->pushSRI( sri ); - - bulkio::OutFilePort::NativeSequenceType v; - BULKIO::PrecisionUTCTime TS; - port->pushPacket( v, TS, false, "test_port_api" ); - - port->pushPacket( v, TS, true, "test_port_api" ); - - BULKIO::UsesPortStatisticsSequence *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete 
stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - port->enableStats( false ); - - port->setLogger(logger); -} - - - -template<> -void Bulkio_OutPort_Fixture::test_port_api< bulkio::OutXMLPort, bulkio::InXMLPort >( bulkio::OutXMLPort *port ) { - - ExtendedCF::UsesConnectionSequence *clist = port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - delete clist; - - port->setNewConnectListener(&port_connected); - port->setNewDisconnectListener(&port_disconnected); - - bulkio::InXMLPort *p = new bulkio::InXMLPort("sink_1", logger ); - PortableServer::ObjectId_var p_oid = ossie::corba::RootPOA()->activate_object(p); - port->connectPort( p->_this(), "connection_1"); - - port->disconnectPort( "connection_1"); - port->disconnectPort( "connection_1"); - ossie::corba::RootPOA()->deactivate_object(p_oid); - - BULKIO::StreamSRI sri; - port->pushSRI( sri ); - - bulkio::OutXMLPort::NativeSequenceType v; - BULKIO::PrecisionUTCTime TS; - port->pushPacket( v, false, "test_port_api" ); - - port->pushPacket( v, TS, true, "test_port_api" ); - - BULKIO::UsesPortStatisticsSequence *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - port->enableStats( false ); - - port->setLogger(logger); -} - - -template< > -void Bulkio_OutPort_Fixture::test_port_api< bulkio::OutSDDSPort, bulkio::InSDDSPort >( bulkio::OutSDDSPort *port ) { - - ExtendedCF::UsesConnectionSequence *clist = port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - delete clist; - - port->setNewConnectListener(&port_connected); - port->setNewDisconnectListener(&port_disconnected); - - - bulkio::InSDDSPort *p = new bulkio::InSDDSPort("sink_1", logger ); - PortableServer::ObjectId_var p_oid = ossie::corba::RootPOA()->activate_object(p); - port->connectPort( p->_this(), "connection_1"); - - port->disconnectPort( "connection_1"); - port->disconnectPort( 
"connection_1"); - ossie::corba::RootPOA()->deactivate_object(p_oid); - - - BULKIO::StreamSRI sri; - BULKIO::PrecisionUTCTime TS; - port->pushSRI( sri, TS ); - - BULKIO::UsesPortStatisticsSequence *stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - delete stats; - - BULKIO::PortUsageType rt = port->state(); - CPPUNIT_ASSERT( rt == BULKIO::IDLE ); - - port->enableStats( false ); - - // create a connection - port->connectPort( p->_this(), "connection_1"); - port->enableStats( true ); - port->setBitSize(10); - port->updateStats( 12, 1, false, "stream1"); - - stats = port->statistics(); - CPPUNIT_ASSERT( stats != NULL ); - int slen = stats->length(); - //std::cout << " slen :" << slen << std::endl; - CPPUNIT_ASSERT( slen == 1 ) ; - CPPUNIT_ASSERT( strcmp((*stats)[0].connectionId, "connection_1") == 0 ); - delete stats; - - port->setLogger(logger); -} - -template< > -void Bulkio_OutPort_Fixture::test_port_sri< bulkio::OutSDDSPort, bulkio::InSDDSPort >( bulkio::OutSDDSPort *port ) { - - ExtendedCF::UsesConnectionSequence *clist = port->connections(); - CPPUNIT_ASSERT( clist != NULL ); - delete clist; - - - NewSriCallback sri_cb; - bulkio::InSDDSPort *p = new bulkio::InSDDSPort("sink_1", logger ); - PortableServer::ObjectId_var p_oid = ossie::corba::RootPOA()->activate_object(p); - p->setNewSriListener(&sri_cb, &NewSriCallback::newSriCB ); - - BULKIO::StreamSRI sri; - BULKIO::SDDSStreamDefinition sdds; - sri.streamID = "stream1"; - sri.xdelta = 1/1000.0; - sdds.id = "stream1"; - sdds.dataFormat = BULKIO::SDDS_SB; - sdds.multicastAddress = "bad.ip.address"; - sdds.port = 9999; - sdds.vlan = 0; - port->addStream(sdds); - port->pushSRI(sri, bulkio::time::utils::now()); - - sri.streamID = "stream2"; - sdds.id = "stream2"; - port->addStream(sdds); - port->pushSRI(sri, bulkio::time::utils::now()); - - port->connectPort( p->_this(), "connection_1"); - - int slen = sri_cb.sids.size(); - CPPUNIT_ASSERT( slen == 2 ) ; - - port->disconnectPort( "connection_1"); - 
ossie::corba::RootPOA()->deactivate_object(p_oid); -} - - - - - -void -Bulkio_OutPort_Fixture::test_create_int8() -{ - bulkio::OutCharPort *port = new bulkio::OutCharPort("test_int8", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_int8() -{ - bulkio::OutCharPort *port = new bulkio::OutCharPort("test_api_int8", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_int16() -{ - bulkio::OutInt16Port *port = new bulkio::OutInt16Port("test_ctor_int16", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - - -void -Bulkio_OutPort_Fixture::test_int16() -{ - bulkio::OutInt16Port *port = new bulkio::OutInt16Port("test_api_int16", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_int32() -{ - bulkio::OutInt32Port *port = new bulkio::OutInt32Port("test_ctor_int32", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_int32() -{ - bulkio::OutInt32Port *port = new bulkio::OutInt32Port("test_api_int32", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - - -void -Bulkio_OutPort_Fixture::test_create_int64() -{ - bulkio::OutInt64Port *port = new bulkio::OutInt64Port("test_ctor_int64", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_int64() -{ - bulkio::OutInt64Port *port = new bulkio::OutInt64Port("test_api_int64", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - - -void -Bulkio_OutPort_Fixture::test_create_uint8() -{ - bulkio::OutOctetPort *port = new bulkio::OutOctetPort("test_ctor_uint8", logger ); - CPPUNIT_ASSERT( 
port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_create_uint16() -{ - bulkio::OutUInt16Port *port = new bulkio::OutUInt16Port("test_ctor_uint16", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_uint16() -{ - bulkio::OutUInt16Port *port = new bulkio::OutUInt16Port("test_api_uint16", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_uint32() -{ - bulkio::OutUInt32Port *port = new bulkio::OutUInt32Port("test_ctor_uint32", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_uint32() -{ - bulkio::OutUInt32Port *port = new bulkio::OutUInt32Port("test_api_uint32", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_uint64() -{ - bulkio::OutUInt64Port *port = new bulkio::OutUInt64Port("test_ctor_uint64", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_uint64() -{ - bulkio::OutUInt64Port *port = new bulkio::OutUInt64Port("test_api_uint64", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_float() -{ - bulkio::OutFloatPort *port = new bulkio::OutFloatPort("test_ctor_float", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_float() -{ - bulkio::OutFloatPort *port = new bulkio::OutFloatPort("test_api_float", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_double() -{ - bulkio::OutDoublePort *port = new bulkio::OutDoublePort("test_ctor_double", logger ); - CPPUNIT_ASSERT( port != 
NULL ); -} - -void -Bulkio_OutPort_Fixture::test_double() -{ - bulkio::OutDoublePort *port = new bulkio::OutDoublePort("test_api_double", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - test_port_statistics(port); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_file() -{ - bulkio::OutFilePort *port = new bulkio::OutFilePort("test_ctor_file", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - -void -Bulkio_OutPort_Fixture::test_file() -{ - bulkio::OutFilePort *port = new bulkio::OutFilePort("test_api_file", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api< bulkio::OutFilePort, bulkio::InFilePort >( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_xml() -{ - bulkio::OutXMLPort *port = new bulkio::OutXMLPort("test_ctor_xml", logger ); - CPPUNIT_ASSERT( port != NULL ); -} - - - - -void -Bulkio_OutPort_Fixture::test_xml() -{ - bulkio::OutXMLPort *port = new bulkio::OutXMLPort("test_api_xml", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api< bulkio::OutXMLPort, bulkio::InXMLPort >( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_create_sdds() -{ - bulkio::OutSDDSPort *port = new bulkio::OutSDDSPort("test_ctor_sdds", logger); - CPPUNIT_ASSERT( port != NULL ); -} - - - - -void -Bulkio_OutPort_Fixture::test_sdds() -{ - bulkio::OutSDDSPort *port = new bulkio::OutSDDSPort("test_api_sdds", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api< bulkio::OutSDDSPort, bulkio::InSDDSPort > ( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - -void -Bulkio_OutPort_Fixture::test_sdds_sri() -{ - bulkio::OutSDDSPort *port = new bulkio::OutSDDSPort("test_sdds_sri", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_sri< bulkio::OutSDDSPort, bulkio::InSDDSPort > ( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - - - -void -Bulkio_OutPort_Fixture::test_subclass() -{ - bulkio::OutFloatPort *port = 
new MyOutFloatPort("test_api_subclass", logger ); - CPPUNIT_ASSERT( port != NULL ); - - test_port_api( port ); - - CPPUNIT_ASSERT_NO_THROW( port ); -} - diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_OutPort_Fixture.h b/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_OutPort_Fixture.h deleted file mode 100644 index 2f0d39fc3..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Bulkio_OutPort_Fixture.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -#ifndef BULKIO_OUTPORT_FIXTURE_H -#define BULKIO_OUTPORT_FIXTURE_H - -#include -#include -#include -class Bulkio_OutPort_Fixture : public CppUnit::TestFixture -{ - CPPUNIT_TEST_SUITE( Bulkio_OutPort_Fixture ); - CPPUNIT_TEST( test_create_int8 ); - CPPUNIT_TEST( test_int8 ); - CPPUNIT_TEST( test_create_int16 ); - CPPUNIT_TEST( test_int16 ); - CPPUNIT_TEST( test_create_int32); - CPPUNIT_TEST( test_int32 ); - CPPUNIT_TEST( test_create_int64); - CPPUNIT_TEST( test_int64 ); - CPPUNIT_TEST( test_create_uint8 ); - CPPUNIT_TEST( test_create_uint16 ); - CPPUNIT_TEST( test_uint16 ); - CPPUNIT_TEST( test_create_uint32); - CPPUNIT_TEST( test_uint32 ); - CPPUNIT_TEST( test_create_uint64); - CPPUNIT_TEST( test_uint64 ); - CPPUNIT_TEST( test_create_float ); - CPPUNIT_TEST( test_float ); - CPPUNIT_TEST( test_create_double ); - CPPUNIT_TEST( test_double ); - CPPUNIT_TEST( test_create_file ); - CPPUNIT_TEST( test_file ); - CPPUNIT_TEST( test_create_xml ); - CPPUNIT_TEST( test_xml ); - CPPUNIT_TEST( test_create_sdds ); - CPPUNIT_TEST( test_sdds ); - CPPUNIT_TEST( test_sdds_sri ); - CPPUNIT_TEST( test_subclass ); - CPPUNIT_TEST_SUITE_END(); - -public: - void setUp(); - void tearDown(); - - void test_create_int8(); - void test_int8(); - void test_create_int16(); - void test_int16(); - void test_create_int32(); - void test_int32(); - void test_create_int64(); - void test_int64(); - void test_create_uint8(); - void test_create_uint16(); - void test_uint16(); - void test_create_uint32(); - void test_uint32(); - void test_create_uint64(); - void test_uint64(); - void test_create_float(); - void test_float(); - void test_create_double(); - void test_double(); - void test_create_file(); - void test_file(); - void test_create_xml(); - void test_xml(); - void test_create_sdds(); - void test_sdds(); - void test_sdds_sri(); - void test_subclass(); - - - template < typename T, typename IP > void test_port_api( T *port ); - - template < typename T, typename IP > void test_port_sri( T *port 
); - - template < typename InPort, typename OutPort > void test_port_statistics(OutPort* outPort); - - rh_logger::LoggerPtr logger; - - CORBA::ORB_ptr orb; -}; - -#endif // BULKIO_OutPort_FIXTURE_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/InPortStub.h b/bulkioInterfaces/libsrc/testing/tests/cpp/InPortStub.h index bd15b94e3..7a63ff462 100644 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/InPortStub.h +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/InPortStub.h @@ -22,14 +22,11 @@ #include "bulkio.h" -template -class InPortStub : public virtual PortTraits::POAPortType +template +class InPortStubBase : public virtual bulkio::CorbaTraits::POAType { public: - typedef typename PortTraits::SequenceType SequenceType; - typedef typename PortTraits::PushType PushType; - - InPortStub() + InPortStubBase() { } @@ -53,13 +50,22 @@ class InPortStub : public virtual PortTraits::POAPortType return new BULKIO::StreamSRISequence(); } - virtual void pushPacket(PushType data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) + std::vector H; +}; + +template +class InPortStub : public InPortStubBase +{ +public: + typedef typename bulkio::CorbaTraits::SequenceType SequenceType; + + virtual void pushPacket(const SequenceType& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) { packets.push_back(Packet(data, T, EOS, streamID)); } struct Packet { - Packet(PushType data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) : + Packet(const SequenceType& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) : data(data), T(T), EOS(EOS), @@ -67,13 +73,85 @@ class InPortStub : public virtual PortTraits::POAPortType { } + size_t size() const + { + return data.length(); + } + SequenceType data; BULKIO::PrecisionUTCTime T; bool EOS; std::string streamID; }; - std::vector H; + std::vector packets; +}; + +template <> +inline size_t InPortStub::Packet::size() const +{ + 
return data.bits; +} + +template <> +class InPortStub : public InPortStubBase +{ +public: + virtual void pushPacket(const char* data, CORBA::Boolean EOS, const char* streamID) + { + this->packets.push_back(Packet(data, EOS, streamID)); + } + + struct Packet { + Packet(const char* data, CORBA::Boolean EOS, const char* streamID) : + data(data), + EOS(EOS), + streamID(streamID) + { + } + + size_t size() const + { + return data.size(); + } + + std::string data; + bool EOS; + std::string streamID; + }; + + std::vector packets; +}; + +template <> +class InPortStub : public InPortStubBase +{ +public: + virtual void pushPacket(const char* data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) + { + packets.push_back(Packet(data, T, EOS, streamID)); + } + + struct Packet { + Packet(const char* data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) : + data(data), + T(T), + EOS(EOS), + streamID(streamID) + { + } + + size_t size() const + { + return data.size(); + } + + std::string data; + BULKIO::PrecisionUTCTime T; + bool EOS; + std::string streamID; + }; + std::vector packets; }; diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTest.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTest.cpp new file mode 100644 index 000000000..fa11ed778 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTest.cpp @@ -0,0 +1,491 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "InPortTest.h" + +#include + +class SriListener { +public: + void updateSRI(BULKIO::StreamSRI& sri) + { + this->sri.push_back(sri); + } + + std::vector sri; +}; + +template +void InPortTest::testLegacyAPI() +{ + // Test for methods that are technically still supported, but discouraged + port->enableStats(false); + + port->block(); + + port->unblock(); +} + +template +void InPortTest::testGetPacket() +{ + // Port queue starts empty + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(!packet); + + const char* stream_id = "test_get_packet"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + BULKIO::PrecisionUTCTime ts = bulkio::time::utils::now(); + this->_pushTestPacket(50, ts, false, stream_id); + + // Check result of getPacket + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL((size_t) 50, packet->dataBuffer.size()); + CPPUNIT_ASSERT(!packet->EOS); + CPPUNIT_ASSERT_EQUAL(std::string(stream_id), packet->streamID); + CPPUNIT_ASSERT(bulkio::sri::DefaultComparator(sri, packet->SRI)); + CPPUNIT_ASSERT(packet->sriChanged); + CPPUNIT_ASSERT(!packet->inputQueueFlushed); + + // No packet, should return null + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(!packet); + + // Change mode to complex and push another packet with EOS set + sri.mode = 1; + port->pushSRI(sri); + this->_pushTestPacket(100, ts, true, sri.streamID); + 
packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL((size_t) 100, packet->dataBuffer.size()); + CPPUNIT_ASSERT(packet->EOS); + CPPUNIT_ASSERT(packet->sriChanged); + CPPUNIT_ASSERT_EQUAL(1, (int) packet->SRI.mode); +} + +template +void InPortTest::testGetPacketStreamRemoved() +{ + typedef typename Port::StreamList StreamList; + + // Create a new stream and push some data to it + const char* stream_id = "test_get_packet_stream_removed"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + this->_pushTestPacket(50, bulkio::time::utils::now(), true, stream_id); + + StreamList streams = port->getStreams(); + CPPUNIT_ASSERT_EQUAL((size_t) 1, streams.size()); + + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(true, packet->EOS); + + // The set of streams should be empty again + streams = port->getStreams(); + CPPUNIT_ASSERT(streams.empty()); +} + +template +void InPortTest::testActiveSRIs() +{ + BULKIO::StreamSRISequence_var active_sris = port->activeSRIs(); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 0, active_sris->length()); + + // Push a new SRI, and make sure that it is immediately visible and + // correct in activeSRIs + const char* stream_id_1 = "active_sri_1"; + BULKIO::StreamSRI sri_1 = bulkio::sri::create(stream_id_1); + port->pushSRI(sri_1); + active_sris = port->activeSRIs(); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 1, active_sris->length()); + CPPUNIT_ASSERT(bulkio::sri::DefaultComparator(active_sris[0], sri_1)); + + // Push a second SRI, and make sure that activeSRIs is up-to-date + const char* stream_id_2 = "active_sri_2"; + BULKIO::StreamSRI sri_2 = bulkio::sri::create(stream_id_2); + port->pushSRI(sri_2); + active_sris = port->activeSRIs(); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 2, active_sris->length()); + for (size_t index = 0; index < active_sris->length(); ++index) { + const std::string 
current_id(active_sris[index].streamID); + if (current_id == stream_id_2) { + CPPUNIT_ASSERT(bulkio::sri::DefaultComparator(active_sris[index], sri_2)); + } else if (current_id != stream_id_1) { + CPPUNIT_FAIL("unexpected SRI '" + current_id + "'"); + } + } + + // Push an end-of-stream, retrieve the packet, and verify that the + // stream is no longer in activeSRIs + this->_pushTestPacket(0, bulkio::time::utils::notSet(), true, stream_id_1); + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(packet->EOS); + active_sris = port->activeSRIs(); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 1, active_sris->length()); + CPPUNIT_ASSERT_EQUAL(std::string(stream_id_2), std::string(active_sris[0].streamID)); +} + +template +void InPortTest::testQueueDepth() +{ + const char* stream_id = "test_state"; + + // The port had better start with an empty queue + CPPUNIT_ASSERT_EQUAL(0, port->getCurrentQueueDepth()); + + // Use a non-blocking stream to allow queue flushing + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + sri.blocking = false; + port->pushSRI(sri); + + // Push some test packets, the queue should start growing + for (int ii = 0; ii < 4; ii++) { + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + } + CPPUNIT_ASSERT_EQUAL(4, port->getCurrentQueueDepth()); + + // Read a packet and make sure the current depth drops + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(3, port->getCurrentQueueDepth()); + + // Reduce the max queue size and push another packet, causing a flush + port->setMaxQueueDepth(3); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + CPPUNIT_ASSERT_EQUAL(1, port->getCurrentQueueDepth()); + + // Read the packet and make sure the flush is reported + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + 
CPPUNIT_ASSERT(packet->inputQueueFlushed); + + // One more packet, should not report a flush + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(!packet->inputQueueFlushed); +} + +template +void InPortTest::testState() +{ + const char* stream_id = "test_state"; + + // Port starts out idle + CPPUNIT_ASSERT_EQUAL(BULKIO::IDLE, port->state()); + + // Push one test packet, state goes to active + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + CPPUNIT_ASSERT_EQUAL(BULKIO::ACTIVE, port->state()); + + // Full queue should report busy + port->setMaxQueueDepth(2); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + CPPUNIT_ASSERT_EQUAL(BULKIO::BUSY, port->state()); + + // Drop below max, back to active + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(BULKIO::ACTIVE, port->state()); + + // Empty queue, back to idle + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(BULKIO::IDLE, port->state()); +} + +template +void InPortTest::testSriChanged() +{ + const char* stream_id = "sri_changed"; + + SriListener listener; + port->setNewStreamListener(&listener, &SriListener::updateSRI); + CPPUNIT_ASSERT(listener.sri.empty()); + + // Create a default SRI and push it, which should trigger the callback + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + CPPUNIT_ASSERT_EQUAL((size_t) 1, listener.sri.size()); + CPPUNIT_ASSERT_EQUAL(std::string(stream_id), std::string(listener.sri.back().streamID)); + + // SRI should report changed for first packet + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + boost::scoped_ptr packet; + 
packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(packet->sriChanged); + CPPUNIT_ASSERT_EQUAL((size_t) 1, listener.sri.size()); + + // No SRI change for second packet + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + CPPUNIT_ASSERT_EQUAL((size_t) 1, listener.sri.size()); + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(!packet->sriChanged); + + // Change the SRI, should flag the packet + sri.mode = 1; + port->pushSRI(sri); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + CPPUNIT_ASSERT_EQUAL((size_t) 1, listener.sri.size()); + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(packet->sriChanged); +} + +template +void InPortTest::testSriChangedFlush() +{ + const char* stream_id = "sri_changed_flush"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + // Reduce the queue size so we can force a flush + port->setMaxQueueDepth(2); + + // Push a packet, change the SRI, and push two more packets so that the + // packet with the associated SRI change gets flushed + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + sri.xdelta = 0.5; + port->pushSRI(sri); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, stream_id); + + // Get the last packet and verify that the queue has flushed, and the SRI + // change is still reported + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(packet->inputQueueFlushed); + CPPUNIT_ASSERT(packet->sriChanged); +} + +template +void InPortTest::testSriChangedInvalidStream() +{ + const char* stream_id = "invalid_stream"; + + // Turn off the port's logging to avoid dumping a warning to the screen + 
port->getLogger()->setLevel(rh_logger::Level::getOff()); + + SriListener listener; + port->setNewStreamListener(&listener, &SriListener::updateSRI); + CPPUNIT_ASSERT(listener.sri.empty()); + + // Push data without an SRI to check that the sriChanged flag is still set + // and the SRI callback gets called + this->_pushTestPacket(100, BULKIO::PrecisionUTCTime(), false, stream_id); + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(packet->sriChanged); + CPPUNIT_ASSERT_EQUAL((size_t) 1, listener.sri.size()); + CPPUNIT_ASSERT_EQUAL(std::string(stream_id), std::string(listener.sri.back().streamID)); + + // Push again to the same stream ID; sriChanged should now be false and the + // SRI callback should not get called + this->_pushTestPacket(100, BULKIO::PrecisionUTCTime(), false, stream_id); + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(!(packet->sriChanged)); + CPPUNIT_ASSERT_EQUAL((size_t) 1, listener.sri.size()); +} + +template +void InPortTest::testStatistics() +{ + // Push a packet of data to trigger meaningful statistics + const char* stream_id = "port_stats"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + this->_pushTestPacket(1024, bulkio::time::utils::now(), false, stream_id); + + // Check that the statistics report the right element size + BULKIO::PortStatistics_var stats = port->statistics(); + CPPUNIT_ASSERT(stats->elementsPerSecond > 0.0); + size_t bits_per_element = round(stats->bitsPerSecond / stats->elementsPerSecond); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Incorrect bits per element", BITS_PER_ELEMENT, bits_per_element); +} + +template +void InPortTest::testDiscardEmptyPacket() +{ + // Push an empty, non-EOS packet + const char* stream_id = "empty_packet"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + this->_pushTestPacket(0, 
bulkio::time::utils::now(), false, stream_id); + + // No packet should be returned + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(!packet); +} + +template +void InPortTest::testQueueFlushFlags() +{ + // Push 1 packet for the normal data stream + BULKIO::StreamSRI sri_data = bulkio::sri::create("stream_data"); + sri_data.blocking = false; + port->pushSRI(sri_data); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, sri_data.streamID); + + // Push 1 packet for the EOS test stream + BULKIO::StreamSRI sri_eos = bulkio::sri::create("stream_eos"); + sri_eos.blocking = false; + port->pushSRI(sri_eos); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, sri_eos.streamID); + + // Push 1 packet for the SRI change stream + BULKIO::StreamSRI sri_change = bulkio::sri::create("stream_sri"); + sri_change.blocking = false; + sri_change.mode = 0; + port->pushSRI(sri_change); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, sri_change.streamID); + + // Grab the packets to ensure the initial conditions are correct + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(std::string(sri_data.streamID), packet->streamID); + + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(std::string(sri_eos.streamID), packet->streamID); + + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(std::string(sri_change.streamID), packet->streamID); + + // Push an EOS packet for the EOS stream + this->_pushTestPacket(0, bulkio::time::utils::notSet(), true, sri_eos.streamID); + + // Modify the SRI for the SRI change stream and push another packet + sri_change.mode = 1; + port->pushSRI(sri_change); + this->_pushTestPacket(2, bulkio::time::utils::now(), false, sri_change.streamID); + + // Cause a queue flush by lowering 
the ceiling and pushing packets + port->setMaxQueueDepth(3); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, sri_data.streamID); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, sri_data.streamID); + + // Push another packet for the SRI change stream + this->_pushTestPacket(2, bulkio::time::utils::now(), false, sri_change.streamID); + + // 1st packet should be for EOS stream, with no data or SRI change flag + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(std::string(sri_eos.streamID), packet->streamID); + CPPUNIT_ASSERT(packet->inputQueueFlushed); + CPPUNIT_ASSERT(packet->EOS); + CPPUNIT_ASSERT(!packet->sriChanged); + CPPUNIT_ASSERT(packet->dataBuffer.empty()); + + // 2nd packet should be for data stream, with no EOS or SRI change flag + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(std::string(sri_data.streamID), packet->streamID); + CPPUNIT_ASSERT(!packet->inputQueueFlushed); + CPPUNIT_ASSERT(!packet->EOS); + CPPUNIT_ASSERT(!packet->sriChanged); + + // 3rd packet should contain the "lost" SRI change flag + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT_EQUAL(std::string(sri_change.streamID), packet->streamID); + CPPUNIT_ASSERT(!packet->inputQueueFlushed); + CPPUNIT_ASSERT(!packet->EOS); + CPPUNIT_ASSERT(packet->sriChanged); +} + +template +void InPortTest::testQueueSize() +{ + BULKIO::StreamSRI sri = bulkio::sri::create("queue_size"); + port->pushSRI(sri); + + // Start with a reasonably small queue depth and check that a flush occurs at + // the expected time + port->setMaxQueueDepth(10); + for (int ii = 0; ii < 10; ++ii) { + this->_pushTestPacket(1, bulkio::time::utils::now(), false, sri.streamID); + } + CPPUNIT_ASSERT_EQUAL(10, port->getCurrentQueueDepth()); + this->_pushTestPacket(1, bulkio::time::utils::now(), false, sri.streamID); + 
CPPUNIT_ASSERT_EQUAL(1, port->getCurrentQueueDepth()); + + boost::scoped_ptr packet; + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(packet->inputQueueFlushed); + + // Set queue depth to unlimited and push a lot of packets + port->setMaxQueueDepth(-1); + const int QUEUE_SIZE = 250; + for (int ii = 0; ii < QUEUE_SIZE; ++ii) { + this->_pushTestPacket(1, bulkio::time::utils::now(), false, sri.streamID); + } + CPPUNIT_ASSERT_EQUAL(QUEUE_SIZE, port->getCurrentQueueDepth()); + for (int ii = 0; ii < QUEUE_SIZE; ++ii) { + packet.reset(port->getPacket(bulkio::Const::NON_BLOCKING)); + CPPUNIT_ASSERT(packet); + CPPUNIT_ASSERT(!packet->inputQueueFlushed); + } +} + +#define CREATE_TEST(x,BITS) \ + class In##x##PortTest : public InPortTest \ + { \ + CPPUNIT_TEST_SUB_SUITE(In##x##PortTest, InPortTest); \ + CPPUNIT_TEST_SUITE_END(); \ + }; \ + template <> \ + const size_t InPortTest::BITS_PER_ELEMENT = BITS; \ + CPPUNIT_TEST_SUITE_REGISTRATION(In##x##PortTest); + +CREATE_TEST(Octet, 8); +CREATE_TEST(Char, 8); +CREATE_TEST(Short, 16); +CREATE_TEST(UShort, 16); +CREATE_TEST(Long, 32); +CREATE_TEST(ULong, 32); +CREATE_TEST(LongLong, 64); +CREATE_TEST(ULongLong, 64); +CREATE_TEST(Float, 32); +CREATE_TEST(Double, 64); +CREATE_TEST(Bit, 1); +CREATE_TEST(XML, 8); +CREATE_TEST(File, 8); diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTest.h b/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTest.h new file mode 100644 index 000000000..7ed3b29e6 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTest.h @@ -0,0 +1,70 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef BULKIO_INPORTTEST_H +#define BULKIO_INPORTTEST_H + +#include "InPortTestFixture.h" + +template +class InPortTest : public InPortTestFixture +{ + typedef InPortTestFixture TestBase; + + CPPUNIT_TEST_SUITE(InPortTest); + CPPUNIT_TEST(testLegacyAPI); + CPPUNIT_TEST(testGetPacket); + CPPUNIT_TEST(testGetPacketStreamRemoved); + CPPUNIT_TEST(testActiveSRIs); + CPPUNIT_TEST(testQueueDepth); + CPPUNIT_TEST(testState); + CPPUNIT_TEST(testSriChanged); + CPPUNIT_TEST(testSriChangedFlush); + CPPUNIT_TEST(testSriChangedInvalidStream); + CPPUNIT_TEST(testStatistics); + CPPUNIT_TEST(testDiscardEmptyPacket); + CPPUNIT_TEST(testQueueFlushFlags); + CPPUNIT_TEST(testQueueSize); + CPPUNIT_TEST_SUITE_END(); + +public: + void testLegacyAPI(); + void testGetPacket(); + void testGetPacketStreamRemoved(); + void testActiveSRIs(); + void testQueueDepth(); + void testState(); + void testSriChanged(); + void testSriChangedFlush(); + void testSriChangedInvalidStream(); + void testStatistics(); + void testDiscardEmptyPacket(); + void testQueueFlushFlags(); + void testQueueSize(); + +protected: + typedef typename Port::dataTransfer PacketType; + typedef typename Port::CorbaType CorbaType; + + static const size_t BITS_PER_ELEMENT; + + using TestBase::port; +}; + +#endif // BULKIO_INPORTTEST_H 
diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTestFixture.h b/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTestFixture.h new file mode 100644 index 000000000..8c5de02d0 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/InPortTestFixture.h @@ -0,0 +1,90 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef BULKIO_INPORTTESTFIXTURE_H +#define BULKIO_INPORTTESTFIXTURE_H + +#include + +#include + +template +class InPortTestFixture : public CppUnit::TestFixture +{ +public: + void setUp() + { + port = new Port(getPortName()); + } + + void tearDown() + { + delete port; + } + +protected: + typedef typename Port::CorbaType CorbaType; + + virtual std::string getPortName() const + { + std::string name = bulkio::CorbaTraits::name(); + return name + "_in"; + } + + inline void _pushTestPacket(size_t length, const BULKIO::PrecisionUTCTime& time, + bool eos, const char* streamID) + { + typename Port::PortSequenceType data; + data.length(length); + port->pushPacket(data, time, eos, streamID); + } + + Port* port; +}; + +template <> +inline void InPortTestFixture::_pushTestPacket(size_t length, + const BULKIO::PrecisionUTCTime& time, + bool eos, const char* streamID) +{ + std::string data(length, ' '); + port->pushPacket(data.c_str(), time, eos, streamID); +} + +template <> +inline void InPortTestFixture::_pushTestPacket(size_t length, + const BULKIO::PrecisionUTCTime&, + bool eos, const char* streamID) +{ + std::string data(length, ' '); + port->pushPacket(data.c_str(), eos, streamID); +} + +template <> +inline void InPortTestFixture::_pushTestPacket(size_t length, + const BULKIO::PrecisionUTCTime& time, + bool eos, const char* streamID) +{ + BULKIO::BitSequence data; + data.data.length((length+7)/8); + data.bits = length; + port->pushPacket(data, time, eos, streamID); +} + +#endif // BULKIO_INPORTTESTFIXTURE_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/InStreamTest.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/InStreamTest.cpp index d047fd59b..c2c9375a1 100644 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/InStreamTest.cpp +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/InStreamTest.cpp @@ -22,43 +22,80 @@ #include "bulkio.h" template -void InStreamTest::setUp() +void InStreamTest::testTimestamp() { - std::string name = "data" + getPortName() + "_in"; - 
port = new Port(name); + // Create a new stream and push data with a known timestamp to it + BULKIO::StreamSRI sri = bulkio::sri::create("time_stamp"); + port->pushSRI(sri); + BULKIO::PrecisionUTCTime ts = bulkio::time::utils::create(1520883276.8045831); + this->_pushTestPacket(16, ts, false, sri.streamID); + + // Get the input stream and read the packet as a data block; it should + // contain exactly 1 timestamp, equal to the one that was pushed + StreamType stream = port->getStream("time_stamp"); + CPPUNIT_ASSERT_EQUAL(!stream, false); + DataBlockType block = stream.read(); + CPPUNIT_ASSERT(block); + std::list timestamps = block.getTimestamps(); + CPPUNIT_ASSERT_EQUAL((size_t) 1, timestamps.size()); + CPPUNIT_ASSERT_EQUAL(ts, timestamps.begin()->time); + CPPUNIT_ASSERT_EQUAL((size_t) 0, timestamps.begin()->offset); + CPPUNIT_ASSERT_EQUAL(false, timestamps.begin()->synthetic); + + // getStartTime() should always return the first timestamp + CPPUNIT_ASSERT_EQUAL(ts, block.getStartTime()); +} + +// Specialization for XML ports, which do not pass timestamp information +template <> +void InStreamTest::testTimestamp() +{ + // Create a new stream and push some data to it + BULKIO::StreamSRI sri = bulkio::sri::create("time_stamp"); + port->pushSRI(sri); + this->_pushTestPacket(16, bulkio::time::utils::notSet(), false, sri.streamID); + + // Get the input stream and read the packet as a data block; it should not + // contain any timestamps + StreamType stream = port->getStream("time_stamp"); + CPPUNIT_ASSERT_EQUAL(!stream, false); + DataBlockType block = stream.read(); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT_EQUAL((size_t) 0, block.getTimestamps().size()); + // Calling getStartTime() may seg fault, or otherwise behave unreliably } template -void InStreamTest::tearDown() +void InStreamTest::testGetCurrentStreamEmptyPacket() { - delete port; + // Create a new stream and push an empty, non-EOS packet to it + BULKIO::StreamSRI sri = bulkio::sri::create("empty_packet"); + 
port->pushSRI(sri); + this->_pushTestPacket(0, bulkio::time::utils::now(), false, sri.streamID); + + // getCurrentStream() should not return any stream + StreamType stream = port->getCurrentStream(bulkio::Const::NON_BLOCKING); + CPPUNIT_ASSERT(!stream); } template void InStreamTest::testGetCurrentStreamEmptyEos() { - typedef typename Port::PortSequenceType PortSequenceType; - typedef typename Port::StreamType StreamType; - typedef typename StreamType::DataBlockType DataBlockType; - // Create a new stream and push some data to it BULKIO::StreamSRI sri = bulkio::sri::create("empty_eos"); port->pushSRI(sri); - PortSequenceType data; - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); + this->_pushTestPacket(1024, bulkio::time::utils::now(), false, sri.streamID); // Get the input stream and read the first packet StreamType stream = port->getStream("empty_eos"); CPPUNIT_ASSERT_EQUAL(!stream, false); DataBlockType block = stream.read(); CPPUNIT_ASSERT(block); - CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); CPPUNIT_ASSERT(!stream.eos()); // Push an end-of-stream packet with no data and get the stream again - data.length(0); - port->pushPacket(data, bulkio::time::utils::notSet(), true, sri.streamID); + this->_pushTestPacket(0, bulkio::time::utils::notSet(), true, sri.streamID); stream = port->getCurrentStream(bulkio::Const::NON_BLOCKING); CPPUNIT_ASSERT(stream); block = stream.read(); @@ -76,33 +113,26 @@ void InStreamTest::testGetCurrentStreamEmptyEos() template void InStreamTest::testGetCurrentStreamDataEos() { - typedef typename Port::PortSequenceType PortSequenceType; - typedef typename Port::StreamType StreamType; - typedef typename StreamType::DataBlockType DataBlockType; - // Create a new stream and push some data to it BULKIO::StreamSRI sri = bulkio::sri::create("empty_eos"); port->pushSRI(sri); - PortSequenceType data; - data.length(1024); - 
port->pushPacket(data, bulkio::time::utils::now(), false, sri.streamID); + this->_pushTestPacket(1024, bulkio::time::utils::now(), false, sri.streamID); // Get the input stream and read the first packet StreamType stream = port->getStream("empty_eos"); CPPUNIT_ASSERT_EQUAL(!stream, false); DataBlockType block = stream.read(); CPPUNIT_ASSERT(block); - CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); CPPUNIT_ASSERT(!stream.eos()); // Push an end-of-stream packet with data and get the stream again - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), true, sri.streamID); + this->_pushTestPacket(1024, bulkio::time::utils::now(), true, sri.streamID); stream = port->getCurrentStream(bulkio::Const::NON_BLOCKING); CPPUNIT_ASSERT(stream); block = stream.read(); CPPUNIT_ASSERT(block); - CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); // Try to get the current stream again; since the end-of-stream has not been // checked yet, it should return the existing stream (as with above) @@ -121,147 +151,451 @@ void InStreamTest::testGetCurrentStreamDataEos() } template -void InStreamTest::testSizedReadEmptyEos() +void InStreamTest::testSriChanges() { - typedef typename Port::StreamType StreamType; - typedef typename Port::PortSequenceType PortSequenceType; - typedef typename StreamType::DataBlockType DataBlockType; + const char* stream_id = "sri_changes"; // Create a new stream and push some data to it - const char* stream_id = "read_empty_eos"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + sri.xstart = 0.0; + sri.xdelta = 1.0; + port->pushSRI(sri); + this->_pushTestPacket(1024, bulkio::time::utils::now(), false, sri.streamID); + + // Get the input stream and read the first packet + StreamType stream = port->getStream(stream_id); + CPPUNIT_ASSERT_EQUAL(!stream, false); + DataBlockType block = stream.read(); + 
CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); + CPPUNIT_ASSERT(!stream.eos()); + CPPUNIT_ASSERT_EQUAL(sri.xdelta, block.sri().xdelta); + + // Change xdelta (based on sample rate of 2.5Msps) + sri.xdelta = 1.0 / 2.5e6; + port->pushSRI(sri); + this->_pushTestPacket(1024, bulkio::time::utils::now(), false, sri.streamID); + block = stream.read(); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); + CPPUNIT_ASSERT(!stream.eos()); + CPPUNIT_ASSERT(block.sriChanged()); + int flags = bulkio::sri::XDELTA; + CPPUNIT_ASSERT_EQUAL_MESSAGE("SRI change flags incorrect", flags, block.sriChangeFlags()); + CPPUNIT_ASSERT_EQUAL_MESSAGE("SRI xdelta incorrect", sri.xdelta, block.sri().xdelta); + + // Add a keyword, change xdelta back and update xstart + ossie::corba::push_back(sri.keywords, redhawk::PropertyType("COL_RF", 101.1e6)); + sri.xstart = 100.0; + sri.xdelta = 1.0; + port->pushSRI(sri); + this->_pushTestPacket(1024, bulkio::time::utils::now(), false, sri.streamID); + block = stream.read(); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); + CPPUNIT_ASSERT(!stream.eos()); + CPPUNIT_ASSERT(block.sriChanged()); + flags = bulkio::sri::XSTART | bulkio::sri::XDELTA | bulkio::sri::KEYWORDS; + CPPUNIT_ASSERT_EQUAL_MESSAGE("SRI change flags incorrect", flags, block.sriChangeFlags()); + CPPUNIT_ASSERT_EQUAL_MESSAGE("SRI xstart incorrect", sri.xstart, block.sri().xstart); + CPPUNIT_ASSERT_EQUAL_MESSAGE("SRI xdelta incorrect", sri.xdelta, block.sri().xdelta); +} + +template +void InStreamTest::testDisable() +{ + const char* stream_id = "disable"; // Create a new stream and push some data to it BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); port->pushSRI(sri); - PortSequenceType data; - port->pushPacket(data, bulkio::time::utils::now(), true, stream_id); + this->_pushTestPacket(16, bulkio::time::utils::now(), false, sri.streamID); // Get the input stream and read the 
first packet StreamType stream = port->getStream(stream_id); - CPPUNIT_ASSERT_EQUAL(!stream, false); - DataBlockType block = stream.read(1); + CPPUNIT_ASSERT_EQUAL(false, !stream); + + DataBlockType block = stream.read(); + CPPUNIT_ASSERT_EQUAL(false, !block); + + // Push a couple more packets + this->_pushTestPacket(16, bulkio::time::utils::now(), false, sri.streamID); + this->_pushTestPacket(16, bulkio::time::utils::now(), false, sri.streamID); + CPPUNIT_ASSERT_EQUAL(2, port->getCurrentQueueDepth()); + + // Disable the stream; this should drop the existing packets + stream.disable(); + CPPUNIT_ASSERT(!stream.enabled()); + CPPUNIT_ASSERT_EQUAL(0, port->getCurrentQueueDepth()); + + // Push a couple more packets; they should get dropped + this->_pushTestPacket(16, bulkio::time::utils::now(), false, sri.streamID); + this->_pushTestPacket(16, bulkio::time::utils::now(), false, sri.streamID); + CPPUNIT_ASSERT_EQUAL(0, port->getCurrentQueueDepth()); + + // Push an end-of-stream packet + this->_pushTestPacket(16, bulkio::time::utils::notSet(), true, sri.streamID); + + // Re-enable the stream and read; it should fail with end-of-stream set + stream.enable(); + block = stream.read(); CPPUNIT_ASSERT(!block); CPPUNIT_ASSERT(stream.eos()); } + template -void InStreamTest::testSizedTryreadEmptyEos() +void BufferedInStreamTest::testSizedReadEmptyEos() { - typedef typename Port::StreamType StreamType; - typedef typename Port::PortSequenceType PortSequenceType; - typedef typename StreamType::DataBlockType DataBlockType; + const char* stream_id = "read_empty_eos"; + + // Create a new stream and push an end-of-stream packet with no data + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + this->_pushTestPacket(0, bulkio::time::utils::notSet(), true, stream_id); + // Try to read a single element; this should return a null block + StreamType stream = port->getStream(stream_id); + CPPUNIT_ASSERT(stream); + DataBlockType block = stream.read(1); + 
CPPUNIT_ASSERT(!block); + CPPUNIT_ASSERT(stream.eos()); +} + +template +void BufferedInStreamTest::testSizedTryreadEmptyEos() +{ const char* stream_id = "tryread_empty_eos"; - // Create a new stream and push some data to it + // Create a new stream and push an end-of-stream packet with no data BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); port->pushSRI(sri); - PortSequenceType data; - port->pushPacket(data, bulkio::time::utils::now(), true, stream_id); + this->_pushTestPacket(0, bulkio::time::utils::notSet(), true, stream_id); - // Get the input stream and read the first packet + // Try to read a single element; this should return a null block StreamType stream = port->getStream(stream_id); - CPPUNIT_ASSERT_EQUAL(!stream, false); + CPPUNIT_ASSERT(stream); DataBlockType block = stream.tryread(1); CPPUNIT_ASSERT(!block); CPPUNIT_ASSERT(stream.eos()); } template -void InStreamTest::testTryreadPeek() +void BufferedInStreamTest::testTryreadPeek() { - typedef typename Port::StreamType StreamType; - typedef typename Port::PortSequenceType PortSequenceType; - typedef typename StreamType::DataBlockType DataBlockType; - const char* stream_id = "tryread_peek"; // Create a new stream and push some data to it BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); port->pushSRI(sri); - PortSequenceType data; - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), true, stream_id); + this->_pushTestPacket(1024, bulkio::time::utils::now(), true, stream_id); // Get the input stream and read the first packet StreamType stream = port->getStream(stream_id); CPPUNIT_ASSERT_EQUAL(!stream, false); DataBlockType block = stream.tryread(10000,0); - CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); block = stream.read(10000); - CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); block = stream.read(10000); CPPUNIT_ASSERT(!block); } template 
-void InStreamTest::testReadPeek() +void BufferedInStreamTest::testReadPeek() { - typedef typename Port::StreamType StreamType; - typedef typename Port::PortSequenceType PortSequenceType; - typedef typename StreamType::DataBlockType DataBlockType; - const char* stream_id = "read_peek"; // Create a new stream and push some data to it BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); port->pushSRI(sri); - PortSequenceType data; - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), true, stream_id); + this->_pushTestPacket(1024, bulkio::time::utils::now(), true, stream_id); // Get the input stream and read the first packet StreamType stream = port->getStream(stream_id); CPPUNIT_ASSERT_EQUAL(!stream, false); DataBlockType block = stream.read(10000,0); - CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); block = stream.read(10000); - CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.buffer().size()); block = stream.read(10000); CPPUNIT_ASSERT(!block); } template -void InStreamTest::testReadPartial() +void BufferedInStreamTest::testReadPartial() { - typedef typename Port::StreamType StreamType; - typedef typename Port::PortSequenceType PortSequenceType; - typedef typename StreamType::DataBlockType DataBlockType; - const char* stream_id = "read_partial"; // Create a new stream and push some data to it BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); port->pushSRI(sri); - PortSequenceType data; - data.length(1024); - port->pushPacket(data, bulkio::time::utils::now(), true, stream_id); + this->_pushTestPacket(1024, bulkio::time::utils::now(), true, stream_id); // Get the input stream and read the first packet StreamType stream = port->getStream(stream_id); CPPUNIT_ASSERT_EQUAL(!stream, false); DataBlockType block = stream.read(10000,2000); - CPPUNIT_ASSERT_EQUAL((size_t) 1024, block.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 1024, 
block.buffer().size()); block = stream.read(10000); CPPUNIT_ASSERT(!block); } -#define CREATE_TEST(x) \ - class In##x##StreamTest : public InStreamTest \ +template +void BufferedInStreamTest::testReadTimestamps() +{ + const char* stream_id = "read_timestamps"; + + // Create a new stream and push several packets with known timestamps + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + sri.xdelta = 0.0625; + port->pushSRI(sri); + BULKIO::PrecisionUTCTime ts = bulkio::time::utils::create(4000.0, 0.5); + // Push packets of size 32, which should advance the time by exactly 2 + // seconds each + this->_pushTestPacket(32, ts, false, sri.streamID); + this->_pushTestPacket(32, ts+2.0, false, sri.streamID); + this->_pushTestPacket(32, ts+4.0, false, sri.streamID); + this->_pushTestPacket(32, ts+6.0, false, sri.streamID); + + // Get the input stream and read several packets as one block, enough to + // bisect the third packet + StreamType stream = port->getStream(stream_id); + CPPUNIT_ASSERT_EQUAL(!stream, false); + DataBlockType block = stream.read(70); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT_EQUAL((size_t) 70, block.buffer().size()); + + // There should be 3 timestamps, all non-synthetic + std::list timestamps = block.getTimestamps(); + CPPUNIT_ASSERT_EQUAL((size_t) 3, timestamps.size()); + std::list::iterator it = timestamps.begin(); + CPPUNIT_ASSERT_EQUAL(ts, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 0, it->offset); + CPPUNIT_ASSERT_EQUAL(false, it->synthetic); + CPPUNIT_ASSERT_EQUAL_MESSAGE("getStartTime() doesn't match first timestamp", it->time, block.getStartTime()); + ++it; + CPPUNIT_ASSERT_EQUAL(ts+2.0, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 32, it->offset); + CPPUNIT_ASSERT_EQUAL(false, it->synthetic); + ++it; + CPPUNIT_ASSERT_EQUAL(ts+4.0, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 64, it->offset); + CPPUNIT_ASSERT_EQUAL(false, it->synthetic); + + // Read the remaining packet and a half; the first timestamp should be + // synthetic + block = 
stream.read(58); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT_EQUAL((size_t) 58, block.buffer().size()); + timestamps = block.getTimestamps(); + CPPUNIT_ASSERT_EQUAL((size_t) 2, timestamps.size()); + it = timestamps.begin(); + CPPUNIT_ASSERT_EQUAL_MESSAGE("First timestamp should by synthesized", true, it->synthetic); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Synthesized timestamp is incorrect", ts+4.375, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 0, it->offset); + CPPUNIT_ASSERT_EQUAL_MESSAGE("getStartTime() doesn't match first timestamp", it->time, block.getStartTime()); + ++it; + CPPUNIT_ASSERT_EQUAL(ts+6.0, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 26, it->offset); + CPPUNIT_ASSERT_EQUAL(false, it->synthetic); +} + +template +void BufferedInStreamTest::testRepeatStreamIds() +{ + const char* stream_id = "repeat_stream_ids"; + + // Create a new stream and push several packets with known timestamps + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + sri.xdelta = 0.0625; + unsigned int number_streams = 6; + unsigned int number_packets = 4; + for (unsigned int i=0; ipushSRI(sri); + for (unsigned int j=0; j_pushTestPacket(32, bulkio::time::utils::now(), false, sri.streamID); + } + this->_pushTestPacket(32, bulkio::time::utils::now(), true, sri.streamID); + } + + unsigned int received_streams = 0; + for (unsigned int i=0; igetCurrentStream(); + CPPUNIT_ASSERT_EQUAL(!inputStream, false); + received_streams++; + for (unsigned int j=0; j +void BufferedInStreamTest::testDisableDiscard() +{ + const char* stream_id = "disable_discard"; + + // Create a new stream and push a couple of packets to it + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + this->_pushTestPacket(1024, bulkio::time::utils::now(), false, sri.streamID); + CPPUNIT_ASSERT_EQUAL(1, port->getCurrentQueueDepth()); + + // Get the input stream and read half of the first packet + StreamType stream = port->getStream(stream_id); + CPPUNIT_ASSERT_EQUAL(false, !stream); + DataBlockType 
block = stream.read(512); + CPPUNIT_ASSERT_EQUAL(false, !block); + + // The stream should report samples available, but there should be no + // packets in the port's queue + CPPUNIT_ASSERT(stream.ready()); + CPPUNIT_ASSERT(stream.samplesAvailable() > 0); + CPPUNIT_ASSERT_EQUAL(0, port->getCurrentQueueDepth()); + + // Disable the stream; this should discard + stream.disable(); + CPPUNIT_ASSERT(!stream.ready()); + CPPUNIT_ASSERT_EQUAL((size_t) 0, stream.samplesAvailable()); +} + +template +void NumericInStreamTest::testSriModeChanges() +{ + const char* stream_id = "sri_mode_changes"; + + // Create a new stream and push some data to it + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + this->_pushTestPacket(100, bulkio::time::utils::now(), false, sri.streamID); + + // Get the input stream and read the first packet + StreamType stream = port->getStream(stream_id); + CPPUNIT_ASSERT(stream); + DataBlockType block = stream.read(); + CPPUNIT_ASSERT(block); + + // First block from a new stream reports SRI change + CPPUNIT_ASSERT_EQUAL(true, block.sriChanged()); + + // Change the mode to complex and push more data + sri.mode = 1; + port->pushSRI(sri); + this->_pushTestPacket(200, bulkio::time::utils::now(), false, sri.streamID); + block = stream.read(); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT(block.complex()); + CPPUNIT_ASSERT(block.sriChanged()); + CPPUNIT_ASSERT(block.sriChangeFlags() & bulkio::sri::MODE); + + // Next push should report no SRI changes + this->_pushTestPacket(200, bulkio::time::utils::now(), false, sri.streamID); + block = stream.read(); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT(block.complex()); + CPPUNIT_ASSERT(!block.sriChanged()); + + // Change back to scalar + sri.mode = 0; + port->pushSRI(sri); + this->_pushTestPacket(100, bulkio::time::utils::now(), false, sri.streamID); + block = stream.read(); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT(!block.complex()); + CPPUNIT_ASSERT(block.sriChanged()); + 
CPPUNIT_ASSERT(block.sriChangeFlags() & bulkio::sri::MODE); +} + +template +void NumericInStreamTest::testReadTimestampsComplex() +{ + const char* stream_id = "read_timestamps_cx"; + + // Create a new complex stream and push several packets with known + // timestamps + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + sri.mode = 1; + sri.xdelta = 0.125; + port->pushSRI(sri); + BULKIO::PrecisionUTCTime ts = bulkio::time::utils::create(100.0, 0.0); + // Push 8 complex values (16 real), which should advance the time by + // exactly 1 second each time + this->_pushTestPacket(16, ts, false, sri.streamID); + this->_pushTestPacket(16, ts+1.0, false, sri.streamID); + this->_pushTestPacket(16, ts+2.0, false, sri.streamID); + this->_pushTestPacket(16, ts+3.0, false, sri.streamID); + + // Get the input stream and read several packets as one block, enough to + // bisect the third packet + StreamType stream = port->getStream(stream_id); + CPPUNIT_ASSERT_EQUAL(!stream, false); + DataBlockType block = stream.read(20); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT_EQUAL((size_t) 20, block.cxsize()); + + // There should be 3 timestamps, all non-synthetic, with sample offsets + // based on the complex type + std::list timestamps = block.getTimestamps(); + CPPUNIT_ASSERT_EQUAL((size_t) 3, timestamps.size()); + std::list::iterator it = timestamps.begin(); + CPPUNIT_ASSERT_EQUAL(ts, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 0, it->offset); + CPPUNIT_ASSERT_EQUAL(false, it->synthetic); + CPPUNIT_ASSERT_EQUAL_MESSAGE("getStartTime() doesn't match first timestamp", it->time, block.getStartTime()); + ++it; + CPPUNIT_ASSERT_EQUAL(ts+1.0, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 8, it->offset); + CPPUNIT_ASSERT_EQUAL(false, it->synthetic); + ++it; + CPPUNIT_ASSERT_EQUAL(ts+2.0, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 16, it->offset); + CPPUNIT_ASSERT_EQUAL(false, it->synthetic); + + // Read the remaining packet and a half; the first timestamp should be + // synthetic + block = 
stream.read(12); + CPPUNIT_ASSERT(block); + CPPUNIT_ASSERT_EQUAL((size_t) 12, block.cxsize()); + timestamps = block.getTimestamps(); + CPPUNIT_ASSERT_EQUAL((size_t) 2, timestamps.size()); + it = timestamps.begin(); + CPPUNIT_ASSERT_EQUAL_MESSAGE("First timestamp should by synthesized", true, it->synthetic); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Synthesized timestamp is incorrect", ts+2.5, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 0, it->offset); + CPPUNIT_ASSERT_EQUAL_MESSAGE("getStartTime() doesn't match first timestamp", it->time, block.getStartTime()); + ++it; + CPPUNIT_ASSERT_EQUAL(ts+3.0, it->time); + CPPUNIT_ASSERT_EQUAL((size_t) 4, it->offset); + CPPUNIT_ASSERT_EQUAL(false, it->synthetic); +} + +#define CREATE_TEST(x, BASE) \ + class In##x##StreamTest : public BASE \ { \ - CPPUNIT_TEST_SUB_SUITE(In##x##StreamTest, InStreamTest); \ + CPPUNIT_TEST_SUB_SUITE(In##x##StreamTest, BASE); \ CPPUNIT_TEST_SUITE_END(); \ - virtual std::string getPortName() const { return #x; }; \ }; \ CPPUNIT_TEST_SUITE_REGISTRATION(In##x##StreamTest); -CREATE_TEST(Octet); -CREATE_TEST(Char); -CREATE_TEST(Short); -CREATE_TEST(UShort); -CREATE_TEST(Long); -CREATE_TEST(ULong); -CREATE_TEST(LongLong); -CREATE_TEST(ULongLong); -CREATE_TEST(Float); -CREATE_TEST(Double); +#define CREATE_BASIC_TEST(x) CREATE_TEST(x, InStreamTest) +#define CREATE_NUMERIC_TEST(x) CREATE_TEST(x, NumericInStreamTest) + +CREATE_BASIC_TEST(XML); +CREATE_BASIC_TEST(File); +CREATE_TEST(Bit,BufferedInStreamTest); +CREATE_NUMERIC_TEST(Octet); +CREATE_NUMERIC_TEST(Char); +CREATE_NUMERIC_TEST(Short); +CREATE_NUMERIC_TEST(UShort); +CREATE_NUMERIC_TEST(Long); +CREATE_NUMERIC_TEST(ULong); +CREATE_NUMERIC_TEST(LongLong); +CREATE_NUMERIC_TEST(ULongLong); +CREATE_NUMERIC_TEST(Float); +CREATE_NUMERIC_TEST(Double); diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/InStreamTest.h b/bulkioInterfaces/libsrc/testing/tests/cpp/InStreamTest.h index a8a36267b..bb84837cd 100644 --- 
a/bulkioInterfaces/libsrc/testing/tests/cpp/InStreamTest.h +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/InStreamTest.h @@ -20,40 +20,92 @@ #ifndef BULKIO_INSTREAMTEST_H #define BULKIO_INSTREAMTEST_H -#include -#include +#include "InPortTestFixture.h" template -class InStreamTest : public CppUnit::TestFixture +class InStreamTest : public InPortTestFixture { + typedef InPortTestFixture TestBase; + CPPUNIT_TEST_SUITE(InStreamTest); + CPPUNIT_TEST(testTimestamp); + CPPUNIT_TEST(testGetCurrentStreamEmptyPacket); CPPUNIT_TEST(testGetCurrentStreamEmptyEos); CPPUNIT_TEST(testGetCurrentStreamDataEos); + CPPUNIT_TEST(testSriChanges); + CPPUNIT_TEST(testDisable); + CPPUNIT_TEST_SUITE_END(); + +public: + void testTimestamp(); + + void testGetCurrentStreamEmptyPacket(); + void testGetCurrentStreamEmptyEos(); + void testGetCurrentStreamDataEos(); + void testSriChanges(); + + void testDisable(); + +protected: + typedef typename Port::StreamType StreamType; + typedef typename StreamType::DataBlockType DataBlockType; + + using TestBase::port; +}; + +template +class BufferedInStreamTest : public InStreamTest +{ + typedef InStreamTest TestBase; + CPPUNIT_TEST_SUB_SUITE(BufferedInStreamTest, TestBase); CPPUNIT_TEST(testSizedReadEmptyEos); CPPUNIT_TEST(testSizedTryreadEmptyEos); CPPUNIT_TEST(testTryreadPeek); CPPUNIT_TEST(testReadPeek); CPPUNIT_TEST(testReadPartial); + CPPUNIT_TEST(testReadTimestamps); + CPPUNIT_TEST(testRepeatStreamIds); + CPPUNIT_TEST(testDisableDiscard); CPPUNIT_TEST_SUITE_END(); public: - void setUp(); - void tearDown(); - - void testGetCurrentStreamEmptyEos(); - void testGetCurrentStreamDataEos(); - void testSizedReadEmptyEos(); void testSizedTryreadEmptyEos(); void testTryreadPeek(); void testReadPeek(); void testReadPartial(); + void testReadTimestamps(); + void testRepeatStreamIds(); + + void testDisableDiscard(); + +protected: + typedef typename Port::StreamType StreamType; + typedef typename StreamType::DataBlockType DataBlockType; + + using 
TestBase::port; +}; + +template +class NumericInStreamTest : public BufferedInStreamTest +{ + typedef BufferedInStreamTest TestBase; + CPPUNIT_TEST_SUB_SUITE(NumericInStreamTest, TestBase); + CPPUNIT_TEST(testSriModeChanges); + CPPUNIT_TEST(testReadTimestampsComplex); + CPPUNIT_TEST_SUITE_END(); + +public: + void testSriModeChanges(); + + void testReadTimestampsComplex(); private: - virtual std::string getPortName() const = 0; + typedef typename Port::StreamType StreamType; + typedef typename StreamType::DataBlockType DataBlockType; - Port* port; + using TestBase::port; }; #endif // BULKIO_INSTREAMTEST_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/LocalTest.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/LocalTest.cpp new file mode 100644 index 000000000..2ad2ca95f --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/LocalTest.cpp @@ -0,0 +1,202 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "LocalTest.h" +#include + +namespace { + template + bool overlaps(const T start1, const T end1, const T start2, const T end2) + { + return (start2 <= end1) && (start1 <= end2); + } + + template + bool contains(const T start1, const T end1, const T start2, const T end2) + { + return (start2 >= start1) && (end2 <= end1); + } + + template + bool overlaps(const redhawk::shared_buffer& lhs, const redhawk::shared_buffer& rhs) + { + return overlaps(lhs.data(), lhs.data() + lhs.size(), rhs.data(), rhs.data() + rhs.size()); + } + + template + bool contains(const redhawk::shared_buffer& outer, const redhawk::shared_buffer& inner) + { + return contains(outer.data(), outer.data() + outer.size(), inner.data(), inner.data() + inner.size()); + } + + bool overlaps(const redhawk::shared_bitbuffer& lhs, const redhawk::shared_bitbuffer& rhs) + { + // Normalize the starts and ends to be relative to the lower of the two + // base addresses, and in terms of bits + const unsigned char* base = std::min(lhs.data(), rhs.data()); + size_t lstart = (lhs.data() - base) * 8 + lhs.offset(); + size_t rstart = (rhs.data() - base) * 8 + rhs.offset(); + return overlaps(lstart, lstart + lhs.size(), rstart, rstart + rhs.size()); + } + + bool contains(const redhawk::shared_bitbuffer& outer, const redhawk::shared_bitbuffer& inner) + { + // Normalize the starts and ends to be relative to the lower of the two + // base addresses, and in terms of bits + const unsigned char* base = std::min(outer.data(), inner.data()); + size_t ostart = (outer.data() - base) * 8 + outer.offset(); + size_t rstart = (inner.data() - base) * 8 + inner.offset(); + return contains(ostart, ostart + outer.size(), rstart, rstart + inner.size()); + } +} + +template +void LocalTest::setUp() +{ + std::string name = bulkio::CorbaTraits::name(); + outPort = new OutPort(name + "_out"); + inPort = new InPort(name + "_in"); + + PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->activate_object(inPort); + + 
CORBA::Object_var objref = inPort->_this(); + outPort->connectPort(objref, "local_connection"); +} + +template +void LocalTest::tearDown() +{ + outPort->disconnectPort("local_connection"); + + try { + PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->servant_to_id(inPort); + ossie::corba::RootPOA()->deactivate_object(oid); + } catch (...) { + // Ignore CORBA exceptions + } + inPort->_remove_ref(); + + delete outPort; +} + +template +void LocalTest::testBasicWrite() +{ + // Create an output stream and write a buffer to it + OutStreamType out_stream = outPort->createStream("test_stream"); + MutableBufferType data(1024); + out_stream.write(data, bulkio::time::utils::now()); + + // The corresponding input stream should exist and have data + InStreamType in_stream = inPort->getStream("test_stream"); + CPPUNIT_ASSERT(in_stream); + DataBlockType block = in_stream.tryread(); + CPPUNIT_ASSERT(block); + + // Check that the input stream is sharing the underlying memory + BufferType result = block.buffer(); + CPPUNIT_ASSERT_MESSAGE("Input stream received a copy of data", data.data() == result.data()); + CPPUNIT_ASSERT_EQUAL(data.size(), result.size()); +} + +template +void LocalTest::testLargeWrite() +{ + // Create an output stream and write a buffer that is too large for a + // single CORBA transfer + OutStreamType out_stream = outPort->createStream("test_stream"); + size_t count = (16 * bulkio::Const::MaxTransferBytes()) / bulkio::NativeTraits::bits; + MutableBufferType data(count); + out_stream.write(data, bulkio::time::utils::now()); + + // The corresponding input stream should exist and have data + InStreamType in_stream = inPort->getStream("test_stream"); + CPPUNIT_ASSERT(in_stream); + DataBlockType block = in_stream.tryread(); + CPPUNIT_ASSERT(block); + + // Make sure that the original buffer was preserved as a single transfer + BufferType result = block.buffer(); + CPPUNIT_ASSERT_MESSAGE("Input stream received a copy of data", data.data() == result.data()); + 
CPPUNIT_ASSERT_EQUAL(data.size(), result.size()); +} + +template +void LocalTest::testReadSlice() +{ + // Create an output stream and write a buffer to it + OutStreamType out_stream = outPort->createStream("test_stream"); + MutableBufferType data(1024); + out_stream.write(data, bulkio::time::utils::now()); + + // The corresponding input stream should exist and have data + const size_t READ_SIZE = 500; + InStreamType in_stream = inPort->getStream("test_stream"); + CPPUNIT_ASSERT(in_stream); + DataBlockType block = in_stream.tryread(READ_SIZE); + CPPUNIT_ASSERT(block); + + // Check that the read buffer is a subset of the original buffer + BufferType result = block.buffer(); + CPPUNIT_ASSERT_EQUAL(READ_SIZE, result.size()); + CPPUNIT_ASSERT_MESSAGE("Input stream received a copy of data", data.data() == result.data()); + + // The next read buffer should point to an offset into the original buffer + block = in_stream.tryread(READ_SIZE); + CPPUNIT_ASSERT(block); + result = block.buffer(); + CPPUNIT_ASSERT_EQUAL(READ_SIZE, result.size()); + CPPUNIT_ASSERT_MESSAGE("Input stream received a copy of data", contains(data, result)); + CPPUNIT_ASSERT_MESSAGE("Input buffer did not advance", data.data() < result.data()); + + // Write a new buffer (copy allocates a new memory block) + BufferType data2 = data.copy(); + out_stream.write(data2, bulkio::time::utils::now()); + + // Read a buffer that we know spans two input buffers; it should still be + // able to read the full amount, but it'll have to make a copy + block = in_stream.tryread(READ_SIZE); + CPPUNIT_ASSERT(block); + result = block.buffer(); + CPPUNIT_ASSERT_EQUAL(READ_SIZE, result.size()); + CPPUNIT_ASSERT(!overlaps(data, result)); + CPPUNIT_ASSERT(!overlaps(data2, result)); +} + +#define CREATE_TEST(x) \ + class Local##x##Test : public LocalTest \ + { \ + typedef LocalTest TestBase; \ + CPPUNIT_TEST_SUB_SUITE(Local##x##Test, TestBase); \ + CPPUNIT_TEST_SUITE_END(); \ + }; \ + 
CPPUNIT_TEST_SUITE_REGISTRATION(Local##x##Test); + +CREATE_TEST(Octet); +CREATE_TEST(Char); +CREATE_TEST(Short); +CREATE_TEST(UShort); +CREATE_TEST(Long); +CREATE_TEST(ULong); +CREATE_TEST(LongLong); +CREATE_TEST(ULongLong); +CREATE_TEST(Float); +CREATE_TEST(Double); +CREATE_TEST(Bit); diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/LocalTest.h b/bulkioInterfaces/libsrc/testing/tests/cpp/LocalTest.h new file mode 100644 index 000000000..2e047381d --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/LocalTest.h @@ -0,0 +1,57 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef BULKIO_LOCALTEST_H +#define BULKIO_LOCALTEST_H + +#include +#include +#include + +template +class LocalTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(LocalTest); + CPPUNIT_TEST(testBasicWrite); + CPPUNIT_TEST(testLargeWrite); + CPPUNIT_TEST(testReadSlice); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testBasicWrite(); + void testLargeWrite(); + + void testReadSlice(); + +protected: + typedef typename OutPort::StreamType OutStreamType; + typedef typename InPort::StreamType InStreamType; + typedef typename InStreamType::DataBlockType DataBlockType; + typedef typename OutPort::CorbaType CorbaType; + typedef typename bulkio::BufferTraits::BufferType BufferType; + typedef typename bulkio::BufferTraits::MutableBufferType MutableBufferType; + + OutPort* outPort; + InPort* inPort; +}; + +#endif // BULKIO_LOCALTEST_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/Makefile.am b/bulkioInterfaces/libsrc/testing/tests/cpp/Makefile.am index c9390227f..8825bb933 100644 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/Makefile.am +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/Makefile.am @@ -20,10 +20,16 @@ # Rules for the test code (use `make check` to execute) TESTS = Bulkio check_PROGRAMS = $(TESTS) -bulkio_top=../../../../ -bulkio_libsrc_top=$(bulkio_top)/libsrc -Bulkio_SOURCES = Bulkio.cpp Bulkio_Helper_Fixture.cpp Bulkio_InPort_Fixture.cpp Bulkio_OutPort_Fixture.cpp Bulkio_MultiOut_Port.cpp + +Bulkio_SOURCES = main.cpp +Bulkio_SOURCES += Bulkio_MultiOut_Port.cpp +Bulkio_SOURCES += InPortTest.h InPortTest.cpp Bulkio_SOURCES += InStreamTest.h InStreamTest.cpp +Bulkio_SOURCES += OutPortTest.h OutPortTest.cpp Bulkio_SOURCES += OutStreamTest.h OutStreamTest.cpp -Bulkio_CXXFLAGS = $(CPPUNIT_CFLAGS) -I$(bulkio_libsrc_top)/cpp -I$(bulkio_top)/src/cpp -I$(bulkio_top)/src/cpp/ossie $(BOOST_CPPFLAGS) $(RH_DEPS_CFLAGS) -Bulkio_LDADD = -L$(bulkio_libsrc_top)/.libs -L$(bulkio_top)/.libs -lbulkio-2.0 -lbulkioInterfaces 
$(BOOST_LDFLAGS) $(BOOST_SYSTEM_LIB) $(RH_DEPS_LIBS) $(CPPUNIT_LIBS) -llog4cxx +Bulkio_SOURCES += LocalTest.h LocalTest.cpp +Bulkio_SOURCES += SDDSPortTest.cpp +Bulkio_SOURCES += StreamSRITest.h StreamSRITest.cpp +Bulkio_SOURCES += PrecisionUTCTimeTest.h PrecisionUTCTimeTest.cpp +Bulkio_CXXFLAGS = $(BULKIO_CFLAGS) $(BOOST_CPPFLAGS) $(OSSIE_CFLAGS) $(CPPUNIT_CFLAGS) +Bulkio_LDADD = $(BULKIO_LIBS) $(BOOST_LDFLAGS) $(BOOST_SYSTEM_LIB) $(OSSIE_LIBS) $(CPPUNIT_LIBS) $(LOG4CXX_LIBS) diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTest.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTest.cpp new file mode 100644 index 000000000..b4ca467c9 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTest.cpp @@ -0,0 +1,521 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "OutPortTest.h" + +#include + +// Suppress warnings for access to deprecated methods +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +// Global connection/disconnection callbacks +static void port_connected(const char* connectionId) +{ +} + +static void port_disconnected(const char* connectionId) +{ +} + +template +void OutPortTest::testLegacyAPI() +{ + port->setNewConnectListener(&port_connected); + port->setNewDisconnectListener(&port_disconnected); + + typename Port::ConnectionsList cl = port->_getConnections(); + std::string sid="none"; + CPPUNIT_ASSERT(port->getCurrentSRI().count(sid) == 0); + + port->enableStats(false); + + rh_logger::LoggerPtr logger = rh_logger::Logger::getLogger("BulkioOutPort"); + port->setLogger(logger); +} + +template +void OutPortTest::testConnections() +{ + // Should start with one connection, to the in port stub + ExtendedCF::UsesConnectionSequence_var connections = port->connections(); + CPPUNIT_ASSERT(connections->length() == 1); + CPPUNIT_ASSERT(port->state() == BULKIO::ACTIVE); + + // Should throw an invalid port on a nil + CORBA::Object_var objref; + CPPUNIT_ASSERT_THROW(port->connectPort(objref, "connection_nil"), CF::Port::InvalidPort); + + // Normal connection + StubType* stub2 = this->_createStub(); + objref = stub2->_this(); + port->connectPort(objref, "connection_2"); + connections = port->connections(); + CPPUNIT_ASSERT(connections->length() == 2); + + // Cannot reuse connection ID + CPPUNIT_ASSERT_THROW(port->connectPort(objref, "connection_2"), CF::Port::OccupiedPort); + + // Disconnect second connection + port->disconnectPort("connection_2"); + connections = port->connections(); + CPPUNIT_ASSERT(connections->length() == 1); + + // Bad connection ID on disconnect + CPPUNIT_ASSERT_THROW(port->disconnectPort("connection_bad"), CF::Port::InvalidPort); + + // Disconnect the default stub; port should go to idle + port->disconnectPort("test_connection"); + connections = port->connections(); + 
CPPUNIT_ASSERT(connections->length() == 0); + CPPUNIT_ASSERT(port->state() == BULKIO::IDLE); +} + +template +void OutPortTest::testStatistics() +{ + const char* stream_id = "port_stats"; + + BULKIO::UsesPortStatisticsSequence_var uses_stats = port->statistics(); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 1, uses_stats->length()); + CPPUNIT_ASSERT_EQUAL(std::string("test_connection"), std::string(uses_stats[0].connectionId)); + + BULKIO::StreamSRI sri = bulkio::sri::create(); + port->pushSRI(sri); + + this->_pushTestPacket(1024, BULKIO::PrecisionUTCTime(), false, stream_id); + + uses_stats = port->statistics(); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 1, uses_stats->length()); + + // Check that the statistics report the right element size + const BULKIO::PortStatistics& stats = uses_stats[0].statistics; + CPPUNIT_ASSERT(stats.elementsPerSecond > 0.0); + size_t bits_per_element = round(stats.bitsPerSecond / stats.elementsPerSecond); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Incorrect bits per element", BITS_PER_ELEMENT, bits_per_element); +} + +template +void OutPortTest::testMultiOut() +{ + StubType* stub2 = this->_createStub(); + CORBA::Object_var objref = stub2->_this(); + port->connectPort(objref, "connection_2"); + + // Set up a connection table that only routes the filtered stream to the + // second stub, and another stream to both connections + const std::string filter_stream_id = "filter_stream"; + _addStreamFilter(filter_stream_id, "connection_2"); + const std::string all_stream_id = "all_stream"; + _addStreamFilter(all_stream_id, "test_connection"); + _addStreamFilter(all_stream_id, "connection_2"); + + // Push an SRI for the filtered stream; it should only be received by the + // second stub + BULKIO::StreamSRI sri = bulkio::sri::create(filter_stream_id, 2.5e6); + port->pushSRI(sri); + CPPUNIT_ASSERT(stub->H.empty()); + CPPUNIT_ASSERT(stub2->H.size() == 1); + CPPUNIT_ASSERT_EQUAL(filter_stream_id, std::string(stub2->H.back().streamID)); + + // Push a packet for the filtered 
stream; again, only received by #2 + this->_pushTestPacket(91, bulkio::time::utils::now(), false, filter_stream_id); + CPPUNIT_ASSERT(stub->packets.empty()); + CPPUNIT_ASSERT(stub2->packets.size() == 1); + CPPUNIT_ASSERT_EQUAL((size_t) 91, stub2->packets.back().size()); + + // Unknown (to the connection filter) stream should get dropped + const std::string unknown_stream_id = "unknown_stream"; + sri = bulkio::sri::create(unknown_stream_id); + port->pushSRI(sri); + CPPUNIT_ASSERT(stub->H.empty()); + CPPUNIT_ASSERT(stub2->H.size() == 1); + this->_pushTestPacket(50, bulkio::time::utils::now(), false, unknown_stream_id); + CPPUNIT_ASSERT(stub->packets.empty()); + CPPUNIT_ASSERT(stub2->packets.size() == 1); + + // Check SRI routed to both connections... + sri = bulkio::sri::create(all_stream_id, 1e6); + port->pushSRI(sri); + CPPUNIT_ASSERT(stub->H.size() == 1); + CPPUNIT_ASSERT(stub2->H.size() == 2); + CPPUNIT_ASSERT_EQUAL(all_stream_id, std::string(stub->H.back().streamID)); + CPPUNIT_ASSERT_EQUAL(all_stream_id, std::string(stub2->H.back().streamID)); + + // ...and data + this->_pushTestPacket(256, bulkio::time::utils::now(), false, all_stream_id); + CPPUNIT_ASSERT(stub->packets.size() == 1); + CPPUNIT_ASSERT_EQUAL((size_t) 256, stub->packets.back().size()); + CPPUNIT_ASSERT(stub2->packets.size() == 2); + CPPUNIT_ASSERT_EQUAL((size_t) 256, stub2->packets.back().size()); + + // Reset the connection filter and push data for the filtered stream again, + // which should trigger an SRI push to the first stub + connectionTable.clear(); + port->updateConnectionFilter(connectionTable); + this->_pushTestPacket(9, bulkio::time::utils::now(), false, filter_stream_id); + CPPUNIT_ASSERT(stub->H.size() == 2); + CPPUNIT_ASSERT_EQUAL(filter_stream_id, std::string(stub->H.back().streamID)); + CPPUNIT_ASSERT(stub->packets.size() == 2); + CPPUNIT_ASSERT_EQUAL((size_t) 9, stub->packets.back().size()); + CPPUNIT_ASSERT(stub2->packets.size() == 3); + CPPUNIT_ASSERT_EQUAL((size_t) 9, 
stub2->packets.back().size()); +} + +template +void OutPortTest::_addStreamFilter(const std::string& streamId, const std::string& connectionId) +{ + bulkio::connection_descriptor_struct desc; + desc.stream_id = streamId; + desc.connection_id = connectionId; + desc.port_name = port->getName(); + connectionTable.push_back(desc); + port->updateConnectionFilter(connectionTable); +} + +template +void ChunkingOutPortTest::testPushChunking() +{ + // Set up a basic stream + const char* stream_id = "push_chunking"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + sri.xdelta = 0.125; + port->pushSRI(sri); + + // Test that the push is properly chunked + BULKIO::PrecisionUTCTime time = bulkio::time::utils::create(0, 0); + _testPushOversizedPacket(time, false, stream_id); + + // Check that the synthesized time stamp(s) advanced by the expected time + for (size_t index = 1; index < stub->packets.size(); ++index) { + double expected = stub->packets[index-1].size() * sri.xdelta; + double elapsed = stub->packets[index].T - stub->packets[index-1].T; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Incorrect time stamp delta", expected, elapsed); + } +} + +template +void ChunkingOutPortTest::testPushChunkingEOS() +{ + // Set up a basic stream + const char* stream_id = "push_chunking_eos"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + // Send a packet with end-of-stream set + _testPushOversizedPacket(BULKIO::PrecisionUTCTime(), true, stream_id); + + // Check that only the final packet has end-of-stream set + CPPUNIT_ASSERT_MESSAGE("Last packet does not have EOS set", stub->packets.back().EOS); + for (size_t index = 0; index < (stub->packets.size() - 1); ++index) { + CPPUNIT_ASSERT_MESSAGE("Intermediate packet has EOS set", !stub->packets[index].EOS); + } +} + +template +void ChunkingOutPortTest::testPushChunkingSubsize() +{ + // Set up a 2-dimensional stream + const char* stream_id = "push_chunking_subsize"; + BULKIO::StreamSRI sri = 
bulkio::sri::create(stream_id); + sri.subsize = 1023; + port->pushSRI(sri); + + this->_testPushOversizedPacket(BULKIO::PrecisionUTCTime(), false, stream_id); + + // Check that each packet is a multiple of the subsize (except the last, + // because the oversized packet was not explicitly quantized to be an exact + // multiple) + for (size_t index = 0; index < (stub->packets.size() - 1); ++index) { + CPPUNIT_ASSERT_MESSAGE("Packet size is not a multiple of subsize", (stub->packets[index].size() % 1023) == 0); + } +} + +template +void ChunkingOutPortTest::_testPushOversizedPacket(const BULKIO::PrecisionUTCTime& time, + bool eos, const std::string& streamID) +{ + // Pick a sufficiently large number of samples that the packet has to span + // multiple packets + const size_t max_bits = 8 * bulkio::Const::MaxTransferBytes(); + const size_t count = 2 * max_bits / BITS_PER_ELEMENT; + this->_pushTestPacket(count, time, eos, streamID); + + // More than one packet must have been received, and no packet can exceed + // the max transfer size + CPPUNIT_ASSERT(stub->packets.size() > 1); + for (size_t index = 0; index < stub->packets.size(); ++index) { + size_t packet_bits = stub->packets[index].size() * BITS_PER_ELEMENT; + CPPUNIT_ASSERT_MESSAGE("Packet too large", packet_bits < max_bits); + } +} + +template +void NumericOutPortTest::testPushPacketData() +{ + _testPushPacketDataImpl(); +} + +template +template +void NumericOutPortTest::_testPushPacketDataImpl() +{ + const char* stream_id = "push_packet"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + // Create a vector and fill it with a ramp + std::vector buffer; + buffer.resize(1024); + for (size_t ii = 0; ii < buffer.size(); ++ii) { + buffer[ii] = ii; + } + + // Test push-by-vector + port->pushPacket(buffer, BULKIO::PrecisionUTCTime(), false, stream_id); + CPPUNIT_ASSERT_EQUAL((size_t) 1, stub->packets.size()); + CPPUNIT_ASSERT_EQUAL(buffer.size(), stub->packets.back().size()); + // NB: 
received data is cast to T* for the benefit of dataChar, in which + // there is a mismatch in the C++ types + CPPUNIT_ASSERT(std::equal(buffer.begin(), buffer.end(), (T*)(&stub->packets.back().data[0]))); + + // Test push-by-pointer + T* ptr = &buffer[0]; + port->pushPacket(ptr, buffer.size(), BULKIO::PrecisionUTCTime(), false, stream_id); + CPPUNIT_ASSERT_EQUAL((size_t) 2, stub->packets.size()); + CPPUNIT_ASSERT_EQUAL(buffer.size(), stub->packets.back().size()); + // NB: see above + CPPUNIT_ASSERT(std::equal(buffer.begin(), buffer.end(), (T*)(&stub->packets.back().data[0]))); +} + +template +void NumericOutPortTest::testPushChunkingComplex() +{ + // Set up a complex stream + const char* stream_id = "push_chunking_complex"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + sri.mode = 1; + sri.xdelta = 0.0625; + port->pushSRI(sri); + + // Test that the push is properly chunked + BULKIO::PrecisionUTCTime time = bulkio::time::utils::create(0, 0); + this->_testPushOversizedPacket(time, false, stream_id); + + // Check that each packet contains an even number of samples (i.e., no + // complex value was split) + for (size_t index = 0; index < stub->packets.size(); ++index) { + CPPUNIT_ASSERT_MESSAGE("Packet contains a partial complex value", (stub->packets[index].size() % 2) == 0); + } + + // Check that the synthesized time stamp(s) advanced by the expected time + for (size_t index = 1; index < stub->packets.size(); ++index) { + double expected = stub->packets[index-1].size() * 0.5 * sri.xdelta; + double elapsed = stub->packets[index].T - stub->packets[index-1].T; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Incorrect time stamp delta", expected, elapsed); + } +} + +template +void NumericOutPortTest::testPushChunkingSubsizeComplex() +{ + // Set up a 2-dimensional complex stream + const char* stream_id = "push_chunking_subsize_complex"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + sri.subsize = 2048; + sri.mode = 1; + port->pushSRI(sri); + + 
this->_testPushOversizedPacket(BULKIO::PrecisionUTCTime(), false, stream_id); + + // Check that each packet is a multiple of the subsize (except the last, + // because the oversized packet was not explicitly quantized to be an exact + // multiple) + for (size_t index = 0; index < (stub->packets.size() - 1); ++index) { + CPPUNIT_ASSERT_MESSAGE("Packet size is not a multiple of subsize", (stub->packets[index].size() % 4096) == 0); + } +} + +class OutCharPortTest : public NumericOutPortTest +{ + typedef NumericOutPortTest TestBase; + + CPPUNIT_TEST_SUB_SUITE(OutCharPortTest, TestBase); + CPPUNIT_TEST(testPushPacketDataChar); + CPPUNIT_TEST_SUITE_END(); + +public: + void testPushPacketDataChar() + { + // Test overloads of pushPacket that take "char" instead of "int8_t" + _testPushPacketDataImpl(); + } + +protected: + using TestBase::port; + using TestBase::stub; +}; + +template <> +const size_t OutPortTest::BITS_PER_ELEMENT = 8; + +CPPUNIT_TEST_SUITE_REGISTRATION(OutCharPortTest); + +class OutBitPortTest : public ChunkingOutPortTest +{ + typedef ChunkingOutPortTest TestBase; + + CPPUNIT_TEST_SUB_SUITE(OutBitPortTest, TestBase); + CPPUNIT_TEST(testPushPacketData); + CPPUNIT_TEST(testPushUnaligned); + CPPUNIT_TEST_SUITE_END(); + +public: + void testPushPacketData() + { + const char* stream_id = "push_packet"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + // Create a bit buffer, and fill the backing bytes with a ramp + redhawk::bitbuffer buffer(1024); + for (int ii = 0; ii / (buffer.size() / 8); ++ii) { + buffer.data()[ii] = 3 * ii; + } + port->pushPacket(buffer, BULKIO::PrecisionUTCTime(), false, stream_id); + + CPPUNIT_ASSERT_EQUAL((size_t) 1, stub->packets.size()); + CPPUNIT_ASSERT_EQUAL(buffer.size(), stub->packets[0].size()); + const BULKIO::BitSequence& data = stub->packets[0].data; + int status = redhawk::bitops::compare(&data.data[0], 0, buffer.data(), buffer.offset(), buffer.size()); + CPPUNIT_ASSERT_MESSAGE("Received data 
does not match sent data", status == 0); + } + + void testPushUnaligned() + { + // Create bit buffer with arbitrary data, then use trim so that it does + // not start on a byte boundary + redhawk::bitbuffer buffer(16); + buffer.data()[0] = 0x14; + buffer.data()[1] = 0x85; + buffer.trim(1,14); + CPPUNIT_ASSERT(buffer.offset() == 1); + + const char* stream_id = "bit_unaligned"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + port->pushPacket(buffer, BULKIO::PrecisionUTCTime(), false, stream_id); + + // Compare received data with sent data, which we know has a non-zero + // offset + CPPUNIT_ASSERT_EQUAL((size_t) 1, stub->packets.size()); + CPPUNIT_ASSERT_EQUAL(buffer.size(), stub->packets[0].size()); + const BULKIO::BitSequence& data = stub->packets[0].data; + int status = redhawk::bitops::compare(&data.data[0], 0, buffer.data(), buffer.offset(), buffer.size()); + CPPUNIT_ASSERT_MESSAGE("Received data does not match sent data", status == 0); + } + +protected: + using TestBase::port; + using TestBase::stub; +}; + +template <> +const size_t OutPortTest::BITS_PER_ELEMENT = 1; + +CPPUNIT_TEST_SUITE_REGISTRATION(OutBitPortTest); + +class OutXMLPortTest : public OutPortTest +{ + typedef OutPortTest TestBase; + + CPPUNIT_TEST_SUB_SUITE(OutXMLPortTest, TestBase); + CPPUNIT_TEST(testPushPacketData); + CPPUNIT_TEST_SUITE_END(); + +public: + void testPushPacketData() + { + const char* stream_id = "push_packet"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + const std::string document = ""; + port->pushPacket(document, false, stream_id); + + CPPUNIT_ASSERT_EQUAL((size_t) 1, stub->packets.size()); + CPPUNIT_ASSERT_EQUAL(document, stub->packets.back().data); + } +}; + +template <> +const size_t OutPortTest::BITS_PER_ELEMENT = 8; + +CPPUNIT_TEST_SUITE_REGISTRATION(OutXMLPortTest); + +class OutFilePortTest : public OutPortTest +{ + typedef OutPortTest TestBase; + + CPPUNIT_TEST_SUB_SUITE(OutFilePortTest, 
TestBase); + CPPUNIT_TEST(testPushPacketData); + CPPUNIT_TEST_SUITE_END(); + +public: + void testPushPacketData() + { + const char* stream_id = "push_packet"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + port->pushSRI(sri); + + const std::string uri = "file:///tmp/test.dat"; + port->pushPacket(uri, BULKIO::PrecisionUTCTime(), false, stream_id); + + CPPUNIT_ASSERT_EQUAL((size_t) 1, stub->packets.size()); + CPPUNIT_ASSERT_EQUAL(uri, stub->packets.back().data); + } +}; + +template <> +const size_t OutPortTest::BITS_PER_ELEMENT = 8; + +CPPUNIT_TEST_SUITE_REGISTRATION(OutFilePortTest); + +#define CREATE_TEST(x,BASE,BITS) \ + class Out##x##PortTest : public BASE \ + { \ + CPPUNIT_TEST_SUB_SUITE(Out##x##PortTest, BASE); \ + CPPUNIT_TEST_SUITE_END(); \ + }; \ + template <> \ + const size_t OutPortTest::BITS_PER_ELEMENT = BITS; \ + CPPUNIT_TEST_SUITE_REGISTRATION(Out##x##PortTest); + +#define CREATE_NUMERIC_TEST(x,BITS) CREATE_TEST(x,NumericOutPortTest,BITS) + +CREATE_NUMERIC_TEST(Octet, 8); +CREATE_NUMERIC_TEST(Short, 16); +CREATE_NUMERIC_TEST(UShort, 16); +CREATE_NUMERIC_TEST(Long, 32); +CREATE_NUMERIC_TEST(ULong, 32); +CREATE_NUMERIC_TEST(LongLong, 64); +CREATE_NUMERIC_TEST(ULongLong, 64); +CREATE_NUMERIC_TEST(Float, 32); +CREATE_NUMERIC_TEST(Double, 64); diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTest.h b/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTest.h new file mode 100644 index 000000000..6e799ad6b --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTest.h @@ -0,0 +1,111 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef BULKIO_OUTPORTTEST_H +#define BULKIO_OUTPORTTEST_H + +#include "OutPortTestFixture.h" + +template +class OutPortTest : public OutPortTestFixture +{ + typedef OutPortTestFixture TestBase; + + CPPUNIT_TEST_SUITE(OutPortTest); + CPPUNIT_TEST(testLegacyAPI); + CPPUNIT_TEST(testConnections); + CPPUNIT_TEST(testStatistics); + CPPUNIT_TEST(testMultiOut); + CPPUNIT_TEST_SUITE_END(); + +public: + void testLegacyAPI(); + void testConnections(); + void testStatistics(); + void testMultiOut(); + +protected: + typedef typename TestBase::StubType StubType; + typedef typename Port::CorbaType CorbaType; + + static const size_t BITS_PER_ELEMENT; + + void _addStreamFilter(const std::string& streamId, const std::string& connectionId); + + using TestBase::port; + using TestBase::stub; + + std::vector connectionTable; +}; + +template +class ChunkingOutPortTest : public OutPortTest +{ + typedef OutPortTest TestBase; + + CPPUNIT_TEST_SUB_SUITE(ChunkingOutPortTest, TestBase); + CPPUNIT_TEST(testPushChunking); + CPPUNIT_TEST(testPushChunkingEOS); + CPPUNIT_TEST(testPushChunkingSubsize); + CPPUNIT_TEST_SUITE_END(); + +public: + void testPushChunking(); + void testPushChunkingEOS(); + void testPushChunkingSubsize(); + +protected: + typedef typename Port::CorbaType CorbaType; + + void 
_testPushOversizedPacket(const BULKIO::PrecisionUTCTime& T, bool EOS, const std::string& streamId); + + using TestBase::BITS_PER_ELEMENT; + using TestBase::port; + using TestBase::stub; +}; + +template +class NumericOutPortTest : public ChunkingOutPortTest +{ + typedef ChunkingOutPortTest TestBase; + + CPPUNIT_TEST_SUB_SUITE(NumericOutPortTest, TestBase); + CPPUNIT_TEST(testPushPacketData); + CPPUNIT_TEST(testPushChunkingComplex); + CPPUNIT_TEST(testPushChunkingSubsizeComplex); + CPPUNIT_TEST_SUITE_END(); + +public: + void testPushPacketData(); + void testPushChunkingComplex(); + void testPushChunkingSubsizeComplex(); + +protected: + typedef typename Port::NativeType NativeType; + typedef typename Port::VectorType VectorType; + typedef typename Port::CorbaType CorbaType; + + template + void _testPushPacketDataImpl(); + + using TestBase::port; + using TestBase::stub; +}; + +#endif // BULKIO_OUTPORTTEST_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTestFixture.h b/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTestFixture.h new file mode 100644 index 000000000..3b81d971e --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/OutPortTestFixture.h @@ -0,0 +1,141 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef BULKIO_OUTPORTTESTFIXTURE_H +#define BULKIO_OUTPORTTESTFIXTURE_H + +#include + +#include + +#include "InPortStub.h" + +template +class OutPortTestFixture : public CppUnit::TestFixture +{ +public: + void setUp() + { + port = new Port(getPortName()); + + stub = _createStub(); + + CORBA::Object_var objref = stub->_this(); + port->connectPort(objref, "test_connection"); + } + + void tearDown() + { + try { + _disconnectPorts(); + } catch (...) { + // Ignore disconnection errors + } + + // The port has not been used as a CORBA object, so we can delete it directly + delete port; + + _releaseServants(); + + stub = 0; + } + +protected: + typedef typename Port::CorbaType CorbaType; + typedef InPortStub StubType; + + virtual std::string getPortName() const + { + std::string name = bulkio::CorbaTraits::name(); + return name + "_out"; + }; + + StubType* _createStub() + { + StubType* inport = new StubType(); + PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->activate_object(inport); + _servants.push_back(inport); + return inport; + } + + void _disconnectPorts() + { + ExtendedCF::UsesConnectionSequence_var connections = port->connections(); + for (CORBA::ULong ii = 0; ii < connections->length(); ++ii) { + port->disconnectPort(connections[ii].connectionId); + } + } + + void _releaseServants() + { + for (ServantList::iterator servant = _servants.begin(); servant != _servants.end(); ++servant) { + try { + PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->servant_to_id(*servant); + ossie::corba::RootPOA()->deactivate_object(oid); + } catch (...) 
{ + // Ignore CORBA exceptions + } + (*servant)->_remove_ref(); + } + _servants.clear(); + } + + inline void _pushTestPacket(size_t length, const BULKIO::PrecisionUTCTime& time, + bool eos, const std::string& streamID) + { + typename Port::NativeSequenceType data; + data.resize(length); + port->pushPacket(data, time, eos, streamID); + } + + Port* port; + StubType* stub; + + typedef std::vector ServantList; + ServantList _servants; +}; + +template <> +inline void OutPortTestFixture::_pushTestPacket(size_t length, + const BULKIO::PrecisionUTCTime& time, + bool eos, const std::string& streamID) +{ + redhawk::bitbuffer data(length); + port->pushPacket(data, time, eos, streamID); +} + +template <> +inline void OutPortTestFixture::_pushTestPacket(size_t length, + const BULKIO::PrecisionUTCTime& time, + bool eos, const std::string& streamID) +{ + std::string data(length, ' '); + port->pushPacket(data, time, eos, streamID); +} + +template <> +inline void OutPortTestFixture::_pushTestPacket(size_t length, + const BULKIO::PrecisionUTCTime&, + bool eos, const std::string& streamID) +{ + std::string data(length, ' '); + port->pushPacket(data, eos, streamID); +} + +#endif // BULKIO_OUTPORTTESTFIXTURE_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/OutStreamTest.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/OutStreamTest.cpp index fb66f8335..889849f34 100644 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/OutStreamTest.cpp +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/OutStreamTest.cpp @@ -19,50 +19,533 @@ */ #include "OutStreamTest.h" + +#include #include "bulkio.h" template -void OutStreamTest::setUp() +void OutStreamTest::testOperators() +{ + StreamType null_stream; + CPPUNIT_ASSERT(!null_stream); + if (null_stream) { + // This check is structured as an if/else to avoid using operator! 
+ CPPUNIT_FAIL("Null stream evaluted to true"); + } + CPPUNIT_ASSERT(null_stream == StreamType()); + + // Create a new stream + StreamType good_stream = port->createStream("test_operators"); + CPPUNIT_ASSERT_EQUAL(false, !good_stream); + if (good_stream) { + // This check is structured as an if/else because CppUnit's assert + // macro implicitly uses operator! + } else { + CPPUNIT_FAIL("Valid stream evaluated to false"); + } + CPPUNIT_ASSERT(good_stream != null_stream); + + // Get another handle to the same stream, should be equal + StreamType same_stream = port->getStream("test_operators"); + CPPUNIT_ASSERT(same_stream == good_stream); + + // Create a new stream, should not be equal + StreamType other_stream = port->createStream("test_operators_2"); + CPPUNIT_ASSERT(other_stream != good_stream); +} + +template +void OutStreamTest::testBasicWrite() +{ + StreamType stream = port->createStream("test_basic_write"); + CPPUNIT_ASSERT(stub->packets.empty()); + + const BULKIO::PrecisionUTCTime time = bulkio::time::utils::now(); + _writeSinglePacket(stream, 256, time); + CPPUNIT_ASSERT(stub->packets.size() == 1); + CPPUNIT_ASSERT_EQUAL((size_t) 256, stub->packets[0].size()); + CPPUNIT_ASSERT(!stub->packets[0].EOS); + CPPUNIT_ASSERT_MESSAGE("Received incorrect time stamp", _checkLastTimestamp(time)); + CPPUNIT_ASSERT_EQUAL(stream.streamID(), stub->packets[0].streamID); +} + +template +void OutStreamTest::testSriFields() +{ + BULKIO::StreamSRI sri = bulkio::sri::create("test_sri"); + sri.xstart = -2.5; + sri.xdelta = 0.125; + sri.xunits = BULKIO::UNITS_FREQUENCY; + sri.subsize = 1024; + sri.ystart = 2.5; + sri.ydelta = 1.0; + sri.yunits = BULKIO::UNITS_TIME; + sri.mode = 1; + sri.blocking = 1; + ossie::corba::push_back(sri.keywords, redhawk::PropertyType("string", "value")); + ossie::corba::push_back(sri.keywords, redhawk::PropertyType("number", (CORBA::Long)100)); + + // Create a stream from the SRI; assign to a const variable to ensure that + // all accessors are 
const-safe + const StreamType stream = port->createStream(sri); + CPPUNIT_ASSERT(stream.streamID() == (const char*) sri.streamID); + CPPUNIT_ASSERT(stream.xstart() == sri.xstart); + CPPUNIT_ASSERT(stream.xdelta() == sri.xdelta); + CPPUNIT_ASSERT(stream.xunits() == sri.xunits); + CPPUNIT_ASSERT(stream.subsize() == sri.subsize); + CPPUNIT_ASSERT(stream.ystart() == sri.ystart); + CPPUNIT_ASSERT(stream.ydelta() == sri.ydelta); + CPPUNIT_ASSERT(stream.yunits() == sri.yunits); + CPPUNIT_ASSERT(stream.complex()); + CPPUNIT_ASSERT(stream.blocking()); + CPPUNIT_ASSERT(sri.keywords.length() == stream.keywords().size()); + CPPUNIT_ASSERT_EQUAL(std::string("value"), stream.getKeyword("string").toString()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 100, stream.getKeyword("number").toLong()); +} + +template +void OutStreamTest::testSriUpdate() { - ossie::corba::CorbaInit(0,0); + // Create initial stream; all changes should be queued up for the first + // write + StreamType stream = port->createStream("test_sri_update"); + double xdelta = 1.0 / 1.25e6; + stream.xdelta(xdelta); + stream.blocking(true); + CPPUNIT_ASSERT(stub->H.empty()); + + // Write data to trigger initial SRI update + _writeSinglePacket(stream, 10); + CPPUNIT_ASSERT(stub->H.size() == 1); + CPPUNIT_ASSERT(stub->H.back().blocking); + CPPUNIT_ASSERT_EQUAL(xdelta, stub->H.back().xdelta); - std::string name = "data" + getPortName() + "_out"; - port = new Port(name); + // Update xdelta; no SRI update should occur + double new_xdelta = 1.0/2.5e6; + stream.xdelta(new_xdelta); + CPPUNIT_ASSERT(stub->H.size() == 1); + CPPUNIT_ASSERT_EQUAL(xdelta, stub->H.back().xdelta); - stub = new InPortStub(); - PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->activate_object(stub); + // Write data to trigger SRI update + _writeSinglePacket(stream, 25); + CPPUNIT_ASSERT(stub->H.size() == 2); + CPPUNIT_ASSERT_EQUAL(new_xdelta, stub->H.back().xdelta); - CORBA::Object_var objref = stub->_this(); - port->connectPort(objref, 
"test_connection"); + // Change blocking flag, then trigger an SRI update + stream.blocking(false); + CPPUNIT_ASSERT(stub->H.size() == 2); + CPPUNIT_ASSERT(stub->H.back().blocking); + _writeSinglePacket(stream, 25); + CPPUNIT_ASSERT(stub->H.size() == 3); + CPPUNIT_ASSERT(!stub->H.back().blocking); + + // Change multiple fields, but only one SRI update should occur (after the + // next write) + stream.complex(true); + stream.subsize(16); + stream.xstart(-M_PI); + stream.xdelta(2.0 * M_PI / 1024.0); + stream.xunits(BULKIO::UNITS_FREQUENCY); + stream.ydelta(1024.0 / 1.25e6); + stream.yunits(BULKIO::UNITS_TIME); + CPPUNIT_ASSERT(stub->H.size() == 3); + + // Trigger SRI update and verify that it matches + _writeSinglePacket(stream, 1024); + CPPUNIT_ASSERT(stub->H.size() == 4); + CPPUNIT_ASSERT(bulkio::sri::DefaultComparator(stream.sri(), stub->H.back())); } template -void OutStreamTest::tearDown() +void OutStreamTest::testKeywords() { - port->disconnectPort("test_connection"); + StreamType stream = port->createStream("test_keywords"); + _writeSinglePacket(stream, 1); + CPPUNIT_ASSERT(stub->H.size() == 1); + + // Set/get keywords + stream.setKeyword("integer", (CORBA::Long)250); + stream.setKeyword("string", "value"); + stream.setKeyword("double", 101.1e6); + stream.setKeyword("boolean", false); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 250, stream.getKeyword("integer").toLong()); + CPPUNIT_ASSERT_EQUAL(std::string("value"), stream.getKeyword("string").toString()); + CPPUNIT_ASSERT_EQUAL(101.1e6, stream.getKeyword("double").toDouble()); + CPPUNIT_ASSERT(!stream.getKeyword("boolean").toBoolean()); + + // Erase and check for presence of keywords + stream.eraseKeyword("string"); + CPPUNIT_ASSERT(stream.hasKeyword("integer")); + CPPUNIT_ASSERT(!stream.hasKeyword("string")); + CPPUNIT_ASSERT(stream.hasKeyword("double")); + CPPUNIT_ASSERT(stream.hasKeyword("boolean")); + + // Write a packet to trigger an SRI update + CPPUNIT_ASSERT(stub->H.size() == 1); + _writeSinglePacket(stream, 
1); + CPPUNIT_ASSERT(stub->H.size() == 2); + { + const redhawk::PropertyMap& keywords = redhawk::PropertyMap::cast(stub->H.back().keywords); + CPPUNIT_ASSERT_EQUAL(stream.keywords().size(), keywords.size()); + CPPUNIT_ASSERT_EQUAL(stream.getKeyword("integer").toLong(), keywords.get("integer").toLong()); + CPPUNIT_ASSERT_EQUAL(stream.getKeyword("double").toDouble(), keywords.get("double").toDouble()); + CPPUNIT_ASSERT_EQUAL(stream.getKeyword("boolean").toBoolean(), keywords.get("boolean").toBoolean()); + } - // The port has not been used as a CORBA object, so we can delete it directly - delete port; + // Replace keywords with a new set + redhawk::PropertyMap new_keywords; + new_keywords["COL_RF"] = 100.0e6; + new_keywords["CHAN_RF"] = 101.1e6; + stream.keywords(new_keywords); + CPPUNIT_ASSERT_EQUAL((size_t) 2, stream.keywords().size()); + CPPUNIT_ASSERT_EQUAL(100.0e6, stream.getKeyword("COL_RF").toDouble()); + CPPUNIT_ASSERT_EQUAL(101.1e6, stream.getKeyword("CHAN_RF").toDouble()); - try { - PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->servant_to_id(stub); - ossie::corba::RootPOA()->deactivate_object(oid); - } catch (...) 
{ - // Ignore CORBA exceptions + // Trigger another SRI update + CPPUNIT_ASSERT(stub->H.size() == 2); + _writeSinglePacket(stream, 1); + CPPUNIT_ASSERT(stub->H.size() == 3); + { + const redhawk::PropertyMap& keywords = redhawk::PropertyMap::cast(stub->H.back().keywords); + CPPUNIT_ASSERT_EQUAL(stream.keywords().size(), keywords.size()); + CPPUNIT_ASSERT_EQUAL(stream.getKeyword("COL_RF").toDouble(), keywords.get("COL_RF").toDouble()); + CPPUNIT_ASSERT_EQUAL(stream.getKeyword("COL_RF").toDouble(), keywords.get("COL_RF").toDouble()); } - stub->_remove_ref(); - stub = 0; } template -void OutStreamTest::testWriteTimestampsReal() +void OutStreamTest::testSendEosOnClose() +{ + StreamType stream = port->createStream("close_eos"); + + CPPUNIT_ASSERT(stub->H.size() == 0); + CPPUNIT_ASSERT(stub->packets.size() == 0); + + _writeSinglePacket(stream, 16); + + CPPUNIT_ASSERT(stub->H.size() == 1); + CPPUNIT_ASSERT(stub->packets.size() == 1); + CPPUNIT_ASSERT(!stub->packets.back().EOS); + + stream.close(); + CPPUNIT_ASSERT(stub->packets.size() == 2); + CPPUNIT_ASSERT(stub->packets.back().EOS); +} + +template +void OutStreamTest::_writeSinglePacket(StreamType& stream, size_t size, + const BULKIO::PrecisionUTCTime& time) +{ + typedef typename StreamType::ScalarType ScalarType; + std::vector buffer; + buffer.resize(size); + stream.write(buffer, time); +} + +template +bool OutStreamTest::_checkLastTimestamp(const BULKIO::PrecisionUTCTime& time) +{ + return (time == stub->packets.back().T); +} + +// Specialization for dataBit, which uses redhawk::bitbuffer +template <> +void OutStreamTest::_writeSinglePacket(StreamType& stream, size_t size, + const BULKIO::PrecisionUTCTime& time) +{ + redhawk::bitbuffer buffer(size); + buffer.fill(0, buffer.size(), 0); + stream.write(buffer, time); +} + +// Specialization for dataFile, which uses std::string +template <> +void OutStreamTest::_writeSinglePacket(StreamType& stream, size_t size, + const BULKIO::PrecisionUTCTime& time) +{ + std::string 
url(size, 'F'); + stream.write(url, time); +} + +// Specializations for dataXML, which uses std::string and does not include a +// timestamp +template <> +void OutStreamTest::_writeSinglePacket(StreamType& stream, size_t size, + const BULKIO::PrecisionUTCTime& /*unused*/) +{ + std::string xml(size, 'X'); + stream.write(xml); +} + +template <> +bool OutStreamTest::_checkLastTimestamp(const BULKIO::PrecisionUTCTime& /*unused*/) +{ + // dataXML has no time stamp, so the check should always succeed + return true; +} + + +template +void BufferedOutStreamTest::testBufferedWrite() +{ + // Initial state is unbuffered; turn on buffering + StreamType stream = port->createStream("test_buffered_write"); + CPPUNIT_ASSERT_EQUAL((size_t) 0, stream.bufferSize()); + stream.setBufferSize(128); + CPPUNIT_ASSERT_EQUAL((size_t) 128, stream.bufferSize()); + CPPUNIT_ASSERT(stub->packets.size() == 0); + + // First write is below the buffer size + BufferType buffer; + buffer.resize(48); + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 0); + + // The second write is still below the buffer size + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 0); + + // The third write goes beyond the buffer size and should trigger a push, + // but only up to the buffer size (48*3 == 144) + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 1); + CPPUNIT_ASSERT_EQUAL(stream.bufferSize(), stub->packets.back().size()); + + // There should now be 16 samples in the queue; writing another 48 should + // not trigger a push + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 1); + + // Flush the stream and make sure we get as many samples as expected + stream.flush(); + CPPUNIT_ASSERT(stub->packets.size() == 2); + CPPUNIT_ASSERT_EQUAL((size_t) 64, stub->packets.back().size()); + + // Disable buffering; push should happen immediately + 
stream.setBufferSize(0); + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 3); +} + +template +void BufferedOutStreamTest::testWriteSkipBuffer() +{ + // Turn on buffering + StreamType stream = port->createStream("test_skip_buffer"); + stream.setBufferSize(100); + + // With an empty queue, large write should go right through + BufferType buffer; + buffer.resize(256); + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 1); + CPPUNIT_ASSERT_EQUAL(buffer.size(), stub->packets.back().size()); + + // Queue up a bit of data + buffer.resize(16); + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 1); + + // With queued data, the large write should get broken up into a buffer- + // sized packet + buffer.resize(128); + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 2); + CPPUNIT_ASSERT_EQUAL(stream.bufferSize(), stub->packets.back().size()); +} + +template +void BufferedOutStreamTest::testFlush() +{ + // Turn on buffering + StreamType stream = port->createStream("test_flush"); + stream.setBufferSize(64); + + // Queue data (should not flush) + BufferType buffer; + buffer.resize(48); + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->H.size() == 0); + CPPUNIT_ASSERT(stub->packets.size() == 0); + + // Make sure flush sends a packet + stream.flush(); + CPPUNIT_ASSERT(stub->H.size() == 1); + CPPUNIT_ASSERT(stub->packets.size() == 1); + CPPUNIT_ASSERT_EQUAL(buffer.size(), stub->packets.back().size()); +} + +template +void BufferedOutStreamTest::testFlushOnClose() +{ + StreamType stream = port->createStream("test_flush_close"); + stream.setBufferSize(64); + + // Queue data (should not flush) + BufferType buffer; + buffer.resize(48); + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->H.size() == 0); + CPPUNIT_ASSERT(stub->packets.size() == 0); + + // Close the stream; 
should cause a flush + stream.close(); + CPPUNIT_ASSERT(stub->H.size() == 1); + CPPUNIT_ASSERT(stub->packets.size() == 1); +} + +template +void BufferedOutStreamTest::testFlushOnSriChange() +{ + // Start with known values for important stream metadata + StreamType stream = port->createStream("test_flush_sri"); + stream.setBufferSize(64); + stream.xdelta(0.125); + stream.complex(false); + stream.blocking(false); + stream.subsize(0); + + // Queue data (should not flush) + BufferType buffer; + buffer.resize(48); + stream.write(buffer, bulkio::time::utils::now()); + + // Change the xdelta to cause a flush; the received data should be using + // the old xdelta + CPPUNIT_ASSERT(stub->packets.size() == 0); + stream.xdelta(0.25); + CPPUNIT_ASSERT_MESSAGE("xdelta change did not flush stream", stub->packets.size() == 1); + CPPUNIT_ASSERT_EQUAL(0.125, stub->H.back().xdelta); + + // Queue more data (should not flush) + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->H.size() == 1); + CPPUNIT_ASSERT(stub->packets.size() == 1); + + // Change the mode to complex to cause a flush; the mode shouldn't change + // yet, but xdelta should be up-to-date now + stream.complex(true); + CPPUNIT_ASSERT_MESSAGE("Complex mode change did not flush stream", stub->packets.size() == 2); + CPPUNIT_ASSERT(stub->H.back().mode == 0); + CPPUNIT_ASSERT_EQUAL(stream.xdelta(), stub->H.back().xdelta); + + // Queue more data (should not flush) + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->H.size() == 2); + CPPUNIT_ASSERT(stub->packets.size() == 2); + + // Change the blocking mode to cause a flush; the blocking flag shouldn't + // change yet, but mode should be up-to-date now + stream.blocking(true); + CPPUNIT_ASSERT_MESSAGE("Blocking change did not flush stream", stub->packets.size() == 3); + CPPUNIT_ASSERT(stub->H.back().blocking == 0); + CPPUNIT_ASSERT(stub->H.back().mode != 0); + + // Queue more data (should not flush) + stream.write(buffer, 
bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->H.size() == 3); + CPPUNIT_ASSERT(stub->packets.size() == 3); + + // Change the subsize to cause a flush; the subsize shouldn't change yet, + // but blocking should be up-to-date now + stream.subsize(16); + CPPUNIT_ASSERT_MESSAGE("Subsize change did not flush stream", stub->packets.size() == 4); + CPPUNIT_ASSERT(stub->H.back().subsize == 0); + CPPUNIT_ASSERT(stub->H.back().blocking); +} + +template +void BufferedOutStreamTest::testFlushOnBufferSizeChange() +{ + StreamType stream = port->createStream("test_flush_buffer_size"); + stream.setBufferSize(64); + + // Queue data (should not flush) + BufferType buffer; + buffer.resize(48); + stream.write(buffer, bulkio::time::utils::now()); + CPPUNIT_ASSERT(stub->packets.size() == 0); + + // Reduce the buffer size smaller than the current queue, should trigger a + // flush + stream.setBufferSize(32); + CPPUNIT_ASSERT_MESSAGE("Reducing buffer size below queue size did not flush", stub->packets.size() == 1); + + // Reduce the buffer size again, but not down to the queue size, should not + // trigger a flush + buffer.resize(16); + stream.write(buffer, bulkio::time::utils::now()); + stream.setBufferSize(24); + CPPUNIT_ASSERT_MESSAGE("Reducing buffer size above queue size flushed", stub->packets.size() == 1); + + // Boundary condition: exact size + stream.setBufferSize(16); + CPPUNIT_ASSERT_MESSAGE("Reducing buffer size to exact size did not flush", stub->packets.size() == 2); + + // Increasing the buffer size should not trigger a flush + buffer.resize(8); + stream.write(buffer, bulkio::time::utils::now()); + stream.setBufferSize(128); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Increasing buffer size flushed", (size_t) 2, stub->packets.size()); + + // Disabling buffering must flush + stream.setBufferSize(0); + CPPUNIT_ASSERT_MESSAGE("Disabling buffering did not flush", stub->packets.size() == 3); +} + +template +void NumericOutStreamTest::testStreamWriteCheck() +{ + StreamType stream = 
port->createStream("compound_push"); + // Generate a ramp using the scalar type; if the data needs to be pushed as + // complex, it will be reintepreted there + std::vector scalars; + size_t number_tstamps = 10; + size_t payload_tstamps = 9; + size_t single_push_size = 100; + scalars.resize(single_push_size*number_tstamps); + for (size_t ii = 0; ii < scalars.size(); ++ii) { + scalars[ii] = ii; + } + size_t sample_count = scalars.size(); + + // Create 9 timestamps + BULKIO::PrecisionUTCTime start = bulkio::time::utils::now(); + std::list tstamps; + for (size_t tstamp_idx=0; tstamp_idxpackets.size()); + + // Check that the packets are at the right offsets (implicitly checking + // that the prior packet was the right size) and have the correct time + size_t scalars_received = 0; + std::list::iterator ts = tstamps.begin(); + for (size_t ii = 0; ii < payload_tstamps; ++ii, ++ts) { + // Take complex data into account for the expected timestamp offset + size_t expected_offset = ts->offset; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Packet timestamp is incorrect", ts->time, stub->packets[ii].T); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Packet offset does not match timestamp offset", expected_offset, scalars_received); + scalars_received += stub->packets[ii].data.length(); + } + CPPUNIT_ASSERT_EQUAL_MESSAGE("Final packet size is incorrect", scalars_received, push_size); +} + +template +void NumericOutStreamTest::testWriteTimestampsReal() { StreamType stream = port->createStream("write_timestamps_real"); _writeTimestampsImpl(stream, false); } template -void OutStreamTest::testWriteTimestampsComplex() +void NumericOutStreamTest::testWriteTimestampsComplex() { StreamType stream = port->createStream("write_timestamps_complex"); stream.complex(true); @@ -70,7 +553,7 @@ void OutStreamTest::testWriteTimestampsComplex() } template -void OutStreamTest::testWriteTimestampsMixed() +void NumericOutStreamTest::testWriteTimestampsMixed() { StreamType stream = port->createStream("write_timestamps_mixed"); 
stream.complex(true); @@ -78,7 +561,7 @@ void OutStreamTest::testWriteTimestampsMixed() } template -void OutStreamTest::_writeTimestampsImpl(StreamType& stream, bool complexData) +void NumericOutStreamTest::_writeTimestampsImpl(StreamType& stream, bool complexData) { // Generate a ramp using the scalar type; if the data needs to be pushed as // complex, it will be reintepreted there @@ -130,23 +613,30 @@ void OutStreamTest::_writeTimestampsImpl(StreamType& stream, bool complexD CPPUNIT_ASSERT_EQUAL_MESSAGE("Final packet size is incorrect", scalars_received, scalars.size()); } - -#define CREATE_TEST(x) \ - class Out##x##StreamTest : public OutStreamTest \ +#define CREATE_TEST_IMPL(TESTCLASS,PORT,NAME,BASE) \ + class TESTCLASS : public BASE \ { \ - CPPUNIT_TEST_SUB_SUITE(Out##x##StreamTest, OutStreamTest); \ + typedef BASE TestBase; \ + CPPUNIT_TEST_SUB_SUITE(TESTCLASS, TestBase); \ CPPUNIT_TEST_SUITE_END(); \ - virtual std::string getPortName() const { return #x; }; \ }; \ - CPPUNIT_TEST_SUITE_REGISTRATION(Out##x##StreamTest); - -CREATE_TEST(Octet); -CREATE_TEST(Char); -CREATE_TEST(Short); -CREATE_TEST(UShort); -CREATE_TEST(Long); -CREATE_TEST(ULong); -CREATE_TEST(LongLong); -CREATE_TEST(ULongLong); -CREATE_TEST(Float); -CREATE_TEST(Double); + CPPUNIT_TEST_SUITE_REGISTRATION(TESTCLASS); + +#define CREATE_TEST(x,BASE) CREATE_TEST_IMPL(Out##x##StreamTest,bulkio::Out##x##Port,x,BASE) +#define CREATE_BASIC_TEST(x) CREATE_TEST(x,OutStreamTest) +#define CREATE_NUMERIC_TEST(x) CREATE_TEST(x,NumericOutStreamTest) + +CREATE_NUMERIC_TEST(Octet); +CREATE_NUMERIC_TEST(Char); +CREATE_NUMERIC_TEST(Short); +CREATE_NUMERIC_TEST(UShort); +CREATE_NUMERIC_TEST(Long); +CREATE_NUMERIC_TEST(ULong); +CREATE_NUMERIC_TEST(LongLong); +CREATE_NUMERIC_TEST(ULongLong); +CREATE_NUMERIC_TEST(Float); +CREATE_NUMERIC_TEST(Double); + +CREATE_TEST(Bit,BufferedOutStreamTest); +CREATE_BASIC_TEST(XML); +CREATE_BASIC_TEST(File); diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/OutStreamTest.h 
b/bulkioInterfaces/libsrc/testing/tests/cpp/OutStreamTest.h index 8798ef16f..5e0ce274e 100644 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/OutStreamTest.h +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/OutStreamTest.h @@ -23,21 +23,90 @@ #include #include -#include "InPortStub.h" +#include "OutPortTestFixture.h" template -class OutStreamTest : public CppUnit::TestFixture +class OutStreamTest : public OutPortTestFixture { + typedef OutPortTestFixture TestBase; + CPPUNIT_TEST_SUITE(OutStreamTest); + CPPUNIT_TEST(testOperators); + CPPUNIT_TEST(testBasicWrite); + CPPUNIT_TEST(testSriFields); + CPPUNIT_TEST(testSriUpdate); + CPPUNIT_TEST(testKeywords); + CPPUNIT_TEST(testSendEosOnClose); + CPPUNIT_TEST_SUITE_END(); + +public: + void testOperators(); + + void testBasicWrite(); + + void testSriFields(); + void testSriUpdate(); + + void testKeywords(); + + void testSendEosOnClose(); + +protected: + typedef typename Port::StreamType StreamType; + + void _writeSinglePacket(StreamType& stream, size_t size, + const BULKIO::PrecisionUTCTime& time=bulkio::time::utils::now()); + + bool _checkLastTimestamp(const BULKIO::PrecisionUTCTime& time); + + using TestBase::port; + using TestBase::stub; +}; + +template +class BufferedOutStreamTest : public OutStreamTest +{ + typedef OutStreamTest TestBase; + CPPUNIT_TEST_SUB_SUITE(BufferedOutStreamTest, TestBase); + CPPUNIT_TEST(testBufferedWrite); + CPPUNIT_TEST(testWriteSkipBuffer); + CPPUNIT_TEST(testFlush); + CPPUNIT_TEST(testFlushOnClose); + CPPUNIT_TEST(testFlushOnSriChange); + CPPUNIT_TEST(testFlushOnBufferSizeChange); + CPPUNIT_TEST_SUITE_END(); + +public: + void testBufferedWrite(); + void testWriteSkipBuffer(); + + void testFlush(); + void testFlushOnClose(); + void testFlushOnSriChange(); + void testFlushOnBufferSizeChange(); + +protected: + typedef typename Port::StreamType StreamType; + typedef typename Port::CorbaType CorbaType; + typedef typename bulkio::BufferTraits::MutableBufferType BufferType; + + using 
TestBase::port; + using TestBase::stub; +}; + +template +class NumericOutStreamTest : public BufferedOutStreamTest +{ + typedef BufferedOutStreamTest TestBase; + CPPUNIT_TEST_SUB_SUITE(NumericOutStreamTest, TestBase); + CPPUNIT_TEST(testStreamWriteCheck); CPPUNIT_TEST(testWriteTimestampsReal); CPPUNIT_TEST(testWriteTimestampsComplex); CPPUNIT_TEST(testWriteTimestampsMixed); CPPUNIT_TEST_SUITE_END(); public: - void setUp(); - void tearDown(); - + void testStreamWriteCheck(); void testWriteTimestampsReal(); void testWriteTimestampsComplex(); void testWriteTimestampsMixed(); @@ -47,14 +116,10 @@ class OutStreamTest : public CppUnit::TestFixture typedef typename StreamType::ScalarType ScalarType; typedef typename StreamType::ComplexType ComplexType; - typedef typename Port::Traits PortTraits; - void _writeTimestampsImpl(StreamType& stream, bool complexData); - virtual std::string getPortName() const = 0; - - Port* port; - InPortStub* stub; + using TestBase::port; + using TestBase::stub; }; #endif // BULKIO_OUTSTREAMTEST_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/PrecisionUTCTimeTest.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/PrecisionUTCTimeTest.cpp new file mode 100644 index 000000000..008868aef --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/PrecisionUTCTimeTest.cpp @@ -0,0 +1,221 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "PrecisionUTCTimeTest.h" + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(PrecisionUTCTimeTest); + +void PrecisionUTCTimeTest::testNow() +{ + BULKIO::PrecisionUTCTime time = bulkio::time::utils::now(); +} + +void PrecisionUTCTimeTest::testCreate() +{ + const double wsec = 100.0; + const double fsec = 0.125; + BULKIO::PrecisionUTCTime time = bulkio::time::utils::create(100.0, 0.125); + + CPPUNIT_ASSERT_EQUAL(wsec, time.twsec); + CPPUNIT_ASSERT_EQUAL(fsec, time.tfsec); +} + +void PrecisionUTCTimeTest::testCompare() +{ + BULKIO::PrecisionUTCTime t1 = bulkio::time::utils::create(100.0, 0.5); + BULKIO::PrecisionUTCTime t2 = bulkio::time::utils::create(100.0, 0.5); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Identical times did not compare equal", t1, t2); + CPPUNIT_ASSERT_MESSAGE("Identical times did not compare as >=", t1 >= t2); + CPPUNIT_ASSERT_MESSAGE("Identical times did not compare as <=", t2 <= t1); + CPPUNIT_ASSERT_MESSAGE("Identical times compared as >", !(t1 > t2)); + CPPUNIT_ASSERT_MESSAGE("Identical times compared as <", !(t1 < t2)); + + // Only fractional seconds differ + t1 = bulkio::time::utils::create(100.0, 0.5); + t2 = bulkio::time::utils::create(100.0, 0.25); + CPPUNIT_ASSERT_MESSAGE("Different times did not compare !=", t1 != t2); + CPPUNIT_ASSERT_MESSAGE("Time with larger fractional did not compare >", t1 > t2); + CPPUNIT_ASSERT_MESSAGE("Time with smaller fractional did not compare <", t2 < t1); + + // Only whole seconds differ + t1 = bulkio::time::utils::create(100.0, 0.75); + t2 = bulkio::time::utils::create(101.0, 0.75); + 
CPPUNIT_ASSERT_MESSAGE("Different times did not compare !=", t1 != t2); + CPPUNIT_ASSERT_MESSAGE("Time with smaller whole did not compare <=", t1 <= t2); + CPPUNIT_ASSERT_MESSAGE("Time with larger whole did not compare >=", t2 >= t1); + + // Whole seconds differ, but fractional seconds have the opposite ordering (which has no effect) + t1 = bulkio::time::utils::create(100.0, 0.75); + t2 = bulkio::time::utils::create(5000.0, 0.25); + CPPUNIT_ASSERT_MESSAGE("Different times compared equal", !(t1 == t2)); + CPPUNIT_ASSERT_MESSAGE("Time with smaller whole and larger fractional did not compare >", t1 < t2); + CPPUNIT_ASSERT_MESSAGE("Time with larger whole and smaller fractional did not compare <", t2 > t1); +} + +void PrecisionUTCTimeTest::testNormalize() +{ + // NOTE: All tests use fractional portions that are exact binary fractions + // to avoid potential roundoff issues + + // Already normalized, no change + BULKIO::PrecisionUTCTime time = bulkio::time::utils::create(100.0, 0.5); + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Already normalized time", bulkio::time::utils::create(100.0, 0.5), time); + + // Whole seconds has fractional portion, should be moved to fractional + // seconds + time.twsec = 100.25; + time.tfsec = 0.25; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing whole", bulkio::time::utils::create(100.0, 0.5), time); + + // Whole seconds has fractional portion, should be moved to fractional + // seconds, leading to carry + time.twsec = 100.75; + time.tfsec = 0.75; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing whole with carry", bulkio::time::utils::create(101.0, 0.5), time); + + // Fractional seconds contains whole portion, should be moved to whole + // seconds + time.twsec = 100.0; + time.tfsec = 2.5; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing fractional", bulkio::time::utils::create(102.0, 0.5), time); + + // Both 
parts require normalization; fractional portion of whole seconds + // adds an additional carry + time.twsec = 100.75; + time.tfsec = 2.75; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing both", bulkio::time::utils::create(103.0, 0.5), time); + + // Negative fractional value should borrow + time.twsec = 100.0; + time.tfsec = -0.25; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing negative fractional", bulkio::time::utils::create(99.0, 0.75), time); + + // Negative fractional value with magnitude greater than one + time.twsec = 100.0; + time.tfsec = -3.125; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing negative fractional > 1", bulkio::time::utils::create(96.0, 0.875), time); + + // Fractional portion of whole seconds greater than negative fractional + // seconds + time.twsec = 100.5; + time.tfsec = -.125; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing both with negative fractional", bulkio::time::utils::create(100.0, 0.375), time); + + // Negative fractional seconds greater than fractional portion of whole + // seconds + time.twsec = 100.125; + time.tfsec = -.5; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing both with borrow", bulkio::time::utils::create(99.0, 0.625), time); + + // Negative fractional seconds have whole portion, but seconds whole + // seconds have fractional portion with larger magnitude than remaining + // fractional seconds + time.twsec = 100.75; + time.tfsec = -2.5; + bulkio::time::utils::normalize(time); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Normalizing both with negative fractional > 1", bulkio::time::utils::create(98.0, 0.25), time); +} + +void PrecisionUTCTimeTest::testOperators() +{ + // NOTE: All tests use fractional portions that are exact binary fractions + // to avoid potential roundoff issues + + // Test that copy works as expected + const 
BULKIO::PrecisionUTCTime reference = bulkio::time::utils::create(100.0, 0.5); + BULKIO::PrecisionUTCTime t1 = reference; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Copy returned different values", reference, t1); + + // Add a positive offset + BULKIO::PrecisionUTCTime result = t1 + 1.75; + BULKIO::PrecisionUTCTime expected = bulkio::time::utils::create(102.0, 0.25); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Original value modified", reference, t1); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Add positive offset", expected, result); + + // Add a negative offset (i.e., subtract) + result = t1 + -1.75; + expected = bulkio::time::utils::create(98.0, 0.75); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Original value modified", reference, t1); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Add negative offset", expected, result); + + // Increment by positive offset + t1 += 2.25; + expected = bulkio::time::utils::create(102.0, 0.75); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Increment by positive offset", expected, t1); + + // Increment by negative offset (i.e., decrement) + t1 += -3.875; + expected = bulkio::time::utils::create(98.0, 0.875); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Increment by negative offset", expected, t1); + + // Reset to reference time and subtract a positive offset + t1 = reference; + result = t1 - 1.25; + expected = bulkio::time::utils::create(99.0, 0.25); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Original value modified", reference, t1); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Subtract positive offset", expected, result); + + // Subtract a negative offset (i.e., add) + result = t1 - -4.875; + expected = bulkio::time::utils::create(105.0, 0.375); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Original value modified", reference, t1); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Subtract negative offset", expected, result); + + // Decrement by positive offset + t1 -= 2.75; + expected = bulkio::time::utils::create(97.0, 0.75); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Decrement by positive offset", expected, t1); + + // Decrement by negative offset (i.e., increment) + t1 -= -3.375; 
+ expected = bulkio::time::utils::create(101.0, 0.125); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Decrement by negative offset", expected, t1); + + // Difference, both positive and negative (exact binary fractions used to + // allow exact comparison) + t1 = reference + 8.875; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Positive time difference", t1 - reference, 8.875); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Negative time difference", reference - t1, -8.875); +} + +void PrecisionUTCTimeTest::testString() +{ + // Test the default epoch (Unix time) + BULKIO::PrecisionUTCTime time = bulkio::time::utils::create(0.0, 0.0); + std::ostringstream oss; + oss << time; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Epoch", std::string("1970:01:01::00:00:00.000000"), oss.str()); + + // Use a recent time with rounding at the microsecond level + oss.str(""); + oss << bulkio::time::utils::create(1451933967.0, 0.2893569); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Reference", std::string("2016:01:04::18:59:27.289357"), oss.str()); +} diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/PrecisionUTCTimeTest.h b/bulkioInterfaces/libsrc/testing/tests/cpp/PrecisionUTCTimeTest.h new file mode 100644 index 000000000..8c315b053 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/PrecisionUTCTimeTest.h @@ -0,0 +1,45 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef BULKIO_PRECISIONUTCTIMETEST_H +#define BULKIO_PRECISIONUTCTIMETEST_H + +#include + +class PrecisionUTCTimeTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(PrecisionUTCTimeTest); + CPPUNIT_TEST(testNow); + CPPUNIT_TEST(testCreate); + CPPUNIT_TEST(testCompare); + CPPUNIT_TEST(testNormalize); + CPPUNIT_TEST(testOperators); + CPPUNIT_TEST(testString); + CPPUNIT_TEST_SUITE_END(); + +public: + void testNow(); + void testCreate(); + void testCompare(); + void testNormalize(); + void testOperators(); + void testString(); +}; + +#endif // BULKIO_PRECISIONUTCTIMETEST_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/SDDSPortTest.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/SDDSPortTest.cpp new file mode 100644 index 000000000..3d363015d --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/SDDSPortTest.cpp @@ -0,0 +1,237 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "InPortTestFixture.h" +#include + +class InSDDSPortTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(InSDDSPortTest); + CPPUNIT_TEST(testBasicAPI); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp() + { + port = new bulkio::InSDDSPort("dataSDDS_in"); + } + + void tearDown() + { + delete port; + } + + void testBasicAPI() + { + BULKIO::PortStatistics *stats = port->statistics(); + CPPUNIT_ASSERT( stats != NULL ); + delete stats; + + BULKIO::PortUsageType rt = port->state(); + CPPUNIT_ASSERT( rt == BULKIO::IDLE ); + + BULKIO::StreamSRISequence *streams = port->activeSRIs(); + CPPUNIT_ASSERT( streams != NULL ); + delete streams; + + BULKIO::StreamSRI sri; + BULKIO::PrecisionUTCTime TS; + port->pushSRI( sri, TS ); + + streams = port->activeSRIs(); + CPPUNIT_ASSERT( streams != NULL ); + CPPUNIT_ASSERT( streams->length() == 1 ); + delete streams; + + + BULKIO::SDDSStreamDefinition sdef; + sdef.id = "test_sdds_id"; + sdef.dataFormat = BULKIO::SDDS_SB; + sdef.multicastAddress = "1.1.1.1"; + sdef.vlan = 1234; + sdef.port = 5678; + CORBA::String_var aid = port->attach( sdef, "test_sdds_port_api" ); + CPPUNIT_ASSERT( aid != NULL ); + + BULKIO::SDDSStreamSequence *sss = port->attachedStreams(); + CPPUNIT_ASSERT( sss != NULL ); + CPPUNIT_ASSERT( sss->length() == 1 ); + std::string paddr; + paddr = (*sss)[0].multicastAddress; + //std::cout << "port address " << paddr << std::endl; + + CPPUNIT_ASSERT( strcmp( paddr.c_str(), "1.1.1.1") == 0 ); + delete sss; + + CORBA::String_var uid = port->getUser(aid); + CPPUNIT_ASSERT( uid != NULL ); + //std::cout << "user id " << uid << std::endl; + CPPUNIT_ASSERT( strcmp( uid, "test_sdds_port_api" ) == 0 ); + + + port->detach( aid ); + + sss = port->attachedStreams(); + CPPUNIT_ASSERT( sss != NULL ); + CPPUNIT_ASSERT( sss->length() == 0 ); + delete sss; + + port->enableStats( false ); + } + +private: + bulkio::InSDDSPort* port; +}; + +CPPUNIT_TEST_SUITE_REGISTRATION(InSDDSPortTest); + +class NewSriCallback { + 
+public: + + std::vector sids; + + ~NewSriCallback() {}; + + void newSriCB( const BULKIO::StreamSRI& sri) { + std::string sid(sri.streamID); + sids.push_back( sid ); + } +}; + +// Global connection/disconnection callbacks +static void port_connected( const char* connectionId ) { + +} + +static void port_disconnected( const char* connectionId ) { + +} + +class OutSDDSPortTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(OutSDDSPortTest); + CPPUNIT_TEST(testBasicAPI); + CPPUNIT_TEST(testSRI); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp() + { + logger = rh_logger::Logger::getLogger("BulkioOutPort"); + logger->setLevel( rh_logger::Level::getInfo()); + port = new bulkio::OutSDDSPort("dataSDDS_out", logger); + } + + void tearDown() + { + delete port; + } + + void testBasicAPI() + { + ExtendedCF::UsesConnectionSequence *clist = port->connections(); + CPPUNIT_ASSERT( clist != NULL ); + delete clist; + + port->setNewConnectListener(&port_connected); + port->setNewDisconnectListener(&port_disconnected); + + bulkio::InSDDSPort *p = new bulkio::InSDDSPort("sink_1", logger ); + PortableServer::ObjectId_var p_oid = ossie::corba::RootPOA()->activate_object(p); + port->connectPort( p->_this(), "connection_1"); + + port->disconnectPort( "connection_1"); + port->disconnectPort( "connection_1"); + ossie::corba::RootPOA()->deactivate_object(p_oid); + + BULKIO::StreamSRI sri; + BULKIO::PrecisionUTCTime TS; + port->pushSRI( sri, TS ); + + BULKIO::UsesPortStatisticsSequence *stats = port->statistics(); + CPPUNIT_ASSERT( stats != NULL ); + delete stats; + + BULKIO::PortUsageType rt = port->state(); + CPPUNIT_ASSERT( rt == BULKIO::IDLE ); + + port->enableStats( false ); + + // create a connection + port->connectPort( p->_this(), "connection_1"); + port->enableStats( true ); + port->setBitSize(10); + port->updateStats( 12, 1, false, "stream1"); + + stats = port->statistics(); + CPPUNIT_ASSERT( stats != NULL ); + int slen = stats->length(); + //std::cout << " slen :" << slen 
<< std::endl; + CPPUNIT_ASSERT( slen == 1 ) ; + CPPUNIT_ASSERT( strcmp((*stats)[0].connectionId, "connection_1") == 0 ); + delete stats; + + port->setLogger(logger); + } + + void testSRI() + { + ExtendedCF::UsesConnectionSequence *clist = port->connections(); + CPPUNIT_ASSERT( clist != NULL ); + delete clist; + + NewSriCallback sri_cb; + bulkio::InSDDSPort *p = new bulkio::InSDDSPort("sink_1", logger ); + PortableServer::ObjectId_var p_oid = ossie::corba::RootPOA()->activate_object(p); + p->setNewSriListener(&sri_cb, &NewSriCallback::newSriCB ); + + BULKIO::StreamSRI sri; + BULKIO::SDDSStreamDefinition sdds; + sri.streamID = "stream1"; + sri.xdelta = 1/1000.0; + sdds.id = "stream1"; + sdds.dataFormat = BULKIO::SDDS_SB; + sdds.multicastAddress = "bad.ip.address"; + sdds.port = 9999; + sdds.vlan = 0; + port->addStream(sdds); + port->pushSRI(sri, bulkio::time::utils::now()); + + sri.streamID = "stream2"; + sdds.id = "stream2"; + port->addStream(sdds); + port->pushSRI(sri, bulkio::time::utils::now()); + + port->connectPort( p->_this(), "connection_1"); + + int slen = sri_cb.sids.size(); + CPPUNIT_ASSERT( slen == 2 ) ; + + port->disconnectPort( "connection_1"); + ossie::corba::RootPOA()->deactivate_object(p_oid); + } + +private: + bulkio::OutSDDSPort* port; + rh_logger::LoggerPtr logger; +}; + +CPPUNIT_TEST_SUITE_REGISTRATION(OutSDDSPortTest); diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/StreamSRITest.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/StreamSRITest.cpp new file mode 100644 index 000000000..7585c682a --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/StreamSRITest.cpp @@ -0,0 +1,42 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "StreamSRITest.h" + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(StreamSRITest); + +void StreamSRITest::testCreate() +{ + BULKIO::StreamSRI sri = bulkio::sri::create(); +} + +void StreamSRITest::testCompare() +{ + BULKIO::StreamSRI A = bulkio::sri::create(); + BULKIO::StreamSRI B = bulkio::sri::create(); + BULKIO::StreamSRI C = bulkio::sri::create(); + + C.streamID = std::string("No Match").c_str(); + + CPPUNIT_ASSERT(bulkio::sri::DefaultComparator(A, B)); + CPPUNIT_ASSERT(!bulkio::sri::DefaultComparator(A, C)); +} diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/StreamSRITest.h b/bulkioInterfaces/libsrc/testing/tests/cpp/StreamSRITest.h new file mode 100644 index 000000000..ec6989e72 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/StreamSRITest.h @@ -0,0 +1,37 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef BULKIO_STREAMSRITEST_H +#define BULKIO_STREAMSRITEST_H + +#include + +class StreamSRITest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(StreamSRITest); + CPPUNIT_TEST(testCreate); + CPPUNIT_TEST(testCompare); + CPPUNIT_TEST_SUITE_END(); + +public: + void testCreate(); + void testCompare(); +}; + +#endif // BULKIO_STREAMSRITEST_H diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/configure.ac b/bulkioInterfaces/libsrc/testing/tests/cpp/configure.ac deleted file mode 100644 index c5a8a761b..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/configure.ac +++ /dev/null @@ -1,46 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - - -dnl Process this file with autoconf to produce a configure script. -AC_INIT([Bulkio_Test], [0.1]) -AC_CONFIG_SRCDIR([Bulkio.cpp]) -AM_INIT_AUTOMAKE -AM_PATH_CPPUNIT(1.9.6) -AC_PROG_CXX -AC_PROG_CC -AC_PROG_INSTALL -AC_PREFIX_DEFAULT(${OSSIEHOME}) -AX_BOOST_BASE([1.41]) -AX_BOOST_THREAD -AX_BOOST_SYSTEM - - -OSSIE_CHECK_OSSIE -OSSIE_OSSIEHOME_AS_PREFIX -PKG_CHECK_MODULES([RH_DEPS], [ossie >= 1.7 omniORB4 >= 4.0.0]) - -# set PKG_CONFIG_PATH to look at local xxx.pc files - -export PKG_CONFIG_PATH="../../../..:../../..":$PKG_CONFIG_PATH -PKG_CHECK_MODULES([BIO], [ bulkio >= 1.0 bulkioInterfaces >= 1.8 ]) - -AC_CORBA_ORB -AC_OUTPUT(Makefile) diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/main.cpp b/bulkioInterfaces/libsrc/testing/tests/cpp/main.cpp new file mode 100644 index 000000000..ccb79b0b8 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/main.cpp @@ -0,0 +1,126 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// log4cxx includes need to follow CorbaUtils, otherwise "ossie/debug.h" will +// issue warnings about the logging macros +#include +#include + +int main(int argc, char* argv[]) +{ + const char* short_options = "v"; + struct option long_options[] = { + { "xunit-file", required_argument, 0, 'x' }, + { "log-level", required_argument, 0, 'l' }, + { "log-config", required_argument, 0, 'c' }, + { "verbose", no_argument, 0, 'v' }, + { 0, 0, 0, 0 } + }; + + bool verbose = false; + const char* xunit_file = 0; + const char* log_config = 0; + std::string log_level; + int status; + while ((status = getopt_long(argc, argv, short_options, long_options, NULL)) >= 0) { + switch (status) { + case '?': // Invalid option + return -1; + case 'x': + xunit_file = optarg; + break; + case 'l': + log_level = optarg; + break; + case 'c': + log_config = optarg; + break; + case 'v': + verbose = true; + break; + } + } + + // Many tests require CORBA, and possibly the REDHAWK ORB singleton, so + // initialize up front. + ossie::corba::CorbaInit(0,0); + + // If a log4j configuration file was given, read it. + if (log_config) { + log4cxx::PropertyConfigurator::configure(log_config); + } else { + // Set up a simple configuration that logs on the console. + log4cxx::BasicConfigurator::configure(); + } + + // Apply the log level (can override config file). + log4cxx::LevelPtr level = log4cxx::Level::toLevel(log_level, log4cxx::Level::getInfo()); + log4cxx::Logger::getRootLogger()->setLevel(level); + + // Create the test runner. + CppUnit::TextTestRunner runner; + + // Enable verbose output, displaying the name of each test as it runs. + if (verbose) { + runner.eventManager().addListener(new CppUnit::BriefTestProgressListener()); + } + + // Use a compiler outputter instead of the default text one. 
+ runner.setOutputter(new CppUnit::CompilerOutputter(&runner.result(), std::cerr)); + + // Get the top level suite from the registry. + CppUnit::Test* suite = CppUnit::TestFactoryRegistry::getRegistry().makeTest(); + runner.addTest(suite); + + // If an argument was given, assume it was the name of a test or suite. + std::string test_path; + if (optind < argc) { + test_path = argv[optind]; + } + + // Run the tests: don't pause, write output, don't print progress in + // verbose mode (which seems ironic, but the test progress listener will + // print each test name) + bool success = runner.run(test_path, false, true, !verbose); + + // Write XML file, if requested. + if (xunit_file) { + std::ofstream file(xunit_file); + CppUnit::XmlOutputter xml_outputter(&runner.result(), file); + xml_outputter.write(); + } + + // Return error code 1 if the one of test failed. + return success ? 0 : 1; +} diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/reconf b/bulkioInterfaces/libsrc/testing/tests/cpp/reconf deleted file mode 100755 index 50400ed26..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/reconf +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. 
If not, see http://www.gnu.org/licenses/. -# - -rm -f config.cache - -# Setup the libtool stuff -if [ -e /usr/local/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/local/share/aclocal/libtool.m4 aclocal.d/acinclude.m4 -elif [ -e /usr/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/share/aclocal/libtool.m4 acinclude.m4 -fi -libtoolize --force --automake - -# Search in expected locations for the OSSIE acincludes -# 1. Included with CF source -# 2. Using installed CF -if [ -d ../../common/acinclude ]; then - OSSIE_AC_INCLUDE=../../common/acinclude -elif [ -n ${OSSIEHOME} ] && [ -d ${OSSIEHOME}/share/aclocal/ossie ]; then - OSSIE_AC_INCLUDE=${OSSIEHOME}/share/aclocal/ossie -else - echo "Error: Cannot find the OSSIE aclocal files. This is not expected!" -fi - -if [ -n ${OSSIE_AC_INCLUDE} ]; then - aclocal -I ${OSSIE_AC_INCLUDE} -else - aclocal -fi - -autoconf -automake --foreign --add-missing - -# Due to strange autotools bootstrap issues, -# if ltmain.sh doesn't exists we have to run both again -if [ ! -f ltmain.sh ]; then - libtoolize --force --automake - automake --foreign --add-missing -fi diff --git a/bulkioInterfaces/libsrc/testing/tests/cpp/runtests b/bulkioInterfaces/libsrc/testing/tests/cpp/runtests index b418ece15..077d36121 100755 --- a/bulkioInterfaces/libsrc/testing/tests/cpp/runtests +++ b/bulkioInterfaces/libsrc/testing/tests/cpp/runtests @@ -1,7 +1,5 @@ # # -bulkio_top=../../../.. 
-bulkio_libsrc_top=$bulkio_top/libsrc -export LD_LIBRARY_PATH=$bulkio_libsrc_top/.libs:$bulkio_top/.libs:${LD_LIBRARY_PATH} -make check -j 10 +make -j 10 Bulkio +./Bulkio --xunit-file ../cppunit-results.xml diff --git a/bulkioInterfaces/libsrc/testing/tests/java/.gitignore b/bulkioInterfaces/libsrc/testing/tests/java/.gitignore index 96966d7c4..d2cb33dd0 100644 --- a/bulkioInterfaces/libsrc/testing/tests/java/.gitignore +++ b/bulkioInterfaces/libsrc/testing/tests/java/.gitignore @@ -1,2 +1,35 @@ *.class -!Makefile +Bulkio +In*PortTest.java +OutCharPortTest.java +OutOctetPortTest.java +OutShortPortTest.java +OutUShortPortTest.java +OutLongPortTest.java +OutULongPortTest.java +OutLongLongPortTest.java +OutULongLongPortTest.java +OutFloatPortTest.java +OutDoublePortTest.java +helpers/CharTestHelper.java +helpers/OctetTestHelper.java +helpers/ShortTestHelper.java +helpers/UShortTestHelper.java +helpers/LongTestHelper.java +helpers/ULongTestHelper.java +helpers/LongLongTestHelper.java +helpers/ULongLongTestHelper.java +helpers/FloatTestHelper.java +helpers/DoubleTestHelper.java +stubs/InCharPortStub.java +stubs/InOctetPortStub.java +stubs/InShortPortStub.java +stubs/InUShortPortStub.java +stubs/InLongPortStub.java +stubs/InULongPortStub.java +stubs/InLongLongPortStub.java +stubs/InULongLongPortStub.java +stubs/InFloatPortStub.java +stubs/InDoublePortStub.java +stubs/InBitPortStub.java +stubs/InFilePortStub.java diff --git a/bulkioInterfaces/libsrc/testing/tests/java/AllTests.java b/bulkioInterfaces/libsrc/testing/tests/java/AllTests.java new file mode 100644 index 000000000..6a70d787b --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/AllTests.java @@ -0,0 +1,59 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.junit.runners.Suite.SuiteClasses; + +@RunWith(Suite.class) +@SuiteClasses({ + InCharPortTest.class, + OutCharPortTest.class, + InOctetPortTest.class, + OutOctetPortTest.class, + InShortPortTest.class, + OutShortPortTest.class, + InUShortPortTest.class, + OutUShortPortTest.class, + InLongPortTest.class, + OutLongPortTest.class, + InULongPortTest.class, + OutULongPortTest.class, + InLongLongPortTest.class, + OutLongLongPortTest.class, + InULongLongPortTest.class, + OutULongLongPortTest.class, + InFloatPortTest.class, + OutFloatPortTest.class, + InDoublePortTest.class, + OutDoublePortTest.class, + InBitPortTest.class, + OutBitPortTest.class, + InFilePortTest.class, + OutFilePortTest.class, + InXMLPortTest.class, + OutXMLPortTest.class, + PrecisionUTCTimeTest.class, + StreamSRITest.class, + InSDDSPort_Test.class, + OutSDDSPort_Test.class +}) +public class AllTests { +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/BulkioHelpers_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/BulkioHelpers_Test.java deleted file mode 100644 index 14b3c6a72..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/BulkioHelpers_Test.java +++ /dev/null @@ -1,440 +0,0 @@ -/* - * This file is 
protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -import static org.junit.Assert.*; - -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import org.ossie.properties.AnyUtils; -import CF.DataType; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; - -/** - * Tests for {@link Foo}. 
- * - * @author - */ -@RunWith(JUnit4.class) -public class BulkioHelpers_Test { - - class test_fact { - - String name = "InInt8"; - - String port_name = new String("test-inport-api"); - - String sid = new String("test-inport-streamid"); - - short mode = 1; - - double srate=22.0; - - test_fact( String tname ){ - name=tname; - }; - }; - - class test_stream_cb implements bulkio.SriListener { - - test_fact ctx=null; - - test_stream_cb ( test_fact inCtx ) { - ctx=inCtx; - } - - public void newSRI( StreamSRI sri ) { - assertTrue("newSRI SRI Object Invalid", null != sri ); - assertTrue("newSRI StreamID Mismatch", ctx.sid == sri.streamID ); - } - - public boolean changedSRI( StreamSRI sri ) { - assertTrue("changedSRI SRI Object Invalid", null != sri ); - assertTrue("changedSRI Mode Mismatch", ctx.mode == sri.mode ); - return true; - } - } - - - Logger logger = Logger.getRootLogger(); - - test_fact ctx = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - - } - - @After - public void tearDown() { - - } - - @Test - public void test_sri_create( ) { - - logger.info("------ Testing bulkio.sri.utils.create -----"); - - // - // - // - BULKIO.StreamSRI sri = bulkio.sri.utils.create(); - assertEquals("Stream ID mismatch.", "defaultSRI", sri.streamID); - assertEquals("Version mismatch.", 1, sri.hversion ); - assertEquals("XUnits mismatch.", 1, sri.xunits ); - assertEquals("XStart mismatch.", sri.xstart,0.00,3 ); - assertEquals("XDelta mismatch.", sri.xdelta,1.00,3); - assertEquals("YUnits mismatch.", sri.yunits,0); - assertEquals("YStart mismatch.", sri.ystart,0.00,3); - assertEquals( "YDelta mismatch.", sri.ydelta,0.00,3); - assertEquals("Subsize mismatch.", sri.subsize,0); - assertFalse("Blocking mismatch.", sri.blocking); - CF.DataType []empty= new CF.DataType[0]; - assertEquals("Keywords mismatch.", sri.keywords,empty); - sri = bulkio.sri.utils.create( "NEW-STREAM-ID", 22.0, (short)22, false); - assertEquals("Stream ID mismatch.", sri.streamID,"NEW-STREAM-ID"); - assertEquals("SRATE mismatch.", sri.xdelta,1/22.00, 3); - assertEquals("XUNITS mismatch.", sri.xunits,22); - assertFalse("BLOCKING mismatch.", sri.blocking ); - } - - - @Test - public void test_sri_compare( ) { - - logger.info("------ Testing bulkio.sri.DefaultComparator -----");; - - BULKIO.StreamSRI a_sri = bulkio.sri.utils.create(); - BULKIO.StreamSRI b_sri = bulkio.sri.utils.create(); - BULKIO.StreamSRI c_sri = bulkio.sri.utils.create(); - c_sri.streamID = "THIS_DOES_NOT_MATCH"; - - bulkio.sri.Comparator bio_cmp = new bulkio.sri.DefaultComparator();; - - assertTrue("bio_cmp method - same.", bio_cmp.compare( a_sri, b_sri ) ); - assertFalse(" bio_cmp method - different - StreamID .", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.hversion = 2; - assertFalse( " bio_cmp method - different - 
hversion ", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.xstart = 3; - assertFalse( " bio_cmp method - different - xstart ", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.xdelta = 100.0; - assertFalse( " bio_cmp method - different - xdelta ", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.xunits = 100; - assertFalse( " bio_cmp method - different - xunits ", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.subsize = 100; - assertFalse( " bio_cmp method - different - subsize ", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.ystart = 3; - assertFalse( " bio_cmp method - different - ystart ", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.ydelta = 100.0; - assertFalse( " bio_cmp method - different - ydelta ", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.yunits = 100; - assertFalse( " bio_cmp method - different - yunits ", bio_cmp.compare( a_sri, c_sri ) ); - - c_sri = bulkio.sri.utils.create(); - c_sri.mode = 100; - assertFalse( " bio_cmp method - different - mode ", bio_cmp.compare( a_sri, c_sri ) ); - - CF.DataType kv = new CF.DataType( "key_one", AnyUtils.stringToAny("1", "long") ); - CF.DataType kv2 = new CF.DataType( "key_one", AnyUtils.stringToAny("1", "long") ); - a_sri.keywords = new CF.DataType[1]; - a_sri.keywords[0] = kv; - c_sri = bulkio.sri.utils.create(); - c_sri.keywords = new CF.DataType[1]; - c_sri.keywords[0] = kv2; - assertTrue( " bio_cmp method - same - keyword item ", bio_cmp.compare( a_sri, c_sri ) ); - - kv2 = new CF.DataType( "key_one", AnyUtils.stringToAny("100", "long") ); - c_sri = bulkio.sri.utils.create(); - c_sri.keywords = new CF.DataType[1]; - c_sri.keywords[0] = kv2; - assertFalse( " bio_cmp method - different - keywords value mismatch ", - bio_cmp.compare( a_sri, c_sri ) ); - - kv2 = new 
CF.DataType( "key_two", AnyUtils.stringToAny("100", "long") ); - c_sri = bulkio.sri.utils.create(); - c_sri.keywords = new CF.DataType[1]; - c_sri.keywords[0] = kv2; - assertFalse(" bio_cmp method - different - keywords name mismatch ", - bio_cmp.compare( a_sri, c_sri ) ); - - } - - - @Test - public void test_timestamp_create( ) { - - logger.info("------ Testing bulkio.time.utils.create -----"); - - BULKIO.PrecisionUTCTime ts = bulkio.time.utils.now(); - assertEquals( " tcmode mismatch.", ts.tcmode,BULKIO.TCM_CPU.value ); - assertEquals( " tcstatus mismatch.", ts.tcstatus,BULKIO.TCS_VALID.value ); - assertEquals(" tcoff mismatch.", ts.toff,0.00,3 ); - - ts = bulkio.time.utils.create( 100.0, 0.125 ); - assertEquals( " tcmode mismatch.", ts.tcmode,BULKIO.TCM_CPU.value ); - assertEquals( " tcstatus mismatch.", ts.tcstatus,BULKIO.TCS_VALID.value ); - assertEquals( " tcwsec mismatch.", ts.twsec,100.0, 3 ); - assertEquals( " tcwsec mismatch.", ts.tfsec,0.125, 3 ); - - ts = bulkio.time.utils.create( 100.0, 0.125, BULKIO.TCM_SDDS.value ); - assertEquals( " tcmode mismatch.", ts.tcmode,BULKIO.TCM_SDDS.value ); - assertEquals( " tcstatus mismatch.", ts.tcstatus,BULKIO.TCS_VALID.value ); - assertEquals( " tcwsec mismatch.", ts.twsec,100.0, 3 ); - assertEquals( " tcwsec mismatch.", ts.tfsec,0.125, 3 ); - - - } - - private void failTime(String message, BULKIO.PrecisionUTCTime expected, BULKIO.PrecisionUTCTime actual) { - fail(String.format("%1$s expected:<%2$s> but was:<%3$s>", message, bulkio.time.utils.toString(expected), - bulkio.time.utils.toString(actual))); - } - - private void assertTimeEquals(String message, BULKIO.PrecisionUTCTime expected, BULKIO.PrecisionUTCTime actual) { - if (bulkio.time.utils.compare(expected, actual) != 0) { - failTime(message, expected, actual); - } - } - - private void assertTimeNotEquals(String message, BULKIO.PrecisionUTCTime expected, BULKIO.PrecisionUTCTime actual) { - if (bulkio.time.utils.compare(expected, actual) == 0) { - 
failTime(message, expected, actual); - } - } - - @Test - public void test_timestamp_normalize() { - // NOTE: All tests use fractional portions that are exact binary fractions to - // avoid potential roundoff issues - - // Already normalized, no change - BULKIO.PrecisionUTCTime time = new BULKIO.PrecisionUTCTime(); - time.twsec = 100.0; - time.tfsec = 0.5; - bulkio.time.utils.normalize(time); - assertTimeEquals("Already normalized time", bulkio.time.utils.create(100.0, 0.5), time); - - // Whole seconds has fractional portion, should be moved to fractional seconds - time.twsec = 100.25; - time.tfsec = 0.25; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing whole", bulkio.time.utils.create(100.0, 0.5), time); - - // Whole seconds has fractional portion, should be moved to fractional seconds - // leading to carry - time.twsec = 100.75; - time.tfsec = 0.75; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing whole with carry", bulkio.time.utils.create(101.0, 0.5), time); - - // Fractional seconds contains whole portion, should be moved to whole seconds - time.twsec = 100.0; - time.tfsec = 2.5; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing fractional", bulkio.time.utils.create(102.0, 0.5), time); - - // Both parts require normalization; fractional portion of whole seconds adds an - // additional carry - time.twsec = 100.75; - time.tfsec = 2.75; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing both", bulkio.time.utils.create(103.0, 0.5), time); - - // Negative fractional value should borrow - time.twsec = 100.0; - time.tfsec = -0.25; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing negative fractional", bulkio.time.utils.create(99.0, 0.75), time); - - // Negative fractional value with magnitude greater than one - time.twsec = 100.0; - time.tfsec = -3.125; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing negative fractional > 1", 
bulkio.time.utils.create(96.0, 0.875), time); - - // Fractional portion of whole seconds greater than negative fractional seconds - time.twsec = 100.5; - time.tfsec = -.125; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing both with negative fractional", bulkio.time.utils.create(100.0, 0.375), time); - - // Negative fractional seconds greater than fractional portion of whole seconds - time.twsec = 100.125; - time.tfsec = -.5; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing both with borrow", bulkio.time.utils.create(99.0, 0.625), time); - - // Negative fractional seconds have whole portion, but seconds whole seconds have - // fractional portion with larger magnitude than remaining fractional seconds - time.twsec = 100.75; - time.tfsec = -2.5; - bulkio.time.utils.normalize(time); - assertTimeEquals("Normalizing both with negative fractional > 1", bulkio.time.utils.create(98.0, 0.25), time); - } - - @Test - public void test_timestamp_compare() { - BULKIO.PrecisionUTCTime t1 = bulkio.time.utils.create(100.0, 0.5); - BULKIO.PrecisionUTCTime t2 = bulkio.time.utils.create(100.0, 0.5); - assertTrue("Identical values did not compare as equal", bulkio.time.utils.compare(t1, t2) == 0); - - // Only fractional seconds differ - t1 = bulkio.time.utils.create(100.0, 0.5); - t2 = bulkio.time.utils.create(100.0, 0.25); - assertTrue("Time with larger fractional did not compare greater", bulkio.time.utils.compare(t1, t2) > 0); - assertTrue("Time with smaller fractional did not compare lesser", bulkio.time.utils.compare(t2, t1) < 0); - - // Only whole seconds differ - t1 = bulkio.time.utils.create(100.0, 0.75); - t2 = bulkio.time.utils.create(101.0, 0.75); - assertTrue("Time with smaller whole did not compare lesser", bulkio.time.utils.compare(t1, t2) < 0); - assertTrue("Time with larger whole did not compare greater", bulkio.time.utils.compare(t2, t1) > 0); - - // Whole seconds differ, but fractional seconds have the opposite ordering (which 
has no effect) - t1 = bulkio.time.utils.create(100.0, 0.75); - t2 = bulkio.time.utils.create(5000.0, 0.25); - assertTrue("Time with smaller whole and larger fractional did not compare lesser", bulkio.time.utils.compare(t1, t2) < 0); - assertTrue("Time with larger whole and smaller fractional did not compare greater", bulkio.time.utils.compare(t2, t1) > 0); - } - - @Test - public void test_timestamp_operators() { - // Test that copy works as expected - final BULKIO.PrecisionUTCTime reference = bulkio.time.utils.create(100.0, 0.5); - BULKIO.PrecisionUTCTime t1 = bulkio.time.utils.copy(reference); - assertNotSame("Copy returned same object", reference, t1); - assertTimeEquals("Copy returned different values", reference, t1); - - // Add a positive offset - BULKIO.PrecisionUTCTime result = bulkio.time.utils.add(t1, 1.75); - BULKIO.PrecisionUTCTime expected = bulkio.time.utils.create(102.0, 0.25); - assertNotSame("Add returned same object", t1, result); - assertTimeEquals("Original value modified", reference, t1); - assertTimeEquals("Add positive offset", expected, result); - - // Add a negative offset (i.e., subtract) - result = bulkio.time.utils.add(t1, -1.75); - expected = bulkio.time.utils.create(98.0, 0.75); - assertTimeEquals("Original value modified", reference, t1); - assertTimeEquals("Add negative offset", expected, result); - - // Increment by positive offset - result = bulkio.time.utils.increment(t1, 2.25); - expected = bulkio.time.utils.create(102.0, 0.75); - assertSame("Increment returned different object", t1, result); - assertTimeEquals("Increment by positive offset", expected, t1); - - // Increment by negative offset (i.e., decrement) - bulkio.time.utils.increment(t1, -3.875); - expected = bulkio.time.utils.create(98.0, 0.875); - assertTimeEquals("Increment by negative offset", expected, t1); - - // Reset to reference time and subtract a positive offset - t1 = bulkio.time.utils.copy(reference); - result = bulkio.time.utils.subtract(t1, 1.25); - expected = 
bulkio.time.utils.create(99.0, 0.25); - assertNotSame("Subtract returned same object", t1, result); - assertTimeEquals("Original value modified", reference, t1); - assertTimeEquals("Subtract positive offset", expected, result); - - // Subtract a negative offset (i.e., add) - result = bulkio.time.utils.subtract(t1, -4.875); - expected = bulkio.time.utils.create(105.0, 0.375); - assertTimeEquals("Original value modified", reference, t1); - assertTimeEquals("Subtract negative offset", expected, result); - - // Decrement by positive offset - result = bulkio.time.utils.decrement(t1, 2.75); - expected = bulkio.time.utils.create(97.0, 0.75); - assertSame("Increment returned different object", t1, result); - assertTimeEquals("Decrement by positive offset", expected, t1); - - // Decrement by negative offset (i.e., increment) - bulkio.time.utils.decrement(t1, -3.375); - expected = bulkio.time.utils.create(101.0, 0.125); - assertTimeEquals("Decrement by negative offset", expected, t1); - - // Difference, both positive and negative (exact binary fractions used to allow - // exact comparison) - t1 = bulkio.time.utils.add(reference, 8.875); - assertEquals("Positive time difference", bulkio.time.utils.difference(t1, reference), 8.875, 0.0); - assertEquals("Negative time difference", bulkio.time.utils.difference(reference, t1), -8.875, 0.0); - } - - @Test - public void test_timestamp_toString() { - // Test the default epoch (Unix time) - BULKIO.PrecisionUTCTime time = bulkio.time.utils.create(0.0, 0.0); - assertEquals("Epoch", "1970:01:01::00:00:00.000000", bulkio.time.utils.toString(time)); - - // Use a recent time with rounding at the microsecond level - time = bulkio.time.utils.create(1451933967.0, 0.2893569); - assertEquals("String representation", "2016:01:04::18:59:27.289357", bulkio.time.utils.toString(time)); - } -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/InPortTest.java.template b/bulkioInterfaces/libsrc/testing/tests/java/InPortTest.java.template new file 
mode 100644 index 000000000..b7b8cacfd --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/InPortTest.java.template @@ -0,0 +1,38 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from InPortTest.java.template. + * Do not modify directly. 
+ */ + +import org.junit.*; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +import helpers.@name@TestHelper; + +@RunWith(JUnit4.class) +public class In@name@PortTest extends impl.InPortTestImpl +{ + public In@name@PortTest() + { + super(new @name@TestHelper()); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/InSDDSPort_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/InSDDSPort_Test.java index 6deff8b74..8742fb7e6 100644 --- a/bulkioInterfaces/libsrc/testing/tests/java/InSDDSPort_Test.java +++ b/bulkioInterfaces/libsrc/testing/tests/java/InSDDSPort_Test.java @@ -20,34 +20,13 @@ import static org.junit.Assert.*; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; import org.junit.Test; -import org.junit.Ignore; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; + import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; + import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO. SDDSDataDigraph; -import BULKIO.SDDSStreamDefinition; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; /** * Tests for {@link Foo}. @@ -129,35 +108,12 @@ public void detach(String attachId) { test_fact ctx = null; - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - - } - - @After - public void tearDown() { - - } - @Test - public void test_InSDDS( ) { + public void test_InSDDS( ) { ctx=new test_fact("InSDDS"); - logger.info("------ Testing " + ctx.name + " Port -----"); - bulkio.InSDDSPort port = new bulkio.InSDDSPort(ctx.port_name ); port.setSriListener( new test_stream_cb( ctx ) ); diff --git a/bulkioInterfaces/libsrc/testing/tests/java/InStringPort_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/InStringPort_Test.java deleted file mode 100644 index 110f649cd..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/InStringPort_Test.java +++ /dev/null @@ -1,265 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class InStringPort_Test { - - class test_fact { - - String name = "InInt8"; - - String port_name = new String("test-inport-api"); - - String sid = new String("test-inport-streamid"); - - short mode = 1; - - double srate=22.0; - - test_fact( String tname ){ - name=tname; - }; - }; - - class test_stream_cb implements bulkio.SriListener { - - test_fact ctx=null; - - test_stream_cb ( test_fact inCtx ) { - ctx=inCtx; - } - - public void newSRI( StreamSRI sri ) { - assertTrue("newSRI SRI Object Invalid", null != sri ); - assertTrue("newSRI StreamID Mismatch", ctx.sid == sri.streamID ); - } - - public boolean changedSRI( StreamSRI sri ) { - assertTrue("changedSRI SRI Object Invalid", null != sri ); - assertTrue("changedSRI Mode Mismatch", ctx.mode == sri.mode ); - return true; - } - } - - - Logger logger = Logger.getRootLogger(); - - test_fact ctx = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that 
logs on the console. - BasicConfigurator.configure(); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - - } - - @After - public void tearDown() { - - } - - @Test - public void test_InFile( ) { - - - ctx=new test_fact("InFile"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InFilePort port = new bulkio.InFilePort(ctx.port_name ); - - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InFilePort.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - String v = new String(); - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // test for EOS.. 
- port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - - } - - - @Test - public void test_InXML( ) { - - - ctx=new test_fact("InXML"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InXMLPort port = new bulkio.InXMLPort(ctx.port_name ); - - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InXMLPort.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - String v = new String(); - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, false, 
ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // test for EOS.. - port.pushPacket( v, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - - } - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/InVectorPort_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/InVectorPort_Test.java deleted file mode 100644 index 6d5a3364c..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/InVectorPort_Test.java +++ /dev/null @@ -1,1384 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. 
- * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -import static org.junit.Assert.*; - -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; - -/** - * Tests for {@link Foo}. 
- * - * @author - */ -@RunWith(JUnit4.class) -public class InVectorPort_Test { - - class test_fact { - - String name = "InInt8"; - - String port_name = new String("test-inport-api"); - - String sid = new String("test-inport-streamid"); - - short mode = 1; - - double srate=22.0; - - int new_calls = 0; - int changed_calls = 0; - - test_fact( String tname ){ - name=tname; - }; - }; - - class test_stream_cb implements bulkio.SriListener { - - test_fact ctx=null; - - test_stream_cb ( test_fact inCtx ) { - ctx=inCtx; - } - - public void newSRI( StreamSRI sri ) { - assertTrue("newSRI SRI Object Invalid", null != sri ); - assertTrue("newSRI StreamID Mismatch", ctx.sid == sri.streamID ); - ctx.new_calls++; - } - - public boolean changedSRI( StreamSRI sri ) { - assertTrue("changedSRI SRI Object Invalid", null != sri ); - assertTrue("changedSRI Mode Mismatch", ctx.mode == sri.mode ); - ctx.changed_calls++; - return true; - } - } - - - Logger logger = Logger.getRootLogger(); - - - test_fact ctx = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - - } - - @After - public void tearDown() { - - } - - @Test - public void test_InInt8( ) { - - ctx=new test_fact("InInt8"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InInt8Port port = new bulkio.InInt8Port(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InInt8Port.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - char [] v = new char[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a 
Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. - port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without 
SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - - @Test - public void test_InInt16( ) { - - ctx=new test_fact("InInt16"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InInt16Port port = new bulkio.InInt16Port(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - 
new", tmp == 0 ); - - port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InInt16Port.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - short [] v = new short[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. 
- port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream 
with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - - @Test - public void test_InInt32( ) { - - ctx=new test_fact("InInt32"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InInt32Port port = new bulkio.InInt32Port(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InInt32Port.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - int [] v = new int[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - 
assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. - port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", 
new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - @Test - public void test_InInt64( ) { - - ctx=new test_fact("InInt64"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InInt64Port port = new bulkio.InInt64Port(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - 
port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InInt64Port.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - long [] v = new long[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. 
- port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream 
with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - - @Test - public void test_InUInt8( ) { - - ctx=new test_fact("InUInt8"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InUInt8Port port = new bulkio.InUInt8Port(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InUInt8Port.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - byte [] v = new byte[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - 
assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. - port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", 
new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - - @Test - public void test_InUInt16( ) { - - ctx=new test_fact("InUInt16"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InUInt16Port port = new bulkio.InUInt16Port(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - 
- port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InUInt16Port.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - short [] v = new short[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. 
- port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream 
with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - - @Test - public void test_InUInt32( ) { - - ctx=new test_fact("InUInt32"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InUInt32Port port = new bulkio.InUInt32Port(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InUInt32Port.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - int [] v = new int[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - 
assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. - port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", 
new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - @Test - public void test_InUInt64( ) { - - ctx=new test_fact("InUInt64"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InUInt64Port port = new bulkio.InUInt64Port(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - 
port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InUInt64Port.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - long [] v = new long[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. 
- port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream 
with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - - @Test - public void test_InDouble( ) { - - ctx=new test_fact("InDouble"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InDoublePort port = new bulkio.InDoublePort(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InDoublePort.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - double [] v = new double[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null 
); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. - port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", 
new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - - - @Test - public void test_InFloat( ) { - - ctx=new test_fact("InFloat"); - - logger.info("------ Testing " + ctx.name + " Port -----"); - - bulkio.InFloatPort port = new bulkio.InFloatPort(ctx.port_name); - port.setSriListener( new test_stream_cb( ctx ) ); - - // - // simple attribute tests - // - port.setLogger( logger ); - - // port statistics test - port.enableStats( false ); - BULKIO.PortStatistics stats = port.statistics(); - assertTrue("Port Statistics Failed", stats == null ); - - port.enableStats( true ); - stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - BULKIO.StreamSRI []streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence Failed", streams != null ); - - int tmp = port.getMaxQueueDepth(); - assertTrue("MaxQueueDepth - default", tmp == 100 ); - - tmp = port.getCurrentQueueDepth(); - assertTrue("CurrentQueueDepth - new", tmp == 0 ); - - 
port.setMaxQueueDepth(22); - tmp = port.getMaxQueueDepth(); - assertTrue("SetMaxQueueDepth - set value", tmp == 22 ); - - // check that port queue is empty - bulkio.InFloatPort.Packet pkt = port.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket - no data", pkt == null ); - - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - float [] v = new float[0]; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == false ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 0 ); - - // complex mode test - sri.mode = ctx.mode; - port.pushSRI(sri); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - Complex SRI", streams != null ); - assertTrue("Stream SRI Sequence - Complex, length", streams.length ==1 ); - - port.pushPacket( v, TS, false, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, CPLX pkt", pkt != null ); - assertTrue("Get a Packet, CPLX StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, CPLX EOS", pkt.EOS == false ); - assertTrue("Get a Packet, CPLX mode", pkt.SRI.mode == 1 ); - - // test for EOS.. 
- port.pushPacket( v, TS, true, ctx.sid ); - - // grab off packet - pkt = port.getPacket(bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, pkt", pkt != null ); - assertTrue("Get a Packet, StreamID", pkt.SRI.streamID== ctx.sid ); - assertTrue("Get a Packet, EOS", pkt.EOS == true ); - assertTrue("Get a Packet, mode", pkt.SRI.mode == 1 ); - - streams = port.activeSRIs(); - assertTrue("Stream SRI Sequence - SRI", streams != null ); - assertTrue("Stream SRI Sequence - length", streams.length !=1 ); - assertTrue("Stream SRI Sequence - length", streams.length ==0 ); - - // Push data without an SRI to check that the sriChanged flag is still - // set and the SRI callback gets called - ctx.sid = "invalid_stream"; - int new_calls = ctx.new_calls + 1; - int changed_calls = ctx.changed_calls; - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive packet without SRI", pkt != null); - assertEquals("Receive packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertTrue("Receive packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push again to the same stream ID; sriChanged should now be false and the - // SRI callback should not get called - port.pushPacket(v, TS, false, "invalid_stream"); - pkt = port.getPacket(bulkio.Const.BLOCKING); - assertTrue("Receive second packet without SRI", pkt != null); - assertEquals("Receive second packet without SRI, streamID", pkt.streamID, "invalid_stream"); - assertFalse("Receive second packet without SRI, sriChanged", pkt.sriChanged); - assertEquals("Receive second packet without SRI, new SRI callback", new_calls, ctx.new_calls); - assertEquals("Receive second packet without SRI, SRI change callback", changed_calls, ctx.changed_calls); - - // Push to an invalid stream 
with no logger, ensure that nothing fails - ctx.sid = "null_logger"; - port.setLogger(null); - port.pushPacket(v, TS, false, "null_logger"); - } - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/Main.java b/bulkioInterfaces/libsrc/testing/tests/java/Main.java new file mode 100644 index 000000000..404c7c55d --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/Main.java @@ -0,0 +1,150 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +import org.apache.log4j.BasicConfigurator; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.PropertyConfigurator; + +import org.junit.Test; +import org.junit.runner.JUnitCore; +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.Request; + +import utils.ChainFilter; +import utils.TestFilter; +import utils.TextListener; + +public class Main { + + public static Description getTestDescription(String target) throws ClassNotFoundException, NoSuchMethodException + { + // Try to see if it's a class with tests first + try { + return getClassDescription(target); + } catch (ClassNotFoundException exc) { + // The target might be "class.method" + } + + // Split package/class from method name + int pos = target.lastIndexOf('.'); + if (pos < 0) { + // No dots, must be an invalid class + throw new ClassNotFoundException(target); + } + String suite = target.substring(0, pos); + String name = target.substring(pos+1); + + // Class and method lookup may throw exceptions, but it's up to the + // caller to handle them + Class clazz = Class.forName(suite); + clazz.getMethod(name); + return Description.createTestDescription(clazz, name); + } + + public static Description getClassDescription(String target) throws ClassNotFoundException + { + Class clazz = Class.forName(target); + + // Create a suite description + Description desc = Description.createSuiteDescription(clazz); + for (Method method : clazz.getMethods()) { + // Find all methods that are annotated as tests + if (method.getAnnotation(Test.class) != null) { + desc.addChild(Description.createTestDescription(clazz, method.getName(), method.getAnnotations())); + } + } + + return desc; + } + + public static void main(String[] args) { + List tests = new ArrayList<>(); + + boolean verbose = false; + Level 
log_level = null; + String log_config = null; + + Iterator iter = Arrays.asList(args).iterator(); + while (iter.hasNext()) { + String arg = iter.next(); + if (arg.startsWith("-")) { + // Option argument + if (arg.equals("--log-level")) { + log_level = Level.toLevel(iter.next()); + } else if (arg.equals("--log-config")) { + log_config = iter.next(); + } else if (arg.equals("-v") || arg.equals("--verbose")) { + verbose = true; + } else { + System.err.println("Unrecognized option \"" + arg + "\""); + System.exit(1); + } + } else { + // First non-option argument, add remaining arguments to the + // list of tests + tests.add(arg); + while (iter.hasNext()) { + tests.add(iter.next()); + } + } + } + + if (log_config != null) { + PropertyConfigurator.configure(log_config); + } else { + BasicConfigurator.configure(); + if (log_level == null) { + log_level = Level.INFO; + } + } + + if (log_level != null) { + Logger.getRootLogger().setLevel(log_level); + } + + Request request = Request.aClass(AllTests.class); + if (!tests.isEmpty()) { + ChainFilter filter = new ChainFilter(); + for (String test : tests) { + try { + Description desc = getTestDescription(test); + filter.addFilter(new TestFilter(desc)); + } catch (ClassNotFoundException|NoSuchMethodException exc) { + System.err.println("ERROR: No test '" + test + "'"); + System.exit(1); + } + } + request = request.filterWith(filter); + } + + JUnitCore runner = new JUnitCore(); + runner.addListener(new TextListener(verbose)); + Result result = runner.run(request); + System.exit(result.wasSuccessful() ? 0 : 1); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/Makefile b/bulkioInterfaces/libsrc/testing/tests/java/Makefile deleted file mode 100644 index 597217fc8..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/Makefile +++ /dev/null @@ -1,38 +0,0 @@ - -bulkio_top_dir=../../../.. 
-bulkio_idl_dir=$(bulkio_top_dir) -bulkio_idl_java_dir=$(bulkio_top_dir) -bulkio_libsrc_dir=$(bulkio_top_dir)/libsrc -bulkio_libsrc_java_dir=$(bulkio_libsrc_dir)/java -OSSIE_HOME=$(shell echo $(OSSIEHOME)) -BULKIO_JARS=$(OSSIE_HOME)/lib/CFInterfaces.jar:$(OSSIE_HOME)/lib/log4j-1.2.15.jar:$(OSSIE_HOME)/lib/ossie.jar:$(bulkio_libsrc_dir)/bulkio.jar:$(bulkio_idl_java_dir)/BULKIOInterfaces.jar - -JAVA = $(JAVA_HOME)/bin/java -JAVAC = $(JAVA_HOME)/bin/javac -JAVA_CP=$(CLASSPATH):.:/usr/share/java/junit4.jar:$(BULKIO_JARS) - -.SUFFIXES: .java .class -.PHONEY: all check build-all clean tcheck - -IN_PORTS= InVectorPort_Test.class InStringPort_Test.class InSDDSPort_Test.class BulkioHelpers_Test.class -OUT_PORTS= OutVectorPort_Test.class OutStringPort_Test.class OutSDDSPort_Test.class -MULTIOUT_PORTS=MultiOutInt8_Test.class MultiOutInt16_Test.class MultiOutInt32_Test.class MultiOutInt64_Test.class MultiOutFloat_Test.class MultiOutDouble_Test.class MultiOutUInt8_Test.class MultiOutUInt16_Test.class MultiOutUInt32_Test.class MultiOutUInt64_Test.class - -JTESTS=$(IN_PORTS:.class=) $(OUT_PORTS:.class=) $(MULTIOUT_PORTS:.class=) -.java.class: - $(JAVAC) -cp $(JAVA_CP) $^ - -all: build-all check - -build-all: $(IN_PORTS) $(OUT_PORTS) $(MULTIOUT_PORTS) - -tcheck: - $(JAVA) -cp $(JAVA_CP) org.junit.runner.JUnitCore OutVectorPort_Test - -check: - @for jtest in "$(JTESTS)" ; do \ - $(JAVA) -cp $(JAVA_CP) -Dlog4j.configuration=file:log4j_config.txt org.junit.runner.JUnitCore $$jtest ; \ - done - -clean: - -rm *.class diff --git a/bulkioInterfaces/libsrc/testing/tests/java/Makefile.am b/bulkioInterfaces/libsrc/testing/tests/java/Makefile.am new file mode 100644 index 000000000..4765e055a --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/Makefile.am @@ -0,0 +1,98 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. 
+# +# REDHAWK burstioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK burstioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +@rh_jarfile_rules@ + +TESTS = Bulkio +check_SCRIPTS = Bulkio + +NUMERIC_TYPES = Char Octet Short UShort Long ULong LongLong ULongLong Float Double +ALL_TYPES = $(NUMERIC_TYPES) Bit File XML + +# Tests are generated via sed for ease of maintenance. +In%PortTest.java : sed/%.sed InPortTest.java.template + $(AM_V_GEN)sed -f $^ > $@ + +# Numeric out port tests are generated. +Out%PortTest.java : sed/%.sed NumericOutPortTest.java.template + $(AM_V_GEN)sed -f $^ > $@ + +# Explicitly disable out port test generation for bit, file and XML. +OutBitPortTest.java OutFilePortTest.java OutXMLPortTest.java : ; + +# Test helpers are generated for numeric types. +helpers/%TestHelper.java : sed/%.sed helpers/NumericTestHelper.java.template + $(AM_V_GEN)sed -f $^ > $@ + +# Explicitly disable helper generation for bit, file and XML. +helpers/BitTestHelper.java helpers/FileTestHelper.java helpers/XMLTestHelper.java : ; + +# Most port stubs are generated. +stubs/In%PortStub.java : sed/%.sed stubs/InPortStub.java.template + $(AM_V_GEN)sed -f $^ > $@ + +# Explicitly disable stub generation for XML. 
+stubs/stub/InXMLPortStub.java : ; + + +BUILT_SOURCES := $(patsubst %,helpers/%TestHelper.java,$(NUMERIC_TYPES)) +BUILT_SOURCES += $(patsubst %,stubs/In%PortStub.java,$(NUMERIC_TYPES) Bit File) +BUILT_SOURCES += $(patsubst %,In%PortTest.java,$(ALL_TYPES)) +BUILT_SOURCES += $(patsubst %,Out%PortTest.java,$(NUMERIC_TYPES)) + +noinst_java_JARFILES = bulkio-tests.jar + +bulkio_tests_jar_SOURCE = $(BUILT_SOURCES) +bulkio_tests_jar_SOURCE += stubs/Packet.java +bulkio_tests_jar_SOURCE += stubs/Stub.java +bulkio_tests_jar_SOURCE += stubs/InXMLPortStub.java +bulkio_tests_jar_SOURCE += helpers/ArrayData.java +bulkio_tests_jar_SOURCE += helpers/TestHelper.java +bulkio_tests_jar_SOURCE += helpers/BitTestHelper.java +bulkio_tests_jar_SOURCE += helpers/FileTestHelper.java +bulkio_tests_jar_SOURCE += helpers/XMLTestHelper.java +bulkio_tests_jar_SOURCE += helpers/ConnectionListener.java +bulkio_tests_jar_SOURCE += utils/TestFilter.java +bulkio_tests_jar_SOURCE += utils/ChainFilter.java +bulkio_tests_jar_SOURCE += utils/TextListener.java +bulkio_tests_jar_SOURCE += AllTests.java +bulkio_tests_jar_SOURCE += Main.java +bulkio_tests_jar_SOURCE += OutBitPortTest.java +bulkio_tests_jar_SOURCE += OutFilePortTest.java +bulkio_tests_jar_SOURCE += OutXMLPortTest.java +bulkio_tests_jar_SOURCE += InSDDSPort_Test.java +bulkio_tests_jar_SOURCE += OutSDDSPort_Test.java +bulkio_tests_jar_SOURCE += PrecisionUTCTimeTest.java +bulkio_tests_jar_SOURCE += StreamSRITest.java +bulkio_tests_jar_SOURCE += impl/InPortTestImpl.java +bulkio_tests_jar_SOURCE += impl/OutPortTestImpl.java +bulkio_tests_jar_SOURCE += impl/ChunkingOutPortTestImpl.java +bulkio_tests_jar_SOURCE += impl/NumericOutPortTestImpl.java + +bulkio_tests_jar_CLASSPATH = $(BULKIO_CLASSPATH):$(OSSIE_CLASSPATH):$(JUNIT_CLASSPATH):. 
+bulkio_tests_jar_JAVACFLAGS = -g -Xlint + +Bulkio : bulkio-tests.jar Makefile + @echo "#!/bin/bash" > $@ + @echo "export LD_LIBRARY_PATH=$(top_builddir)/jni/.libs:$(OSSIE_HOME)/lib:$(OSSIE_HOME)/lib64" >> $@ + @echo "exec java -cp bulkio-tests.jar:$(bulkio_tests_jar_CLASSPATH) -Dlog4j.configuration=file:$(srcdir)/log4j_config.txt Main \$$*" >> $@ + @chmod +x $@ + +CLEANFILES = Bulkio $(BUILT_SOURCES) diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutDouble_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutDouble_Test.java deleted file mode 100644 index ad80dc94d..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutDouble_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutDouble_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InDoublePort ip1 = null; - bulkio.InDoublePort ip2 = null; - bulkio.InDoublePort ip3 = null; - bulkio.InDoublePort ip4 = null; - bulkio.OutDoublePort port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InDoublePort("sink_1",logger); - ip2 = new bulkio.InDoublePort("sink_2", logger ); - ip3 = new bulkio.InDoublePort("sink_3", logger ); - ip4 = new bulkio.InDoublePort("sink_4", logger ); - port = new bulkio.OutDoublePort("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); 
- port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length 
==1 ); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, 
false ); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 
); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - double[] v = new double[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InDoublePort.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - double[] v = new double[91]; - bulkio.InDoublePort.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutFloat_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutFloat_Test.java deleted file mode 100644 index 976bc8383..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutFloat_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutFloat_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InFloatPort ip1 = null; - bulkio.InFloatPort ip2 = null; - bulkio.InFloatPort ip3 = null; - bulkio.InFloatPort ip4 = null; - bulkio.OutFloatPort port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InFloatPort("sink_1",logger); - ip2 = new bulkio.InFloatPort("sink_2", logger ); - ip3 = new bulkio.InFloatPort("sink_3", logger ); - ip4 = new bulkio.InFloatPort("sink_4", logger ); - port = new bulkio.OutFloatPort("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - 
port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 
); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false 
); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - 
streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - float[] v = new float[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InFloatPort.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - float[] v = new float[91]; - bulkio.InFloatPort.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt16_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt16_Test.java deleted file mode 100644 index 329733e7c..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt16_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutInt16_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InInt16Port ip1 = null; - bulkio.InInt16Port ip2 = null; - bulkio.InInt16Port ip3 = null; - bulkio.InInt16Port ip4 = null; - bulkio.OutInt16Port port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InInt16Port("sink_1",logger); - ip2 = new bulkio.InInt16Port("sink_2", logger ); - ip3 = new bulkio.InInt16Port("sink_3", logger ); - ip4 = new bulkio.InInt16Port("sink_4", logger ); - port = new bulkio.OutInt16Port("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - 
port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 
); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false 
); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - 
streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - short[] v = new short[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InInt16Port.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - short[] v = new short[91]; - bulkio.InInt16Port.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt32_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt32_Test.java deleted file mode 100644 index 5aede1759..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt32_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutInt32_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InInt32Port ip1 = null; - bulkio.InInt32Port ip2 = null; - bulkio.InInt32Port ip3 = null; - bulkio.InInt32Port ip4 = null; - bulkio.OutInt32Port port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InInt32Port("sink_1",logger); - ip2 = new bulkio.InInt32Port("sink_2", logger ); - ip3 = new bulkio.InInt32Port("sink_3", logger ); - ip4 = new bulkio.InInt32Port("sink_4", logger ); - port = new bulkio.OutInt32Port("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - 
port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 
); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false 
); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - 
streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - int[] v = new int[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InInt32Port.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - int[] v = new int[91]; - bulkio.InInt32Port.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt64_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt64_Test.java deleted file mode 100644 index c283ddcd2..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt64_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutInt64_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InInt64Port ip1 = null; - bulkio.InInt64Port ip2 = null; - bulkio.InInt64Port ip3 = null; - bulkio.InInt64Port ip4 = null; - bulkio.OutInt64Port port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InInt64Port("sink_1",logger); - ip2 = new bulkio.InInt64Port("sink_2", logger ); - ip3 = new bulkio.InInt64Port("sink_3", logger ); - ip4 = new bulkio.InInt64Port("sink_4", logger ); - port = new bulkio.OutInt64Port("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - 
port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 
); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false 
); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - 
streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - long[] v = new long[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InInt64Port.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - long[] v = new long[91]; - bulkio.InInt64Port.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt8_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt8_Test.java deleted file mode 100644 index 58b4f5ba5..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutInt8_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutInt8_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InInt8Port ip1 = null; - bulkio.InInt8Port ip2 = null; - bulkio.InInt8Port ip3 = null; - bulkio.InInt8Port ip4 = null; - bulkio.OutInt8Port port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InInt8Port("sink_1",logger); - ip2 = new bulkio.InInt8Port("sink_2", logger ); - ip3 = new bulkio.InInt8Port("sink_3", logger ); - ip4 = new bulkio.InInt8Port("sink_4", logger ); - port = new bulkio.OutInt8Port("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - 
port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 
); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false 
); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - 
streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - char[] v = new char[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InInt8Port.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - char[] v = new char[91]; - bulkio.InInt8Port.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt16_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt16_Test.java deleted file mode 100644 index 5f2992326..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt16_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutUInt16_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InUInt16Port ip1 = null; - bulkio.InUInt16Port ip2 = null; - bulkio.InUInt16Port ip3 = null; - bulkio.InUInt16Port ip4 = null; - bulkio.OutUInt16Port port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InUInt16Port("sink_1",logger); - ip2 = new bulkio.InUInt16Port("sink_2", logger ); - ip3 = new bulkio.InUInt16Port("sink_3", logger ); - ip4 = new bulkio.InUInt16Port("sink_4", logger ); - port = new bulkio.OutUInt16Port("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); 
- port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length 
==1 ); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, 
false ); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 
); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - short[] v = new short[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InUInt16Port.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - short[] v = new short[91]; - bulkio.InUInt16Port.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt32_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt32_Test.java deleted file mode 100644 index 5bc233b73..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt32_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutUInt32_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InUInt32Port ip1 = null; - bulkio.InUInt32Port ip2 = null; - bulkio.InUInt32Port ip3 = null; - bulkio.InUInt32Port ip4 = null; - bulkio.OutUInt32Port port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InUInt32Port("sink_1",logger); - ip2 = new bulkio.InUInt32Port("sink_2", logger ); - ip3 = new bulkio.InUInt32Port("sink_3", logger ); - ip4 = new bulkio.InUInt32Port("sink_4", logger ); - port = new bulkio.OutUInt32Port("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); 
- port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length 
==1 ); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, 
false ); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 
); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - int[] v = new int[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InUInt32Port.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - int[] v = new int[91]; - bulkio.InUInt32Port.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt64_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt64_Test.java deleted file mode 100644 index 167c2dc25..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt64_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutUInt64_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InUInt64Port ip1 = null; - bulkio.InUInt64Port ip2 = null; - bulkio.InUInt64Port ip3 = null; - bulkio.InUInt64Port ip4 = null; - bulkio.OutUInt64Port port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InUInt64Port("sink_1",logger); - ip2 = new bulkio.InUInt64Port("sink_2", logger ); - ip3 = new bulkio.InUInt64Port("sink_3", logger ); - ip4 = new bulkio.InUInt64Port("sink_4", logger ); - port = new bulkio.OutUInt64Port("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); 
- port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length 
==1 ); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, 
false ); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 
); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - long[] v = new long[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InUInt64Port.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - long[] v = new long[91]; - bulkio.InUInt64Port.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt8_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt8_Test.java deleted file mode 100644 index f7806635e..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/MultiOutUInt8_Test.java +++ /dev/null @@ -1,554 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ - -import static org.junit.Assert.*; - -import java.util.List; -import java.util.ArrayList; -import java.util.Iterator; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. - * - * @author - */ -@RunWith(JUnit4.class) -public class MultiOutUInt8_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - bulkio.InUInt8Port ip1 = null; - bulkio.InUInt8Port ip2 = null; - bulkio.InUInt8Port ip3 = null; - bulkio.InUInt8Port ip4 = null; - bulkio.OutUInt8Port port = null; - - protected List< bulkio.connection_descriptor_struct> filterTable = null; - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - ip1 = new bulkio.InUInt8Port("sink_1",logger); - ip2 = new bulkio.InUInt8Port("sink_2", logger ); - ip3 = new bulkio.InUInt8Port("sink_3", logger ); - ip4 = new bulkio.InUInt8Port("sink_4", logger ); - port = new bulkio.OutUInt8Port("multiout_source", logger ); - - filterTable = new ArrayList< bulkio.connection_descriptor_struct>(10); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_1", "stream-1-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_2", "stream-2-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-1", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-2", "multiout_source") ); - filterTable.add( new bulkio.connection_descriptor_struct("connection_3", "stream-3-3", "multiout_source") ); - - filterTable.add( new bulkio.connection_descriptor_struct("connection_4", "stream-4-1", "multiout_source") ); - - } - - @After - public void tearDown() { - } - - @Test - public void test_multiout_sri_filtered( ) { - - logger.info("------ Testing Multiout SRI Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - 
port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.StreamSRI sri; - BULKIO.StreamSRI []streams; - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 
); - } - - @Test - public void test_multiout_sri_eos_filtered( ) { - - logger.info("------ Testing Multiout SRI/EOS Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - - // - // Push SRI for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false 
); - port.pushSRI( sri ); - - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - 
streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Push SRI for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 1 StreamID Mismatch", "stream-1-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 1 Mode Mismatch", asri.mode == 0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 2 StreamID Mismatch", "stream-2-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 2 Mode Mismatch", asri.mode == 0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - asri=streams[0]; - assertTrue("Stream SRI - 3 StreamID Mismatch", "stream-3-1".equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - asri=streams[0]; - 
assertTrue("Stream SRI - 3 StreamID Mismatch", filter_stream_id.equals(asri.streamID) ); - assertTrue("Stream SRI - 3 Mode Mismatch", asri.mode == 0 ); - - // - // Send EOS downstream and check activeSRIs - // - port.updateConnectionFilter( filterTable ); - - filter_stream_id = "stream-1-1"; - byte[] v = new byte[0]; - port.pushPacket( v, TS, true, filter_stream_id ); - - bulkio.InUInt8Port.Packet pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 1 Pkt was empty", pkt != null ); - assertTrue("GetPacket 1 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 1 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 1 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-2-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - filter_stream_id = "stream-3-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - - filter_stream_id = "stream-4-1"; - port.pushPacket( v, TS, true, filter_stream_id ); - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("GetPacket 2 Pkt was empty", pkt != null ); - assertTrue("GetPacket 2 StreamID Mismatch", pkt.SRI.streamID.equals(filter_stream_id) ); - assertTrue("GetPacket 2 Mode Mismatch", pkt.SRI.mode == 0 ); - assertTrue("GetPacket 2 EOS Mismatch", pkt.EOS == true ); - - streams = ip1.activeSRIs(); - 
assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==0 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==0 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==0 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==0 ); - - // - // Reset connection table - // - port.updateConnectionFilter( null ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - - streams = ip1.activeSRIs(); - assertTrue("Stream SRI Sequence - 1 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 1 SRI, length", streams.length ==1 ); - - streams = ip2.activeSRIs(); - assertTrue("Stream SRI Sequence - 2 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 2 SRI, length", streams.length ==1 ); - - streams = ip3.activeSRIs(); - assertTrue("Stream SRI Sequence - 3 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 3 SRI, length", streams.length ==1 ); - - streams = ip4.activeSRIs(); - assertTrue("Stream SRI Sequence - 4 SRI", streams != null ); - assertTrue("Stream SRI Sequence - 4 SRI, length", streams.length ==1 ); - - - - } - - - @Test - public void test_multiout_data_filtered( ) { - - logger.info("------ Testing Multiout DATA Filtered BEGIN -----"); - - try { - port.connectPort( ip1._this_object(orb), "connection_1"); - port.connectPort( ip2._this_object(orb), "connection_2"); - port.connectPort( ip3._this_object(orb), "connection_3"); - port.connectPort( ip4._this_object(orb), "connection_4"); - } - catch( Exception e ) { - } - port.updateConnectionFilter( filterTable ); - - String filter_stream_id = new 
String("stream-1-1"); - double srate=11.0; - double xdelta = 1.0/srate; - short xunits=1; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - BULKIO.StreamSRI sri; - BULKIO.StreamSRI asri; - BULKIO.StreamSRI []streams; - byte[] v = new byte[91]; - bulkio.InUInt8Port.Packet pkt; - - logger.info("------ MultiOut DATA Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP2 - // - filter_stream_id = "stream-2-1"; - srate=22.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", 
pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - - // - // Push DATA for IP3 - // - filter_stream_id = "stream-3-1"; - srate=33.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP4 PKT was not EMPTY", pkt == null ); - - // - // Push DATA for IP4 - // - filter_stream_id = "stream-4-1"; - srate=44.0; - xdelta = 1.0/srate; - logger.info("------ MultiOut SRI Filtered SID:" + filter_stream_id ); - sri = bulkio.sri.utils.create(filter_stream_id, srate, xunits, false ); - port.pushSRI( sri ); - port.pushPacket( v, TS, false, filter_stream_id ); - - pkt = ip1.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP1 PKT was not EMPTY", pkt == null ); - - pkt = ip2.getPacket( 
bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP2 PKT was not EMPTY", pkt == null ); - - pkt = ip3.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, IP3 PKT was not EMPTY", pkt == null ); - - pkt = ip4.getPacket( bulkio.Const.NON_BLOCKING ); - assertTrue("Get a Packet, Pkt was empty:", pkt != null ); - assertTrue("Get a Packet, StreamID mismatch:", filter_stream_id.equals(pkt.SRI.streamID) ); - assertTrue("Get a Packet, EOS mismatch:", pkt.EOS == false ); - assertTrue("Get a Packet, mode mismatch:", pkt.SRI.mode == 0 ); - assertTrue("Get a Packet, Data Length mismatch:", pkt.dataBuffer.length == 91 ); - - } - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/NumericOutPortTest.java.template b/bulkioInterfaces/libsrc/testing/tests/java/NumericOutPortTest.java.template new file mode 100644 index 000000000..461a0b2e3 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/NumericOutPortTest.java.template @@ -0,0 +1,58 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from NumericOutPortTest.java.template. + * Do not modify directly. 
+ */ + +import java.util.Arrays; + +import org.junit.*; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +import helpers.ArrayData; +import helpers.@name@TestHelper; + +@RunWith(JUnit4.class) +public class Out@name@PortTest extends impl.NumericOutPortTestImpl +{ + public Out@name@PortTest() + { + super(new @name@TestHelper()); + } + + @Test + public void testPushPacketData() + { + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_packet"); + port.pushSRI(sri); + + // Create an array and fill it with a ramp + @type@ data = helper.makeData(1024); + ArrayData.ramp(data); + + port.pushPacket(data, bulkio.time.utils.now(), false, sri.streamID); + Assert.assertEquals(1, stub.packets.size()); + // NB: Assert.assertArrayEquals does not support float[] or double[], + // and we want strict equality anyway + Assert.assertTrue(Arrays.equals(data, stub.packets.get(0).data)); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/OutBitPortTest.java b/bulkioInterfaces/libsrc/testing/tests/java/OutBitPortTest.java new file mode 100644 index 000000000..bc318164c --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/OutBitPortTest.java @@ -0,0 +1,57 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import org.junit.*; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +import stubs.Packet; +import helpers.BitTestHelper; + +@RunWith(JUnit4.class) +public class OutBitPortTest extends impl.ChunkingOutPortTestImpl +{ + public OutBitPortTest() + { + super(new BitTestHelper()); + } + + @Test + public void testPushPacketData() + { + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_packet"); + port.pushSRI(sri); + + // Create a byte array and fill it with alternating bits + BULKIO.BitSequence data = new BULKIO.BitSequence(); + data.data = new byte[128]; + for (int ii = 0; ii < data.data.length; ++ii) { + data.data[ii] = (byte) 0x55; + } + data.bits = data.data.length * 8; + + // Check the received data is consistent + port.pushPacket(data, bulkio.time.utils.now(), false, sri.streamID); + Assert.assertEquals(1, stub.packets.size()); + Packet packet = stub.packets.get(0); + Assert.assertEquals(data.bits, packet.data.bits); + Assert.assertArrayEquals(data.data, packet.data.data); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/OutFilePortTest.java b/bulkioInterfaces/libsrc/testing/tests/java/OutFilePortTest.java new file mode 100644 index 000000000..8945f3715 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/OutFilePortTest.java @@ -0,0 +1,50 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import org.junit.*; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +import stubs.Packet; +import helpers.FileTestHelper; + +@RunWith(JUnit4.class) +public class OutFilePortTest extends impl.OutPortTestImpl +{ + public OutFilePortTest() + { + super(new FileTestHelper()); + } + + @Test + public void testPushPacketData() + { + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_packet"); + port.pushSRI(sri); + + String uri = "file:///tmp/test.dat"; + + // Check the received data is consistent + port.pushPacket(uri, bulkio.time.utils.now(), false, sri.streamID); + Assert.assertEquals(1, stub.packets.size()); + Packet packet = stub.packets.get(0); + Assert.assertEquals(uri, packet.data); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/OutSDDSPort_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/OutSDDSPort_Test.java index 88b342b39..38cf3afbd 100644 --- a/bulkioInterfaces/libsrc/testing/tests/java/OutSDDSPort_Test.java +++ b/bulkioInterfaces/libsrc/testing/tests/java/OutSDDSPort_Test.java @@ -20,33 +20,12 @@ import static org.junit.Assert.*; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; import org.junit.Test; -import org.junit.Ignore; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; + import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; 
-import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import org.omg.CORBA.Object; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; + import bulkio.ConnectionEventListener; /** @@ -105,34 +84,11 @@ public void disconnect( String sid ){ - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. - BasicConfigurator.configure(); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - - } - - @After - public void tearDown() { - - } - @Test - public void test_OutSDDS( ) { + public void test_OutSDDS( ) { test_fact ctx = new test_fact( "OutSDDS" ); - logger.info("------ Testing " + ctx.name + " Port ------"); - bulkio.OutSDDSPort port = new bulkio.OutSDDSPort(ctx.port_name); String pname = port.getName(); @@ -175,7 +131,7 @@ public void test_OutSDDS( ) { sri.streamID = ctx.sid; BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); port.pushSRI( sri, TS ); - StreamSRI []sris= port.activeSRIs(); + BULKIO.StreamSRI[] sris= port.activeSRIs(); assertTrue("Current SRIs Failed", sris.length == 1 ); // Pushing an SRI with a null streamID should trigger an NPE diff --git a/bulkioInterfaces/libsrc/testing/tests/java/OutStringPort_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/OutStringPort_Test.java deleted file mode 100644 index 435f2f5a8..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/OutStringPort_Test.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. 
- * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -import static org.junit.Assert.*; - -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import org.omg.CORBA.Object; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import bulkio.ConnectionEventListener; - -/** - * Tests for {@link Foo}. 
- * - * @author - */ -@RunWith(JUnit4.class) -public class OutStringPort_Test { - - Logger logger = Logger.getRootLogger(); - - class test_fact { - - String name = "OutInt8"; - - String port_name = new String("test-outport-api"); - - String sid = new String("test-outport-streamid"); - - String cid = new String("connect-1"); - - short mode = 1; - - double srate=22.0; - - test_fact( String tname ) { - name=tname; - } - - }; - - class connect_listener implements bulkio.ConnectionEventListener { - - test_fact ctx=null; - - connect_listener( test_fact inCtx ) { - ctx = inCtx; - } - - public void connect( String sid ){ - assertTrue("Connection Callback, StreamID mismatch", ctx.cid == sid ); - }; - - public void disconnect( String sid ){ - assertTrue("Disconnection Callback, StreamID mismatch", ctx.cid == sid ); - }; - }; - - - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. - BasicConfigurator.configure(); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - - } - - @After - public void tearDown() { - - } - - @Test - public void test_OutFile( ) { - - test_fact ctx = new test_fact( "OutFile" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutFilePort port = new bulkio.OutFilePort(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test 
callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - org.omg.CORBA.Object p = null; - try { - port.connectPort( p, ctx.cid ); - - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // push data - String v = new String(""); - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - } - - - @Test - public void test_OutXML( ) { - - test_fact ctx = new test_fact( "OutXML" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutXMLPort port = new bulkio.OutXMLPort(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - 
org.omg.CORBA.Object p = null; - try { - port.connectPort( p, ctx.cid ); - - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // push data - String v = new String(""); - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, true, ctx.sid ); - port.pushPacket( v, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - } - - - - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/OutVectorPort_Test.java b/bulkioInterfaces/libsrc/testing/tests/java/OutVectorPort_Test.java deleted file mode 100644 index 1a247fcc2..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/java/OutVectorPort_Test.java +++ /dev/null @@ -1,1143 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK bulkioInterfaces. - * - * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. 
- * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -import static org.junit.Assert.*; - -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.After; -import org.junit.Test; -import org.junit.Ignore; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.apache.log4j.BasicConfigurator; -import org.apache.log4j.Logger; -import org.apache.log4j.LogManager; -import org.apache.log4j.PropertyConfigurator; -import org.apache.log4j.LogManager; -import org.apache.log4j.PatternLayout; -import org.apache.log4j.Layout; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Appender; -import org.apache.log4j.Level; -import org.apache.log4j.xml.DOMConfigurator; -import org.omg.CORBA.Object; -import BULKIO.StreamSRI; -import BULKIO.PrecisionUTCTime; -import BULKIO.PortStatistics; -import BULKIO.PortUsageType; -import BULKIO.dataSDDSPackage.AttachError; -import BULKIO.dataSDDSPackage.DetachError; -import BULKIO.dataSDDSPackage.StreamInputError; -import bulkio.ConnectionEventListener; -import org.omg.CORBA.ORB; - -/** - * Tests for {@link Foo}. 
- * - * @author - */ -@RunWith(JUnit4.class) -public class OutVectorPort_Test { - - Logger logger = Logger.getRootLogger(); - - public static ORB orb; - - class test_fact { - - String name = "OutInt8"; - - String port_name = new String("test-outport-api"); - - String sid = new String("test-outport-streamid"); - - String cid = new String("connect-1"); - String cid2 = new String("connect-2"); - - short mode = 1; - - double srate=22.0; - - test_fact( String tname ) { - name=tname; - } - - }; - - class connect_listener implements bulkio.ConnectionEventListener { - - test_fact ctx=null; - - connect_listener( test_fact inCtx ) { - ctx = inCtx; - } - - public void connect( String sid ){ - assertTrue("Connection Callback, StreamID mismatch", ctx.cid == sid ); - }; - - public void disconnect( String sid ){ - assertTrue("Disconnection Callback, StreamID mismatch", ctx.cid == sid ); - }; - }; - - - - @BeforeClass - public static void oneTimeSetUp() { - // Set up a simple configuration that logs on the console. 
- BasicConfigurator.configure(); - - // Create and initialize the ORB - String [] args = new String[0]; - orb = ORB.init(args, null); - } - - @AfterClass - public static void oneTimeTearDown() { - - } - - @Before - public void setUp() { - } - - @After - public void tearDown() { - - } - - - @Test - public void test_OutInt8( ) { - - test_fact ctx = new test_fact( "OutInt8" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutInt8Port port = new bulkio.OutInt8Port(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InInt8Port p = new bulkio.InInt8Port("sink_1",logger); - try { - - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = 
null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - char []v = new char[] { (char)0 }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 8); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - - @Test - public void test_OutInt16( ) { - - - test_fact ctx = new test_fact( "OutInt16" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutInt16Port port = new bulkio.OutInt16Port(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InInt16Port p = new 
bulkio.InInt16Port("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - short []v = new short[] { (short)0 }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 16); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - @Test - public void test_OutInt32( ) { - - - test_fact ctx = new test_fact( "OutInt32" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutInt32Port port = new bulkio.OutInt32Port(ctx.port_name); - - String pname = port.getName(); - 
assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InInt32Port p = new bulkio.InInt32Port("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - int []v = new int[] { 0 }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - 
//assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 32); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - - @Test - public void test_OutInt64( ) { - - - test_fact ctx = new test_fact( "OutInt64" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutInt64Port port = new bulkio.OutInt64Port(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InInt64Port p = new bulkio.InInt64Port("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= 
port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - long []v = new long[] { 0L }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 64); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - - @Test - public void test_OutUInt8( ) { - - - test_fact ctx = new test_fact( "OutUInt8" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutUInt8Port port = new bulkio.OutUInt8Port(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses 
Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InUInt8Port p = new bulkio.InUInt8Port("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - byte []v = new byte[] { (byte)0 }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 8); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - - @Test - public void test_OutUInt16( ) { - - - test_fact ctx = new test_fact( 
"OutUInt16" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutUInt16Port port = new bulkio.OutUInt16Port(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InUInt16Port p = new bulkio.InUInt16Port("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - short []v = new short[] { (short)0 }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - 
- // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 16); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - @Test - public void test_OutUInt32( ) { - - - test_fact ctx = new test_fact( "OutUInt32" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutUInt32Port port = new bulkio.OutUInt32Port(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InUInt32Port p = new bulkio.InUInt32Port("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - 
fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - int []v = new int[] { 0 }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 32); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - - @Test - public void test_OutUInt64( ) { - - - test_fact ctx = new test_fact( "OutUInt64" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutUInt64Port port = new bulkio.OutUInt64Port(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt 
== BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InUInt64Port p = new bulkio.InUInt64Port("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - long []v = new long[] { 0L }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 64); - - // Test that statistics are 
returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - - - @Test - public void test_OutDouble( ) { - - - test_fact ctx = new test_fact( "OutDouble" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutDoublePort port = new bulkio.OutDoublePort(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InDoublePort p = new bulkio.InDoublePort("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - - // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", 
received_npe); - - // push data - double []v = new double[] { 0.0 }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 64); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - @Test - public void test_OutFloat( ) { - - - test_fact ctx = new test_fact( "OutFloat" ); - - logger.info("------ Testing " + ctx.name + " Port ------"); - - bulkio.OutFloatPort port = new bulkio.OutFloatPort(ctx.port_name); - - String pname = port.getName(); - assertTrue("Port Name Failed", pname == ctx.port_name ); - - port.setLogger(logger); - - // port statistics - port.enableStats( false ); - - port.enableStats( true ); - BULKIO.UsesPortStatistics []stats = port.statistics(); - assertTrue("Port Statistics Failed", stats != null ); - - BULKIO.PortUsageType rt = port.state(); - assertTrue("Port Usage Type Failed", rt == BULKIO.PortUsageType.IDLE ); - - ExtendedCF.UsesConnection []clist = port.connections(); - assertTrue("Uses Connections List Failed", clist != null ); - assertTrue("Uses Connections List Failed", clist.length == 0 ); - - // - // test callback feature - // - port.setConnectionEventListener(new connect_listener(ctx) ); - - bulkio.InFloatPort p = new bulkio.InFloatPort("sink_1",logger); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - - port.disconnectPort( ctx.cid ); - port.disconnectPort( ctx.cid ); - } - catch(Exception e){ - } - 
- // clear callback and reconnect - port.setConnectionEventListener(null); - try { - port.connectPort( p._this_object(orb), ctx.cid ); - } - catch(Exception e){ - fail("Unable to connect port"); - } - - // push sri - BULKIO.StreamSRI sri; - sri = bulkio.sri.utils.create(); - sri.streamID = ctx.sid; - port.pushSRI( sri ); - StreamSRI []sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // Pushing an SRI with a null streamID should trigger an NPE - sri = new BULKIO.StreamSRI(); - sri.streamID = null; - boolean received_npe = false; - try { - port.pushSRI(sri); - } catch (NullPointerException npe) { - received_npe = true; - } - assertTrue("Did not raise NPE for null streamID", received_npe); - - // push data - float []v = new float[] { 0.0f }; - BULKIO.PrecisionUTCTime TS = bulkio.time.utils.now(); - port.pushPacket( v, TS, false, ctx.sid ); - - // check SRI list - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 1 ); - - // resolve - need to add end point to test stats - //stats = port.statistics(); - //assertTrue("Port Statistics Failed", stats.length == 1 ); - - port.pushPacket( v, TS, true, ctx.sid ); - port.pushPacket( v, TS, true, "unknown_stream_id" ); - - sris= port.activeSRIs(); - assertTrue("Current SRIs Failed", sris.length == 0 ); - - // Check that the statistics report the right element size - test_element_size(port, 32); - - // Test that statistics are returned for all connections - test_statistics(port, p._this_object(orb), ctx); - } - - private void test_element_size(BULKIO.UsesPortStatisticsProviderOperations port, int bits) - { - BULKIO.UsesPortStatistics[] stats = port.statistics(); - assertEquals("No statistics", stats.length, 1); - double bpe = stats[0].statistics.bitsPerSecond / stats[0].statistics.elementsPerSecond; - assertEquals("Incorrect element size", (double)bits, bpe, 1e-6); - } - - private void test_statistics(BULKIO.UsesPortStatisticsProviderPOA port, - org.omg.CORBA.Object 
sink, - test_fact ctx) - { - try { - port.connectPort(sink, ctx.cid); - port.connectPort(sink, ctx.cid2); - } catch (final CF.PortPackage.OccupiedPort ex) { - fail("Port should never throw CF.Port.OccupiedPort"); - } catch (final CF.PortPackage.InvalidPort ex) { - fail("Failed to connect ports"); - } - BULKIO.UsesPortStatistics[] stats = port.statistics(); - assertEquals("Statistics returned wrong number of connections", stats.length, 2); - assertNotNull("Statistics[0] is null", stats[0]); - assertNotNull("Statistics[1] is null", stats[1]); - } - -} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/OutXMLPortTest.java b/bulkioInterfaces/libsrc/testing/tests/java/OutXMLPortTest.java new file mode 100644 index 000000000..ac622beb6 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/OutXMLPortTest.java @@ -0,0 +1,50 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +import org.junit.*; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +import stubs.Packet; +import helpers.XMLTestHelper; + +@RunWith(JUnit4.class) +public class OutXMLPortTest extends impl.OutPortTestImpl +{ + public OutXMLPortTest() + { + super(new XMLTestHelper()); + } + + @Test + public void testPushPacketData() + { + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_packet"); + port.pushSRI(sri); + + String document = ""; + + // Check the received data is consistent + port.pushPacket(document, null, false, sri.streamID); + Assert.assertEquals(1, stub.packets.size()); + Packet packet = stub.packets.get(0); + Assert.assertEquals(document, packet.data); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/PrecisionUTCTimeTest.java b/bulkioInterfaces/libsrc/testing/tests/java/PrecisionUTCTimeTest.java new file mode 100644 index 000000000..13cd53e19 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/PrecisionUTCTimeTest.java @@ -0,0 +1,235 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +@RunWith(JUnit4.class) +public class PrecisionUTCTimeTest +{ + private void failTime(String message, BULKIO.PrecisionUTCTime expected, BULKIO.PrecisionUTCTime actual) { + Assert.fail(String.format("%1$s expected:<%2$s> but was:<%3$s>", message, + bulkio.time.utils.toString(expected), bulkio.time.utils.toString(actual))); + } + + private void assertTimeEquals(String message, BULKIO.PrecisionUTCTime expected, BULKIO.PrecisionUTCTime actual) { + if (bulkio.time.utils.compare(expected, actual) != 0) { + failTime(message, expected, actual); + } + } + + private void assertTimeNotEquals(String message, BULKIO.PrecisionUTCTime expected, BULKIO.PrecisionUTCTime actual) { + if (bulkio.time.utils.compare(expected, actual) == 0) { + failTime(message, expected, actual); + } + } + + @Test + public void testCreate() { + BULKIO.PrecisionUTCTime ts = bulkio.time.utils.now(); + Assert.assertEquals("tcmode mismatch", ts.tcmode, BULKIO.TCM_CPU.value); + Assert.assertEquals("tcstatus mismatch", ts.tcstatus, BULKIO.TCS_VALID.value); + Assert.assertEquals("tcoff mismatch", ts.toff, 0.00, 3); + + ts = bulkio.time.utils.create(100.0, 0.125); + Assert.assertEquals("tcmode mismatch", ts.tcmode, BULKIO.TCM_CPU.value); + Assert.assertEquals("tcstatus mismatch", ts.tcstatus, BULKIO.TCS_VALID.value); + Assert.assertEquals("tcwsec mismatch", ts.twsec, 100.0, 3); + Assert.assertEquals("tcwsec mismatch", ts.tfsec, 0.125, 3); + + ts = bulkio.time.utils.create(100.0, 0.125, BULKIO.TCM_SDDS.value); + Assert.assertEquals("tcmode mismatch", ts.tcmode, BULKIO.TCM_SDDS.value); + Assert.assertEquals("tcstatus mismatch", ts.tcstatus, BULKIO.TCS_VALID.value); + Assert.assertEquals("tcwsec mismatch", ts.twsec, 100.0, 3); + Assert.assertEquals("tcwsec mismatch", ts.tfsec, 0.125, 3); + } + + @Test + public void testNormalize() { + // NOTE: All tests use fractional portions that are 
exact binary fractions to + // avoid potential roundoff issues + + // Already normalized, no change + BULKIO.PrecisionUTCTime time = new BULKIO.PrecisionUTCTime(); + time.twsec = 100.0; + time.tfsec = 0.5; + bulkio.time.utils.normalize(time); + assertTimeEquals("Already normalized time", bulkio.time.utils.create(100.0, 0.5), time); + + // Whole seconds has fractional portion, should be moved to fractional seconds + time.twsec = 100.25; + time.tfsec = 0.25; + bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing whole", bulkio.time.utils.create(100.0, 0.5), time); + + // Whole seconds has fractional portion, should be moved to fractional seconds + // leading to carry + time.twsec = 100.75; + time.tfsec = 0.75; + bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing whole with carry", bulkio.time.utils.create(101.0, 0.5), time); + + // Fractional seconds contains whole portion, should be moved to whole seconds + time.twsec = 100.0; + time.tfsec = 2.5; + bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing fractional", bulkio.time.utils.create(102.0, 0.5), time); + + // Both parts require normalization; fractional portion of whole seconds adds an + // additional carry + time.twsec = 100.75; + time.tfsec = 2.75; + bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing both", bulkio.time.utils.create(103.0, 0.5), time); + + // Negative fractional value should borrow + time.twsec = 100.0; + time.tfsec = -0.25; + bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing negative fractional", bulkio.time.utils.create(99.0, 0.75), time); + + // Negative fractional value with magnitude greater than one + time.twsec = 100.0; + time.tfsec = -3.125; + bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing negative fractional > 1", bulkio.time.utils.create(96.0, 0.875), time); + + // Fractional portion of whole seconds greater than negative fractional seconds + time.twsec = 100.5; + time.tfsec = -.125; + 
bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing both with negative fractional", bulkio.time.utils.create(100.0, 0.375), time); + + // Negative fractional seconds greater than fractional portion of whole seconds + time.twsec = 100.125; + time.tfsec = -.5; + bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing both with borrow", bulkio.time.utils.create(99.0, 0.625), time); + + // Negative fractional seconds have whole portion, but seconds whole seconds have + // fractional portion with larger magnitude than remaining fractional seconds + time.twsec = 100.75; + time.tfsec = -2.5; + bulkio.time.utils.normalize(time); + assertTimeEquals("Normalizing both with negative fractional > 1", bulkio.time.utils.create(98.0, 0.25), time); + } + + @Test + public void testCompare() { + BULKIO.PrecisionUTCTime t1 = bulkio.time.utils.create(100.0, 0.5); + BULKIO.PrecisionUTCTime t2 = bulkio.time.utils.create(100.0, 0.5); + Assert.assertTrue("Identical values did not compare as equal", bulkio.time.utils.compare(t1, t2) == 0); + + // Only fractional seconds differ + t1 = bulkio.time.utils.create(100.0, 0.5); + t2 = bulkio.time.utils.create(100.0, 0.25); + Assert.assertTrue("Time with larger fractional did not compare greater", bulkio.time.utils.compare(t1, t2) > 0); + Assert.assertTrue("Time with smaller fractional did not compare lesser", bulkio.time.utils.compare(t2, t1) < 0); + + // Only whole seconds differ + t1 = bulkio.time.utils.create(100.0, 0.75); + t2 = bulkio.time.utils.create(101.0, 0.75); + Assert.assertTrue("Time with smaller whole did not compare lesser", bulkio.time.utils.compare(t1, t2) < 0); + Assert.assertTrue("Time with larger whole did not compare greater", bulkio.time.utils.compare(t2, t1) > 0); + + // Whole seconds differ, but fractional seconds have the opposite ordering (which has no effect) + t1 = bulkio.time.utils.create(100.0, 0.75); + t2 = bulkio.time.utils.create(5000.0, 0.25); + Assert.assertTrue("Time with smaller whole 
and larger fractional did not compare lesser", bulkio.time.utils.compare(t1, t2) < 0); + Assert.assertTrue("Time with larger whole and smaller fractional did not compare greater", bulkio.time.utils.compare(t2, t1) > 0); + } + + @Test + public void testOperators() { + // Test that copy works as expected + final BULKIO.PrecisionUTCTime reference = bulkio.time.utils.create(100.0, 0.5); + BULKIO.PrecisionUTCTime t1 = bulkio.time.utils.copy(reference); + Assert.assertNotSame("Copy returned same object", reference, t1); + assertTimeEquals("Copy returned different values", reference, t1); + + // Add a positive offset + BULKIO.PrecisionUTCTime result = bulkio.time.utils.add(t1, 1.75); + BULKIO.PrecisionUTCTime expected = bulkio.time.utils.create(102.0, 0.25); + Assert.assertNotSame("Add returned same object", t1, result); + assertTimeEquals("Original value modified", reference, t1); + assertTimeEquals("Add positive offset", expected, result); + + // Add a negative offset (i.e., subtract) + result = bulkio.time.utils.add(t1, -1.75); + expected = bulkio.time.utils.create(98.0, 0.75); + assertTimeEquals("Original value modified", reference, t1); + assertTimeEquals("Add negative offset", expected, result); + + // Increment by positive offset + result = bulkio.time.utils.increment(t1, 2.25); + expected = bulkio.time.utils.create(102.0, 0.75); + Assert.assertSame("Increment returned different object", t1, result); + assertTimeEquals("Increment by positive offset", expected, t1); + + // Increment by negative offset (i.e., decrement) + bulkio.time.utils.increment(t1, -3.875); + expected = bulkio.time.utils.create(98.0, 0.875); + assertTimeEquals("Increment by negative offset", expected, t1); + + // Reset to reference time and subtract a positive offset + t1 = bulkio.time.utils.copy(reference); + result = bulkio.time.utils.subtract(t1, 1.25); + expected = bulkio.time.utils.create(99.0, 0.25); + Assert.assertNotSame("Subtract returned same object", t1, result); + 
assertTimeEquals("Original value modified", reference, t1); + assertTimeEquals("Subtract positive offset", expected, result); + + // Subtract a negative offset (i.e., add) + result = bulkio.time.utils.subtract(t1, -4.875); + expected = bulkio.time.utils.create(105.0, 0.375); + assertTimeEquals("Original value modified", reference, t1); + assertTimeEquals("Subtract negative offset", expected, result); + + // Decrement by positive offset + result = bulkio.time.utils.decrement(t1, 2.75); + expected = bulkio.time.utils.create(97.0, 0.75); + Assert.assertSame("Increment returned different object", t1, result); + assertTimeEquals("Decrement by positive offset", expected, t1); + + // Decrement by negative offset (i.e., increment) + bulkio.time.utils.decrement(t1, -3.375); + expected = bulkio.time.utils.create(101.0, 0.125); + assertTimeEquals("Decrement by negative offset", expected, t1); + + // Difference, both positive and negative (exact binary fractions used to allow + // exact comparison) + t1 = bulkio.time.utils.add(reference, 8.875); + Assert.assertEquals("Positive time difference", bulkio.time.utils.difference(t1, reference), 8.875, 0.0); + Assert.assertEquals("Negative time difference", bulkio.time.utils.difference(reference, t1), -8.875, 0.0); + } + + @Test + public void testString() { + // Test the default epoch (Unix time) + BULKIO.PrecisionUTCTime time = bulkio.time.utils.create(0.0, 0.0); + Assert.assertEquals("Epoch", "1970:01:01::00:00:00.000000", bulkio.time.utils.toString(time)); + + // Use a recent time with rounding at the microsecond level + time = bulkio.time.utils.create(1451933967.0, 0.2893569); + Assert.assertEquals("String representation", "2016:01:04::18:59:27.289357", bulkio.time.utils.toString(time)); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/StreamSRITest.java b/bulkioInterfaces/libsrc/testing/tests/java/StreamSRITest.java new file mode 100644 index 000000000..41c54b925 --- /dev/null +++ 
b/bulkioInterfaces/libsrc/testing/tests/java/StreamSRITest.java @@ -0,0 +1,180 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +import org.omg.CORBA.TCKind; + +import org.ossie.properties.AnyUtils; + +import static bulkio.sri.utils.*; + +@RunWith(JUnit4.class) +public class StreamSRITest +{ + @Test + public void testCreate() { + BULKIO.StreamSRI sri = bulkio.sri.utils.create(); + Assert.assertEquals("Stream ID mismatch", "defaultSRI", sri.streamID); + Assert.assertEquals("Version mismatch", 1, sri.hversion); + Assert.assertEquals("XUnits mismatch", BULKIO.UNITS_TIME.value, sri.xunits); + Assert.assertEquals("XStart mismatch", 0.0, sri.xstart, 0); + Assert.assertEquals("XDelta mismatch", 1.0, sri.xdelta, 0); + Assert.assertEquals("YUnits mismatch", 0, sri.yunits); + Assert.assertEquals("YStart mismatch", 0.0, sri.ystart, 0); + Assert.assertEquals("YDelta mismatch", 0.0, sri.ydelta, 0); + Assert.assertEquals("Subsize mismatch", 0, sri.subsize, 0); + Assert.assertFalse("Blocking mismatch", sri.blocking); + 
Assert.assertEquals("Keywords mismatch", 0, sri.keywords.length); + + sri = bulkio.sri.utils.create("NEW-STREAM-ID", 1e6, BULKIO.UNITS_FREQUENCY.value, false); + Assert.assertEquals("Stream ID mismatch", "NEW-STREAM-ID", sri.streamID); + Assert.assertEquals("XDelta mismatch", 1e-6, sri.xdelta, 0); + Assert.assertEquals("XUnits mismatch", BULKIO.UNITS_FREQUENCY.value, sri.xunits); + Assert.assertFalse("Blocking mismatch", sri.blocking); + } + + + @Test + public void testCompare() { + BULKIO.StreamSRI a_sri = bulkio.sri.utils.create(); + BULKIO.StreamSRI b_sri = bulkio.sri.utils.create(); + BULKIO.StreamSRI c_sri = bulkio.sri.utils.create("THIS_DOES_NOT_MATCH"); + + Assert.assertTrue("bulkio.sri.utils.compare method - same.", bulkio.sri.utils.compare(a_sri, b_sri)); + Assert.assertFalse("bulkio.sri.utils.compare method - different - StreamID .", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = bulkio.sri.utils.create(); + c_sri.hversion = 2; + Assert.assertFalse("bulkio.sri.utils.compare method - different - hversion", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = bulkio.sri.utils.create(); + c_sri.xstart = 3; + Assert.assertFalse("bulkio.sri.utils.compare method - different - xstart", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = bulkio.sri.utils.create(); + c_sri.xdelta = 100.0; + Assert.assertFalse("bulkio.sri.utils.compare method - different - xdelta", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = bulkio.sri.utils.create(); + c_sri.xunits = 100; + Assert.assertFalse("bulkio.sri.utils.compare method - different - xunits", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = bulkio.sri.utils.create(); + c_sri.subsize = 100; + Assert.assertFalse("bulkio.sri.utils.compare method - different - subsize", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = bulkio.sri.utils.create(); + c_sri.ystart = 3; + Assert.assertFalse("bulkio.sri.utils.compare method - different - ystart", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = 
bulkio.sri.utils.create(); + c_sri.ydelta = 100.0; + Assert.assertFalse("bulkio.sri.utils.compare method - different - ydelta", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = bulkio.sri.utils.create(); + c_sri.yunits = 100; + Assert.assertFalse("bulkio.sri.utils.compare method - different - yunits", bulkio.sri.utils.compare(a_sri, c_sri)); + + c_sri = bulkio.sri.utils.create(); + c_sri.mode = 100; + Assert.assertFalse("bulkio.sri.utils.compare method - different - mode", bulkio.sri.utils.compare(a_sri, c_sri)); + + CF.DataType kv = new CF.DataType("key_one", AnyUtils.stringToAny("1","long")); + CF.DataType kv2 = new CF.DataType("key_one", AnyUtils.stringToAny("1","long")); + a_sri.keywords = new CF.DataType[1]; + a_sri.keywords[0] = kv; + c_sri = bulkio.sri.utils.create(); + c_sri.keywords = new CF.DataType[1]; + c_sri.keywords[0] = kv2; + Assert.assertTrue("bulkio.sri.utils.compare method - same - keyword item", bulkio.sri.utils.compare(a_sri, c_sri)); + + kv2 = new CF.DataType("key_one", AnyUtils.stringToAny("100","long")); + c_sri = bulkio.sri.utils.create(); + c_sri.keywords = new CF.DataType[1]; + c_sri.keywords[0] = kv2; + Assert.assertFalse("bulkio.sri.utils.compare method - different - keywords value mismatch", + bulkio.sri.utils.compare(a_sri, c_sri)); + + kv2 = new CF.DataType("key_two", AnyUtils.stringToAny("100","long")); + c_sri = bulkio.sri.utils.create(); + c_sri.keywords = new CF.DataType[1]; + c_sri.keywords[0] = kv2; + Assert.assertFalse("bulkio.sri.utils.compare method - different - keywords name mismatch", + bulkio.sri.utils.compare(a_sri, c_sri)); + + } + + @Test + public void testCompareFields() + { + BULKIO.StreamSRI sri_1 = bulkio.sri.utils.create("compare_fields"); + BULKIO.StreamSRI sri_2 = bulkio.sri.utils.create("compare_fields"); + + // Identical + int flags = NONE; + Assert.assertEquals(flags, bulkio.sri.utils.compareFields(sri_1, sri_2)); + + // Stream ID + flags = STREAMID; + sri_2 = bulkio.sri.utils.create("compare_fields_2"); 
+ Assert.assertEquals(flags, bulkio.sri.utils.compareFields(sri_1, sri_2)); + + // Framing and axes metadata + sri_2 = bulkio.sri.utils.create("compare_fields"); + sri_2.xstart = -1.0; + sri_2.xdelta = 0.25; + sri_2.xunits = BULKIO.UNITS_FREQUENCY.value; + sri_2.subsize = 9; + sri_2.ystart = 0.5; + sri_2.yunits = BULKIO.UNITS_TIME.value; + sri_2.ydelta = 0.125; + flags = XSTART|XDELTA|XUNITS|SUBSIZE|YSTART|YDELTA|YUNITS; + Assert.assertEquals(flags, bulkio.sri.utils.compareFields(sri_1, sri_2)); + + // Real->complex and blocking + sri_2 = bulkio.sri.utils.create("compare_fields"); + sri_2.mode = 1; + sri_2.blocking = true; + flags = MODE|BLOCKING; + Assert.assertEquals(flags, bulkio.sri.utils.compareFields(sri_1, sri_2)); + + // Keywords + // Adding keywords should register as a change + sri_1.keywords = new CF.DataType[2]; + sri_1.keywords[0] = new CF.DataType("string", AnyUtils.toAny("first")); + sri_1.keywords[1] = new CF.DataType("number", AnyUtils.toAny(1.0, TCKind.tk_double)); + sri_2 = bulkio.sri.utils.create("compare_fields"); + flags = KEYWORDS; + Assert.assertEquals(flags, bulkio.sri.utils.compareFields(sri_1, sri_2)); + // Likewise a different value for the same keyword + sri_2.keywords = new CF.DataType[2]; + sri_2.keywords[0] = new CF.DataType("string", AnyUtils.toAny("first")); + sri_2.keywords[1] = new CF.DataType("number", AnyUtils.toAny(2.0, TCKind.tk_double)); + Assert.assertEquals(flags, bulkio.sri.utils.compareFields(sri_1, sri_2)); + // Same values should register as equal + flags = NONE; + sri_2.keywords[1].value = AnyUtils.toAny(1.0, TCKind.tk_double); + Assert.assertEquals(flags, bulkio.sri.utils.compareFields(sri_1, sri_2)); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/build.xml b/bulkioInterfaces/libsrc/testing/tests/java/build.xml index 161ae294b..018a55355 100644 --- a/bulkioInterfaces/libsrc/testing/tests/java/build.xml +++ b/bulkioInterfaces/libsrc/testing/tests/java/build.xml @@ -26,39 +26,31 @@ with this program. 
If not, see http://www.gnu.org/licenses/. + - + - - + + - - - - - - - - - - + + + + - - - - - - + + + diff --git a/bulkioInterfaces/libsrc/testing/tests/java/helpers/ArrayData.java b/bulkioInterfaces/libsrc/testing/tests/java/helpers/ArrayData.java new file mode 100644 index 000000000..41abe9e9a --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/helpers/ArrayData.java @@ -0,0 +1,81 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package helpers; + +public class ArrayData { + + public static void ramp(char[] data) + { + // Fun fact: char is an unsigned 16-bit integer type in Java, but + // REDHAWK uses it for signed 8-bit integers. So, in order to generate + // valid data, we have to explicitly mask off the upper bits. 
+ for (int ii = 0; ii < data.length; ii++) { + data[ii] = (char) (ii & 0xFF); + } + } + + public static void ramp(byte[] data) + { + for (int ii = 0; ii < data.length; ii++) { + data[ii] = (byte) ii; + } + } + + public static void ramp(short[] data) + { + for (int ii = 0; ii < data.length; ii++) { + data[ii] = (short) ii; + } + } + + public static void ramp(int[] data) + { + for (int ii = 0; ii < data.length; ii++) { + data[ii] = ii; + } + } + + public static void ramp(long[] data) + { + for (int ii = 0; ii < data.length; ii++) { + data[ii] = (long) ii; + } + } + + public static void ramp(float[] data) + { + for (int ii = 0; ii < data.length; ii++) { + data[ii] = (float) ii; + } + } + + public static void ramp(double[] data) + { + for (int ii = 0; ii < data.length; ii++) { + data[ii] = (double) ii; + } + } + + // Cannot instantiate + private ArrayData() + { + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/helpers/BitTestHelper.java b/bulkioInterfaces/libsrc/testing/tests/java/helpers/BitTestHelper.java new file mode 100644 index 000000000..7a9c9c0b0 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/helpers/BitTestHelper.java @@ -0,0 +1,89 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package helpers; + +import bulkio.InDataPort; +import bulkio.InBitPort; +import bulkio.OutDataPort; +import bulkio.OutBitPort; + +import stubs.Stub; +import stubs.InBitPortStub; + +public class BitTestHelper implements TestHelper { + + public static final int BITS_PER_ELEMENT = 1; + + public int bitsPerElement() + { + return BitTestHelper.BITS_PER_ELEMENT; + } + + public InBitPort createInPort(String name) + { + return new InBitPort(name); + } + + public OutBitPort createOutPort(String name) + { + return new OutBitPort(name); + } + + public Stub createStub() + { + return new InBitPortStub(); + } + + public String getName() + { + return "dataBit"; + } + + public BULKIO.dataBitOperations toCorbaType(InDataPort port) + { + return (BULKIO.dataBitOperations) port; + } + + public void pushTestPacket(InDataPort port, + int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + BULKIO.BitSequence data = makeData(length); + toCorbaType(port).pushPacket(data, time, eos, streamID); + } + + public void pushTestPacket(OutDataPort port, + int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + BULKIO.BitSequence data = makeData(length); + port.pushPacket(data, time, eos, streamID); + } + + public int dataLength(BULKIO.BitSequence data) + { + return data.bits; + } + + public BULKIO.BitSequence makeData(int length) + { + int bytes = (length + 7) / 8; + return new BULKIO.BitSequence(new byte[bytes], length); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/helpers/ConnectionListener.java b/bulkioInterfaces/libsrc/testing/tests/java/helpers/ConnectionListener.java new file mode 100644 index 000000000..1685fe241 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/helpers/ConnectionListener.java @@ -0,0 +1,39 @@ +/* + * This file is protected by Copyright. 
Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package helpers; + +import java.util.ArrayList; +import java.util.List; + +public class ConnectionListener implements bulkio.ConnectionEventListener { + public List connected = new ArrayList(); + public List disconnected = new ArrayList(); + + public void connect(String connectionId) + { + connected.add(connectionId); + } + + public void disconnect(String connectionId) + { + disconnected.add(connectionId); + } +}; diff --git a/bulkioInterfaces/libsrc/testing/tests/java/helpers/FileTestHelper.java b/bulkioInterfaces/libsrc/testing/tests/java/helpers/FileTestHelper.java new file mode 100644 index 000000000..610c02b9c --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/helpers/FileTestHelper.java @@ -0,0 +1,86 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package helpers; + +import bulkio.InDataPort; +import bulkio.InFilePort; +import bulkio.OutDataPort; +import bulkio.OutFilePort; + +import stubs.Stub; +import stubs.InFilePortStub; + +public class FileTestHelper implements TestHelper { + + public static final int BITS_PER_ELEMENT = 8; + + public int bitsPerElement() + { + return FileTestHelper.BITS_PER_ELEMENT; + } + + public InFilePort createInPort(String name) + { + return new InFilePort(name); + } + + public OutFilePort createOutPort(String name) + { + return new OutFilePort(name); + } + + public Stub createStub() + { + return new InFilePortStub(); + } + + public String getName() + { + return "dataFile"; + } + + public BULKIO.dataFileOperations toCorbaType(InDataPort port) + { + return (BULKIO.dataFileOperations) port; + } + + public void pushTestPacket(InDataPort port, int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + String data = makeData(length); + toCorbaType(port).pushPacket(data, time, eos, streamID); + } + + public void pushTestPacket(OutDataPort port, int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + String data = makeData(length); + port.pushPacket(data, time, eos, streamID); + } + + public int dataLength(String data) + { + return data.length(); + } 
+ + public String makeData(int length) + { + return new String(new char[length]); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/helpers/NumericTestHelper.java.template b/bulkioInterfaces/libsrc/testing/tests/java/helpers/NumericTestHelper.java.template new file mode 100644 index 000000000..9382cebb3 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/helpers/NumericTestHelper.java.template @@ -0,0 +1,94 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from TestHelper.java.template. + * Do not modify directly. 
+ */ + +package helpers; + +import bulkio.InDataPort; +import bulkio.In@name@Port; +import bulkio.OutDataPort; +import bulkio.Out@name@Port; + +import stubs.Stub; +import stubs.In@name@PortStub; + +public class @name@TestHelper implements TestHelper { + + public static final int BITS_PER_ELEMENT = @size@ * 8; + + public int bitsPerElement() + { + return @name@TestHelper.BITS_PER_ELEMENT; + } + + public In@name@Port createInPort(String name) + { + return new In@name@Port(name); + } + + public Out@name@Port createOutPort(String name) + { + return new Out@name@Port(name); + } + + public Stub<@type@> createStub() + { + return new In@name@PortStub(); + } + + public @name@TestHelper() + { + } + + public String getName() + { + return "@idl@"; + } + + public BULKIO.@idl@Operations toCorbaType(InDataPort port) + { + return (BULKIO.@idl@Operations) port; + } + + public void pushTestPacket(InDataPort port, int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + @type@ data = makeData(length); + toCorbaType(port).pushPacket(data, time, eos, streamID); + } + + public void pushTestPacket(OutDataPort port, int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + @type@ data = makeData(length); + port.pushPacket(data, time, eos, streamID); + } + + public int dataLength(@type@ data) + { + return data.length; + } + + public @type@ makeData(int length) + { + return new @elem@[length]; + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/helpers/SriListener.java b/bulkioInterfaces/libsrc/testing/tests/java/helpers/SriListener.java new file mode 100644 index 000000000..d12cff427 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/helpers/SriListener.java @@ -0,0 +1,42 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package helpers; + +import java.util.ArrayList; +import java.util.List; + +public class SriListener implements bulkio.SriListener { + + public List newSRIs = new ArrayList(); + public List changedSRIs = new ArrayList(); + + public void newSRI(BULKIO.StreamSRI sri) + { + this.newSRIs.add(sri); + } + + public boolean changedSRI(BULKIO.StreamSRI sri) + { + this.changedSRIs.add(sri); + return true; + } +} + diff --git a/bulkioInterfaces/libsrc/testing/tests/java/helpers/TestHelper.java b/bulkioInterfaces/libsrc/testing/tests/java/helpers/TestHelper.java new file mode 100644 index 000000000..1ab416d8d --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/helpers/TestHelper.java @@ -0,0 +1,92 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package helpers; + +import bulkio.InDataPort; +import bulkio.OutDataPort; + +/** + * Test helper to provide a generic interface for input and output port tests. + * + * Each port type pair should have a concrete implementation. + */ +public interface TestHelper { + /** + * Returns the base name of the interface (e.g., "dataFloat"). + */ + public String getName(); + + /** + * Returns the size of a single element (sample, character, etc.) in bits. + */ + public int bitsPerElement(); + + /** + * Creates an input port. + */ + public InDataPort createInPort(String name); + + /** + * Creates an output port. + */ + public OutDataPort createOutPort(String name); + + /** + * Creates a test CORBA stub that can be used for testing output ports. + */ + public stubs.Stub createStub(); + + /** + * Returns the length of packet data. + * + * Abstracts the differences between the packet data types (arrays, string, + * bit sequence). + */ + public int dataLength(A data); + + /** + * Creates uninitialized packet data of the given size. + * + * Abstracts the differences between the packet data types (arrays, string, + * bit sequence). + */ + public A makeData(int length); + + /** + * Returns the CORBA interface for an input port, for use in testing the + * external CORBA API. + */ + public E toCorbaType(InDataPort port); + + /** + * Inject a test packet into an input port. + * + * Abstracts the differences between XML and other ports. 
+ */ + public void pushTestPacket(InDataPort port, int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID); + + /** + * Send a test packet through an output port. + * + * Abstracts the differences between XML and other ports. + */ + public void pushTestPacket(OutDataPort port, int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID); +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/helpers/XMLTestHelper.java b/bulkioInterfaces/libsrc/testing/tests/java/helpers/XMLTestHelper.java new file mode 100644 index 000000000..be329c7b8 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/helpers/XMLTestHelper.java @@ -0,0 +1,86 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package helpers; + +import bulkio.InDataPort; +import bulkio.InXMLPort; +import bulkio.OutDataPort; +import bulkio.OutXMLPort; + +import stubs.Stub; +import stubs.InXMLPortStub; + +public class XMLTestHelper implements TestHelper { + + public static final int BITS_PER_ELEMENT = 8; + + public int bitsPerElement() + { + return XMLTestHelper.BITS_PER_ELEMENT; + } + + public InXMLPort createInPort(String name) + { + return new InXMLPort(name); + } + + public OutXMLPort createOutPort(String name) + { + return new OutXMLPort(name); + } + + public Stub createStub() + { + return new InXMLPortStub(); + } + + public String getName() + { + return "dataXML"; + } + + public BULKIO.dataXMLOperations toCorbaType(InDataPort port) + { + return (BULKIO.dataXMLOperations) port; + } + + public void pushTestPacket(InDataPort port, int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + String data = makeData(length); + toCorbaType(port).pushPacket(data, eos, streamID); + } + + public void pushTestPacket(OutDataPort port, int length, BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + String data = makeData(length); + port.pushPacket(data, null, eos, streamID); + } + + public int dataLength(String data) + { + return data.length(); + } + + public String makeData(int length) + { + return new String(new char[length]); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/impl/ChunkingOutPortTestImpl.java b/bulkioInterfaces/libsrc/testing/tests/java/impl/ChunkingOutPortTestImpl.java new file mode 100644 index 000000000..65ecdc552 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/impl/ChunkingOutPortTestImpl.java @@ -0,0 +1,114 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from ChunkingOutPortTestImpl.java.template. + * Do not modify directly. + */ + +package impl; + +import org.junit.*; +import org.junit.runner.RunWith; + +import helpers.TestHelper; + +public class ChunkingOutPortTestImpl extends OutPortTestImpl { + + public ChunkingOutPortTestImpl(TestHelper helper) + { + super(helper); + } + + protected void _testPushOversizedPacket(BULKIO.PrecisionUTCTime time, boolean eos, String streamID) + { + // Pick a sufficiently large number of samples that the packet has to span + // multiple packets + final int max_bits = 8 * (int) bulkio.Const.MAX_TRANSFER_BYTES; + int count = 2 * max_bits / helper.bitsPerElement(); + helper.pushTestPacket(port, count, time, eos, streamID); + + // More than one packet must have been received, and no packet can + // exceed the max transfer size + Assert.assertTrue(stub.packets.size() > 1); + for (int index = 0; index < stub.packets.size(); ++index) { + int packet_bits = helper.dataLength(stub.packets.get(index).data) * helper.bitsPerElement(); + Assert.assertTrue("Packet too large", packet_bits < max_bits); + } + } + + @Test + public void testPushChunking() + { + // Set up a basic stream + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_chunking"); + sri.xdelta = 
0.125; + port.pushSRI(sri); + + // Test that the push is properly chunked + BULKIO.PrecisionUTCTime time = bulkio.time.utils.create(0.0, 0.0); + _testPushOversizedPacket(time, false, sri.streamID); + + // Check that the synthesized time stamp(s) advanced by the expected time + for (int index = 1; index < stub.packets.size(); index++) { + double expected = helper.dataLength(stub.packets.get(index-1).data) * sri.xdelta; + BULKIO.PrecisionUTCTime prev = stub.packets.get(index-1).T; + BULKIO.PrecisionUTCTime curr = stub.packets.get(index).T; + double elapsed = bulkio.time.utils.difference(curr, prev); + Assert.assertEquals("Incorrect time stamp delta", expected, elapsed, 0.0); + } + } + + @Test + public void testPushChunkingEOS() + { + // Set up a basic stream + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_chunking_eos"); + port.pushSRI(sri); + + // Test that the push is properly chunked + _testPushOversizedPacket(bulkio.time.utils.now(), true, sri.streamID); + + // Check that only the final packet has end-of-stream-set + int packets = stub.packets.size(); + Assert.assertTrue("Last packet does not have EOS set", stub.packets.get(packets-1).EOS); + for (int index = 0; index < (packets - 1); index++) { + Assert.assertFalse("Intermediate packet has EOS set", stub.packets.get(index).EOS); + } + } + + @Test + public void testPushChunkingSubsize() + { + // Set up a 2-dimensional stream + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_chunking_subsize"); + sri.subsize = 1023; + port.pushSRI(sri); + + _testPushOversizedPacket(bulkio.time.utils.now(), false, sri.streamID); + + // Check that each packet is a multiple of the subsize (except the + // last, because the oversized packet was not explicitly quantized to + // be an exact multiple) + for (int index = 0; index < (stub.packets.size() - 1); index++) { + int packet_length = helper.dataLength(stub.packets.get(index).data); + Assert.assertEquals("Packet size is not a multiple of subsize", 0, packet_length % 
sri.subsize); + } + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/impl/InPortTestImpl.java b/bulkioInterfaces/libsrc/testing/tests/java/impl/InPortTestImpl.java new file mode 100644 index 000000000..cfb362710 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/impl/InPortTestImpl.java @@ -0,0 +1,462 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from InPortTestImpl.java.template. + * Do not modify directly. + */ + +package impl; + +import org.junit.*; +import org.junit.runner.RunWith; + +import org.apache.log4j.Level; + +import org.ossie.component.RHLogger; + +import bulkio.InDataPort; +import bulkio.DataTransfer; + +import helpers.TestHelper; + +public class InPortTestImpl { + + /** + * Input port being tested (using generic interface). + */ + protected InDataPort port; + + /** + * External CORBA interface to the tested port. 
+ */ + protected E corbaPort; + + protected TestHelper helper; + + public InPortTestImpl(TestHelper helper) + { + this.helper = helper; + } + + @Before + public void setUp() + { + String name = helper.getName() + "_out"; + port = helper.createInPort(name); + corbaPort = helper.toCorbaType(port); + } + + @Test + public void testLegacyAPI() + { + // Test for methods that are technically still supported, but + // discouraged + port.enableStats(false); + } + + @Test + public void testGetPacket() + { + BULKIO.StreamSRI sri = bulkio.sri.utils.create("test_get_packet"); + corbaPort.pushSRI(sri); + + BULKIO.PrecisionUTCTime ts = bulkio.time.utils.now(); + helper.pushTestPacket(port, 50, ts, false, sri.streamID); + + // Check result of getPacket + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertNotNull(packet.dataBuffer); + Assert.assertEquals(50, helper.dataLength(packet.dataBuffer)); + // For all types except XML, the timestamp should be preserved + if (!(corbaPort instanceof BULKIO.dataXMLOperations)) { + Assert.assertEquals(0, bulkio.time.utils.compare(ts, packet.T)); + } + Assert.assertEquals(false, packet.EOS); + Assert.assertEquals(sri.streamID, packet.streamID); + Assert.assertTrue(bulkio.sri.utils.compare(packet.SRI, sri)); + Assert.assertEquals(true, packet.sriChanged); + Assert.assertEquals(false, packet.inputQueueFlushed); + + // No packet, should return null + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNull(packet); + + // Change mode to complex and push another packet with EOS set + // NB: Have to create a new instance because the input port doesn't + // copy the SRI, it just shares the reference + sri = bulkio.sri.utils.create(sri.streamID); + sri.mode = 1; + corbaPort.pushSRI(sri); + helper.pushTestPacket(port, 100, ts, true, sri.streamID); + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertEquals(100, helper.dataLength(packet.dataBuffer)); + 
Assert.assertEquals(true, packet.EOS); + Assert.assertEquals(true, packet.sriChanged); + Assert.assertEquals(1, packet.SRI.mode); + } + + @Test + public void testActiveSRIs() + { + BULKIO.StreamSRI[] active_sris = corbaPort.activeSRIs(); + Assert.assertEquals(0, active_sris.length); + + // Push a new SRI, and make sure that it is immediately visible and + // correct in activeSRIs + BULKIO.StreamSRI sri_1 = bulkio.sri.utils.create("active_sri_1"); + corbaPort.pushSRI(sri_1); + active_sris = corbaPort.activeSRIs(); + Assert.assertEquals(1, active_sris.length); + Assert.assertTrue(bulkio.sri.utils.compare(active_sris[0], sri_1)); + + // Push a second SRI, and make sure that activeSRIs is up-to-date + BULKIO.StreamSRI sri_2 = bulkio.sri.utils.create("active_sri_2"); + corbaPort.pushSRI(sri_2); + active_sris = corbaPort.activeSRIs(); + Assert.assertEquals(2, active_sris.length); + for (BULKIO.StreamSRI current_sri : active_sris) { + if (current_sri.streamID.equals("active_sri_2")) { + Assert.assertTrue(bulkio.sri.utils.compare(current_sri, sri_2)); + } else if (!current_sri.streamID.equals("active_sri_1")) { + Assert.fail("unexpected SRI '" + current_sri.streamID +"'"); + } + } + + // Push an end-of-stream, retrieve the packet, and verify that the + // stream is no longer in activeSRIs + helper.pushTestPacket(port, 0, bulkio.time.utils.notSet(), true, sri_1.streamID); + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertTrue(packet.EOS); + active_sris = corbaPort.activeSRIs(); + Assert.assertEquals(1, active_sris.length); + Assert.assertEquals(active_sris[0].streamID, sri_2.streamID); + } + + @Test + public void testQueueDepth() + { + // The port had better start with an empty queue + Assert.assertEquals(0, port.getCurrentQueueDepth()); + + // Use a non-blocking stream to allow queue flushing + BULKIO.StreamSRI sri = bulkio.sri.utils.create("queue_depth"); + sri.blocking = false; + corbaPort.pushSRI(sri); + + 
// Push some test packets, the queue should start growing + for (int ii = 0; ii < 4; ii++) { + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + } + Assert.assertEquals(4, port.getCurrentQueueDepth()); + + // Read a packet and make sure the current depth drops + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(3, port.getCurrentQueueDepth()); + + // Reduce the max queue size and push another packet, causing a flush + port.setMaxQueueDepth(3); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + Assert.assertEquals(1, port.getCurrentQueueDepth()); + + // Read the packet and make sure the flush is reported + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertTrue(packet.inputQueueFlushed); + + // One more packet, should not report a flush + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertFalse(packet.inputQueueFlushed); + } + + @Test + public void testState() + { + // Port starts out idle + Assert.assertEquals(BULKIO.PortUsageType.IDLE, corbaPort.state()); + + // Push one test packet, state goes to active + BULKIO.StreamSRI sri = bulkio.sri.utils.create("test_state"); + corbaPort.pushSRI(sri); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + Assert.assertEquals(BULKIO.PortUsageType.ACTIVE, corbaPort.state()); + + // Full queue should report busy + port.setMaxQueueDepth(2); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + Assert.assertEquals(BULKIO.PortUsageType.BUSY, corbaPort.state()); + + // Drop below max, back to active + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(BULKIO.PortUsageType.ACTIVE, corbaPort.state()); + + // 
Empty queue, back to idle + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(BULKIO.PortUsageType.IDLE, corbaPort.state()); + } + + /** + * Tests that SRI changes are reported correctly from getPacket(). + */ + @Test + public void testSriChanged() + { + helpers.SriListener listener = new helpers.SriListener(); + port.setSriListener(listener); + + // Create a default SRI and push it, which should trigger the callback + BULKIO.StreamSRI sri = bulkio.sri.utils.create("sri_changed"); + corbaPort.pushSRI(sri); + Assert.assertEquals(1, listener.newSRIs.size()); + + // SRI should report changed for first packet + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertTrue(packet.sriChanged); + + // No SRI change for second packet + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + Assert.assertEquals(1, listener.newSRIs.size()); + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertFalse(packet.sriChanged); + + // Change the SRI, should call the change listener and flag the packet + sri = bulkio.sri.utils.create("sri_changed"); + sri.mode = 1; + corbaPort.pushSRI(sri); + Assert.assertEquals(1, listener.newSRIs.size()); + Assert.assertEquals(1, listener.changedSRIs.size()); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertTrue(packet.sriChanged); + } + + @Test + public void testSriChangedFlush() + { + BULKIO.StreamSRI sri = bulkio.sri.utils.create("sri_changed_flush"); + corbaPort.pushSRI(sri); + + // Reduce the queue size so we can force a flush + port.setMaxQueueDepth(2); + + // Push a packet, change the SRI (using a new SRI to avoid accidental + // "same object" issues), and 
push two more packets so that the packet + // with the associated SRI change gets flushed + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + sri = bulkio.sri.utils.create(sri.streamID, 2.0); + corbaPort.pushSRI(sri); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + + // Get the last packet and verify that the queue has flushed, and the + // SRI change is still reported + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertTrue(packet.inputQueueFlushed); + Assert.assertTrue(packet.sriChanged); + } + + /** + * Tests that the callback is triggered and SRI changes are reported for an + * unknown stream ID. + */ + @Test + public void testSriChangedInvalidStream() + { + final String stream_id = "invalid_stream"; + + // Turn off the port's logging to avoid dumping a warning to the screen + port.getLogger().setLevel(Level.OFF); + + // Push data without an SRI to check that the sriChanged flag is still + // set and the SRI callback gets called + helpers.SriListener listener = new helpers.SriListener(); + port.setSriListener(listener); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, stream_id); + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertTrue(packet.sriChanged); + Assert.assertEquals(1, listener.newSRIs.size()); + Assert.assertEquals(0, listener.changedSRIs.size()); + Assert.assertEquals(stream_id, listener.newSRIs.get(0).streamID); + + // Push again to the same stream ID; sriChanged should now be false and the + // SRI callback should not get called + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, stream_id); + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertFalse(packet.sriChanged); + Assert.assertEquals(1, 
listener.newSRIs.size()); + Assert.assertEquals(0, listener.changedSRIs.size()); + + // Push to an invalid stream with no logger, ensure that nothing fails + port.setLogger((RHLogger) null); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, "null_logger"); + } + + @Test + public void testStatistics() + { + // Push a packet of data to trigger meaningful statistics + BULKIO.StreamSRI sri = bulkio.sri.utils.create("port_stats"); + corbaPort.pushSRI(sri); + helper.pushTestPacket(port, 1024, bulkio.time.utils.now(), false, sri.streamID); + + // Check that the statistics report the right element size + BULKIO.PortStatistics stats = corbaPort.statistics(); + Assert.assertTrue(stats.elementsPerSecond > 0.0); + int bits_per_element = Math.round(stats.bitsPerSecond / stats.elementsPerSecond); + Assert.assertEquals(helper.bitsPerElement(), bits_per_element); + } + + @Test + public void testDiscardEmptyPacket() + { + // Push an empty, non-EOS packet + BULKIO.StreamSRI sri = bulkio.sri.utils.create("empty_packet"); + corbaPort.pushSRI(sri); + helper.pushTestPacket(port, 0, bulkio.time.utils.now(), false, sri.streamID); + + // No packet should be returned + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNull(packet); + } + + @Test + public void testQueueFlushFlags() + { + // Push 1 packet for the normal data stream + BULKIO.StreamSRI sri_data = bulkio.sri.utils.create("stream_data", 1.0, BULKIO.UNITS_TIME.value, false); + corbaPort.pushSRI(sri_data); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri_data.streamID); + + // Push 1 packet for the EOS test stream + BULKIO.StreamSRI sri_eos = bulkio.sri.utils.create("stream_eos", 1.0, BULKIO.UNITS_TIME.value, false); + corbaPort.pushSRI(sri_eos); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri_eos.streamID); + + // Push 1 packet for the SRI change stream + BULKIO.StreamSRI sri_change = bulkio.sri.utils.create("stream_change", 1.0, 
BULKIO.UNITS_TIME.value, false); + sri_change.mode = 0; + corbaPort.pushSRI(sri_change); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri_change.streamID); + + // Grab the packets to ensure the initial conditions are correct + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(sri_data.streamID, packet.streamID); + + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(sri_eos.streamID, packet.streamID); + + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(sri_change.streamID, packet.streamID); + + // Push an EOS packet for the EOS stream + helper.pushTestPacket(port, 0, bulkio.time.utils.notSet(), true, sri_eos.streamID); + + // Modify the SRI for the SRI change stream and push another packet + // (note that we need to create a new StreamSRI object, otherwise the + // change won't be registered) + sri_change = bulkio.sri.utils.create("stream_change", 1.0, BULKIO.UNITS_TIME.value, false); + sri_change.mode = 1; + corbaPort.pushSRI(sri_change); + helper.pushTestPacket(port, 2, bulkio.time.utils.now(), false, sri_change.streamID); + + // Cause a queue flush by lowering the ceiling and pushing packets + port.setMaxQueueDepth(3); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri_data.streamID); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri_data.streamID); + + // Push another packet for the SRI change stream + helper.pushTestPacket(port, 2, bulkio.time.utils.now(), false, sri_change.streamID); + + // 1st packet should be for EOS stream, with no data or SRI change flag + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(sri_eos.streamID, packet.streamID); + Assert.assertTrue("Input queue flush should be reported", packet.inputQueueFlushed); + Assert.assertTrue("EOS should be 
reported", packet.EOS); + Assert.assertFalse("SRI change should not be reported", packet.sriChanged); + Assert.assertEquals("EOS packet should contain no data", 0, helper.dataLength(packet.dataBuffer)); + + // 2nd packet should be for data stream, with no EOS or SRI change flag + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(sri_data.streamID, packet.streamID); + Assert.assertFalse("Input queue flush should not be reported", packet.inputQueueFlushed); + Assert.assertFalse("EOS should not be reported", packet.EOS); + Assert.assertFalse("SRI change should not be reported", packet.sriChanged); + + // 3rd packet should contain the "lost" SRI change flag + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertEquals(sri_change.streamID, packet.streamID); + Assert.assertFalse("Input queue flush should not be reported", packet.inputQueueFlushed); + Assert.assertFalse("EOS should not be reported", packet.EOS); + Assert.assertTrue("SRI change should be reported", packet.sriChanged); + } + + @Test + public void testQueueSize() + { + BULKIO.StreamSRI sri = bulkio.sri.utils.create("queue_size", 1.0, BULKIO.UNITS_TIME.value, false); + corbaPort.pushSRI(sri); + + // Start with a reasonably small queue depth and check that a flush + // occurs at the expected time + port.setMaxQueueDepth(10); + for (int ii = 0; ii < 10; ++ii) { + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + } + Assert.assertEquals(10, port.getCurrentQueueDepth()); + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + Assert.assertEquals(1, port.getCurrentQueueDepth()); + + DataTransfer packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertTrue("Input queue flush not reported", packet.inputQueueFlushed); + + // Set queue depth to unlimited and push a lot of packets + port.setMaxQueueDepth(-1); + final int 
QUEUE_SIZE = 250; + for (int ii = 0; ii < QUEUE_SIZE; ++ii) { + helper.pushTestPacket(port, 1, bulkio.time.utils.now(), false, sri.streamID); + } + Assert.assertEquals(QUEUE_SIZE, port.getCurrentQueueDepth()); + for (int ii = 0; ii < QUEUE_SIZE; ++ii) { + packet = port.getPacket(bulkio.Const.NON_BLOCKING); + Assert.assertNotNull(packet); + Assert.assertFalse("Input queue flush reported with unlimited queue size", packet.inputQueueFlushed); + } + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/impl/NumericOutPortTestImpl.java b/bulkioInterfaces/libsrc/testing/tests/java/impl/NumericOutPortTestImpl.java new file mode 100644 index 000000000..bf83b4fcc --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/impl/NumericOutPortTestImpl.java @@ -0,0 +1,92 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from NumericOutPortTestImpl.java.template. + * Do not modify directly. 
+ */ + +package impl; + +import java.util.Arrays; + +import org.junit.*; +import org.junit.runner.RunWith; + +import helpers.TestHelper; + +public class NumericOutPortTestImpl extends ChunkingOutPortTestImpl { + + public NumericOutPortTestImpl(TestHelper helper) + { + super(helper); + } + + @Test + public void testPushChunkingComplex() + { + // Set up a complex stream + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_chunking_complex"); + sri.mode = 1; + sri.xdelta = 0.0625; + port.pushSRI(sri); + + // Test that the push is properly chunked + BULKIO.PrecisionUTCTime time = bulkio.time.utils.create(0.0, 0.0); + _testPushOversizedPacket(time, false, sri.streamID); + + // Check that each packet contains an even number of samples (i.e., no + // complex value was split) + for (int index = 0; index < stub.packets.size(); index++) { + int packet_length = helper.dataLength(stub.packets.get(index).data); + Assert.assertEquals("Packet contains a partial complex value", 0, packet_length % 2); + } + + // Check that the synthesized time stamp(s) advanced by the expected + // time + for (int index = 1; index < stub.packets.size(); index++) { + double expected = helper.dataLength(stub.packets.get(index-1).data) * 0.5 * sri.xdelta; + BULKIO.PrecisionUTCTime prev = stub.packets.get(index-1).T; + BULKIO.PrecisionUTCTime curr = stub.packets.get(index).T; + double elapsed = bulkio.time.utils.difference(curr, prev); + Assert.assertEquals("Incorrect time stamp delta", expected, elapsed, 0.0); + } + } + + @Test + public void testPushChunkingSubsizeComplex() + { + // Set up a 2-dimensional complex stream + BULKIO.StreamSRI sri = bulkio.sri.utils.create("push_chunking_subsize_complex"); + sri.subsize = 2048; + sri.mode = 1; + port.pushSRI(sri); + + _testPushOversizedPacket(bulkio.time.utils.now(), false, sri.streamID); + + // Check that each packet is a multiple of the subsize (except the + // last, because the oversized packet was not explicitly quantized to + // be an exact 
multiple) + int frame_size = sri.subsize * 2; + for (int index = 0; index < (stub.packets.size() - 1); index++) { + int packet_length = helper.dataLength(stub.packets.get(index).data); + Assert.assertEquals("Packet size is not a multiple of subsize", 0, packet_length % frame_size); + } + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/impl/OutPortTestImpl.java b/bulkioInterfaces/libsrc/testing/tests/java/impl/OutPortTestImpl.java new file mode 100644 index 000000000..eb7bba662 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/impl/OutPortTestImpl.java @@ -0,0 +1,363 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from OutPortTestImpl.java.template. + * Do not modify directly. 
+ */ + +package impl; + +import org.junit.*; +import org.junit.runner.RunWith; + +import java.util.ArrayList; +import java.util.List; + +import org.omg.PortableServer.Servant; + +import bulkio.OutDataPort; + +import stubs.Stub; +import helpers.TestHelper; + +public class OutPortTestImpl { + + protected OutDataPort port; + protected Stub stub; + + protected TestHelper helper; + + protected List connectionTable = new ArrayList(); + protected List servants = new ArrayList(); + + public OutPortTestImpl(TestHelper helper) + { + this.helper = helper; + } + + @Before + public void setUp() throws org.omg.CORBA.UserException + { + org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init(new String[0], null); + + String name = helper.getName() + "_out"; + port = helper.createOutPort(name); + org.ossie.corba.utils.RootPOA().activate_object(port); + + stub = _createStub(); + + org.omg.CORBA.Object objref = stub._this(); + port.connectPort(objref, "connection_1"); + } + + @After + public void tearDown() + { + _disconnectPorts(); + } + + protected void _disconnectPorts() + { + for (ExtendedCF.UsesConnection connection : port.connections()) { + try { + port.disconnectPort(connection.connectionId); + } catch (Throwable exc) { + // Ignore CORBA exceptions + } + } + } + + protected void _releaseServants() + { + for (Servant servant : servants) { + try { + org.omg.PortableServer.POA poa = servant._default_POA(); + byte[] object_id = poa.servant_to_id(servant); + poa.deactivate_object(object_id); + } catch (Throwable exc) { + // Ignore CORBA exceptions + } + } + } + + protected Stub _createStub() throws org.omg.CORBA.UserException + { + Stub new_stub = helper.createStub(); + org.omg.PortableServer.Servant servant = new_stub.servant(); + org.ossie.corba.utils.RootPOA().activate_object(servant); + servants.add(servant); + return new_stub; + } + + @Test + public void testLegacyAPI() + { + port.enableStats(false); + port.enableStats(true); + + // Pushing an SRI with a null streamID should 
trigger an NPE + BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + sri.streamID = null; + try { + port.pushSRI(sri); + Assert.fail("Did not raise NPE for null streamID"); + } catch (NullPointerException npe) { + // Test passed + } + } + + @Test + public void testConnectionListener() throws org.omg.CORBA.UserException + { + helpers.ConnectionListener listener = new helpers.ConnectionListener(); + port.setConnectionEventListener(listener); + + // Make a new connection + Stub stub2 = _createStub(); + port.connectPort(stub2._this(), "connection_2"); + Assert.assertEquals(1, listener.connected.size()); + Assert.assertEquals(0, listener.disconnected.size()); + Assert.assertEquals("connection_2", listener.connected.get(0)); + + // Disconnect existing connection + port.disconnectPort("connection_1"); + Assert.assertEquals(1, listener.connected.size()); + Assert.assertEquals(1, listener.disconnected.size()); + Assert.assertEquals("connection_1", listener.disconnected.get(0)); + + // Remove listener and reconnect + port.setConnectionEventListener(null); + port.connectPort(stub._this(), "connection_1"); + Assert.assertEquals(1, listener.connected.size()); + Assert.assertEquals(1, listener.disconnected.size()); + } + + @Test + public void testConnections() throws org.omg.CORBA.UserException + { + // Should start with one connection, to the in port stub + ExtendedCF.UsesConnection[] connections = port.connections(); + Assert.assertNotNull(connections); + Assert.assertEquals(1, connections.length); + Assert.assertEquals("connection_1", connections[0].connectionId); + org.omg.CORBA.Object objref = stub._this(); + Assert.assertTrue(connections[0].port._is_equivalent(objref)); + Assert.assertEquals("Port state should be active", BULKIO.PortUsageType.ACTIVE, port.state()); + + // Should throw an invalid port on a nil + try { + port.connectPort(null, "connection_nil"); + Assert.fail("No exception thrown on connection to nil object"); + } catch (CF.PortPackage.InvalidPort exc) { + // 
Test passed + } + + // Normal connection + Stub stub2 = _createStub(); + objref = stub2._this(); + port.connectPort(objref, "connection_2"); + connections = port.connections(); + Assert.assertNotNull(connections); + Assert.assertEquals(2, connections.length); + for (ExtendedCF.UsesConnection connection : connections) { + if (connection.connectionId.equals("connection_2")) { + Assert.assertTrue(connection.port._is_equivalent(objref)); + } else if (!connection.connectionId.equals("connection_1")) { + Assert.fail("Invalid connectionId in connections(): '" + connection.connectionId + "'"); + } + } + + // Cannot reuse connection ID + try { + port.connectPort(objref, "connection_2"); + Assert.fail("No exception thrown on duplicate connectionId"); + } catch (CF.PortPackage.OccupiedPort exc) { + // Test passed + } + + // Disconnect second connection + port.disconnectPort("connection_2"); + connections = port.connections(); + Assert.assertNotNull(connections); + Assert.assertEquals(1, connections.length); + Assert.assertEquals("connection_1", connections[0].connectionId); + + // Bad connection ID on disconnect + try { + port.disconnectPort("connection_bad"); + Assert.fail("No exception thrown on invalid connectionId"); + } catch (RuntimeException exc) { + // Test passed + // NB: For API backwards-compatibility reasons, Java ports do not + // throw CF.PortPackage.InvalidPort in disconnectPort() + } + + // Disconnect the default stub; port should go to idle + port.disconnectPort("connection_1"); + connections = port.connections(); + Assert.assertNotNull(connections); + Assert.assertEquals(0, connections.length); + Assert.assertEquals("Port state should be idle", BULKIO.PortUsageType.IDLE, port.state()); + } + + @Test + public void testStatistics() throws org.omg.CORBA.UserException + { + // Even if there are no active SRIs, there should still be statistics + // for existing connections + BULKIO.UsesPortStatistics[] uses_stats = port.statistics(); + 
Assert.assertNotNull(uses_stats); + Assert.assertEquals(1, uses_stats.length); + Assert.assertEquals("connection_1", uses_stats[0].connectionId); + + // Push a packet of data to trigger meaningful statistics + BULKIO.StreamSRI sri = bulkio.sri.utils.create("port_stats"); + port.pushSRI(sri); + helper.pushTestPacket(port, 1024, bulkio.time.utils.now(), false, sri.streamID); + uses_stats = port.statistics(); + Assert.assertNotNull(uses_stats); + Assert.assertEquals(1, uses_stats.length); + BULKIO.PortStatistics stats = uses_stats[0].statistics; + + // Check that the statistics report the right element size + Assert.assertTrue(stats.elementsPerSecond > 0.0); + int bits_per_element = Math.round(stats.bitsPerSecond / stats.elementsPerSecond); + Assert.assertEquals(helper.bitsPerElement(), bits_per_element); + + // Test that statistics are returned for all connections + Stub stub2 = _createStub(); + port.connectPort(stub2._this(), "connection_2"); + uses_stats = port.statistics(); + Assert.assertEquals("List of statistics does not match number of connections", 2, uses_stats.length); + } + + @Test + public void testActiveSRIs() + { + BULKIO.StreamSRI[] active_sris = port.activeSRIs(); + Assert.assertEquals(0, active_sris.length); + + // Push a new SRI, and make sure that it is immediately visible and + // correct in activeSRIs + BULKIO.StreamSRI sri_1 = bulkio.sri.utils.create("active_sri_1"); + port.pushSRI(sri_1); + active_sris = port.activeSRIs(); + Assert.assertEquals(1, active_sris.length); + Assert.assertTrue(bulkio.sri.utils.compare(active_sris[0], sri_1)); + + // Push a second SRI, and make sure that activeSRIs is up-to-date + BULKIO.StreamSRI sri_2 = bulkio.sri.utils.create("active_sri_2"); + port.pushSRI(sri_2); + active_sris = port.activeSRIs(); + Assert.assertEquals(2, active_sris.length); + for (BULKIO.StreamSRI current_sri : active_sris) { + if (current_sri.streamID.equals("active_sri_2")) { + Assert.assertTrue(bulkio.sri.utils.compare(current_sri, sri_2)); 
+ } else if (!current_sri.streamID.equals("active_sri_1")) { + Assert.fail("unexpected SRI '" + current_sri.streamID +"'"); + } + } + + // Push an end-of-stream, and verify that the stream is no longer in + // activeSRIs + helper.pushTestPacket(port, 0, bulkio.time.utils.notSet(), true, sri_1.streamID); + active_sris = port.activeSRIs(); + Assert.assertEquals(1, active_sris.length); + Assert.assertEquals(active_sris[0].streamID, sri_2.streamID); + } + + protected void _addStreamFilter(String streamId, String connectionId) + { + bulkio.connection_descriptor_struct desc = new bulkio.connection_descriptor_struct(); + desc.stream_id.setValue(streamId); + desc.connection_id.setValue(connectionId); + desc.port_name.setValue(port.getName()); + connectionTable.add(desc); + port.updateConnectionFilter(connectionTable); + } + + @Test + public void testMultiOut() throws org.omg.CORBA.UserException + { + Stub stub2 = _createStub(); + org.omg.CORBA.Object objref = stub2._this(); + port.connectPort(objref, "connection_2"); + + // Set up a connection table that only routes the filtered stream to the + // second stub, and another stream to both connections + final String filter_stream_id = "filter_stream"; + _addStreamFilter(filter_stream_id, "connection_2"); + final String all_stream_id = "all_stream"; + _addStreamFilter(all_stream_id, "connection_1"); + _addStreamFilter(all_stream_id, "connection_2"); + + // Push an SRI for the filtered stream; it should only be received by the + // second stub + BULKIO.StreamSRI sri = bulkio.sri.utils.create(filter_stream_id, 2.5e6); + port.pushSRI(sri); + Assert.assertTrue(stub.H.isEmpty()); + Assert.assertEquals(1, stub2.H.size()); + Assert.assertEquals(filter_stream_id, stub2.H.get(0).streamID); + + // Push a packet for the filtered stream; again, only received by #2 + helper.pushTestPacket(port, 91, bulkio.time.utils.now(), false, filter_stream_id); + Assert.assertTrue(stub.packets.isEmpty()); + Assert.assertEquals(1, stub2.packets.size()); 
+ Assert.assertEquals(91, helper.dataLength(stub2.packets.get(0).data)); + + // Unknown (to the connection filter) stream should get dropped + final String unknown_stream_id = "unknown_stream"; + sri = bulkio.sri.utils.create(unknown_stream_id); + port.pushSRI(sri); + Assert.assertTrue(stub.H.isEmpty()); + Assert.assertEquals(1, stub2.H.size()); + helper.pushTestPacket(port, 50, bulkio.time.utils.now(), false, unknown_stream_id); + Assert.assertTrue(stub.packets.isEmpty()); + Assert.assertEquals(1, stub2.packets.size()); + + // Check SRI routed to both connections... + sri = bulkio.sri.utils.create(all_stream_id, 1e6); + port.pushSRI(sri); + Assert.assertEquals(1, stub.H.size()); + Assert.assertEquals(2, stub2.H.size()); + Assert.assertEquals(all_stream_id, stub.H.get(0).streamID); + Assert.assertEquals(all_stream_id, stub2.H.get(1).streamID); + + // ...and data + helper.pushTestPacket(port, 256, bulkio.time.utils.now(), false, all_stream_id); + Assert.assertEquals(1, stub.packets.size()); + Assert.assertEquals(256, helper.dataLength(stub.packets.get(0).data)); + Assert.assertEquals(2, stub2.packets.size()); + Assert.assertEquals(256, helper.dataLength(stub2.packets.get(1).data)); + + // Reset the connection filter and push data for the filtered stream again, + // which should trigger an SRI push to the first stub + connectionTable.clear(); + port.updateConnectionFilter(connectionTable); + helper.pushTestPacket(port, 9, bulkio.time.utils.now(), false, filter_stream_id); + Assert.assertEquals(2, stub.H.size()); + Assert.assertEquals(filter_stream_id, stub.H.get(1).streamID); + Assert.assertEquals(2, stub.packets.size()); + Assert.assertEquals(9, helper.dataLength(stub.packets.get(1).data)); + Assert.assertEquals(2, stub2.H.size()); + Assert.assertEquals(3, stub2.packets.size()); + Assert.assertEquals(9, helper.dataLength(stub2.packets.get(2).data)); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/Bit.sed 
b/bulkioInterfaces/libsrc/testing/tests/java/sed/Bit.sed new file mode 100644 index 000000000..d69c61735 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/Bit.sed @@ -0,0 +1,3 @@ +s/@name@/Bit/g +s/@type@/BULKIO.BitSequence/g +s/@idl@/dataBit/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/Char.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/Char.sed new file mode 100644 index 000000000..b82b86d8c --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/Char.sed @@ -0,0 +1,5 @@ +s/@name@/Char/g +s/@type@/char[]/g +s/@elem@/char/g +s/@idl@/dataChar/g +s/@size@/1/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/Double.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/Double.sed new file mode 100644 index 000000000..0ea61c1cc --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/Double.sed @@ -0,0 +1,5 @@ +s/@name@/Double/g +s/@type@/double[]/g +s/@elem@/double/g +s/@idl@/dataDouble/g +s/@size@/8/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/File.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/File.sed new file mode 100644 index 000000000..aa51e20e9 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/File.sed @@ -0,0 +1,3 @@ +s/@name@/File/g +s/@type@/String/g +s/@idl@/dataFile/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/Float.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/Float.sed new file mode 100644 index 000000000..a671812a8 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/Float.sed @@ -0,0 +1,5 @@ +s/@name@/Float/g +s/@type@/float[]/g +s/@elem@/float/g +s/@idl@/dataFloat/g +s/@size@/4/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/Long.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/Long.sed new file mode 100644 index 000000000..91bd4ed79 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/Long.sed @@ -0,0 +1,5 @@ +s/@name@/Long/g +s/@type@/int[]/g +s/@elem@/int/g 
+s/@idl@/dataLong/g +s/@size@/4/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/LongLong.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/LongLong.sed new file mode 100644 index 000000000..984520a59 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/LongLong.sed @@ -0,0 +1,5 @@ +s/@name@/LongLong/g +s/@type@/long[]/g +s/@elem@/long/g +s/@idl@/dataLongLong/g +s/@size@/8/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/Octet.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/Octet.sed new file mode 100644 index 000000000..97052027e --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/Octet.sed @@ -0,0 +1,5 @@ +s/@name@/Octet/g +s/@type@/byte[]/g +s/@elem@/byte/g +s/@idl@/dataOctet/g +s/@size@/1/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/Short.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/Short.sed new file mode 100644 index 000000000..877bb55e2 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/Short.sed @@ -0,0 +1,5 @@ +s/@name@/Short/g +s/@type@/short[]/g +s/@elem@/short/g +s/@idl@/dataShort/g +s/@size@/2/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/ULong.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/ULong.sed new file mode 100644 index 000000000..3b5801e86 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/ULong.sed @@ -0,0 +1,5 @@ +s/@name@/ULong/g +s/@type@/int[]/g +s/@elem@/int/g +s/@idl@/dataUlong/g +s/@size@/4/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/ULongLong.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/ULongLong.sed new file mode 100644 index 000000000..1cf71d9a0 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/ULongLong.sed @@ -0,0 +1,5 @@ +s/@name@/ULongLong/g +s/@type@/long[]/g +s/@elem@/long/g +s/@idl@/dataUlongLong/g +s/@size@/8/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/UShort.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/UShort.sed new 
file mode 100644 index 000000000..bf2562230 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/UShort.sed @@ -0,0 +1,5 @@ +s/@name@/UShort/g +s/@type@/short[]/g +s/@elem@/short/g +s/@idl@/dataUshort/g +s/@size@/2/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/sed/XML.sed b/bulkioInterfaces/libsrc/testing/tests/java/sed/XML.sed new file mode 100644 index 000000000..5812ca2f5 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/sed/XML.sed @@ -0,0 +1,3 @@ +s/@name@/XML/g +s/@type@/String/g +s/@idl@/dataXML/g diff --git a/bulkioInterfaces/libsrc/testing/tests/java/stubs/InPortStub.java.template b/bulkioInterfaces/libsrc/testing/tests/java/stubs/InPortStub.java.template new file mode 100644 index 000000000..062f9f9aa --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/stubs/InPortStub.java.template @@ -0,0 +1,40 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/* + * WARNING: This file is generated from InPortStub.java.template. + * Do not modify directly. 
+ */ + +package stubs; + +import org.omg.PortableServer.Servant; + +public class In@name@PortStub extends Stub<@type@> implements BULKIO.@idl@Operations { + + public void pushPacket(@type@ data, BULKIO.PrecisionUTCTime T, boolean EOS, String streamID) + { + packets.add(new Packet<@type@>(data, T, EOS, streamID)); + } + + protected Servant _makeServant() + { + return new BULKIO.@idl@POATie(this); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/stubs/InXMLPortStub.java b/bulkioInterfaces/libsrc/testing/tests/java/stubs/InXMLPortStub.java new file mode 100644 index 000000000..70c7bcd38 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/stubs/InXMLPortStub.java @@ -0,0 +1,37 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package stubs; + +import org.omg.PortableServer.Servant; + +public class InXMLPortStub extends Stub implements BULKIO.dataXMLOperations { + + public void pushPacket(String data, boolean EOS, String streamID) + { + // Add a null time stamp to adapt XML to the base class + packets.add(new Packet(data, null, EOS, streamID)); + } + + protected Servant _makeServant() + { + return new BULKIO.dataXMLPOATie(this); + } +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/stubs/Packet.java b/bulkioInterfaces/libsrc/testing/tests/java/stubs/Packet.java new file mode 100644 index 000000000..969f686c3 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/stubs/Packet.java @@ -0,0 +1,36 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package stubs; + +public class Packet { + public Packet(E data, BULKIO.PrecisionUTCTime T, boolean EOS, String streamID) + { + this.data = data; + this.T = T; + this.EOS = EOS; + this.streamID = streamID; + } + + public E data; + public BULKIO.PrecisionUTCTime T; + public boolean EOS; + public String streamID; +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/stubs/Stub.java b/bulkioInterfaces/libsrc/testing/tests/java/stubs/Stub.java new file mode 100644 index 000000000..f312f6e25 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/stubs/Stub.java @@ -0,0 +1,68 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package stubs; + +import java.util.ArrayList; +import java.util.List; + +import org.omg.PortableServer.Servant; + +public abstract class Stub implements BULKIO.updateSRIOperations, + BULKIO.ProvidesPortStatisticsProviderOperations { + public List H = new ArrayList(); + public List> packets = new ArrayList>(); + + public void pushSRI(BULKIO.StreamSRI H) + { + this.H.add(H); + } + + public BULKIO.StreamSRI[] activeSRIs() + { + return new BULKIO.StreamSRI[0]; + } + + public BULKIO.PortUsageType state() + { + return BULKIO.PortUsageType.IDLE; + } + + public BULKIO.PortStatistics statistics() + { + return null; + } + + public Servant servant() + { + if (_servant == null) { + _servant = _makeServant(); + } + return _servant; + } + + public org.omg.CORBA.Object _this() + { + return servant()._this_object(); + } + + protected abstract Servant _makeServant(); + private Servant _servant = null; +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/utils/ChainFilter.java b/bulkioInterfaces/libsrc/testing/tests/java/utils/ChainFilter.java new file mode 100644 index 000000000..b4d51f5f6 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/utils/ChainFilter.java @@ -0,0 +1,63 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.runner.Description; +import org.junit.runner.manipulation.Filter; + +/** + * JUnit test filter that combines multiple filters, selecting any test that + * satisfies one of the filters. + */ +public class ChainFilter extends Filter { + public void addFilter(Filter filter) + { + filters.add(filter); + } + + @Override + public boolean shouldRun(Description description) + { + for (Filter filter : this.filters) { + if (filter.shouldRun(description)) { + return true; + } + } + return false; + } + + @Override + public String describe() + { + String result = ""; + for (Filter filter : this.filters) { + if (!result.isEmpty()) { + result = result + ", "; + } + result += filter.describe(); + } + return "[" + result + "]"; + } + + private List filters = new ArrayList<>(); +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/utils/TestFilter.java b/bulkioInterfaces/libsrc/testing/tests/java/utils/TestFilter.java new file mode 100644 index 000000000..82d76e9b0 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/utils/TestFilter.java @@ -0,0 +1,69 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +import org.junit.runner.Description; +import org.junit.runner.manipulation.Filter; + +/** + * JUnit test filter that selects a single test, or a suite of tests from a + * single class. + */ +public class TestFilter extends Filter { + public TestFilter(Description description) + { + test = description; + } + + @Override + public boolean shouldRun(Description description) + { + // Suite-to-suite or test-to-test comparison + if (test.equals(description)) { + return true; + } + if (description.isTest()) { + for (Description child : test.getChildren()) { + if (child.equals(description)) { + return true; + } + } + } else { + for (Description child : description.getChildren()) { + if (shouldRun(child)) { + return true; + } + } + } + return false; + } + + @Override + public String describe() + { + if (test.isTest()) { + return "Method " + test.getDisplayName(); + } else { + return "Class " + test.getDisplayName(); + } + } + + private Description test; +} diff --git a/bulkioInterfaces/libsrc/testing/tests/java/utils/TextListener.java b/bulkioInterfaces/libsrc/testing/tests/java/utils/TextListener.java new file mode 100644 index 000000000..5e02c378e --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/java/utils/TextListener.java @@ -0,0 +1,107 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +import java.io.PrintStream; +import java.text.NumberFormat; + +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.notification.Failure; +import org.junit.runner.notification.RunListener; + +/** + * JUnit RunListener to provide similar output to CppUnit and Python: mainly, + * printing the name of each test as it runs with verbose mode enabled. + */ +public class TextListener extends RunListener { + public TextListener(boolean verbose) + { + this.verbose = verbose; + this.stream = System.out; + this.testPassed = false; + } + + public void testRunFinished(Result result) + { + stream.println(); + stream.println("Time: " + elapsedTimeAsString(result.getRunTime())); + + for (Failure failure : result.getFailures()) { + stream.println(failure.getTestHeader()); + stream.println(failure.getTrace()); + } + + if (result.wasSuccessful()) { + stream.println("OK (" + result.getRunCount() + " tests)"); + } else { + stream.println("FAILURES!!!"); + stream.println("Tests run: " + result.getRunCount() + ", Failures: " + result.getFailureCount()); + } + } + + public void testStarted(Description description) + { + if (verbose) { + stream.print(description.getDisplayName() + " : "); + } else { + stream.print("."); + } + testPassed = true; + } + + public void testIgnored(Description description) + { + if (verbose) { + stream.print("IGNORED"); + } else { + stream.print("I"); + } + testPassed = false; + } + + public void testFailure(Failure failure) + { + if (verbose) { + stream.print("FAILED"); + } else { 
+ stream.print("F"); + } + testPassed = false; + } + + public void testFinished(Description description) + { + if (verbose) { + if (testPassed) { + stream.print("OK"); + } + stream.println(); + } + } + + protected String elapsedTimeAsString(long runTime) { + return NumberFormat.getInstance().format((double) runTime / 1000); + } + + private boolean verbose; + private PrintStream stream; + private boolean testPassed; +} diff --git a/bulkioInterfaces/libsrc/testing/tests/python/bulkio b/bulkioInterfaces/libsrc/testing/tests/python/bulkio new file mode 120000 index 000000000..95c886187 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/bulkio @@ -0,0 +1 @@ +../../../python/bulkio \ No newline at end of file diff --git a/bulkioInterfaces/libsrc/testing/tests/python/helpers.py b/bulkioInterfaces/libsrc/testing/tests/python/helpers.py new file mode 100644 index 000000000..177cb9af7 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/helpers.py @@ -0,0 +1,251 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import collections +import struct + +from ossie.utils.log4py import logging +from redhawk.bitbuffer import bitbuffer + +import bulkio +from bulkio.bulkioInterfaces import BULKIO, BULKIO__POA + +class InPortStub(object): + Packet = collections.namedtuple('Packet', 'data T EOS streamID') + + def __init__(self): + self.H = [] + self.packets = [] + self.logger = logging.getLogger(self.__class__.__name__) + + def pushSRI(self, H): + self.logger.debug("pushSRI '%s'", H.streamID) + self.H.append(H) + + def pushPacket(self, data, T, EOS, streamID): + self.logger.debug("pushPacket '%s'", streamID) + self._queuePacket(InPortStub.Packet(data, T, EOS, streamID)) + + def _queuePacket(self, packet): + self.packets.append(packet) + +# XML requires a special override for pushPacket +class InXMLPortStub(BULKIO__POA.dataXML, InPortStub): + Packet = collections.namedtuple('Packet', 'data EOS streamID') + + def pushPacket(self, data, EOS, streamID): + self.logger.debug("pushPacket '%s'", streamID) + self._queuePacket(InXMLPortStub.Packet(data, EOS, streamID)) + +# Generate stubs for all standard BULKIO interfaces +for name in ('dataChar', 'dataOctet', 'dataBit', 'dataShort', 'dataUshort', + 'dataLong', 'dataUlong', 'dataLongLong', 'dataUlongLong', + 'dataFloat', 'dataDouble', 'dataFile'): + class_name = 'In%sPortStub' % (name[4:],) + port_type = getattr(BULKIO__POA, name) + globals()[class_name] = type(class_name, (port_type, InPortStub), {}) + + +class PortTestHelper(object): + def pushPacket(self, port, data, time, eos, streamID): + port.pushPacket(data, time, eos, streamID) + + def createData(self, length): + """ + Creates a data object suitable for a pushPacket call. + """ + data = self.createStreamData(length) + return self.pack(data) + + def createStreamData(self, length): + """ + Creates a data object suitable for writing to an output stream. 
+ """ + return [0]*length + + def pack(self, data): + """ + Converts a Python array into a data object suitable for a pushPacket + call. + """ + return data + + def unpack(self, data): + """ + Converts the payload from a pushPacket into a Python object. + """ + return data + + def createInPort(self): + return self.InPortType(self._getName() + '_in') + + def createOutPort(self): + return self.OutPortType(self._getName() + '_out') + + def createInStub(self): + return self.InStubType() + + def packetLength(self, data): + return len(data) + + def _getName(self): + return self.PortType.__name__ + + def getName(self): + name = self._getName() + # Remap BULKIO interface names, where unsigned types have a lowercase + # letter after the U, to the implementations, where it's uppercase + if name[4] == 'U': + name = name[:5] + name[5].upper() + name[6:] + return name[4:] + +class CharTestHelper(PortTestHelper): + PortType = BULKIO.dataChar + InPortType = bulkio.InCharPort + OutPortType = bulkio.OutCharPort + InStubType = InCharPortStub + + BITS_PER_ELEMENT = 8 + + def pack(self, data): + return struct.pack('%db' % len(data), *data) + + def unpack(self, data): + return list(struct.unpack('%db' % len(data), data)) + +class OctetTestHelper(PortTestHelper): + PortType = BULKIO.dataOctet + InPortType = bulkio.InOctetPort + OutPortType = bulkio.OutOctetPort + InStubType = InOctetPortStub + + BITS_PER_ELEMENT = 8 + + def pack(self, data): + return struct.pack('%dB' % len(data), *data) + + def unpack(self, data): + return list(struct.unpack('%dB' % len(data), data)) + +class ShortTestHelper(PortTestHelper): + PortType = BULKIO.dataShort + InPortType = bulkio.InShortPort + OutPortType = bulkio.OutShortPort + InStubType = InShortPortStub + + BITS_PER_ELEMENT = 16 + +class UShortTestHelper(PortTestHelper): + PortType = BULKIO.dataUshort + InPortType = bulkio.InUShortPort + OutPortType = bulkio.OutUShortPort + InStubType = InUshortPortStub + + BITS_PER_ELEMENT = 16 + +class 
LongTestHelper(PortTestHelper): + PortType = BULKIO.dataLong + InPortType = bulkio.InLongPort + OutPortType = bulkio.OutLongPort + InStubType = InLongPortStub + + BITS_PER_ELEMENT = 32 + +class ULongTestHelper(PortTestHelper): + PortType = BULKIO.dataUlong + InPortType = bulkio.InULongPort + OutPortType = bulkio.OutULongPort + InStubType = InUlongPortStub + + BITS_PER_ELEMENT = 32 + +class LongLongTestHelper(PortTestHelper): + PortType = BULKIO.dataLongLong + InPortType = bulkio.InLongLongPort + OutPortType = bulkio.OutLongLongPort + InStubType = InLongLongPortStub + + BITS_PER_ELEMENT = 64 + +class ULongLongTestHelper(PortTestHelper): + PortType = BULKIO.dataUlongLong + InPortType = bulkio.InULongLongPort + OutPortType = bulkio.OutULongLongPort + InStubType = InUlongLongPortStub + + BITS_PER_ELEMENT = 64 + +class FloatTestHelper(PortTestHelper): + PortType = BULKIO.dataFloat + InPortType = bulkio.InFloatPort + OutPortType = bulkio.OutFloatPort + InStubType = InFloatPortStub + + BITS_PER_ELEMENT = 32 + +class DoubleTestHelper(PortTestHelper): + PortType = BULKIO.dataDouble + InPortType = bulkio.InDoublePort + OutPortType = bulkio.OutDoublePort + InStubType = InDoublePortStub + + BITS_PER_ELEMENT = 64 + +class BitTestHelper(PortTestHelper): + PortType = BULKIO.dataBit + InPortType = bulkio.InBitPort + OutPortType = bulkio.OutBitPort + InStubType = InBitPortStub + + BITS_PER_ELEMENT = 1 + + def createStreamData(self, length): + return bitbuffer(bits=length) + + def packetLength(self, data): + return data.bits + + def unpack(self, data): + return bitbuffer(bytearray(data.data), data.bits) + +class FileTestHelper(PortTestHelper): + PortType = BULKIO.dataFile + InPortType = bulkio.InFilePort + OutPortType = bulkio.OutFilePort + InStubType = InFilePortStub + + BITS_PER_ELEMENT = 8 + + def createStreamData(self, length): + return ' '*length + +class XMLTestHelper(PortTestHelper): + PortType = BULKIO.dataXML + InPortType = bulkio.InXMLPort + OutPortType = bulkio.OutXMLPort 
+ InStubType = InXMLPortStub + + BITS_PER_ELEMENT = 8 + + def createStreamData(self, length): + return ' '*length + + def pushPacket(self, port, data, time, eos, streamID): + port.pushPacket(data, eos, streamID) diff --git a/bulkioInterfaces/libsrc/testing/tests/python/runtests.py b/bulkioInterfaces/libsrc/testing/tests/python/runtests.py new file mode 100755 index 000000000..183987689 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/runtests.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest +import sys +import getopt + +from omniORB import CORBA + +from ossie.utils.log4py import logging +import ossie.utils.log4py.config + +class MultiTestLoader(unittest.TestLoader): + """ + Extend the default TestLoader to support a list of modules, at least for + the purposes of loadTestsFromName and loadTestsFromNames. 
+ """ + def loadTestsFromName(self, name, modules): + if not isinstance(modules, list): + return unittest.TestLoader.loadTestsFromName(self, name, modules) + else: + # Try all modules in order, returning the first one that has + # matching tests + for mod in modules: + try: + return unittest.TestLoader.loadTestsFromName(self, name, mod) + except AttributeError: + pass + raise AttributeError("test '%s' not found" % (name,)) + +class TestProgram(object): + def __init__(self, modules=None): + if modules is None: + self.modules = [sys.modules['__main__']] + else: + self.modules = modules + self.verbosity = 1 + self.testRunner = None + + self.parseArgs(sys.argv[1:]) + self.createTests() + self.runTests() + + def createTests(self): + # Load tests, filtering by name (if arguments were given). + loader = MultiTestLoader() + if self.testNames: + self.test = loader.loadTestsFromNames(self.testNames, self.modules) + else: + self.test = unittest.TestSuite() + for mod in self.modules: + self.test.addTests(loader.loadTestsFromModule(mod)) + + def parseArgs(self, argv): + import getopt + short_options = 'vx' + long_options = ['xunit', 'log-level=', 'log-config=', 'verbose'] + + xunit = False + log_level = None + log_config = None + options, args = getopt.getopt(argv, short_options, long_options) + for opt, value in options: + if opt in ('-v', '--verbose'): + self.verbosity = 2 + elif opt in ('-x', '--xunit'): + xunit = True + elif opt == '--log-level': + # Map from string names to Python levels (this does not appear to + # be built into Python's logging module) + log_level = ossie.utils.log4py.config._LEVEL_TRANS.get(value.upper(), None) + elif opt == '--log-config': + log_config = value + + + # If requested, use XML output (but the module is non-standard, so it + # may not be available). 
+ if xunit: + try: + import xmlrunner + self.testRunner = xmlrunner.XMLTestRunner(verbosity=self.verbosity) + except ImportError: + print >>sys.stderr, 'WARNING: XML test runner module is not installed' + except TypeError: + # Maybe it didn't like the verbosity argument + self.testRunner = xmlrunner.XMLTestRunner() + + # If a log4j configuration file was given, read it. + if log_config: + ossie.utils.log4py.config.fileConfig(log_config) + else: + # Set up a simple configuration that logs on the console. + logging.basicConfig() + + # Apply the log level (can override config file). + if log_level: + logging.getLogger().setLevel(log_level) + + # Any additional arguments are test names + self.testNames = args + + def runTests(self): + # Many tests require CORBA, so initialize up front + orb = CORBA.ORB_init() + root_poa = orb.resolve_initial_references("RootPOA") + manager = root_poa._get_the_POAManager() + manager.activate() + + # Default: use text output. + if not self.testRunner: + self.testRunner = unittest.TextTestRunner(verbosity=self.verbosity) + + result = self.testRunner.run(self.test) + + orb.shutdown(True) + + sys.exit(not result.wasSuccessful()) + +main = TestProgram + +if __name__ == '__main__': + import os + import glob + import imp + + # Find all Python files in the current directory and import them, adding + # their tests to the overall test suite. 
+ modules = [] + for filename in glob.glob('*.py'): + modname, ext = os.path.splitext(filename) + fd = None + try: + fd, fn, desc = imp.find_module(modname) + mod = imp.load_module(modname, fd, fn, desc) + modules.append(mod) + finally: + if fd: + fd.close() + + main(modules) diff --git a/bulkioInterfaces/libsrc/testing/tests/python/test_inports.py b/bulkioInterfaces/libsrc/testing/tests/python/test_inports.py new file mode 100644 index 000000000..e4d4def27 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/test_inports.py @@ -0,0 +1,451 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
class SriListener(object):
    """Callable that records the most recent SRI it was invoked with.

    Registered via setNewSriListener() in tests so they can verify whether
    (and with what) the port's new-SRI callback fired.
    """

    def __init__(self):
        # No SRI has been received yet.
        self.sri = None

    def __call__(self, sri):
        # Invoked by the port as the new-SRI callback; remember the SRI.
        self.sri = sri

    def reset(self):
        # Forget the last SRI so a subsequent callback can be detected.
        self.sri = None
'packet.sriChanged is incorrect') + self.assertEqual(packet.inputQueueFlushed, False, 'packet.inputQueueFlushed is incorrect') + + # Check backwards-compatibility for tuple offsets + self.assertEqual(7, len(packet), 'packet should be a tuple of length 7') + self.assertEqual(packet.dataBuffer, packet[bulkio.InPort.DATA_BUFFER], 'packet[DATA_BUFFER] mismatch') + self.assertEqual(packet.T, packet[bulkio.InPort.TIME_STAMP], 'packet[TIME_STAMP] mismatch') + self.assertEqual(packet.EOS, packet[bulkio.InPort.END_OF_STREAM], 'packet[END_OF_STREAM] mismatch') + self.assertEqual(packet.streamID, packet[bulkio.InPort.STREAM_ID], 'packet[STREAM_ID] mismatch') + self.assertEqual(packet.SRI, packet[bulkio.InPort.SRI], 'packet[SRI] mismatch') + self.assertEqual(packet.sriChanged, packet[bulkio.InPort.SRI_CHG], 'packet[SRI_CHG] mismatch') + self.assertEqual(packet.inputQueueFlushed, packet[bulkio.InPort.QUEUE_FLUSH], 'packet[QUEUE_FLUSH] mismatch') + + # No packet, all fields should be None + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failUnless(packet.dataBuffer is None, 'packet.dataBuffer should be None') + self.failUnless(packet.T is None, 'packet.T should be None') + self.failUnless(packet.EOS is None, 'packet.EOS should be None') + self.failUnless(packet.streamID is None, 'packet.streamID should be None') + self.failUnless(packet.SRI is None, 'packet.SRI should be None') + self.failUnless(packet.sriChanged is None, 'packet.sriChanged should be None') + self.failUnless(packet.inputQueueFlushed is None, 'packet.inputQueueFlushed should be None') + + # Change mode to complex and push another packet with EOS set + sri.mode = 1 + self.port.pushSRI(sri) + self._pushTestPacket(100, ts, True, sri.streamID) + packet = self.port.getPacket() + self.assertEqual(100, len(packet.dataBuffer)) + self.assertEqual(True, packet.EOS, 'packet.EOS should be True') + self.assertEqual(True, packet.sriChanged, 'packet.sriChanged should be True') + self.assertEqual(1, 
packet.SRI.mode, 'packet.SRI should have complex mode') + + def testSriChanged(self): + """ + Tests that SRI changes are reported correctly from getPacket(). + """ + # Create a default SRI and push it + sri = bulkio.sri.create('sri_changed') + self.port.pushSRI(sri) + + # SRI should report changed for first packet + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet.dataBuffer is None) + self.failUnless(packet.sriChanged) + + # No SRI change for second packet + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet.dataBuffer is None) + self.failIf(packet.sriChanged) + + # Reduce the queue size so we can force a flush + self.port.setMaxQueueDepth(2) + + # Push a packet, change the SRI, and push two more packets so that the + # packet with the associated SRI change gets flushed + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + sri.xdelta /= 2.0 + self.port.pushSRI(sri) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + + # Get the last packet and verify that the queue has flushed, and the + # SRI change is still reported + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet.dataBuffer is None) + self.failUnless(packet.inputQueueFlushed) + self.failUnless(packet.sriChanged) + + def testStatistics(self): + """ + Tests that statistics reports the expected information. 
+ """ + # Push a packet of data to trigger meaningful statistics + sri = bulkio.sri.create("port_stats") + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), False, sri.streamID); + + # Check that the statistics report the right element size + stats = self.port._get_statistics() + self.failUnless(stats.elementsPerSecond > 0.0) + bits_per_element = int(round(stats.bitsPerSecond / stats.elementsPerSecond)) + self.assertEqual(self.helper.BITS_PER_ELEMENT, bits_per_element) + + def testStatisticsStreamIDs(self): + """ + Tests that the stream IDs reported in statistics are correct. + """ + # Create a few streams, push an SRI and packet for each, and test that + # the statistics report the correct stream IDs + stream_ids = set('sri%d' % ii for ii in xrange(3)) + for stream in stream_ids: + stream_sri = bulkio.sri.create(stream) + self.port.pushSRI(stream_sri) + self._pushTestPacket(1, bulkio.timestamp.now(), False, stream) + self.assertEqual(stream_ids, set(self.port._get_statistics().streamIDs)) + + # Push an end-of-stream for one of the streams (doesn't matter which), + # and test that the stream ID has been removed from the stats + stream = stream_ids.pop() + self._pushTestPacket(0, bulkio.timestamp.notSet(), True, stream) + self.assertEqual(stream_ids, set(self.port._get_statistics().streamIDs)) + + def testSriChangedInvalidStream(self): + """ + Tests that the callback is triggered and SRI changes are reported for + an unknown stream ID. 
+ """ + stream_id = 'invalid_stream' + + # Turn off the port's logging to avoid dumping a warning to the screen + self.port.getLogger().setLevel(logging.OFF); + + # Push data without an SRI to check that the sriChanged flag is still + # set and the SRI callback gets called + listener = SriListener() + self.port.setNewSriListener(listener) + self._pushTestPacket(100, bulkio.timestamp.now(), False, stream_id) + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(not packet) + self.failUnless(packet.sriChanged) + self.failIf(listener.sri is None) + + # Push again to the same stream ID; sriChanged should now be false and the + # SRI callback should not get called + listener.reset() + self._pushTestPacket(100, bulkio.timestamp.now(), False, stream_id) + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(not packet) + self.failIf(packet.sriChanged) + self.failUnless(listener.sri is None) + + def testGetPacketTimeout(self): + """ + Tests that timeout modes work as expected in getPacket(). 
+ """ + # If non-blocking takes more than a millisecond, something is wrong; + # however, on VMs, timing is a little unreliable, so try to round out + # timing spikes by doing it a few times + results = [] + start = time.time() + iterations = 10 + for ii in xrange(iterations): + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + if packet.dataBuffer: + results.append(packet) + elapsed = time.time() - start + self.assertEqual(0, len(results)) + self.failIf(elapsed > (iterations * 1e-3)) + + # Check that (at least) the timeout period elapses + timeout = 0.125 + start = time.time() + packet = self.port.getPacket(timeout) + elapsed = time.time() - start + self.failUnless(packet.dataBuffer is None) + self.failIf(elapsed < timeout) + + # Try a blocking getPacket() on another thread + results = [] + def get_packet(): + packet = self.port.getPacket(bulkio.const.BLOCKING) + results.append(packet) + t = threading.Thread(target=get_packet) + t.setDaemon(True) + t.start() + + # Wait for a while to ensure that the thread has had a chance to enter + # getPacket(), then check that it has not returned + time.sleep(0.125) + self.assertEqual(len(results), 0) + + # Stop the port and make sure the thread exits + self.port.stopPort() + t.join(timeout=1.0) + self.failIf(t.isAlive()) + self.failUnless(results[0].dataBuffer is None) + + def testBlockingDeadlock(self): + """ + Tests that a blocking pushPacket does not prevent other threads from + interacting with the port. 
+ """ + sri = bulkio.sri.create('blocking-stream') + sri.blocking = True + self.port.pushSRI(sri) + + self.port.setMaxQueueDepth(1) + + # Push enough packets to block in one thread + def push_packet(): + for ii in range(2): + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + push_thread = threading.Thread(target=push_packet) + push_thread.setDaemon(True) + push_thread.start() + + # Get the queue depth in another thread, which used to lead to deadlock + # (well, mostly-dead-lock) + test_thread = threading.Thread(target=self.port.getCurrentQueueDepth) + test_thread.setDaemon(True) + test_thread.start() + + # Wait a while for the queue depth query to complete, which should happen + # quickly. If the thread is still alive, then deadlock must have occurred + test_thread.join(1.0) + deadlock = test_thread.isAlive() + + # Get packets to unblock the push thread, allows all threads to finish + self.port.getPacket() + self.port.getPacket() + self.failIf(deadlock) + + def testDiscardEmptyPacket(self): + # Push an empty, non-EOS packet + sri = bulkio.sri.create("empty_packet") + self.port.pushSRI(sri) + self._pushTestPacket(0, bulkio.timestamp.now(), False, sri.streamID) + + # No packet should be returned + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failUnless(not packet.dataBuffer) + + def testQueueFlushFlags(self): + """ + Tests that EOS and sriChanged flags are preserved on a per-stream basis + when a queue flush occurs. 
+ """ + # Push 1 packet for the normal data stream + sri_data = bulkio.sri.create('stream_data') + sri_data.blocking = False + self.port.pushSRI(sri_data) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri_data.streamID) + + # Push 1 packet for the EOS test stream + sri_eos = bulkio.sri.create('stream_eos') + sri_eos.blocking = False + self.port.pushSRI(sri_eos) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri_eos.streamID) + + # Push 1 packet for the SRI change stream + sri_change = bulkio.sri.create('stream_sri') + sri_change.blocking = False + self.port.pushSRI(sri_change) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri_change.streamID) + + # Grab the packets to ensure the initial conditions are correct + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet is None) + self.assertEqual(sri_data.streamID, packet.streamID) + + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet is None) + self.assertEqual(sri_eos.streamID, packet.streamID) + + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet is None) + self.assertEqual(sri_change.streamID, packet.streamID) + + # Push an EOS packet for the EOS stream + self._pushTestPacket(0, bulkio.timestamp.notSet(), True, sri_eos.streamID) + + # Modify the SRI for the SRI change stream and push another packet + sri_change.mode = 1 + self.port.pushSRI(sri_change) + self._pushTestPacket(2, bulkio.timestamp.now(), False, sri_change.streamID) + + # Cause a queue flush by lowering the ceiling and pushing packets + self.port.setMaxQueueDepth(3) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri_data.streamID) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri_data.streamID) + + # Push another packet for the SRI change stream + self._pushTestPacket(2, bulkio.timestamp.now(), False, sri_change.streamID) + + # 1st packet should be for EOS stream, with no data or SRI change flag + packet = 
self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet is None) + self.assertEqual(sri_eos.streamID, packet.streamID) + self.assertTrue(packet.inputQueueFlushed) + self.assertTrue(packet.EOS) + self.assertFalse(packet.sriChanged) + self.assertEqual(0, len(packet.dataBuffer)) + + # 2nd packet should be for data stream, with no EOS or SRI change flag + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet is None) + self.assertEqual(sri_data.streamID, packet.streamID) + self.assertFalse(packet.inputQueueFlushed) + self.assertFalse(packet.EOS) + self.assertFalse(packet.sriChanged) + + # 3rd packet should contain the "lost" SRI change flag + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet is None) + self.assertEqual(sri_change.streamID, packet.streamID) + self.assertFalse(packet.inputQueueFlushed) + self.assertFalse(packet.EOS) + self.assertTrue(packet.sriChanged) + + def testQueueSize(self): + """ + Tests that the max queue size can be set to a non-default value or + unlimited (negative) + """ + sri = bulkio.sri.create('queue_size') + sri.blocking = False + self.port.pushSRI(sri) + + # Start with a reasonably small queue depth and check that a flush + # occurs at the expected time + self.port.setMaxQueueDepth(10) + for _ in xrange(10): + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + self.assertEqual(10, self.port.getCurrentQueueDepth()) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + self.assertEqual(1, self.port.getCurrentQueueDepth()) + + packet = self.port.getPacket(bulkio.const.NON_BLOCKING) + self.failIf(packet.dataBuffer is None) + self.assertTrue(packet.inputQueueFlushed) + + # Set queue depth to unlimited and push a lot of packets + self.port.setMaxQueueDepth(-1) + QUEUE_SIZE = 250 + for _ in xrange(QUEUE_SIZE): + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + self.assertEqual(QUEUE_SIZE, self.port.getCurrentQueueDepth()) + 
def register_test(name, testbase, **kwargs):
    """Create a concrete TestCase subclass and publish it in this module.

    Combines the shared test mixin `testbase` with unittest.TestCase, with
    any keyword arguments (e.g. the port-type `helper`) installed as class
    attributes, so unittest's discovery picks the class up by `name`.
    """
    bases = (testbase, unittest.TestCase)
    globals()[name] = type(name, bases, dict(kwargs))
+# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest + +from omniORB.any import to_any + +from ossie.cf import CF + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +from helpers import * + +class InStreamTest(object): + def setUp(self): + self.port = self.helper.createInPort() + + def tearDown(self): + pass + + def _pushTestPacket(self, length, time, eos, streamID): + data = self.helper.createData(length) + self.helper.pushPacket(self.port, data, time, eos, streamID) + + def testTimestamp(self): + # Create a new stream and push data with a known timestamp to it + sri = bulkio.sri.create("time_stamp") + self.port.pushSRI(sri) + ts = bulkio.timestamp.create(1520883276.8045831) + self._pushTestPacket(16, ts, False, sri.streamID) + + # Get the input stream and read the packet as a data block; it should + # contain exactly 1 timestamp, equal to the one that was pushed + stream = self.port.getStream("time_stamp") + self.failIf(not stream) + block = stream.read() + self.failIf(not block) + timestamps = block.getTimestamps() + self.assertEqual(1, len(timestamps)) + self.assertEqual(ts, timestamps[0].time) + self.assertEqual(0, timestamps[0].offset) + self.assertEqual(False, timestamps[0].synthetic) + + # getStartTime() should always return the first timestamp + self.assertEqual(ts, block.getStartTime()) + + def 
testGetCurrentStreamEmptyPacket(self): + # Create a new stream and push some data to it + sri = bulkio.sri.create("empty_packet") + self.port.pushSRI(sri) + self._pushTestPacket(0, bulkio.timestamp.now(), False, sri.streamID) + + # getCurrentStream() should not return any stream + stream = self.port.getCurrentStream(bulkio.const.NON_BLOCKING) + self.failUnless(not stream) + + def testGetCurrentStreamEmptyEos(self): + # Create a new stream and push some data to it + sri = bulkio.sri.create("empty_eos") + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), False, sri.streamID) + + # Get the input stream and read the first packet + stream = self.port.getStream("empty_eos") + self.failIf(not stream) + block = stream.read() + self.failIf(not block) + self.assertEqual(1024, len(block.buffer)) + self.failIf(stream.eos()) + + # Push an end-of-stream packet with no data and get the stream again + self._pushTestPacket(0, bulkio.timestamp.notSet(), True, sri.streamID) + stream = self.port.getCurrentStream(bulkio.const.NON_BLOCKING) + self.failIf(not stream) + block = stream.read() + self.failUnless(not block) + + # There should be no current stream, because the failed read should have + # removed it + next_stream = self.port.getCurrentStream(bulkio.const.NON_BLOCKING) + self.failUnless(not next_stream) + + # The original stream should report end-of-stream + self.failUnless(stream.eos()) + + def testGetCurrentStreamDataEos(self): + # Create a new stream and push some data to it + sri = bulkio.sri.create("empty_eos") + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), False, sri.streamID) + + # Get the input stream and read the first packet + stream = self.port.getStream("empty_eos") + self.failIf(not stream) + block = stream.read() + self.failIf(not block) + self.assertEqual(1024, len(block.buffer)) + self.failIf(stream.eos()) + + # Push an end-of-stream packet with data and get the stream again + self._pushTestPacket(1024, 
bulkio.timestamp.now(), True, sri.streamID) + stream = self.port.getCurrentStream(bulkio.const.NON_BLOCKING) + self.failIf(not stream) + block = stream.read() + self.failIf(not block) + self.assertEqual(1024, len(block.buffer)) + + # Try to get the current stream again since the end-of-stream has not been + # checked yet, it should return the existing stream (as with above) + stream = self.port.getCurrentStream(bulkio.const.NON_BLOCKING) + self.failIf(not stream) + block = stream.read() + self.failUnless(not block) + + # There should be no current stream, because the failed read should have + # removed it + next_stream = self.port.getCurrentStream(bulkio.const.NON_BLOCKING) + self.failUnless(not next_stream) + + # The original stream should report end-of-stream + self.failUnless(stream.eos()) + + def testSriChanges(self): + stream_id = 'sri_changes' + + # Create a new stream and push some data to it + sri = bulkio.sri.create(stream_id) + sri.xstart = 0.0 + sri.xdelta = 1.0 + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), False, sri.streamID) + + # Get the input stream and read the first packet + stream = self.port.getStream(stream_id) + self.failIf(not stream) + block = stream.read() + self.failIf(not block) + self.assertEqual(1024, len(block.buffer)) + self.failIf(stream.eos()) + self.assertEqual(sri.xdelta, block.xdelta) + + # Change xdelta (based on sample rate of 2.5Msps) + sri.xdelta = 1.0 / 2.5e6 + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), False, sri.streamID) + block = stream.read() + self.failIf(not block) + self.assertEqual(1024, len(block.buffer)) + self.failIf(stream.eos()) + self.failUnless(block.sriChanged) + flags = bulkio.sri.XDELTA + self.assertEqual(flags, block.sriChangeFlags, 'SRI change flags incorrect') + self.assertEqual(sri.xdelta, block.xdelta, 'SRI xdelta incorrect') + + # Add a keyword, change xdelta back and update xstart + sri.keywords.append(CF.DataType('COL_RF', 
to_any(101.1e6))) + sri.xstart = 100.0 + sri.xdelta = 1.0 + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), False, sri.streamID) + block = stream.read() + self.failIf(not block) + self.assertEqual(1024, len(block.buffer)) + self.failIf(stream.eos()) + self.failUnless(block.sriChanged) + flags = bulkio.sri.XSTART | bulkio.sri.XDELTA | bulkio.sri.KEYWORDS + self.assertEqual(flags, block.sriChangeFlags, 'SRI change flags incorrect') + self.assertEqual(sri.xstart, block.sri.xstart, 'SRI xstart incorrect') + self.assertEqual(sri.xdelta, block.sri.xdelta, 'SRI xdelta incorrect') + + def testDisable(self): + stream_id = "disable" + + # Create a new stream and push some data to it + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + self._pushTestPacket(16, bulkio.timestamp.now(), False, sri.streamID) + + # Get the input stream and read the first packet + stream = self.port.getStream(stream_id) + self.failIf(not stream) + + block = stream.read() + self.failIf(not block) + + # Push a couple more packets + self._pushTestPacket(16, bulkio.timestamp.now(), False, sri.streamID) + self._pushTestPacket(16, bulkio.timestamp.now(), False, sri.streamID) + self.assertEqual(2, self.port.getCurrentQueueDepth()) + + # Disable the stream this should drop the existing packets + stream.disable() + self.failIf(stream.enabled) + self.assertEqual(0, self.port.getCurrentQueueDepth(), 'Queued packets for disabled stream were not discarded') + + # Push a couple more packets they should get dropped + self._pushTestPacket(16, bulkio.timestamp.now(), False, sri.streamID) + self._pushTestPacket(16, bulkio.timestamp.now(), False, sri.streamID) + self.assertEqual(0, self.port.getCurrentQueueDepth(), 'New packets for disabled stream were not dropped') + + # Push an end-of-stream packet + self._pushTestPacket(16, bulkio.timestamp.notSet(), True, sri.streamID) + + # Re-enable the stream and read it should fail with end-of-stream set + stream.enable() + block = 
stream.read() + self.failUnless(block is None) + self.failUnless(stream.eos()) + +class BufferedInStreamTest(InStreamTest): + def testSizedReadEmptyEos(self): + stream_id = "read_empty_eos" + + # Create a new stream and push an end-of-stream packet with no data + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + self._pushTestPacket(0, bulkio.timestamp.notSet(), True, stream_id) + + # Try to read a single element this should return a null block + stream = self.port.getStream(stream_id) + self.failIf(not stream) + block = stream.read(1) + self.failUnless(not block) + self.failUnless(stream.eos()) + + def testSizedTryreadEmptyEos(self): + stream_id = "tryread_empty_eos" + + # Create a new stream and push an end-of-stream packet with no data + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + self._pushTestPacket(0, bulkio.timestamp.notSet(), True, stream_id) + + # Try to read a single element this should return a null block + stream = self.port.getStream(stream_id) + self.failIf(not stream) + block = stream.tryread(1) + self.failUnless(not block) + self.failUnless(stream.eos()) + + def testTryreadPeek(self): + stream_id = "tryread_peek" + + # Create a new stream and push some data to it + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), True, stream_id) + + # Get the input stream and read the first packet + stream = self.port.getStream(stream_id) + self.failIf(not stream) + block = stream.tryread(10000, 0) + self.assertEqual(1024, len(block.buffer)) + block = stream.read(10000) + self.assertEqual(1024, len(block.buffer)) + block = stream.read(10000) + self.failUnless(not block) + self.failUnless(stream.eos()) + + def testReadPeek(self): + stream_id = "read_peek" + + # Create a new stream and push some data to it + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), True, stream_id) + + # Get the input stream and read the 
first packet + stream = self.port.getStream(stream_id) + self.failIf(not stream) + block = stream.read(10000, 0) + self.assertEqual(1024, len(block.buffer)) + block = stream.read(10000) + self.assertEqual(1024, len(block.buffer)) + block = stream.read(10000) + self.failUnless(not block) + self.failUnless(stream.eos()) + + def testReadPartial(self): + stream_id = "read_partial" + + # Create a new stream and push some data to it + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), True, stream_id) + + # Get the input stream and read the first packet + stream = self.port.getStream(stream_id) + self.failIf(not stream) + block = stream.read(10000, 2000) + self.assertEqual(1024, len(block.buffer)) + block = stream.read(10000) + self.failUnless(not block) + + def testReadMultiplePackets(self): + sri = bulkio.sri.create('multiple_packets') + self.port.pushSRI(sri) + for _ in xrange(4): + self._pushTestPacket(100, bulkio.timestamp.now(), False, sri.streamID) + + # Get the input stream + stream = self.port.getStream(sri.streamID) + self.failIf(not stream) + + # Read a block that spans two packets but does not consume the entire + # second packet + block = stream.read(150) + self.failIf(not block) + self.assertEqual(150, len(block.buffer)) + + # Read a block that spans the remainder of the prior packet, an entire + # middle packet, and part of the next + block = stream.read(200) + self.failIf(not block) + self.assertEqual(200, len(block.buffer)) + + def testReadSubPacket(self): + sri = bulkio.sri.create('sub_packet') + self.port.pushSRI(sri) + self._pushTestPacket(400, bulkio.timestamp.now(), False, sri.streamID) + + # Get the input stream and read the packet + stream = self.port.getStream(sri.streamID) + self.failIf(not stream) + + # Read half + block = stream.read(200) + self.failIf(not block) + self.assertEqual(200, len(block.buffer)) + + # Read a smaller packet + block = stream.read(100) + self.failIf(not 
block) + self.assertEqual(100, len(block.buffer)) + + # Read the remainder of the packet + block = stream.tryread() + self.failIf(not block) + self.assertEqual(100, len(block.buffer)) + + def testReadTimestamps(self): + # Create a new stream and push several packets with known timestamps + sri = bulkio.sri.create('read_timestamps') + sri.xdelta = 0.0625; + self.port.pushSRI(sri) + + # Push packets of size 32, which should advance the time by exactly 2 + # seconds each + ts = bulkio.timestamp.create(4000.0, 0.5) + self._pushTestPacket(32, ts, False, sri.streamID) + self._pushTestPacket(32, ts+2.0, False, sri.streamID) + self._pushTestPacket(32, ts+4.0, False, sri.streamID) + self._pushTestPacket(32, ts+6.0, False, sri.streamID) + + # Get the input stream and read several packets as one block, enough to + # bisect the third packet + stream = self.port.getStream(sri.streamID) + self.failIf(not stream) + block = stream.read(70) + self.failIf(not block) + self.assertEqual(70, len(block.buffer)) + + # There should be 3 timestamps, all non-synthetic + timestamps = block.getTimestamps() + self.assertEqual(3, len(timestamps)) + self.assertEqual(ts, timestamps[0].time) + self.assertEqual(0, timestamps[0].offset) + self.assertEqual(False, timestamps[0].synthetic) + self.assertEqual(timestamps[0].time, block.getStartTime(), "getStartTime() doesn't match first timestamp") + self.assertEqual(ts+2.0, timestamps[1].time) + self.assertEqual(32, timestamps[1].offset) + self.assertEqual(False, timestamps[1].synthetic) + self.assertEqual(ts+4.0, timestamps[2].time) + self.assertEqual(64, timestamps[2].offset) + self.assertEqual(False, timestamps[2].synthetic) + + # Read the remaining packet and a half; the first timestamp should be + # synthetic + block = stream.read(58) + self.failIf(not block) + self.assertEqual(58, len(block.buffer)) + timestamps = block.getTimestamps() + self.assertEqual(2, len(timestamps)) + self.assertEqual(True, timestamps[0].synthetic, "First timestamp should 
by synthesized") + self.assertEqual(ts+4.375, timestamps[0].time, "Synthesized timestamp is incorrect") + self.assertEqual(0, timestamps[0].offset) + self.assertEqual(timestamps[0].time, block.getStartTime(), "getStartTime() doesn't match first timestamp") + self.assertEqual(ts+6.0, timestamps[1].time) + self.assertEqual(26, timestamps[1].offset) + self.assertEqual(False, timestamps[1].synthetic) + + def testDisableDiscard(self): + stream_id = "disable_discard" + + # Create a new stream and push a couple of packets to it + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), False, sri.streamID) + self.assertEqual(1, self.port.getCurrentQueueDepth()) + + # Get the input stream and read half of the first packet + stream = self.port.getStream(stream_id) + self.failIf(not stream) + block = stream.read(512) + self.failIf(not block) + + # There should be no packets in the port's queue, but a peek should + # still still return valid data; the data is not consumed so that we + # can be sure that it's discarded later + self.assertEqual(0, self.port.getCurrentQueueDepth()) + block = stream.read(512, 0) + self.failIf(not block) + + # Disable the stream--this should discard + stream.disable() + + # Re-enable the stream and try to read + stream.enable() + block = stream.tryread(512) + self.failUnless(not block) + +class NumericInStreamTest(BufferedInStreamTest): + def testSriModeChanges(self): + stream_id = "sri_mode_changes" + + # Create a new stream and push some data to it + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + self._pushTestPacket(100, bulkio.timestamp.now(), False, sri.streamID) + + # Get the input stream and read the first packet + stream = self.port.getStream(stream_id) + self.failIf(not stream) + block = stream.read() + self.failIf(not block) + + # First block from a new stream reports SRI change + self.failUnless(block.sriChanged) + + # Change the mode to complex and push more data + 
sri.mode = 1 + self.port.pushSRI(sri) + self._pushTestPacket(200, bulkio.timestamp.now(), False, sri.streamID) + block = stream.read() + self.failIf(not block) + self.failUnless(block.complex) + self.failUnless(block.sriChanged) + self.failUnless(block.sriChangeFlags & bulkio.sri.MODE) + + # Next push should report no SRI changes + self._pushTestPacket(200, bulkio.timestamp.now(), False, sri.streamID) + block = stream.read() + self.failIf(not block) + self.failUnless(block.complex) + self.failIf(block.sriChanged) + + # Change back to scalar + sri.mode = 0 + self.port.pushSRI(sri) + self._pushTestPacket(100, bulkio.timestamp.now(), False, sri.streamID) + block = stream.read() + self.failIf(not block) + self.failIf(block.complex) + self.failUnless(block.sriChanged) + self.failUnless(block.sriChangeFlags & bulkio.sri.MODE) + + def testReadComplex(self): + sri = bulkio.sri.create('read_complex') + sri.mode = 1 + self.port.pushSRI(sri) + self._pushTestPacket(128, bulkio.timestamp.now(), False, sri.streamID) + + # Get the input stream and read the packet + stream = self.port.getStream(sri.streamID) + self.failIf(not stream) + block = stream.read() + self.failIf(not block) + + self.failUnless(block.complex) + self.assertEqual(64, block.cxsize) + + def testReadTimestampsComplex(self): + # Create a new complex stream and push several packets with known + # timestamps + sri = bulkio.sri.create('read_timestamps_cx') + sri.mode = 1 + sri.xdelta = 0.125 + self.port.pushSRI(sri) + + # Push 8 complex values (16 real), which should advance the time by + # exactly 1 second each time + ts = bulkio.timestamp.create(100.0, 0.0) + self._pushTestPacket(16, ts, False, sri.streamID) + self._pushTestPacket(16, ts+1.0, False, sri.streamID) + self._pushTestPacket(16, ts+2.0, False, sri.streamID) + self._pushTestPacket(16, ts+3.0, False, sri.streamID) + + # Get the input stream and read several packets as one block, enough to + # bisect the third packet + stream = 
self.port.getStream(sri.streamID) + self.failIf(not stream) + block = stream.read(20) + self.failIf(not block) + self.failUnless(block.complex) + self.assertEqual(20, block.cxsize) + + # There should be 3 timestamps, all non-synthetic, with sample offsets + # based on the complex type + timestamps = block.getTimestamps() + self.assertEqual(3, len(timestamps)) + self.assertEqual(ts, timestamps[0].time) + self.assertEqual(0, timestamps[0].offset) + self.assertEqual(False, timestamps[0].synthetic) + self.assertEqual(timestamps[0].time, block.getStartTime(), "getStartTime() doesn't match first timestamp") + self.assertEqual(ts+1.0, timestamps[1].time) + self.assertEqual(8, timestamps[1].offset) + self.assertEqual(False, timestamps[1].synthetic) + self.assertEqual(ts+2.0, timestamps[2].time) + self.assertEqual(16, timestamps[2].offset) + self.assertEqual(False, timestamps[2].synthetic) + + # Read the remaining packet and a half; the first timestamp should be + # synthetic + block = stream.read(12) + self.failIf(not block) + self.failUnless(block.complex) + self.assertEqual(12, block.cxsize) + timestamps = block.getTimestamps() + self.assertEqual(2, len(timestamps)) + self.assertEqual(True, timestamps[0].synthetic, "First timestamp should by synthesized") + self.assertEqual(ts+2.5, timestamps[0].time, "Synthesized timestamp is incorrect") + self.assertEqual(0, timestamps[0].offset) + self.assertEqual(timestamps[0].time, block.getStartTime(), "getStartTime() doesn't match first timestamp") + self.assertEqual(ts+3.0, timestamps[1].time) + self.assertEqual(4, timestamps[1].offset) + self.assertEqual(False, timestamps[1].synthetic) + + +class InXMLStreamTest(InStreamTest, unittest.TestCase): + helper = XMLTestHelper() + + def testTimestamp(self): + # Override for XML ports, which do not pass timestamp information + # Create a new stream and push some data to it + sri = bulkio.sri.create("time_stamp") + self.port.pushSRI(sri) + self._pushTestPacket(16, None, False, 
sri.streamID) + + # Get the input stream and read the packet as a data block; it should + # not contain any timestamps + stream = self.port.getStream("time_stamp") + self.failIf(not stream) + block = stream.read() + self.failIf(not block) + timestamps = block.getTimestamps() + self.assertEqual(0, len(timestamps)) + + # Calling getStartTime() will throw an IndexError + +def register_test(name, testbase, **kwargs): + globals()[name] = type(name, (testbase, unittest.TestCase), kwargs) + +register_test('InBitStreamTest', BufferedInStreamTest, helper=BitTestHelper()) +register_test('InFileStreamTest', InStreamTest, helper=FileTestHelper()) +register_test('InCharStreamTest', NumericInStreamTest, helper=CharTestHelper()) +register_test('InOctetStreamTest', NumericInStreamTest, helper=OctetTestHelper()) +register_test('InShortStreamTest', NumericInStreamTest, helper=ShortTestHelper()) +register_test('InUShortStreamTest', NumericInStreamTest, helper=UShortTestHelper()) +register_test('InLongStreamTest', NumericInStreamTest, helper=LongTestHelper()) +register_test('InULongStreamTest', NumericInStreamTest, helper=ULongTestHelper()) +register_test('InLongLongStreamTest', NumericInStreamTest, helper=LongLongTestHelper()) +register_test('InULongLongStreamTest', NumericInStreamTest, helper=ULongLongTestHelper()) +register_test('InFloatStreamTest', NumericInStreamTest, helper=FloatTestHelper()) +register_test('InDoubleStreamTest', NumericInStreamTest, helper=DoubleTestHelper()) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/bulkioInterfaces/libsrc/testing/tests/python/test_outports.py b/bulkioInterfaces/libsrc/testing/tests/python/test_outports.py new file mode 100644 index 000000000..d607c485f --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/test_outports.py @@ -0,0 +1,351 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. 
+# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest + +from ossie.cf import CF + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +from helpers import * + +class OutPortTest(object): + def setUp(self): + self.__servants = [] + self.port = self.helper.createOutPort() + self.stub = self._createStub() + self.connectionTable = [] + + objref = self.stub._this() + self.port.connectPort(objref, 'test_connection') + + def tearDown(self): + try: + self._disconnectPorts() + except: + # Ignore disconnection errors + pass + + self._releaseServants() + + def _createStub(self): + stub = self.helper.createInStub() + self.__servants.append(stub) + return stub + + def _disconnectPorts(self): + for connection in self.port._get_connections(): + self.port.disconnectPort(connection.connectionId) + + def _releaseServants(self): + for servant in self.__servants: + try: + poa = servant._default_POA() + object_id = poa.servant_to_id(servant) + poa.deactivate_object(object_id) + except: + # Ignore CORBA exceptions + pass + self.__servants = [] + + def testLegacyAPI(self): + # Test to ensure old API methods and some inadvertently public members + # still behave as expected (within reason) + + # sriDict member + self.assertEqual(0, len(self.port.sriDict)) + + sri = 
bulkio.sri.create('test_legacy_api') + self.port.pushSRI(sri) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri.streamID) + self.assertEqual(1, len(self.port.sriDict)) + + sri2 = bulkio.sri.create('test_legacy_api_2') + self.port.pushSRI(sri2) + self._pushTestPacket(1, bulkio.timestamp.now(), False, sri2.streamID) + self.assertEqual(2, len(self.port.sriDict)) + + self._pushTestPacket(0, bulkio.timestamp.notSet(), True, sri.streamID) + self.assertEqual(1, len(self.port.sriDict)) + + # enableStats + self.port.enableStats(False) + + def testConnections(self): + # Should start with one connection, to the in port stub + connections = self.port._get_connections() + self.assertEqual(1, len(connections)) + self.assertEqual(BULKIO.ACTIVE, self.port._get_state()) + + # Should throw an invalid port on a nil + self.assertRaises(CF.Port.InvalidPort, self.port.connectPort, None, 'connection_nil') + + # Normal connection + stub2 = self._createStub() + objref = stub2._this() + self.port.connectPort(objref, 'connection_2') + connections = self.port._get_connections() + self.assertEqual(2, len(connections)) + + # Cannot reuse connection ID + self.assertRaises(CF.Port.OccupiedPort, self.port.connectPort, objref, 'connection_2') + + # Disconnect second connection + self.port.disconnectPort('connection_2') + connections = self.port._get_connections() + self.assertEqual(1, len(connections)) + + # Bad connection ID on disconnect + self.assertRaises(CF.Port.InvalidPort, self.port.disconnectPort, 'connection_bad') + + # Disconnect the default stub; port should go to idle + self.port.disconnectPort('test_connection') + connections = self.port._get_connections() + self.assertEqual(0, len(connections)) + self.assertEqual(BULKIO.IDLE, self.port._get_state()) + + def testStatistics(self): + # Even if there are no active SRIs, there should still be statistics + # for existing connections + uses_stats = self.port._get_statistics() + self.assertEqual(1, len(uses_stats)) + 
self.assertEqual('test_connection', uses_stats[0].connectionId) + + # Push a packet of data to trigger meaningful statistics + sri = bulkio.sri.create("port_stats") + self.port.pushSRI(sri) + self._pushTestPacket(1024, bulkio.timestamp.now(), False, sri.streamID); + uses_stats = self.port._get_statistics() + self.assertEqual(1, len(uses_stats)) + + # Check that the statistics report the right element size + stats = uses_stats[0].statistics + self.failUnless(stats.elementsPerSecond > 0.0) + bits_per_element = int(round(stats.bitsPerSecond / stats.elementsPerSecond)) + self.assertEqual(self.helper.BITS_PER_ELEMENT, bits_per_element) + + # Test that statistics are returned for all connections + stub2 = self._createStub() + self.port.connectPort(stub2._this(), 'connection_2') + uses_stats = self.port._get_statistics() + self.assertEqual(2, len(uses_stats)) + + def testMultiOut(self): + stub2 = self._createStub() + self.port.connectPort(stub2._this(), 'connection_2') + + # Set up a connection table that only routes the filtered stream to the + # second stub, and another stream to both connections + filter_stream_id = 'filter_stream' + self._addStreamFilter(filter_stream_id, 'connection_2') + all_stream_id = 'all_stream' + self._addStreamFilter(all_stream_id, 'test_connection') + self._addStreamFilter(all_stream_id, 'connection_2') + + # Push an SRI for the filtered stream; it should only be received by + # the second stub + sri = bulkio.sri.create(filter_stream_id, 2.5e6) + self.port.pushSRI(sri) + self.assertEqual(0, len(self.stub.H)) + self.assertEqual(1, len(stub2.H)) + self.assertEqual(filter_stream_id, stub2.H[-1].streamID) + + # Push a packet for the filtered stream; again, only received by #2 + self._pushTestPacket(91, bulkio.timestamp.now(), False, filter_stream_id) + self.assertEqual(0, len(self.stub.packets)) + self.assertEqual(1, len(stub2.packets)) + self.assertEqual(91, self.helper.packetLength(stub2.packets[-1].data)) + + # Unknown (to the connection 
filter) stream should get dropped + unknown_stream_id = 'unknown_stream' + sri = bulkio.sri.create(unknown_stream_id) + self.port.pushSRI(sri) + self.assertEqual(0, len(self.stub.H)) + self.assertEqual(1, len(stub2.H)) + self._pushTestPacket(50, bulkio.timestamp.now(), False, unknown_stream_id) + self.assertEqual(0, len(self.stub.packets)) + self.assertEqual(1, len(stub2.packets)) + + # Check SRI routed to both connections... + sri = bulkio.sri.create(all_stream_id, 1e6) + self.port.pushSRI(sri) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(2, len(stub2.H)) + self.assertEqual(all_stream_id, self.stub.H[-1].streamID) + self.assertEqual(all_stream_id, stub2.H[-1].streamID) + + # ...and data + self._pushTestPacket(256, bulkio.timestamp.now(), False, all_stream_id) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(256, self.helper.packetLength(self.stub.packets[-1].data)) + self.assertEqual(2, len(stub2.packets)) + self.assertEqual(256, self.helper.packetLength(stub2.packets[-1].data)) + + # Reset the connection filter and push data for the filtered stream again, + # which should trigger an SRI push to the first stub + self.connectionTable = [] + self.port.updateConnectionFilter(self.connectionTable) + self._pushTestPacket(9, bulkio.timestamp.now(), False, filter_stream_id) + self.assertEqual(2, len(self.stub.H)) + self.assertEqual(filter_stream_id, self.stub.H[-1].streamID) + self.assertEqual(2, len(self.stub.packets)) + self.assertEqual(9, self.helper.packetLength(self.stub.packets[-1].data)) + self.assertEqual(3, len(stub2.packets)) + self.assertEqual(9, self.helper.packetLength(stub2.packets[-1].data)) + + def _addStreamFilter(self, streamId, connectionId): + desc = bulkio.connection_descriptor_struct(connectionId, streamId, self.port.name) + self.connectionTable.append(desc) + self.port.updateConnectionFilter(self.connectionTable) + + def _pushTestPacket(self, length, time, eos, streamID): + data = self.helper.createData(length) + 
self.helper.pushPacket(self.port, data, time, eos, streamID) + +class ChunkingOutPortTest(OutPortTest): + def testPushChunking(self): + # Set up a basic stream + stream_id = 'push_chunking' + sri = bulkio.sri.create(stream_id) + sri.xdelta = 0.125 + self.port.pushSRI(sri) + + # Test that the push is properly chunked + time = bulkio.timestamp.create(0.0, 0.0) + self._testPushOversizedPacket(time, False, stream_id) + + # Check that the synthesized time stamp(s) advanced by the expected + # time + last = self.stub.packets[0] + for packet in self.stub.packets[1:]: + expected = self.helper.packetLength(last.data) * sri.xdelta + elapsed = packet.T - last.T + self.assertEqual(expected, elapsed, 'Incorrect time stamp delta') + last = packet + + def testPushChunkingEOS(self): + # Set up a basic stream + stream_id = 'push_chunking_eos' + sri = bulkio.sri.create(stream_id) + self.port.pushSRI(sri) + + # Send a packet with end-of-stream set + self._testPushOversizedPacket(bulkio.timestamp.notSet(), True, stream_id) + + # Check that only the final packet has end-of-stream set + self.failUnless(self.stub.packets[-1].EOS, 'Last packet does not have EOS set') + for packet in self.stub.packets[:-1]: + self.failIf(packet.EOS, 'Intermediate packet has EOS set') + + def testPushChunkingSubsize(self): + # Set up a 2-dimensional stream + stream_id = 'push_chunking_subsize' + sri = bulkio.sri.create(stream_id) + sri.subsize = 1023 + self.port.pushSRI(sri) + + self._testPushOversizedPacket(bulkio.timestamp.notSet(), False, stream_id) + + # Check that each packet is a multiple of the subsize (except the last, + # because the oversized packet was not explicitly quantized to be an + # exact multiple) + for packet in self.stub.packets[:-1]: + self.assertEqual(0, self.helper.packetLength(packet.data) % 1023, 'Packet size is not a multiple of subsize') + + def _testPushOversizedPacket(self, time, eos, streamID): + # Pick a sufficiently large number of samples that the packet has to + # span 
multiple packets + max_bits = 8 * bulkio.const.MAX_TRANSFER_BYTES + bits_per_element = self.helper.BITS_PER_ELEMENT + count = 2 * max_bits / bits_per_element + self._pushTestPacket(count, time, eos, streamID) + + # More than one packet must have been received, and no packet can + # exceed the max transfer size + self.failUnless(len(self.stub.packets) > 1) + for packet in self.stub.packets: + packet_bits = self.helper.packetLength(packet.data) * bits_per_element + self.failUnless(packet_bits < max_bits, 'Packet too large') + +class NumericOutPortTest(ChunkingOutPortTest): + def testPushChunkingComplex(self): + # Set up a complex stream + stream_id = 'push_chunking_complex' + sri = bulkio.sri.create(stream_id) + sri.mode = 1 + sri.xdelta = 0.0625 + self.port.pushSRI(sri) + + # Test that the push is properly chunked + time = bulkio.timestamp.create(0.0, 0.0) + self._testPushOversizedPacket(time, False, stream_id) + + # Check that each packet contains an even number of samples (i.e., no + # complex value was split) + for packet in self.stub.packets: + self.assertEqual(0, self.helper.packetLength(packet.data) % 2, 'Packet contains a partial complex value') + + # Check that the synthesized time stamp(s) advanced by the expected time + last = self.stub.packets[0] + for packet in self.stub.packets[1:]: + expected = self.helper.packetLength(last.data) * 0.5 * sri.xdelta + elapsed = packet.T - last.T + self.assertEqual(expected, elapsed, 'Incorrect time stamp delta') + last = packet + + def testPushChunkingSubsizeComplex(self): + # Set up a 2-dimensional complex stream + stream_id = 'push_chunking_subsize_complex' + sri = bulkio.sri.create(stream_id) + sri.subsize = 2048 + sri.mode = 1 + self.port.pushSRI(sri) + + self._testPushOversizedPacket(bulkio.timestamp.notSet(), False, stream_id) + + # Check that each packet is a multiple of the subsize (except the last, + # because the oversized packet was not explicitly quantized to be an exact + # multiple) + for packet in 
self.stub.packets[:-1]: + self.assertEqual(0, self.helper.packetLength(packet.data) % 4096, 'Packet size is not a multiple of subsize') + + +def register_test(name, testbase, **kwargs): + globals()[name] = type(name, (testbase, unittest.TestCase), kwargs) + +register_test('OutBitPortTest', ChunkingOutPortTest, helper=BitTestHelper()) +register_test('OutXMLPortTest', OutPortTest, helper=XMLTestHelper()) +register_test('OutFilePortTest', OutPortTest, helper=FileTestHelper()) +register_test('OutCharPortTest', NumericOutPortTest, helper=CharTestHelper()) +register_test('OutOctetPortTest', NumericOutPortTest, helper=OctetTestHelper()) +register_test('OutShortPortTest', NumericOutPortTest, helper=ShortTestHelper()) +register_test('OutUShortPortTest', NumericOutPortTest, helper=UShortTestHelper()) +register_test('OutLongPortTest', NumericOutPortTest, helper=LongTestHelper()) +register_test('OutULongPortTest', NumericOutPortTest, helper=ULongTestHelper()) +register_test('OutLongLongPortTest', NumericOutPortTest, helper=LongLongTestHelper()) +register_test('OutULongLongPortTest', NumericOutPortTest, helper=ULongLongTestHelper()) +register_test('OutFloatPortTest', NumericOutPortTest, helper=FloatTestHelper()) +register_test('OutDoublePortTest', NumericOutPortTest, helper=DoubleTestHelper()) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/bulkioInterfaces/libsrc/testing/tests/python/test_outstreams.py b/bulkioInterfaces/libsrc/testing/tests/python/test_outstreams.py new file mode 100644 index 000000000..8156a2a0f --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/test_outstreams.py @@ -0,0 +1,521 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. 
+# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest +import math + +from omniORB import CORBA + +from ossie import properties + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +from helpers import * + +class OutStreamTest(object): + def setUp(self): + self.port = self.helper.createOutPort() + self.stub = self._createStub() + + objref = self.stub._this() + self.port.connectPort(objref, 'test_connection') + + def tearDown(self): + try: + self._disconnectPorts() + except: + # Ignore disconnection errors + pass + + self._releaseServants() + + def _createStub(self): + return self.helper.createInStub() + + def _disconnectPorts(self): + for connection in self.port._get_connections(): + self.port.disconnectPort(connection.connectionId) + + def _releaseServants(self): + poa = self.stub._default_POA() + object_id = poa.servant_to_id(self.stub) + poa.deactivate_object(object_id) + + def testBasicWrite(self): + stream = self.port.createStream('test_basic_write') + self.failUnless(not self.stub.packets) + + time = bulkio.timestamp.now() + self._writeSinglePacket(stream, 256, time) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(256, self.helper.packetLength(self.stub.packets[0].data)) + self.failIf(self.stub.packets[0].EOS) + self.assertEqual(stream.streamID, self.stub.packets[0].streamID) + + # Check 
timestamp, but only if it's supported (i.e., not dataXML) + if hasattr(self.stub.Packet, 'T'): + self.assertEqual(time, self.stub.packets[0].T, msg='Received incorrect time stamp') + + def testSriFields(self): + sri = bulkio.sri.create("test_sri") + sri.xstart = -2.5 + sri.xdelta = 0.125 + sri.xunits = BULKIO.UNITS_FREQUENCY + sri.subsize = 1024 + sri.ystart = 2.5 + sri.ydelta = 1.0 + sri.yunits = BULKIO.UNITS_TIME + sri.mode = 1 + sri.blocking = 1 + sri.keywords = properties.props_from_dict({'string':'value', 'number':100}) + + # Create a stream from the SRI and compare accessors + stream = self.port.createStream(sri) + self.assertEqual(stream.streamID, sri.streamID) + self.assertEqual(stream.xstart, sri.xstart) + self.assertEqual(stream.xdelta, sri.xdelta) + self.assertEqual(stream.xunits, sri.xunits) + self.assertEqual(stream.subsize, sri.subsize) + self.assertEqual(stream.ystart, sri.ystart) + self.assertEqual(stream.ydelta, sri.ydelta) + self.assertEqual(stream.yunits, sri.yunits) + self.failUnless(stream.complex) + self.failUnless(stream.blocking) + self.assertEqual(len(sri.keywords), len(stream.keywords)) + self.assertEqual('value', stream.getKeyword('string')) + self.assertEqual(100, stream.getKeyword('number')) + + def testSriUpdate(self): + # Create initial stream; all changes should be queued up for the first + # write + stream = self.port.createStream("test_sri_update") + xdelta = 1.0 / 1.25e6 + stream.xdelta = xdelta + stream.blocking = True + self.failUnless(not self.stub.H) + + # Write data to trigger initial SRI update + self._writeSinglePacket(stream, 10) + self.assertEqual(len(self.stub.H), 1) + self.failUnless(self.stub.H[-1].blocking) + self.assertEqual(xdelta, self.stub.H[-1].xdelta) + + # Update xdelta; no SRI update should occur + new_xdelta = 1.0/2.5e6 + stream.xdelta = new_xdelta + self.assertEqual(len(self.stub.H), 1) + self.assertEqual(xdelta, self.stub.H[-1].xdelta) + + # Write data to trigger SRI update + self._writeSinglePacket(stream, 
25) + self.assertEqual(len(self.stub.H), 2) + self.assertEqual(new_xdelta, self.stub.H[-1].xdelta) + + # Change blocking flag, then trigger an SRI update + stream.blocking = False + self.assertEqual(len(self.stub.H), 2) + self.failUnless(self.stub.H[-1].blocking) + self._writeSinglePacket(stream, 25) + self.assertEqual(len(self.stub.H), 3) + self.failIf(self.stub.H[-1].blocking) + + # Change multiple fields, but only one SRI update should occur (after the + # next write) + stream.complex = True + stream.subsize = 16 + stream.xstart = -math.pi + stream.xdelta = 2.0 * math.pi / 1024.0 + stream.xunits = BULKIO.UNITS_FREQUENCY + stream.ydelta = 1024.0 / 1.25e6 + stream.yunits = BULKIO.UNITS_TIME + self.assertEqual(len(self.stub.H), 3) + + # Trigger SRI update and verify that it matches + self._writeSinglePacket(stream, 1024) + self.assertEqual(len(self.stub.H), 4) + self.failUnless(bulkio.sri.compare(stream.sri, self.stub.H[-1])) + + def testSriReplace(self): + # Create initial stream + stream = self.port.createStream("test_sri_replace") + + # Create a new SRI with a different stream ID + new_sri = bulkio.sri.create("modified_sri") + new_sri.mode = 1 + new_sri.blocking = 1 + new_sri.subsize = 16 + new_sri.xstart = -math.pi + new_sri.xdelta = 2.0 * math.pi / 1024.0 + new_sri.xunits = BULKIO.UNITS_FREQUENCY + new_sri.ydelta = 1024.0 / 1.25e6 + new_sri.yunits = BULKIO.UNITS_TIME + + # Replace the SRI and ensure that everything *except* the streamID has + # changed + stream.sri = new_sri + self.assertEqual(stream.streamID, "test_sri_replace") + self.assertEqual(stream.xstart, new_sri.xstart) + self.assertEqual(stream.xdelta, new_sri.xdelta) + self.assertEqual(stream.xunits, new_sri.xunits) + self.assertEqual(stream.subsize, new_sri.subsize) + self.assertEqual(stream.ystart, new_sri.ystart) + self.assertEqual(stream.ydelta, new_sri.ydelta) + self.assertEqual(stream.yunits, new_sri.yunits) + self.failUnless(stream.complex) + self.failUnless(stream.blocking) + + def 
testKeywords(self): + stream = self.port.createStream("test_keywords") + self._writeSinglePacket(stream, 1) + self.assertEqual(1, len(self.stub.H)) + + # Set/get keywords + stream.setKeyword('integer', 250) + stream.setKeyword('string', "value") + stream.setKeyword('double', 101.1e6) + stream.setKeyword('boolean', False) + self.assertEqual(250, stream.getKeyword('integer')) + self.assertEqual('value', stream.getKeyword('string')) + self.assertEqual(101.1e6, stream.getKeyword('double')) + self.assertEqual(False, stream.getKeyword('boolean')) + + # Set with a specific type + stream.setKeyword('float', -1.25, 'float') + self.assertEqual(-1.25, stream.getKeyword('float')) + any_value = stream.keywords[-1].value + self.assertEqual(CORBA.TC_float, any_value.typecode()) + + # Erase and check for presence of keywords + stream.eraseKeyword('string') + self.failUnless(stream.hasKeyword('integer')) + self.failIf(stream.hasKeyword('string')) + self.failUnless(stream.hasKeyword('double')) + self.failUnless(stream.hasKeyword('boolean')) + + # Write a packet to trigger an SRI update + self.assertEqual(1, len(self.stub.H)) + self._writeSinglePacket(stream, 1) + self.assertEqual(2, len(self.stub.H)) + + keywords = properties.props_to_dict(self.stub.H[-1].keywords) + self.assertEqual(len(stream.keywords), len(keywords)) + for key, value in keywords.iteritems(): + self.assertEqual(stream.getKeyword(key), value) + + # Replace keywords with a new set + stream.keywords = properties.props_from_dict({'COL_RF': 100.0e6, 'CHAN_RF': 101.1e6}) + self.assertEqual(2, len(stream.keywords)) + self.assertEqual(100.0e6, stream.getKeyword('COL_RF')) + self.assertEqual(101.1e6, stream.getKeyword('CHAN_RF')) + + # Trigger another SRI update + self.assertEqual(2, len(self.stub.H)) + self._writeSinglePacket(stream, 1) + self.assertEqual(3, len(self.stub.H)) + + keywords = properties.props_to_dict(self.stub.H[-1].keywords) + self.assertEqual(len(stream.keywords), len(keywords)) + for key, value in 
keywords.iteritems(): + self.assertEqual(stream.getKeyword(key), value) + + def testSendEosOnClose(self): + stream = self.port.createStream("close_eos") + + self.assertEqual(len(self.stub.H), 0) + self.assertEqual(len(self.stub.packets), 0) + + self._writeSinglePacket(stream, 16) + + self.assertEqual(len(self.stub.H), 1) + self.assertEqual(len(self.stub.packets), 1) + self.failIf(self.stub.packets[-1].EOS) + + stream.close() + self.assertEqual(len(self.stub.packets), 2) + self.failUnless(self.stub.packets[-1].EOS) + + def _writeSinglePacket(self, stream, length, time=None): + if time is None: + time = bulkio.timestamp.now() + data = self.helper.createStreamData(length) + stream.write(data, time) + +class BufferedOutStreamTest(OutStreamTest): + def testBufferedWrite(self): + # Initial state is unbuffered; turn on buffering + stream = self.port.createStream("test_buffered_write") + self.assertEqual(0, stream.bufferSize()) + stream.setBufferSize(128) + self.assertEqual(128, stream.bufferSize()) + self.assertEqual(0, len(self.stub.packets)) + + # First write is below the buffer size + data = self.helper.createStreamData(48) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(0, len(self.stub.packets)) + + # The second write is still below the buffer size + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(0, len(self.stub.packets)) + + # The third write goes beyond the buffer size and should trigger a push, + # but only up to the buffer size (48*3 == 144) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(stream.bufferSize(), self.helper.packetLength(self.stub.packets[-1].data)) + + # There should now be 16 samples in the queue; writing another 48 should + # not trigger a push + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(1, len(self.stub.packets)) + + # Flush the stream and make sure we get as many samples as expected + stream.flush() + self.assertEqual(2, 
len(self.stub.packets)) + self.assertEqual(64, self.helper.packetLength(self.stub.packets[-1].data)) + + # Disable buffering; push should happen immediately + stream.setBufferSize(0) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(3, len(self.stub.packets)) + self.assertEqual(len(data), self.helper.packetLength(self.stub.packets[-1].data)) + + def testWriteSkipBuffer(self): + # Turn on buffering + stream = self.port.createStream("test_skip_buffer") + stream.setBufferSize(100) + + # With an empty queue, large write should go right through + data = self.helper.createStreamData(256) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(len(data), self.helper.packetLength(self.stub.packets[-1].data)) + + # Queue up a bit of data + data = self.helper.createStreamData(16) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(1, len(self.stub.packets)) + + # With queued data, the large write should get broken up into a buffer- + # sized packet + data = self.helper.createStreamData(128) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(2, len(self.stub.packets)) + self.assertEqual(stream.bufferSize(), self.helper.packetLength(self.stub.packets[-1].data)) + + def testFlush(self): + # Turn on buffering + stream = self.port.createStream("test_flush") + stream.setBufferSize(64) + + # Queue data (should not flush) + data = self.helper.createStreamData(48) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(0, len(self.stub.H)) + self.assertEqual(0, len(self.stub.packets)) + + # Make sure flush sends a packet + stream.flush() + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(len(data), self.helper.packetLength(self.stub.packets[-1].data)) + self.assertEqual(False, self.stub.packets[-1].EOS) + + def testFlushOnClose(self): + stream = self.port.createStream("test_flush_close") + stream.setBufferSize(64) + + # Queue data 
(should not flush) + data = self.helper.createStreamData(48) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(0, len(self.stub.H)) + self.assertEqual(0, len(self.stub.packets)) + + # Close the stream; should cause a flush + stream.close() + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(len(data), self.helper.packetLength(self.stub.packets[-1].data)) + self.assertEqual(True, self.stub.packets[-1].EOS) + + def testFlushOnSriChange(self): + # Start with known values for important stream metadata + stream = self.port.createStream("test_flush_sri") + stream.setBufferSize(64) + stream.xdelta = 0.125 + stream.complex = False + stream.blocking = False + stream.subsize = 0 + + # Queue data (should not flush) + data = self.helper.createStreamData(24) + stream.write(data, bulkio.timestamp.now()) + + # Change the xdelta to cause a flush; the received data should be using + # the old xdelta + self.assertEqual(0, len(self.stub.packets)) + stream.xdelta = 0.25 + self.assertEqual(1, len(self.stub.packets), "xdelta change did not flush stream") + self.assertEqual(0.125, self.stub.H[-1].xdelta) + + # Queue more data (should not flush) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(1, len(self.stub.packets)) + + # Change the mode to complex to cause a flush; the mode shouldn't + # change yet, but xdelta should be up-to-date now + stream.complex = True + self.assertEqual(2, len(self.stub.packets), "Complex mode change did not flush stream") + self.assertEqual(0, self.stub.H[-1].mode) + self.assertEqual(stream.xdelta, self.stub.H[-1].xdelta) + + # Queue more data (should not flush) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(2, len(self.stub.H)) + self.assertEqual(2, len(self.stub.packets)) + + # Change the blocking mode to cause a flush; the blocking flag + # shouldn't change yet, but mode should be up-to-date now + stream.blocking = True 
+ self.assertEqual(3, len(self.stub.packets), "Blocking change did not flush stream") + self.assertEqual(0, self.stub.H[-1].blocking) + self.assertNotEqual(0, self.stub.H[-1].mode) + + # Queue more data (should not flush) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(3, len(self.stub.H)) + self.assertEqual(3, len(self.stub.packets)) + + # Change the subsize to cause a flush; the subsize shouldn't change + # yet, but blocking should be up-to-date now + stream.subsize = 16 + self.assertEqual(4, len(self.stub.packets), "Subsize change did not flush stream") + self.assertEqual(0, self.stub.H[-1].subsize) + self.assertNotEqual(0, self.stub.H[-1].blocking) + + def testFlushOnBufferSizeChange(self): + stream = self.port.createStream("test_flush_buffer_size") + stream.setBufferSize(64) + + # Queue data (should not flush) + data = self.helper.createStreamData(48) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(0, len(self.stub.packets)) + + # Reduce the buffer size smaller than the current queue, should trigger + # a flush + stream.setBufferSize(32) + self.assertEqual(1, len(self.stub.packets), "Reducing buffer size below queue size did not flush") + + # Reduce the buffer size again, but not down to the queue size, should + # not trigger a flush + data = self.helper.createStreamData(16) + stream.write(data, bulkio.timestamp.now()) + stream.setBufferSize(24) + self.assertEqual(1, len(self.stub.packets), "Reducing buffer size above queue size flushed") + + # Boundary condition: exact size + stream.setBufferSize(16) + self.assertEqual(2, len(self.stub.packets), "Reducing buffer size to exact size did not flush") + + # Increasing the buffer size should not trigger a flush + data = self.helper.createStreamData(8) + stream.write(data, bulkio.timestamp.now()) + stream.setBufferSize(128) + self.assertEqual(2, len(self.stub.packets), "Increasing buffer size flushed") + + # Disabling buffering must flush + stream.setBufferSize(0) + 
self.assertEqual(3, len(self.stub.packets), "Disabling buffering did not flush") + +class NumericOutStreamTest(BufferedOutStreamTest): + def testWriteComplex(self): + stream = self.port.createStream("test_write_complex") + stream.complex = True + + # Write a list of complex values, which should get turned into a list + # of real values that is twice as long + data = [complex(ii,0) for ii in xrange(100)] + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(1, len(self.stub.packets)) + result = self.helper.unpack(self.stub.packets[-1].data) + self.assertEqual(200, len(result)) + self.assertEqual([ii.real for ii in data], result[::2]) + self.assertEqual([ii.imag for ii in data], result[1::2]) + + # Write a list of real values, each of which is interpreted as a + # complex value (with an imaginary component of 0) + data = range(100) + stream.write(data, bulkio.timestamp.now()) + self.assertEqual(2, len(self.stub.packets)) + result = self.helper.unpack(self.stub.packets[-1].data) + self.assertEqual(200, len(result)) + self.assertEqual(data, result[::2]) + self.assertEqual([0] * 100, result[1::2]) + + # Write pre-interleaved data; no conversion should occur + data = self.helper.pack(range(100)) + stream.write(data, bulkio.timestamp.now(), interleaved=True) + self.assertEqual(3, len(self.stub.packets)) + self.assertEqual(100, len(self.stub.packets[-1].data)) + self.assertEqual(data, self.stub.packets[-1].data) + + +class OutXMLStreamTest(OutStreamTest, unittest.TestCase): + helper = XMLTestHelper() + + def _writeSinglePacket(self, stream, length, time=None): + data = self.helper.createStreamData(length) + stream.write(data) + +class OutBitStreamTest(BufferedOutStreamTest, unittest.TestCase): + helper = BitTestHelper() + + def testWriteLiteral(self): + stream = self.port.createStream("test_write_literal") + + literal = '101101011101011010101' + stream.write(literal, bulkio.timestamp.now()) + + self.assertEqual(1, len(self.stub.packets)) + result = 
self.helper.unpack(self.stub.packets[-1].data) + self.assertEqual(literal, result) + +def register_test(name, testbase, **kwargs): + globals()[name] = type(name, (testbase, unittest.TestCase), kwargs) + +register_test('OutFileStreamTest', OutStreamTest, helper=FileTestHelper()) +register_test('OutCharStreamTest', NumericOutStreamTest, helper=CharTestHelper()) +register_test('OutOctetStreamTest', NumericOutStreamTest, helper=OctetTestHelper()) +register_test('OutShortStreamTest', NumericOutStreamTest, helper=ShortTestHelper()) +register_test('OutUShortStreamTest', NumericOutStreamTest, helper=UShortTestHelper()) +register_test('OutLongStreamTest', NumericOutStreamTest, helper=LongTestHelper()) +register_test('OutULongStreamTest', NumericOutStreamTest, helper=ULongTestHelper()) +register_test('OutLongLongStreamTest', NumericOutStreamTest, helper=LongLongTestHelper()) +register_test('OutULongLongStreamTest', NumericOutStreamTest, helper=ULongLongTestHelper()) +register_test('OutFloatStreamTest', NumericOutStreamTest, helper=FloatTestHelper()) +register_test('OutDoubleStreamTest', NumericOutStreamTest, helper=DoubleTestHelper()) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/bulkioInterfaces/libsrc/testing/tests/python/test_sandbox.py b/bulkioInterfaces/libsrc/testing/tests/python/test_sandbox.py new file mode 100644 index 000000000..3bfdd971e --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/test_sandbox.py @@ -0,0 +1,1002 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import threading +import time +import unittest + +from omniORB import CORBA + +from ossie.utils.sandbox import LocalSandbox +from redhawk.bitbuffer import bitbuffer + +import bulkio +from bulkio.bulkioInterfaces import BULKIO +from bulkio.sandbox import StreamSource, StreamSink + +import helpers + +def after(delay, func, *args, **kwargs): + def delayed_func(): + time.sleep(delay) + func(*args, **kwargs) + t = threading.Thread(target=delayed_func) + t.setDaemon(True) + t.start() + +class format(object): + """ + Decorator to declare the data format for an individual unit test. The + TestCase can use this format in setUp to perform connection logic. 
+ """ + def __init__(self, format): + self.format = format + + def __call__(self, obj): + obj.format = self.format + return obj + +bithelper = helpers.BitTestHelper() + +class StreamSourceTest(unittest.TestCase): + def setUp(self): + self.sandbox = LocalSandbox() + self.source = StreamSource(sandbox=self.sandbox) + + # Connect source directly to a port stub, bypassing the normal sandbox + # connection logic + format = self._getTestFormat() + if not format: + format = 'float' + self.stub = self._createStub(format) + port = self.source.getPort(format+'Out') + port.connectPort(self.stub._this(), 'test_connection') + + def tearDown(self): + self.sandbox.shutdown() + + try: + poa = self.stub._default_POA() + object_id = poa.servant_to_id(self.stub) + poa.deactivate_object(object_id) + except: + # Ignore CORBA exceptions + pass + + def _getTestFormat(self): + method = getattr(self, self._testMethodName, None) + if not method: + return None + return getattr(method, 'format', None) + + def _createStub(self, format): + stubs = { + 'char' : helpers.InCharPortStub, + 'octet' : helpers.InOctetPortStub, + 'short' : helpers.InShortPortStub, + 'ushort' : helpers.InUshortPortStub, + 'long' : helpers.InLongPortStub, + 'ulong' : helpers.InUlongPortStub, + 'longlong' : helpers.InLongLongPortStub, + 'ulonglong': helpers.InUlongLongPortStub, + 'float' : helpers.InFloatPortStub, + 'double' : helpers.InDoublePortStub, + 'bit' : helpers.InBitPortStub, + 'xml' : helpers.InXMLPortStub, + 'file' : helpers.InFilePortStub + } + return stubs[format]() + + @format('ulonglong') + def testWrite(self): + data = range(16) + ts = bulkio.timestamp.now() + self.source.write(data, ts) + + # Check that the stub received a packet that matches + self.assertEqual(1, len(self.stub.H)) + self.failUnless(bulkio.sri.compare(self.source.sri, self.stub.H[-1])) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(self.source.streamID, self.stub.packets[-1].streamID) + self.assertEqual(data, 
self.stub.packets[-1].data) + self.assertEqual(ts, self.stub.packets[-1].T) + + @format('long') + def testWriteTimestamp(self): + # Explicit timestamp + data = range(16) + ts = bulkio.timestamp.create(10000.0, 0.75) + self.source.write(data, ts) + + # Check that the stub received the timestamp given + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(ts, self.stub.packets[-1].T) + + # No timestamp given; should use current time (giving a fair amount of + # leeway here) + ts = bulkio.timestamp.now() + self.source.write(data) + self.assertEqual(2, len(self.stub.packets)) + self.failIf(abs(self.stub.packets[-1].T - ts) > 1.0) + + @format('double') + def testWriteComplex(self): + # Write a 40-element complex list where the interleaved values are a + # ramp + self.source.complex = True + data = [complex(ii,ii+1) for ii in xrange(0, 80, 2)] + self.source.write(data) + + # Check that the stub received a packet that matches (i.e., data is an + # 80-element ramp) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(1, self.stub.H[-1].mode) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(self.source.streamID, self.stub.packets[-1].streamID) + self.assertEqual(range(80), self.stub.packets[-1].data) + self.failIf(self.stub.packets[-1].EOS) + + # Write a 20-element scalar list, which will be interpreted as 20 + # complex values with 0 for the imaginary portion + self.source.write(range(20)) + # To generate the expected output, walk through twice the range, + # turning odd numbers into zeros and dividing even numbers in half + # [ 0, 1, 2, 3, 4, 5...] => [ 0, 0, 1, 0, 2, 0...] 
+ expected = [ii/2 if ii % 2 == 0 else 0 for ii in xrange(0, 40)] + self.assertEqual(2, len(self.stub.packets)) + self.assertEqual(expected, self.stub.packets[-1].data) + + # Write pre-interleaved data + data = range(36) + self.source.write(data, interleaved=True) + self.assertEqual(3, len(self.stub.packets)) + self.assertEqual(data, self.stub.packets[-1].data) + + + @format('ushort') + def testWriteFramed(self): + # Write a list of 4 ramps + self.source.subsize = 16 + data = [range(x,x+16) for x in xrange(0,64,16)] + self.source.write(data) + + # Check that the stub received a packet that matches (i.e., data is an + # 64-element ramp) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(16, self.stub.H[-1].subsize) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(range(64), self.stub.packets[-1].data) + + @format('double') + def testWriteFramedComplex(self): + # Create a set of complex values where each alternating real/imaginary + # value forms a ramp, and reframe it a a list of 4-item lists + data = [complex(x,x+1) for x in xrange(0, 32, 2)] + data = [data[x:x+4] for x in xrange(0, len(data), 4)] + self.source.complex = True + self.source.subsize = 4 + self.source.write(data) + + # Check that the stub received a packet that matches (i.e., data is an + # 32-element ramp) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(1, self.stub.H[-1].mode) + self.assertEqual(4, self.stub.H[-1].subsize) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(range(32), self.stub.packets[-1].data) + + # Write a 4 frames of 4-element real lists; each element will be + # interpreted as a complex value with 0 for the imaginary portion + data = range(16) + data = [data[x:x+4] for x in xrange(0, len(data), 4)] + self.source.write(data) + # To generate the expected output, walk through twice the range, + # turning odd numbers into zeros and dividing even numbers in half + # [ 0, 1, 2, 3, 4, 5...] => [ 0, 0, 1, 0, 2, 0...] 
+ expected = [ii/2 if ii % 2 == 0 else 0 for ii in xrange(0, 32)] + self.assertEqual(2, len(self.stub.packets)) + self.assertEqual(expected, self.stub.packets[-1].data) + + @format('ulong') + def testWriteFramedPreformatted(self): + # Test that writing a 1-dimensional list for framed data still works as + # expected + self.source.subsize = 8 + data = range(48) + self.source.write(data) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(8, self.stub.H[-1].subsize) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(data, self.stub.packets[-1].data) + + @format('bit') + def testWriteBit(self): + # No timestamp + data = bitbuffer('101011010100101011010101101111') + self.source.write(data) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(data, bithelper.unpack(self.stub.packets[-1].data)) + + # Provided time should pass through unmodified + ts = bulkio.timestamp.now() + data = bitbuffer('10001001001001001') + self.source.write(data, ts) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(2, len(self.stub.packets)) + self.assertEqual(data, bithelper.unpack(self.stub.packets[-1].data)) + self.assertEqual(ts, self.stub.packets[-1].T) + + # Write a string literal and make sure it gets translated + literal = '101011010100101011010101101111' + self.source.write(literal) + self.assertEqual(3, len(self.stub.packets)) + self.assertEqual(literal, bithelper.unpack(self.stub.packets[-1].data)) + + @format('bit') + def testWriteBitFramed(self): + # Create frames of all 0s and all 1s for a good indicator of framing + # problems + zeros = bitbuffer(bits=17) + zeros[:] = 0 + ones = bitbuffer(bits=17) + ones[:] = 1 + + # Write five frames, alterning all 0s and all 1s + self.source.subsize = 17 + data = [ones,zeros,ones,zeros,ones] + self.source.write(data) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(17, self.stub.H[-1].subsize) + self.assertEqual(1, len(self.stub.packets)) + expected = 
sum(data, bitbuffer()) + self.assertEqual(expected, bithelper.unpack(self.stub.packets[-1].data)) + + @format('file') + def testWriteFile(self): + # Write data without a timestamp + uri1 = 'file:///tmp/file1.dat' + self.source.write(uri1) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(uri1, self.stub.packets[0].data) + + # Provided time should pass through unmodified + ts = bulkio.timestamp.now() + uri2 = 'file:///tmp/file2.dat' + self.source.write(uri2, ts) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(2, len(self.stub.packets)) + self.assertEqual(uri2, self.stub.packets[1].data) + self.assertEqual(ts, self.stub.packets[-1].T) + + @format('xml') + def testWriteXML(self): + # No timestamp needed + data1 = '' + self.source.write(data1) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(1, len(self.stub.packets)) + self.assertEqual(data1, self.stub.packets[0].data) + + # Provided timestamp should be ignored + data2 = '' + self.source.write(data2, bulkio.timestamp.now()) + self.assertEqual(1, len(self.stub.H)) + self.assertEqual(2, len(self.stub.packets)) + self.assertEqual(data2, self.stub.packets[1].data) + + @format('octet') + def testClose(self): + # Normal write, EOS should be false + self.source.write([0] * 10) + self.assertEqual(1, len(self.stub.packets)) + self.failIf(self.stub.packets[-1].EOS) + + # Close the stream, which must send an EOS + self.source.close() + self.assertEqual(2, len(self.stub.packets)) + self.assertEqual(self.source.streamID, self.stub.packets[-1].streamID) + self.assertEqual(0, len(self.stub.packets[-1].data)) + self.failUnless(self.stub.packets[-1].EOS) + + def testStreamID(self): + # Default: use instance name + self.assertEqual(self.source._instanceName, self.source.streamID) + + # Override in constructor + source2 = StreamSource(streamID='test_stream_id', sandbox=self.sandbox) + self.assertEqual('test_stream_id', source2.streamID) + + # streamID is 
immutable + self.assertRaises(AttributeError, setattr, source2, 'streamID', 'error') + + @format('short') + def testSriMetadata(self): + # Configure the source's stream metadata + self.source.xstart = -2.5 + self.source.xdelta = 0.125 + self.source.xunits = BULKIO.UNITS_FREQUENCY + self.source.subsize = 1024 + self.source.ystart = 2.5 + self.source.ydelta = 1.0 + self.source.yunits = BULKIO.UNITS_TIME + self.source.complex = True + self.source.blocking = True + self.source.setKeyword('COL_RF', 100.0e6) + self.source.setKeyword('CHAN_RF', 101.1e6) + + # Check that the raw SRI is correct + sri = self.source.sri + self.assertEqual(-2.5, sri.xstart) + self.assertEqual(0.125, sri.xdelta) + self.assertEqual(BULKIO.UNITS_FREQUENCY, sri.xunits) + self.assertEqual(1024, sri.subsize) + self.assertEqual(2.5, sri.ystart) + self.assertEqual(1.0, sri.ydelta) + self.assertEqual(BULKIO.UNITS_TIME, sri.yunits) + self.assertEqual(1, sri.mode) + self.assertEqual(1, sri.blocking) + self.assertEqual(100.0e6, bulkio.sri.getKeyword(sri, 'COL_RF')) + self.assertEqual(101.1e6, bulkio.sri.getKeyword(sri, 'CHAN_RF')) + + # Set an explicitly typed keyword + self.source.setKeyword('typed', 0.25, 'float') + self.assertEqual(0.25, bulkio.sri.getKeyword(sri, 'typed')) + any_value = self.source.sri.keywords[-1].value + self.assertEqual(CORBA.TC_float, any_value.typecode()) + + # Write to force an SRI push and compare the SRIs + self.source.write(range(16)) + self.assertEqual(1, len(self.stub.H)) + self.failUnless(bulkio.sri.compare(self.source.sri, self.stub.H[-1])) + + # Modify a few fields and make sure the SRI is updated + self.source.xdelta = 0.0625 + self.source.blocking = False + self.source.eraseKeyword('CHAN_RF') + self.source.write(range(16)) + self.assertEqual(2, len(self.stub.H)) + self.failUnless(bulkio.sri.compare(self.source.sri, self.stub.H[-1])) + + def testPortAccess(self): + # New source should have no port yet + source2 = StreamSource(sandbox=self.sandbox) + 
self.failUnless(source2.port is None) + + # A connection has already been made for the test fixture's source, so + # the port must be defined + self.failIf(self.source.port is None) + + # Use direct access to create another stream + stream = self.source.port.createStream('test_stream') + stream.close() + + def testStreamAccess(self): + # New source should have no stream yet + source2 = StreamSource(sandbox=self.sandbox) + self.failUnless(source2.stream is None) + + # A connection has already been made for the test fixture's source, so + # a new stream should be created on access + stream = self.source.stream + self.failIf(stream is None) + self.assertEqual(self.source.streamID, stream.streamID) + + def testFormat(self): + source = StreamSource(format='float', sandbox=self.sandbox) + port = source.getPort('floatOut') + self.assertRaises(RuntimeError, source.getPort, 'shortOut') + +class StreamSinkTest(unittest.TestCase): + def setUp(self): + self.sandbox = LocalSandbox() + self.sink = StreamSink(sandbox=self.sandbox) + + format = self._getTestFormat() + if not format: + format = 'float' + + # Get a direct reference to a port, bypassing the normal sandbox + # connection logic + self.port = self.sink.getPort(format+'In') + + # StreamSink has to be started to return data, because the underlying + # BulkIO ports require it + self.sandbox.start() + + def tearDown(self): + self.sandbox.shutdown() + + def _getTestFormat(self): + method = getattr(self, self._testMethodName, None) + if not method: + return None + return getattr(method, 'format', None) + + @format('ulong') + def testRead(self): + # Read from empty sink + sink_data = self.sink.read(timeout=0.0) + self.failUnless(sink_data is None) + + # Push directly to the port + sri = bulkio.sri.create('test_read') + self.port.pushSRI(sri) + data = range(16) + ts = bulkio.timestamp.now() + self.port.pushPacket(range(16), ts, False, sri.streamID) + + # Read the packet we just pushed + sink_data = self.sink.read(timeout=1.0) + 
self.failIf(sink_data is None) + self.assertEqual(sri.streamID, sink_data.sri.streamID) + self.assertEqual(1, len(sink_data.sris)) + self.failUnless(bulkio.sri.compare(sri, sink_data.sri)) + self.assertEqual(data, sink_data.data) + self.assertEqual((0, ts), sink_data.timestamps[0]) + + @format('float') + def testReadComplex(self): + # Push directly to the port + sri = bulkio.sri.create('test_read_complex') + sri.mode = 1 + self.port.pushSRI(sri) + ts = bulkio.timestamp.now() + self.port.pushPacket(range(32), ts, False, sri.streamID) + + # Data should be returned as complex values + data = self.sink.read(timeout=1.0) + self.failIf(data is None) + expected = [complex(x,x+1) for x in xrange(0,32,2)] + self.assertEqual(expected, data.data) + + @format('ushort') + def testReadFramed(self): + # Push four frames directly to the port + sri = bulkio.sri.create('test_read_framed') + sri.subsize = 16 + self.port.pushSRI(sri) + ts1 = bulkio.timestamp.now() + self.port.pushPacket(range(64), ts1, False, sri.streamID) + + # Data should be returned as a 4-item list of lists + data = self.sink.read(timeout=1.0) + self.failIf(data is None) + self.assertEqual(4, len(data.data)) + expected = [range(x, x+16) for x in xrange(0, 64, 16)] + self.assertEqual(expected, data.data) + + # Push another four frames across two packets + ts2 = bulkio.timestamp.now() + self.port.pushPacket(range(32,64), ts2, False, sri.streamID) + ts3 = bulkio.timestamp.now() + self.port.pushPacket(range(64,96), ts3, False, sri.streamID) + + # Data should be returned as a 4-item list of lists + data = self.sink.read(timeout=1.0) + self.failIf(data is None) + self.assertEqual(4, len(data.data)) + expected = [range(x, x+16) for x in xrange(32, 96, 16)] + self.assertEqual(expected, data.data) + + # Time stamp offsets should be frame-based + self.assertEqual(2, len(data.timestamps)) + self.assertEqual(0, data.timestamps[0].offset) + self.assertEqual(ts2, data.timestamps[0].time) + self.assertEqual(2, 
data.timestamps[1].offset) + self.assertEqual(ts3, data.timestamps[1].time) + + @format('double') + def testReadFramedComplex(self): + # Push a single frame directly to the port + sri = bulkio.sri.create('test_read_framed_cx') + sri.mode = 1 + sri.subsize = 10 + self.port.pushSRI(sri) + ts1 = bulkio.timestamp.now() + self.port.pushPacket(range(20), ts1, False, sri.streamID) + + # Data should be returned as a 1-item list of lists of complex values + data = self.sink.read(timeout=1.0) + self.failIf(data is None) + self.assertEqual(1, len(data.data)) + expected = [[complex(x,x+1) for x in xrange(0, 20,2)]] + self.assertEqual(expected, data.data) + + # Push three frames across two packets + ts2 = bulkio.timestamp.now() + self.port.pushPacket(range(40), ts2, False, sri.streamID) + ts3 = bulkio.timestamp.now() + self.port.pushPacket(range(40,60), ts3, False, sri.streamID) + + # Data should come back as a 3-item list + data = self.sink.read(timeout=1.0) + self.failIf(data is None) + self.assertEqual(3, len(data.data)) + expected = [[complex(x,x+1) for x in xrange(y, y+20,2)] for y in xrange(0, 60, 20)] + self.assertEqual(expected, data.data) + + # Time stamp offsets should be frame-based + self.assertEqual(2, len(data.timestamps)) + self.assertEqual(0, data.timestamps[0].offset) + self.assertEqual(ts2, data.timestamps[0].time) + self.assertEqual(2, data.timestamps[1].offset) + self.assertEqual(ts3, data.timestamps[1].time) + + @format('bit') + def testReadBit(self): + # Push a bit sequence directly to the port + sri = bulkio.sri.create('test_read_bit') + self.port.pushSRI(sri) + data1 = bitbuffer('10101101010101') + ts1 = bulkio.timestamp.now() + self.port.pushPacket(self._formatBitPacket(data1), ts1, False, sri.streamID) + + # Read should return the equivalent bitbuffer + sink_data = self.sink.read(timeout=1.0) + self.failIf(sink_data is None) + self.assertEqual(data1, sink_data.data) + self.assertEqual(1, len(sink_data.timestamps)) + self.assertEqual(0, 
sink_data.timestamps[0].offset) + self.assertEqual(ts1, sink_data.timestamps[0].time) + + # Push a couple more strings of bits with new timestamps + ts1 = bulkio.timestamp.now() + self.port.pushPacket(self._formatBitPacket(data1), ts1, False, sri.streamID) + data2 = bitbuffer('1010010101011') + ts2 = bulkio.timestamp.now() + self.port.pushPacket(self._formatBitPacket(data2), ts2, False, sri.streamID) + data3 = bitbuffer('1000101011100') + ts3 = bulkio.timestamp.now() + self.port.pushPacket(self._formatBitPacket(data3), ts3, False, sri.streamID) + + # Read should merge all of the bits into a single bitbuffer + sink_data = self.sink.read(timeout=1.0) + self.failIf(sink_data is None) + self.assertEqual(data1+data2+data3, sink_data.data) + self.assertEqual(3, len(sink_data.timestamps)) + + # Check each timestamp's offset + expected_timestamps = [(0, ts1), (len(data1), ts2), (len(data1)+len(data2), ts3)] + for (exp_offset, exp_ts), actual_ts in zip(expected_timestamps, sink_data.timestamps): + self.assertEqual(exp_offset, actual_ts.offset) + self.assertEqual(exp_ts, actual_ts.time) + + def _formatBitPacket(self, bitdata): + return BULKIO.BitSequence(bitdata.bytes(), len(bitdata)) + + @format('bit') + def testReadBitFramed(self): + sri = bulkio.sri.create('test_read_bit_framed') + sri.subsize = 25 + self.port.pushSRI(sri) + + # Create frames of all 0s and all 1s for a good indicator of framing + # problems + zeros = bitbuffer(bits=sri.subsize) + zeros[:] = 0 + ones = bitbuffer(bits=sri.subsize) + ones[:] = 1 + + # Push 4 frames in one packet + ts1 = bulkio.timestamp.now() + self.port.pushPacket(self._formatBitPacket(zeros+ones+zeros+ones), ts1, False, sri.streamID) + + # Data should be returned as a 4-item list of bitbuffers + sink_data = self.sink.read(timeout=1.0) + self.failIf(sink_data is None) + self.assertEqual(4, len(sink_data.data)) + self.assertEqual([zeros,ones,zeros,ones], sink_data.data) + + # Push 6 frames across two packets + ts2 = bulkio.timestamp.now() + 
self.port.pushPacket(self._formatBitPacket(zeros+ones+zeros), ts2, False, sri.streamID) + ts3 = bulkio.timestamp.now() + self.port.pushPacket(self._formatBitPacket(ones+zeros+ones), ts3, False, sri.streamID) + + # Data should be returned as a 6-item list of bitbuffers + sink_data = self.sink.read(timeout=1.0) + self.failIf(sink_data is None) + self.assertEqual(6, len(sink_data.data)) + self.assertEqual([zeros,ones,zeros,ones,zeros,ones], sink_data.data) + + # Time stamp offsets should be frame-based + self.assertEqual(2, len(sink_data.timestamps)) + self.assertEqual(0, sink_data.timestamps[0].offset) + self.assertEqual(ts2, sink_data.timestamps[0].time) + self.assertEqual(3, sink_data.timestamps[1].offset) + self.assertEqual(ts3, sink_data.timestamps[1].time) + + @format('file') + def testReadFile(self): + # Push a file URI directly to the port + sri = bulkio.sri.create('test_read_file') + self.port.pushSRI(sri) + uri1 = 'file:///tmp/file1.dat' + ts1 = bulkio.timestamp.now() + self.port.pushPacket(uri1, ts1, False, sri.streamID) + + # Read should return a list of URIs with 1 timestamp per URI + sink_data = self.sink.read(timeout=1.0) + self.failIf(sink_data is None) + self.assertEqual([uri1], sink_data.data) + self.assertEqual(1, len(sink_data.timestamps)) + self.assertEqual(0, sink_data.timestamps[0].offset) + self.assertEqual(ts1, sink_data.timestamps[0].time) + + # Push a couple more URIs with new timestamps + ts1 = bulkio.timestamp.now() + self.port.pushPacket(uri1, ts1, False, sri.streamID) + uri2 = 'file:///tmp/file2.dat' + ts2 = bulkio.timestamp.now() + self.port.pushPacket(uri2, ts2, False, sri.streamID) + uri3 = 'file:///tmp/file3.dat' + ts3 = bulkio.timestamp.now() + self.port.pushPacket(uri3, ts3, False, sri.streamID) + + # Again, read should return a list of URIs with 1 timestamp per URI + sink_data = self.sink.read(timeout=1.0) + self.failIf(sink_data is None) + self.assertEqual([uri1, uri2, uri3], sink_data.data) + self.assertEqual(3, 
len(sink_data.timestamps)) + + # Check each timestamp; offset should increase by 1 each time + for (exp_offset, exp_ts), actual_ts in zip(enumerate([ts1, ts2, ts3]), sink_data.timestamps): + self.assertEqual(exp_offset, actual_ts.offset) + self.assertEqual(exp_ts, actual_ts.time) + + @format('xml') + def testReadXML(self): + # Push an XML string directly to the port + sri = bulkio.sri.create('test_read_xml') + self.port.pushSRI(sri) + data1 = '' + self.port.pushPacket(data1, False, sri.streamID) + + # Read should return a list of complete XML strings, and there should + # be no timestamps; in this case, there's only one XML string + sink_data = self.sink.read(timeout=1.0) + self.failIf(sink_data is None) + self.assertEqual([data1], sink_data.data) + self.assertEqual(0, len(sink_data.timestamps)) + + # Push a couple more XML strings + self.port.pushPacket(data1, False, sri.streamID) + data2 = '' + self.port.pushPacket(data2, False, sri.streamID) + data3 = '' + self.port.pushPacket(data3, True, sri.streamID) + + # This time, it should return 3 XML strings, but still no timestamps + sink_data = self.sink.read(timeout=1.0, eos=True) + self.failIf(sink_data is None) + self.assertEqual([data1, data2, data3], sink_data.data) + self.assertEqual(0, len(sink_data.timestamps)) + + @format('long') + def testReadStreamID(self): + # Push directly to the port + sri = bulkio.sri.create('test_read_stream_1') + self.port.pushSRI(sri) + ts = bulkio.timestamp.now() + self.port.pushPacket(range(16), ts, False, sri.streamID) + + # Read from a stream that does not have data should fail + data = self.sink.read(timeout=0.1, streamID='not here') + self.failUnless(data is None) + + # Push to a second stream ID + sri = bulkio.sri.create('test_read_stream_2') + self.port.pushSRI(sri) + ts = bulkio.timestamp.now() + self.port.pushPacket(range(16), ts, False, sri.streamID) + + # Read should return data specifically from the given streamID (need to + # give it a timeout so that the sink's thread 
has time to queue the + # packet data) + data = self.sink.read(timeout=1.0, streamID=sri.streamID) + self.failIf(data is None) + self.assertEqual(sri.streamID, data.sri.streamID) + self.assertEqual(1, len(data.sris)) + self.failUnless(bulkio.sri.compare(sri, data.sri)) + self.assertEqual(range(16), data.data) + self.assertEqual((0, ts), data.timestamps[0]) + + @format('char') + def testTimeStamps(self): + # Push a bunch of packets and remember the time stamps + sri = bulkio.sri.create('test_time_stamps') + self.port.pushSRI(sri) + expected = [] + ts = bulkio.timestamp.now() + self.port.pushPacket('\x00'*16, ts, False, sri.streamID) + expected.append((0, ts)) + ts = bulkio.timestamp.now() + self.port.pushPacket('\x00'*32, ts, False, sri.streamID) + expected.append((16, ts)) + ts = bulkio.timestamp.now() + self.port.pushPacket('\x00'*16, ts, False, sri.streamID) + expected.append((48, ts)) + self.port.pushPacket('', bulkio.timestamp.notSet(), True, sri.streamID) + + # Read all of the data and check the timestamps against what was sent + data = self.sink.read(timeout=1.0, eos=True) + self.failIf(data is None) + self.assertEqual(3, len(data.timestamps)) + for (exp_off, exp_ts), (act_off, act_ts) in zip(expected, data.timestamps): + self.assertEqual(exp_off, act_off) + self.assertEqual(exp_ts, act_ts) + + @format('double') + def testTimeStampsComplex(self): + # Push a bunch of packets and remember the time stamps + sri = bulkio.sri.create('test_time_stamps_cx') + sri.mode = 1 + self.port.pushSRI(sri) + expected = [] + ts = bulkio.timestamp.now() + self.port.pushPacket([0] * 32, ts, False, sri.streamID) + expected.append((0, ts)) + ts = bulkio.timestamp.now() + self.port.pushPacket([1] * 64, ts, False, sri.streamID) + # NB: The offset advances by the number of complex values, not the + # number of scalars + expected.append((16, ts)) + ts = bulkio.timestamp.now() + self.port.pushPacket([22] * 32, ts, False, sri.streamID) + expected.append((48, ts)) + 
self.port.pushPacket([], bulkio.timestamp.notSet(), True, sri.streamID) + + data = self.sink.read(timeout=1.0, eos=True) + self.failIf(data is None) + self.assertEqual(3, len(data.timestamps)) + for (exp_off, exp_ts), (act_off, act_ts) in zip(expected, data.timestamps): + self.assertEqual(exp_off, act_off) + self.assertEqual(exp_ts, act_ts) + + @format('octet') + def testSriChanges(self): + # Push some data with an initial SRI + sri = bulkio.sri.create('test_sri_changes') + sri.xdelta = 1.0 + self.port.pushSRI(sri) + self.port.pushPacket('\x00', bulkio.timestamp.now(), False, sri.streamID) + + # Modify the SRI and push some more + sri2 = bulkio.sri.create(sri.streamID) + sri2.xdelta = 2.0 + self.port.pushSRI(sri2) + self.port.pushPacket('\x01', bulkio.timestamp.now(), False, sri.streamID) + self.port.pushPacket('\x02', bulkio.timestamp.now(), False, sri.streamID) + self.port.pushPacket('\x03', bulkio.timestamp.now(), False, sri.streamID) + + # One last modification and some data, followed by an EOS + sri3 = bulkio.sri.create(sri.streamID) + sri3.xdelta = 3.0 + self.port.pushSRI(sri3) + self.port.pushPacket('\x04', bulkio.timestamp.now(), False, sri.streamID) + self.port.pushPacket('\x05', bulkio.timestamp.now(), False, sri.streamID) + self.port.pushPacket('', bulkio.timestamp.notSet(), True, sri.streamID) + + # Read all of the data up to the EOS to ensure we get all of the SRIs, + # then check the SRIs and offsets + data = self.sink.read(timeout=1.0, eos=True) + self.failIf(data is None) + self.assertEqual(sri.streamID, data.streamID) + self.assertEqual(3, len(data.sris)) + self.assertEqual(0, data.sris[0].offset) + self.failUnless(bulkio.sri.compare(sri, data.sris[0].sri)) + self.assertEqual(1, data.sris[1].offset) + self.failUnless(bulkio.sri.compare(sri2, data.sris[1].sri)) + self.assertEqual(4, data.sris[2].offset) + self.failUnless(bulkio.sri.compare(sri3, data.sris[2].sri)) + + @format('short') + def testWaitEOS(self): + # Push directly to the port + sri = 
bulkio.sri.create('test_wait_eos') + self.port.pushSRI(sri) + self.port.pushPacket([0], bulkio.timestamp.now(), False, sri.streamID) + + # Read with eos=True should fail + sink_data = self.sink.read(timeout=0.1, eos=True) + + # Push more data and and end-of-stream packet + self.port.pushPacket([1,2], bulkio.timestamp.now(), False, sri.streamID) + self.port.pushPacket([3,4,5], bulkio.timestamp.now(), False, sri.streamID) + self.port.pushPacket([6,7,8,9], bulkio.timestamp.now(), False, sri.streamID) + self.port.pushPacket([], bulkio.timestamp.notSet(), True, sri.streamID) + + # Read until end-of-stream should succeed, returning all the data + # pushed, with EOS set + sink_data = self.sink.read(timeout=1.0, eos=True) + self.failIf(sink_data is None) + self.failUnless(sink_data.eos) + self.assertEqual(sri.streamID, sink_data.sri.streamID) + self.assertEqual(range(10), sink_data.data) + + @format('float') + def testWaitStreamAndEOS(self): + # Push directly to the port + sri = bulkio.sri.create('test_stream_eos_1') + self.port.pushSRI(sri) + self.port.pushPacket(range(7), bulkio.timestamp.now(), True, sri.streamID) + + # Read with eos=True and a different stream ID should fail + sink_data = self.sink.read(timeout=0.1, streamID='other', eos=True) + self.failUnless(sink_data is None) + + # Push new data and an end-of-stream packet to a second stream + sri2 = bulkio.sri.create('test_stream_eos_2') + self.port.pushSRI(sri2) + self.port.pushPacket(range(21), bulkio.timestamp.now(), False, sri2.streamID) + self.port.pushPacket(range(21, 32), bulkio.timestamp.now(), False, sri2.streamID) + self.port.pushPacket([], bulkio.timestamp.notSet(), True, sri2.streamID) + + # Read until end-of-stream should succeed, returning all the data + # pushed, with EOS set + sink_data = self.sink.read(timeout=1.0, streamID=sri2.streamID, eos=True) + self.failIf(sink_data is None) + self.failUnless(sink_data.eos) + self.assertEqual(sri2.streamID, sink_data.sri.streamID) + 
self.assertEqual(range(32), sink_data.data) + + @format('double') + def testStreamIDs(self): + # Start with no streams + self.assertEqual([], self.sink.streamIDs()) + + # First stream + sri_1 = bulkio.sri.create('test_streamids_1') + self.port.pushSRI(sri_1) + self.port.pushPacket([0], bulkio.timestamp.now(), False, sri_1.streamID) + self.assertEqual([sri_1.streamID], self.sink.streamIDs()) + + # Push a second stream; note that order is undefined, so we need to + # sort the stream IDs + sri_2 = bulkio.sri.create('test_streamids_2') + self.port.pushSRI(sri_2) + self.port.pushPacket([0], bulkio.timestamp.now(), False, sri_2.streamID) + stream_ids = self.sink.streamIDs() + stream_ids.sort() + self.assertEqual([sri_1.streamID, sri_2.streamID], stream_ids) + + # Send an end-of-stream packet for the first stream and read it all; + # only the second stream's ID should remain + self.port.pushPacket([], bulkio.timestamp.notSet(), True, sri_1.streamID) + data = self.sink.read(timeout=1.0, streamID=sri_1.streamID) + self.failIf(data is None) + self.failUnless(data.eos) + self.assertEqual([sri_2.streamID], self.sink.streamIDs()) + + # Read all the data from the second stream; it should still be active + data = self.sink.read(timeout=1.0) + self.failIf(data is None) + self.failIf(data.eos) + self.assertEqual([sri_2.streamID], self.sink.streamIDs()) + + # Close out the second stream + self.port.pushPacket([], bulkio.timestamp.notSet(), True, sri_2.streamID) + self.assertEqual([sri_2.streamID], self.sink.streamIDs()) + data = self.sink.read(timeout=1.0) + self.failIf(data is None) + self.failUnless(data.eos) + + # Should end up with no streams + self.assertEqual([], self.sink.streamIDs()) + + @format('ulonglong') + def testActiveSRIs(self): + # Start with no SRIs + self.assertEqual([], self.sink.activeSRIs()) + + # First stream + sri_1 = bulkio.sri.create('test_activesris_1') + sri_1.xdelta = 1.0 + self.port.pushSRI(sri_1) + self.port.pushPacket([0], bulkio.timestamp.now(), 
False, sri_1.streamID) + active_sris = self.sink.activeSRIs() + self.assertEqual(1, len(active_sris)) + self.failUnless(bulkio.sri.compare(sri_1, active_sris[0])) + + # Push a second stream + sri_2 = bulkio.sri.create('test_streamids_2') + self.port.pushSRI(sri_2) + self.port.pushPacket([0], bulkio.timestamp.now(), False, sri_2.streamID) + + # Note that order is undefined, so we need to sort on the stream IDs + active_sris = self.sink.activeSRIs() + self._sortSRIs(active_sris) + self.assertEqual(2, len(active_sris)) + self.failUnless(bulkio.sri.compare(sri_1, active_sris[0])) + self.failUnless(bulkio.sri.compare(sri_2, active_sris[1])) + + # Send an end-of-stream for the second stream, but don't read it yet + self.port.pushPacket([], bulkio.timestamp.notSet(), True, sri_2.streamID) + active_sris = self.sink.activeSRIs() + self._sortSRIs(active_sris) + self.assertEqual(2, len(active_sris)) + self.failUnless(bulkio.sri.compare(sri_1, active_sris[0])) + self.failUnless(bulkio.sri.compare(sri_2, active_sris[1])) + + # Modify first SRI and push some more data; the active SRI should still + # have the original values (xdelta == 1.0) + sri_1.xdelta = 2.0 + self.port.pushSRI(sri_1) + self.port.pushPacket([0], bulkio.timestamp.now(), False, sri_1.streamID) + active_sris = self.sink.activeSRIs() + self._sortSRIs(active_sris) + self.assertEqual(2, len(active_sris)) + self.assertEqual(1.0, active_sris[0].xdelta) + + # Read all of the data from the first stream; afterwards, the active + # SRI should have the updated values + data = self.sink.read(timeout=1.0, streamID=sri_1.streamID) + self.failIf(data is None) + active_sris = self.sink.activeSRIs() + self._sortSRIs(active_sris) + self.assertEqual(2, len(active_sris)) + self.assertEqual(2.0, active_sris[0].xdelta) + + # Acknowledge the end-of-stream for the second stream, and make sure it + # no longer shows up in the active SRIs + data = self.sink.read(timeout=1.0, streamID=sri_2.streamID) + self.failIf(data is None) + 
self.failUnless(data.eos) + active_sris = self.sink.activeSRIs() + self.assertEqual(1, len(active_sris)) + self.failUnless(bulkio.sri.compare(sri_1, active_sris[0])) + + # Close out the first stream and make sure there are no more active + # SRIs + self.port.pushPacket([], bulkio.timestamp.notSet(), True, sri_1.streamID) + data = self.sink.read(timeout=1.0) + self.failIf(data is None) + self.failUnless(data.eos) + self.assertEqual([], self.sink.activeSRIs()) + + def _sortSRIs(self, sris): + # Sorts a list of SRIs by stream ID + sris.sort(cmp=lambda x,y: cmp(x.streamID, y.streamID)) + + def testPortAccess(self): + # New sink should have no port yet + sink2 = StreamSink(sandbox=self.sandbox) + self.failUnless(sink2.port is None) + + # A connection has already been made for the test fixture's sink, so + # the port must be defined + port = self.sink.port + self.failIf(port is None) + + # Try calling a port method as a quick sanity check + self.assertEqual([], port.getStreams()) + + def testFormat(self): + sink = StreamSink(format='float', sandbox=self.sandbox) + port = sink.getPort('floatIn') + self.assertRaises(RuntimeError, sink.getPort, 'shortIn') + + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/bulkioInterfaces/libsrc/testing/tests/python/test_streamsri.py b/bulkioInterfaces/libsrc/testing/tests/python/test_streamsri.py new file mode 100644 index 000000000..a529bd6ad --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/test_streamsri.py @@ -0,0 +1,200 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest +from omniORB.any import to_any, from_any + +from ossie.cf import CF + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +class StreamSRITest(unittest.TestCase): + def assertSRIFieldsEqual(self, fields, sri): + for field, expected in fields.iteritems(): + actual = getattr(sri, field) + self.assertEqual(expected, actual, "sri.%s: expected '%s', actual '%s'" % (field, expected, actual)) + + def testCreate(self): + expected = { + 'hversion': 1, + 'xstart': 0.0, + 'xdelta': 1.0, + 'xunits': BULKIO.UNITS_TIME, + 'subsize': 0, + 'ystart': 0.0, + 'ydelta': 0.0, + 'yunits': BULKIO.UNITS_NONE, + 'mode': 0, + 'streamID': 'defStream', + 'blocking': False, + 'keywords': [] + } + + sri = bulkio.sri.create() + self.assertSRIFieldsEqual(expected, sri) + + # Test stream ID and sample rate arguments + sample_rate = 2.5e6 + expected['streamID'] = 'new_stream_id' + expected['xdelta'] = 1.0 / sample_rate + sri = bulkio.sri.create('new_stream_id', sample_rate) + self.assertSRIFieldsEqual(expected, sri) + + def testCompare(self): + a_sri = bulkio.sri.create() + b_sri = bulkio.sri.create() + c_sri = bulkio.sri.create() + c_sri.streamID = "THIS_DOES_NOT_MATCH" + + self.assertEqual( bulkio.sri.compare( a_sri, b_sri ), True, " bulkio.sri.compare method - same.") + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - StreamID .") + + c_sri = bulkio.sri.create() + c_sri.hversion = 2 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - hversion ") + + 
c_sri = bulkio.sri.create() + c_sri.xstart = 3 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - xstart ") + + c_sri = bulkio.sri.create() + c_sri.xdelta = 100.0 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - xdelta ") + + c_sri = bulkio.sri.create() + c_sri.xunits = 100.0 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - xunits ") + + c_sri = bulkio.sri.create() + c_sri.subsize = 100 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - subsize ") + + c_sri = bulkio.sri.create() + c_sri.ystart = 3 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - ystart ") + + c_sri = bulkio.sri.create() + c_sri.ydelta = 100.0 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - ydelta ") + + c_sri = bulkio.sri.create() + c_sri.yunits = 100.0 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - yunits ") + + c_sri = bulkio.sri.create() + c_sri.mode = 100 + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - mode ") + + kv = CF.DataType( id="key_one", value=to_any(1) ) + kv2 = CF.DataType( id="key_one", value=to_any(1) ) + a_sri.keywords = [kv] + c_sri = bulkio.sri.create() + c_sri.keywords = [kv2] + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), True, " bulkio.sri.compare method - same - keyword item ") + + kv2 = CF.DataType( id="key_one", value=to_any(100) ) + c_sri = bulkio.sri.create() + c_sri.keywords = [kv2] + self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - keywords value mismatch ") + + kv2 = CF.DataType( id="key_two", value=to_any(100) ) + c_sri = bulkio.sri.create() + c_sri.keywords = [kv2] 
+ self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - keywords name mismatch ") + + def testHasKeyword(self): + sri = bulkio.sri.create('has_keyword') + sri.keywords.append(CF.DataType('string', to_any('first'))) + sri.keywords.append(CF.DataType('number', to_any(2.0))) + + self.failUnless(bulkio.sri.hasKeyword(sri, 'string')) + self.failUnless(bulkio.sri.hasKeyword(sri, 'number')) + self.failIf(bulkio.sri.hasKeyword(sri, 'missing')) + + def testGetKeyword(self): + sri = bulkio.sri.create('get_keyword') + sri.keywords.append(CF.DataType('string', to_any('first'))) + sri.keywords.append(CF.DataType('number', to_any(2.0))) + + # Basic get + self.assertEqual('first', bulkio.sri.getKeyword(sri, 'string')) + self.assertEqual(2.0, bulkio.sri.getKeyword(sri, 'number')) + + # Add a duplicate keyword at the end, should still return first value + sri.keywords.append(CF.DataType('string', to_any('second'))) + self.assertEqual('first', bulkio.sri.getKeyword(sri, 'string')) + + self.assertRaises(KeyError, bulkio.sri.getKeyword, sri, 'missing') + + def testSetKeyword(self): + sri = bulkio.sri.create('set_keyword') + sri.keywords.append(CF.DataType('string', to_any('first'))) + sri.keywords.append(CF.DataType('number', to_any(2.0))) + + # Update first keyword + bulkio.sri.setKeyword(sri, 'string', 'modified') + self.assertEqual(2, len(sri.keywords)) + self.assertEqual('modified', bulkio.sri.getKeyword(sri, 'string')) + self.assertEqual(2.0, bulkio.sri.getKeyword(sri, 'number')) + + # Update second keyword + bulkio.sri.setKeyword(sri, 'number', -1) + self.assertEqual(2, len(sri.keywords)) + self.assertEqual('modified', bulkio.sri.getKeyword(sri, 'string')) + self.assertEqual(-1, bulkio.sri.getKeyword(sri, 'number')) + + # Add new keyword + bulkio.sri.setKeyword(sri, 'new', True) + self.assertEqual(3, len(sri.keywords)) + self.assertEqual('modified', bulkio.sri.getKeyword(sri, 'string')) + self.assertEqual(-1, 
bulkio.sri.getKeyword(sri, 'number')) + self.assertEqual(True, bulkio.sri.getKeyword(sri, 'new')) + + def testEraseKeyword(self): + sri = bulkio.sri.create('erase_keyword') + sri.keywords.append(CF.DataType('string', to_any('first'))) + sri.keywords.append(CF.DataType('number', to_any(2.0))) + + # Basic erase + self.failUnless(bulkio.sri.hasKeyword(sri, 'string')) + bulkio.sri.eraseKeyword(sri, 'string') + self.assertEqual(1, len(sri.keywords)) + self.failIf(bulkio.sri.hasKeyword(sri, 'string')) + self.failUnless(bulkio.sri.hasKeyword(sri, 'number')) + + # Non-existant key, no modification + bulkio.sri.eraseKeyword(sri, 'missing') + self.assertEqual(1, len(sri.keywords)) + self.failUnless(bulkio.sri.hasKeyword(sri, 'number')) + + # Add some more keywords, including a duplicate; erasing the duplicate + # should only erase the first instance + sri.keywords.append(CF.DataType('string', to_any('first'))) + sri.keywords.append(CF.DataType('number', to_any(500))) + self.assertEqual(2.0, bulkio.sri.getKeyword(sri, 'number')) + bulkio.sri.eraseKeyword(sri, 'number') + self.assertEqual(2, len(sri.keywords)) + self.assertEqual(500, bulkio.sri.getKeyword(sri, 'number')) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/bulkioInterfaces/libsrc/testing/tests/python/test_utctime.py b/bulkioInterfaces/libsrc/testing/tests/python/test_utctime.py new file mode 100644 index 000000000..8913b89e6 --- /dev/null +++ b/bulkioInterfaces/libsrc/testing/tests/python/test_utctime.py @@ -0,0 +1,216 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest +import copy + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +class PrecisionUTCTimeTest(unittest.TestCase): + def assertTimeEqual(self, t1, t2, msg=''): + self.assertEqual(t1, t2, msg='%s expected:<%s> but was:<%s>' % (msg, t1, t2)) + + def testNow(self): + ts = bulkio.timestamp.now() + self.assertEqual(ts.tcmode, BULKIO.TCM_CPU, msg="tcmode mismatch") + self.assertEqual(ts.tcstatus, BULKIO.TCS_VALID, msg="tcstatus mismatch") + self.assertEqual(ts.toff, 0, msg="toff mismatch") + + ts = bulkio.timestamp.cpuTimeStamp() + self.assertEqual(ts.tcmode, BULKIO.TCM_CPU, msg="tcmode mismatch") + self.assertEqual(ts.tcstatus, BULKIO.TCS_VALID, msg="tcstatus mismatch") + self.assertEqual(ts.toff, 0, msg="toff mismatch") + + def testCreate(self): + ts = bulkio.timestamp.create(100.0, 0.125) + self.assertEqual(ts.tcmode, BULKIO.TCM_CPU, msg="tcmode mismatch") + self.assertEqual(ts.tcstatus, BULKIO.TCS_VALID, msg="tcstatus mismatch") + self.assertEqual(ts.twsec, 100.0, msg="tcwsec mismatch") + self.assertEqual(ts.tfsec, 0.125, msg="tcfsec mismatch") + + ts = bulkio.timestamp.create(100.0, 0.125, BULKIO.TCM_SDDS) + self.assertEqual(ts.tcmode, BULKIO.TCM_SDDS, msg="tcmode mismatch") + self.assertEqual(ts.tcstatus, BULKIO.TCS_VALID, msg="tcstatus mismatch") + self.assertEqual(ts.twsec, 100.0, msg="tcwsec mismatch") + self.assertEqual(ts.tfsec, 0.125, msg="tcfsec mismatch") + + def testCompare(self): + t1 = bulkio.timestamp.create(100.0, 0.5) + t2 = bulkio.timestamp.create(100.0, 0.5) + self.assertTimeEqual(t1, t2) + + # Only fractional 
seconds differ + t1 = bulkio.timestamp.create(100.0, 0.5) + t2 = bulkio.timestamp.create(100.0, 0.25) + self.assertTrue(t1 > t2, msg="Time with larger fractional did not compare greater") + self.assertTrue(t2 < t1, msg="Time with smaller fractional did not compare lesser") + + # Only whole seconds differ + t1 = bulkio.timestamp.create(100.0, 0.75) + t2 = bulkio.timestamp.create(101.0, 0.75) + self.assertTrue(t1 < t2, msg="Time with smaller whole did not compare lesser") + self.assertTrue(t2 > t1, msg="Time with larger whole did not compare greater") + + # Whole seconds differ, but fractional seconds have the opposite ordering (which has no effect) + t1 = bulkio.timestamp.create(100.0, 0.75) + t2 = bulkio.timestamp.create(5000.0, 0.25) + self.assertTrue(t1 < t2, msg="Time with smaller whole and larger fractional did not compare lesser") + self.assertTrue(t2 > t1, msg="Time with larger whole and smaller fractional did not compare greater") + + def testNormalize(self): + # NOTE: All tests use fractional portions that are exact binary fractions to + # avoid potential roundoff issues + + # Already normalized, no change + tstamp = bulkio.timestamp.create(100.0, 0.5) + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(100.0, 0.5), tstamp, msg="Already normalized time") + + # Whole seconds has fractional portion, should be moved to fractional seconds + tstamp.twsec = 100.25; + tstamp.tfsec = 0.25; + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(100.0, 0.5), tstamp, msg="Normalizing whole") + + # Whole seconds has fractional portion, should be moved to fractional seconds + # leading to carry + tstamp.twsec = 100.75; + tstamp.tfsec = 0.75; + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(101.0, 0.5), tstamp, msg="Normalizing whole with carry") + + # Fractional seconds contains whole portion, should be moved to whole seconds + tstamp.twsec = 100.0; + tstamp.tfsec = 2.5; + 
bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(102.0, 0.5), tstamp, msg="Normalizing fractional") + + # Both parts require normalization; fractional portion of whole seconds adds an + # additional carry + tstamp.twsec = 100.75; + tstamp.tfsec = 2.75; + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(103.0, 0.5), tstamp, msg="Normalizing both") + + # Negative fractional value should borrow + tstamp.twsec = 100.0; + tstamp.tfsec = -0.25; + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(99.0, 0.75), tstamp, msg="Normalizing negative fractional") + + # Negative fractional value with magnitude greater than one + tstamp.twsec = 100.0; + tstamp.tfsec = -3.125; + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(96.0, 0.875), tstamp, msg="Normalizing negative fractional > 1") + + # Fractional portion of whole seconds greater than negative fractional seconds + tstamp.twsec = 100.5; + tstamp.tfsec = -.125; + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(100.0, 0.375), tstamp, msg="Normalizing both with negative fractional") + + # Negative fractional seconds greater than fractional portion of whole seconds + tstamp.twsec = 100.125; + tstamp.tfsec = -.5; + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(99.0, 0.625), tstamp, msg="Normalizing both with borrow") + + # Negative fractional seconds have whole portion, but seconds whole seconds have + # fractional portion with larger magnitude than remaining fractional seconds + tstamp.twsec = 100.75; + tstamp.tfsec = -2.5; + bulkio.timestamp.normalize(tstamp) + self.assertTimeEqual(bulkio.timestamp.create(98.0, 0.25), tstamp, msg="Normalizing both with negative fractional > 1") + + def testOperators(self): + # Test that copy works as expected + reference = bulkio.timestamp.create(100.0, 0.5) + t1 = copy.copy(reference) + 
self.assertTimeEqual(reference, t1, msg="copy.copy() returned different values") + + # Add a positive offset + result = t1 + 1.75 + expected = bulkio.timestamp.create(102.0, 0.25) + self.assertTrue(result is not t1, msg="Add returned same object") + self.assertTimeEqual(reference, t1, msg="Add modified original value") + self.assertTimeEqual(expected, result, msg="Add positive offset") + + # Add a negative offset (i.e., subtract) + result = t1 + -1.75 + expected = bulkio.timestamp.create(98.0, 0.75) + self.assertTimeEqual(reference, t1, msg="Add modified original value") + self.assertTimeEqual(expected, result, msg="Add negative offset") + + # Increment by positive offset + t1 += 2.25 + expected = bulkio.timestamp.create(102.0, 0.75) + self.assertTimeEqual(expected, t1, msg="Increment by positive offset") + + # Increment by negative offset (i.e., decrement) + t1 += -3.875 + expected = bulkio.timestamp.create(98.0, 0.875) + self.assertTimeEqual(expected, t1, msg="Increment by negative offset") + + # Reset to reference time and subtract a positive offset + t1 = copy.copy(reference) + result = t1 - 1.25 + expected = bulkio.timestamp.create(99.0, 0.25) + self.assertTrue(result is not t1, msg="Subtract returned same object") + self.assertTimeEqual(reference, t1, msg="Subtract modified original value") + self.assertTimeEqual(expected, result, msg="Subtract positive offset") + + # Subtract a negative offset (i.e., add) + result = t1 - -4.875 + expected = bulkio.timestamp.create(105.0, 0.375) + self.assertTrue(result is not t1, msg="Subtract returned same object") + self.assertTimeEqual(reference, t1, msg="Subtract modified original value") + self.assertTimeEqual(expected, result, msg="Subtract negative offset") + + # Decrement by positive offset + t1 -= 2.75 + expected = bulkio.timestamp.create(97.0, 0.75) + self.assertTimeEqual(expected, t1, msg="Decrement by positive offset") + + # Decrement by negative offset (i.e., increment) + t1 -= -3.375 + expected = 
bulkio.timestamp.create(101.0, 0.125) + self.assertTimeEqual(expected, t1, msg="Decrement by negative offset") + + # Difference, both positive and negative (exact binary fractions used to allow + # exact comparison) + t1 = reference + 8.875 + self.assertEqual(t1 - reference, 8.875) + self.assertEqual(reference - t1, -8.875) + + def testString(self): + # Test the default epoch (Unix time) + tstamp = bulkio.timestamp.create(0.0, 0.0) + self.assertEqual("1970:01:01::00:00:00.000000", str(tstamp)) + + # Use a recent time with rounding at the microsecond level + tstamp = bulkio.timestamp.create(1451933967.0, 0.2893569) + self.assertEqual("2016:01:04::18:59:27.289357", str(tstamp)) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/bulkioInterfaces/libsrc/testing/tests/runtests b/bulkioInterfaces/libsrc/testing/tests/runtests index c34efd0ed..3b99f2c9f 100755 --- a/bulkioInterfaces/libsrc/testing/tests/runtests +++ b/bulkioInterfaces/libsrc/testing/tests/runtests @@ -1,3 +1,4 @@ +#!/bin/bash # # Runs relative to bulkio project # @@ -7,7 +8,8 @@ bulkio_top=$(cd ../../..;pwd) bulkio_libsrc_top=$bulkio_top/libsrc export LD_LIBRARY_PATH=$bulkio_libsrc_top/.libs:$bulkio_top/.libs:$bulkio_top/jni/.libs:${LD_LIBRARY_PATH} -export PYTHONPATH=$bulkio_libsrc_top/build/lib:${PYTHONPATH} +export PYTHONPATH=$bulkio_libsrc_top/python:${PYTHONPATH} +export CLASSPATH=${bulkio_libsrc_top}/bulkio.jar:${bulkio_top}/BULKIOInterfaces.jar:${CLASSPATH} # Limit the number of threads Java uses for the garbage collector to avoid # misleading Java "out of memory" errors that in all actuality appear to be @@ -15,21 +17,9 @@ export PYTHONPATH=$bulkio_libsrc_top/build/lib:${PYTHONPATH} export _JAVA_OPTIONS="-XX:ParallelGCThreads=1" # -# Run Python based testing.. -# test_xxx_vector.py -- uses sandbox to load components and test data flow -# test_python_helpers.py -- test sri and time helpers +# Run Python Sandbox based testing.. 
# -# -# make sure there is link to bulkioInterfaces in the build/bulkio/ directory -# -if [ ! -h $bulkio_libsrc_top/build/lib*/bulkio/bulkioInterfaces ]; -then - cd $bulkio_libsrc_top/build/lib*/bulkio - ln -s ../../../../build/lib/bulkio/bulkioInterfaces - cd - -fi - if [ $# -gt 0 ] then # run an associated test script @@ -42,14 +32,7 @@ else fi # -# Run Java based testing -# InVector_Port - test vector based bulkio ports -# InString_Port - test string based bulkio ports -# InSDDS_Port - test SDDS based bulkio ports -# OutVector_Port - test vector based bulkio ports -# OutString_Port - test string based bulkio ports -# OutSDDS_Port - test SDDS based bulkio ports -# +# Run Java unit tests # if command -v ant 2>/dev/null then @@ -60,16 +43,18 @@ fi # -# Run C++ based testing -# Bulkio_InPort_Fixture - test vector based bulkio ports -# Bulkio_OutPort_Fixture - test vector based bulkio ports -# Bulkio_Helper_Fixture - test bulkio helper api -# +# Run C++ unit tests # cd cpp ./runtests cd - +# +# Run Python unit tests with XML output +# +# NOTE: virtualenv aliases python in the shell, so it's necessary to explicitly +# run runtests.py via python +(cd python && python ./runtests.py -x) # # Run Large Packet Size Test @@ -96,11 +81,16 @@ cd ../components/Oversized_framedata/tests/ cd - # +# Run port lock Test +# +# +cd ../components/src/tests/ +./test_src.py +cd - + # Run jni reference resolution # # cd ../devices/dev_src/tests/ python test_dev_src.py cd - - -rm $bulkio_libsrc_top/build/lib/bulkio/bulkioInterfaces diff --git a/bulkioInterfaces/libsrc/testing/tests/test_cpp_vector.py b/bulkioInterfaces/libsrc/testing/tests/test_cpp_vector.py index 6aaf93e9d..f09442e59 100644 --- a/bulkioInterfaces/libsrc/testing/tests/test_cpp_vector.py +++ b/bulkioInterfaces/libsrc/testing/tests/test_cpp_vector.py @@ -30,57 +30,38 @@ class Test_CPP_Int8(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Int8', cname='CPP_Ports' ): - BaseVectorPort.__init__(self, 
methodName, ptype, cname ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int8', 'CPP_Ports', *args, **kwargs) class Test_CPP_Int16(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Int16', cname='CPP_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname , - bio_in_module=bulkio.InShortPort, - bio_out_module=bulkio.OutShortPort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int16', 'CPP_Ports', *args, **kwargs) class Test_CPP_Int32(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Int32', cname='CPP_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongPort, - bio_out_module=bulkio.OutLongPort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int32', 'CPP_Ports', *args, **kwargs) class Test_CPP_Int64(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Int64', cname='CPP_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongLongPort, - bio_out_module=bulkio.OutLongLongPort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int64', 'CPP_Ports', *args, **kwargs) class Test_CPP_Float(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Float', cname='CPP_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname , - bio_in_module=bulkio.InFloatPort, - bio_out_module=bulkio.OutFloatPort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Float', 'CPP_Ports', *args, **kwargs) class Test_CPP_Double(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Double', cname='CPP_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InDoublePort, - bio_out_module=bulkio.OutDoublePort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Double', 'CPP_Ports', *args, **kwargs) class 
Test_CPP_File(BaseVectorPort): _sample = "The quick brown fox jumped over the lazy dog" - def __init__(self, methodName='runTest', ptype='File', cname='CPP_Ports', srcData=_sample ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InFilePort, - bio_out_module=bulkio.OutFilePort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'File', 'CPP_Ports', *args, srcData=Test_CPP_File._sample, **kwargs) if __name__ == '__main__': suite = unittest.TestSuite() - for x in [ Test_CPP_Int8, Test_CPP_Int16, Test_CPP_Int32, Test_CPP_Int64, Test_CPP_Float, Test_CPP_Double ]: + for x in [ Test_CPP_Int8, Test_CPP_Int16, Test_CPP_Int32, Test_CPP_Int64, Test_CPP_Float, Test_CPP_Double, Test_CPP_File ]: tests = unittest.TestLoader().loadTestsFromTestCase(x) suite.addTests(tests) try: diff --git a/bulkioInterfaces/libsrc/testing/tests/test_fail_ports.py b/bulkioInterfaces/libsrc/testing/tests/test_fail_ports.py index 83b3d7a7f..f9515a457 100644 --- a/bulkioInterfaces/libsrc/testing/tests/test_fail_ports.py +++ b/bulkioInterfaces/libsrc/testing/tests/test_fail_ports.py @@ -26,273 +26,103 @@ # runs same set of tests for each type of port specified... 
# -from base_ports import * +import os +import time +import unittest -class BaseFailPort(unittest.TestCase): - KEYS = ['c_name', 'c_inport', 'c_outport', 'sink_inport'] - PORT_FLOW = { - 'Int8' : [ 'dataCharIn', 'dataCharOut', 'charIn' ], - 'UInt8' : [ 'dataOctetIn', 'dataOctetOut', 'charIn' ], - 'Int16' : [ 'dataShortIn', 'dataShortOut', 'shortIn' ], - 'UInt16' : [ 'dataUShortIn', 'dataUShortOut', 'shortIn' ], - 'Int32' : [ 'dataLongIn', 'dataLongOut', 'longIn' ], - 'UInt32' : [ 'dataULongIn', 'dataULongOut', 'longIn' ], - 'Int64' : [ 'dataLongLongIn', 'dataLongLongOut', 'longlongIn' ], - 'UInt64' : [ 'dataULongLongIn', 'dataULongLongOut', 'longlongIn' ], - 'Float' : [ 'dataFloatIn', 'dataFloatOut', 'floatIn' ], - 'Double' : [ 'dataDoubleIn', 'dataDoubleOut', 'doubleIn' ], - 'File' : [ 'dataFileIn', 'dataFileOut', 'fileIn' ], - 'Xml' : [ 'dataXMLIn', 'dataXMLOut', 'xmlIn' ] - } - - def __init__( - self, - methodName='runTest', - ptype='Int8', - cname=None, - srcData=None, - cmpData=None, - bio_in_module=bulkio.InCharPort, - bio_out_module=bulkio.OutCharPort ): - unittest.TestCase.__init__(self, methodName) - self.c_dir = 'components' - self.c_name = cname - self.ptype = ptype - self.execparams = {} - self.c_inport = None - self.c_outport = None - self.sink_inport = None - self.srcData = srcData - self.cmpData = cmpData - self.ctx = dict().fromkeys(BaseVectorPort.KEYS) - self.bio_in_module = bio_in_module - self.bio_out_module = bio_out_module - - def getPortFlow(self, ptype='Int8' ): - return BaseVectorPort.PORT_FLOW[ptype] +from ossie.utils import sb +from base_ports import test_dir - def setContext(self, ctx=None): - self.ctx[ BaseVectorPort.KEYS[0] ] = self.c_name - self.ctx[ BaseVectorPort.KEYS[1] ] = BaseVectorPort.PORT_FLOW[self.ptype][0] - self.ctx[ BaseVectorPort.KEYS[2] ] = BaseVectorPort.PORT_FLOW[self.ptype][1] - self.ctx[ BaseVectorPort.KEYS[3] ] = BaseVectorPort.PORT_FLOW[self.ptype][2] - tmp=self.ctx - if ctx: - tmp = ctx - try: - self.c_inport = 
tmp['c_inport'] - self.c_outport = tmp['c_outport'] - self.sink_inport = tmp['sink_inport'] - except: - pass +class BaseFailPort(unittest.TestCase): + def __init__(self, component, *args, **kwargs): + unittest.TestCase.__init__(self, *args, **kwargs) + self.component = component def setUp(self): - self.setContext() - if self.srcData: - self.seq = self.srcData - else: - self.seq = range(100) - self.launchedComps = [] - - def tearDown(self): - for comp in self.launchedComps: - comp.releaseObject() - self.launchedComps = [] + c_spd_xml = os.path.join(test_dir, 'components', self.component, self.component+'.spd.xml') + execparams = { 'LOGGING_CONFIG_URI' : 'file://'+os.getcwd()+'/log4j.ex1' } - def launch(self, *args, **kwargs): - comp = sb.launch(*args, **kwargs) - self.launchedComps.append(comp) - return comp + self.comp1 = sb.launch(c_spd_xml, execparams=execparams) + self.comp2 = sb.launch(c_spd_xml, execparams=execparams) - def test_connection_fail(self): - import os - in_sri = bulkio.sri.create() - in_sri.streamID = "VECTOR-PUSHPACKET-SIMPLE" - in_sri.mode = 0 - in_sri.xdelta = 1/33.0 - dsource=sb.DataSource() - dsink=sb.DataSink() - c_spd_xml = test_dir + self.c_dir + '/' + self.c_name + '/' + self.c_name + '.spd.xml' - print "Test Component:" + c_spd_xml - self.execparams = { 'LOGGING_CONFIG_URI' : 'file://'+os.getcwd()+'/log4j.ex1' } - data=self.seq - c=self.launch( c_spd_xml, execparams=self.execparams) - c1=self.launch( c_spd_xml, execparams=self.execparams) - - dsource.connect(c, providesPortName=self.c_inport ) - c.connect(c1,usesPortName=self.c_outport,providesPortName=self.c_inport) - c.connect(dsink, providesPortName=self.sink_inport, usesPortName=self.c_outport) - os.kill(c1._pid, 9) - while ( c1._process.isAlive() == True ) : time.sleep(.5) - dsource.start() - dsource.push( data, EOS=False, streamID=in_sri.streamID, sampleRate=33.0, complexData=(in_sri.mode==1)) - dsource.push( data, EOS=False) - dsource.push( data, EOS=False) - dsource.push( data, 
EOS=False) - dsource.push( data, EOS=False) - dsource.push( data, EOS=False) - dsource.push( data, EOS=False) - dsource.push( data, EOS=False) - dsource.push( data, EOS=False) - dsource.push( data, EOS=True) - c.start() - adata=dsink.getData(eos_block=True) - - -class Test_Int8_Fail_CPP(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int8', cname='CPP_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname ) - pass - -class Test_Int16_Fail_CPP(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int16', cname='CPP_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname , - bio_in_module=bulkio.InShortPort, - bio_out_module=bulkio.OutShortPort ) - pass - -class Test_Int32_Fail_CPP(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int32', cname='CPP_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongPort, - bio_out_module=bulkio.OutLongPort ) - pass + def tearDown(self): + sb.release() -class Test_Int64_Fail_CPP(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int64', cname='CPP_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongLongPort, - bio_out_module=bulkio.OutLongLongPort ) - pass + def _testConnectionFail(self, format, data): + uses_name = 'data%sOut' % format + provides_name = 'data%sIn' % format -class Test_Float_Fail_CPP(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Float', cname='CPP_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname , - bio_in_module=bulkio.InFloatPort, - bio_out_module=bulkio.OutFloatPort ) - pass + source = sb.DataSource() + source.connect(self.comp1, providesPortName=provides_name) + self.comp1.connect(self.comp2, usesPortName=uses_name) -class Test_Double_Fail_CPP(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Double', cname='CPP_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - 
bio_in_module=bulkio.InDoublePort, - bio_out_module=bulkio.OutDoublePort ) - pass + sink = sb.DataSink() + self.comp1.connect(sink, usesPortName=uses_name) -class Test_File_Fail_CPP(BaseFailPort): - _sample = "The quick brown fox jumped over the lazy dog" - def __init__(self, methodName='runTest', ptype='File', cname='CPP_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, srcData=Test_File_Fail_CPP._sample, - bio_in_module=bulkio.InFilePort, - bio_out_module=bulkio.OutFilePort ) - pass + sb.start() + os.kill(self.comp2._pid, 9) + while self.comp2._process.isAlive(): + time.sleep(0.1) -class Test_Int8_Fail_Java(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int8', cname='Java_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname ) - pass + source.push(data, EOS=False, streamID='test_connection_fail') + for ii in xrange(9): + source.push(data, EOS=False) + source.push(data, EOS=True) -class Test_Int16_Fail_Java(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int16', cname='Java_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname , - bio_in_module=bulkio.InShortPort, - bio_out_module=bulkio.OutShortPort ) - pass + sink.getData(eos_block=True) -class Test_Int32_Fail_Java(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int32', cname='Java_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongPort, - bio_out_module=bulkio.OutLongPort ) - pass + def testCharConnectionFail(self): + self._testConnectionFail('Char', range(100)) -class Test_Int64_Fail_Java(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int64', cname='Java_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongLongPort, - bio_out_module=bulkio.OutLongLongPort ) - pass + def testOctetConnectionFail(self): + self._testConnectionFail('Octet', range(100)) -class Test_Float_Fail_Java(BaseFailPort): - def __init__(self, 
methodName='runTest', ptype='Float', cname='Java_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname , - bio_in_module=bulkio.InFloatPort, - bio_out_module=bulkio.OutFloatPort ) - pass + def testShortConnectionFail(self): + self._testConnectionFail('Short', range(100)) -class Test_Double_Fail_Java(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Double', cname='Java_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InDoublePort, - bio_out_module=bulkio.OutDoublePort ) - pass + def testUShortConnectionFail(self): + self._testConnectionFail('UShort', range(100)) -class Test_File_Fail_Java(BaseFailPort): - _sample = "The quick brown fox jumped over the lazy dog" - def __init__(self, methodName='runTest', ptype='File', cname='Java_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, srcData=Test_File_Fail_Java._sample, - bio_in_module=bulkio.InFilePort, - bio_out_module=bulkio.OutFilePort ) - pass + def testLongConnectionFail(self): + self._testConnectionFail('Long', range(100)) + def testULongConnectionFail(self): + self._testConnectionFail('ULong', range(100)) + def testLongLongConnectionFail(self): + self._testConnectionFail('LongLong', range(100)) -class Test_Int8_Fail_Python(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int8', cname='Python_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname ) - pass + def testULongLongConnectionFail(self): + self._testConnectionFail('ULongLong', range(100)) -class Test_Int16_Fail_Python(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int16', cname='Python_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname , - bio_in_module=bulkio.InShortPort, - bio_out_module=bulkio.OutShortPort ) - pass + def testFloatConnectionFail(self): + self._testConnectionFail('Float', range(100)) -class Test_Int32_Fail_Python(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int32', 
cname='Python_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongPort, - bio_out_module=bulkio.OutLongPort ) - pass + def testDoubleConnectionFail(self): + self._testConnectionFail('Double', range(100)) -class Test_Int64_Fail_Python(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Int64', cname='Python_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongLongPort, - bio_out_module=bulkio.OutLongLongPort ) - pass + def testFileConnectionFail(self): + text = "The quick brown fox jumped over the lazy dog" + self._testConnectionFail('File', text) -class Test_Float_Fail_Python(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Float', cname='Python_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname , - bio_in_module=bulkio.InFloatPort, - bio_out_module=bulkio.OutFloatPort ) - pass -class Test_Double_Fail_Python(BaseFailPort): - def __init__(self, methodName='runTest', ptype='Double', cname='Python_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InDoublePort, - bio_out_module=bulkio.OutDoublePort ) - pass +class CPPFailPortTest(BaseFailPort): + def __init__(self, *args, **kwargs): + BaseFailPort.__init__(self, 'CPP_Ports', *args, **kwargs) -class Test_File_Fail_Python(BaseFailPort): - _sample = "The quick brown fox jumped over the lazy dog" - def __init__(self, methodName='runTest', ptype='File', cname='Python_Ports' ): - BaseFailPort.__init__(self, methodName, ptype, cname, srcData=Test_File_Fail_Python._sample, - bio_in_module=bulkio.InFilePort, - bio_out_module=bulkio.OutFilePort ) - pass +class JavaFailPortTest(BaseFailPort): + def __init__(self, *args, **kwargs): + BaseFailPort.__init__(self, 'Java_Ports', *args, **kwargs) +class PythonFailPortTest(BaseFailPort): + def __init__(self, *args, **kwargs): + BaseFailPort.__init__(self, 'Python_Ports', *args, **kwargs) if __name__ == '__main__': suite = 
unittest.TestSuite() - for x in [ Test_Int8_Fail_CPP, Test_Int16_Fail_CPP, Test_Int32_Fail_CPP, Test_Double_Fail_CPP, Test_Float_Fail_CPP, Test_File_Fail_CPP, - Test_Int8_Fail_Java, Test_Int16_Fail_Java, Test_Int32_Fail_Java, Test_Double_Fail_Java, Test_Float_Fail_Java, Test_File_Fail_Java, - Test_Int8_Fail_Python, Test_Int16_Fail_Python, Test_Int32_Fail_Python, Test_Double_Fail_Python, Test_Float_Fail_Python, Test_File_Fail_Python ]: + for x in [ CPPFailPortTest, JavaFailPortTest, PythonFailPortTest ]: tests = unittest.TestLoader().loadTestsFromTestCase(x) suite.addTests(tests) try: diff --git a/bulkioInterfaces/libsrc/testing/tests/test_helpers.py b/bulkioInterfaces/libsrc/testing/tests/test_helpers.py deleted file mode 100644 index 4eb3c75e9..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/test_helpers.py +++ /dev/null @@ -1,72 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -import random -import unittest -import sys -import time -import types -from ossie.utils import sb - -# Add the local search paths to find local IDL files -from ossie.utils import model -from ossie.utils.idllib import IDLLibrary -model._idllib = IDLLibrary() -model._idllib.addSearchPath('../../../idl') -model._idllib.addSearchPath('/usr/local/redhawk/core/share/idl') - -# add local build path to test out api, issue with bulkio. and bulkio.bulkioInterfaces... __init__.py -# differs during build process -sys.path = [ '../../build/lib' ] + sys.path - -import bulkio - -def str_to_class(s): - if s in globals() and isinstance(globals()[s], types.ClassType): - return globals()[s] - return None - -class SRI_Tests(unittest.TestCase): - def __init__(self, methodName='runTest'): - unittest.TestCase.__init__(self, methodName) - - def setUp(self): - self.seq = range(10) - - def test_create(self): - sri = bulkio.sri.create() - - self.assertEqual( sri.hversion, 1, "Version Incompatable" ) - -if __name__ == '__main__': - if len(sys.argv) < 2 : - unittest.main() - else: - suite = unittest.TestLoader().loadTestsFromTestCase(globals()[sys.argv[1]] ) - unittest.TextTestRunner(verbosity=2).run(suite) - -##python -m unittest test_module1 test_module2 -##python -m unittest test_module.TestClass -##python -m unittest test_module.TestClass.test_method -##You can pass in a list with any combination of module names, and fully qualified class or method names. 
-##You can run tests with more detail (higher verbosity) by passing in the -v flag: -##python -m unittest -v test_module -##For a list of all the command-line options: -##python -m unittest -h - diff --git a/bulkioInterfaces/libsrc/testing/tests/test_java_vector.py b/bulkioInterfaces/libsrc/testing/tests/test_java_vector.py index 7c494ce57..5d855b81f 100644 --- a/bulkioInterfaces/libsrc/testing/tests/test_java_vector.py +++ b/bulkioInterfaces/libsrc/testing/tests/test_java_vector.py @@ -22,44 +22,28 @@ class Test_Java_Int8(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Int8', cname='Java_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int8', 'Java_Ports', *args, **kwargs) class Test_Java_Int16(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Int16', cname='Java_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InShortPort, - bio_out_module=bulkio.OutShortPort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int16', 'Java_Ports', *args, **kwargs) class Test_Java_Int32(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Int32', cname='Java_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongPort, - bio_out_module=bulkio.OutLongPort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int32', 'Java_Ports', *args, **kwargs) class Test_Java_Int64(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Int64', cname='Java_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InLongLongPort, - bio_out_module=bulkio.OutLongLongPort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int64', 'Java_Ports', *args, **kwargs) class Test_Java_Float(BaseVectorPort): - def __init__(self, methodName='runTest', 
ptype='Float', cname='Java_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InFloatPort, - bio_out_module=bulkio.OutFloatPort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Float', 'Java_Ports', *args, **kwargs) class Test_Java_Double(BaseVectorPort): - def __init__(self, methodName='runTest', ptype='Double', cname='Java_Ports' ): - BaseVectorPort.__init__(self, methodName, ptype, cname, - bio_in_module=bulkio.InDoublePort, - bio_out_module=bulkio.OutDoublePort ) - pass + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Double', 'Java_Ports', *args, **kwargs) if __name__ == '__main__': diff --git a/bulkioInterfaces/libsrc/testing/tests/test_python_helpers.py b/bulkioInterfaces/libsrc/testing/tests/test_python_helpers.py deleted file mode 100644 index 96ae61c80..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/test_python_helpers.py +++ /dev/null @@ -1,334 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# - -import copy -from omniORB import any - -from base_ports import * -from bulkio import * -from ossie.cf import CF - -class Test_PythonHelpers(unittest.TestCase): - def __init__( self, methodName='runTest' ): - unittest.TestCase.__init__(self, methodName) - - def setUp(self): - pass - - def tearDown(self): - pass - - def test_sri_create(self): - ## - ## test bulkio helper methods - ## - print "Bulkio Helpers: SRI CREATE" - sri = bulkio.sri.create() - self.assertEqual(sri.streamID,"defStream","Stream ID mismatch.") - self.assertEqual(sri.hversion,1,"Version mismatch.") - self.assertEqual(sri.xunits,1,"XUnits mismatch.") - self.assertAlmostEqual(sri.xstart,0.00,3, msg="XStart mismatch.") - self.assertAlmostEqual(sri.xdelta,1.00,3, msg="XDelta mismatch.") - self.assertEqual(sri.yunits,0,"YUnits mismatch.") - self.assertAlmostEqual(sri.ystart,0.00,3, msg="YStart mismatch.") - self.assertAlmostEqual(sri.ydelta,0.00,3, msg="YDelta mismatch.") - self.assertEqual(sri.subsize,0,"Subsize mismatch.") - self.assertEqual(sri.blocking,False,"Blocking mismatch.") - self.assertEqual(sri.keywords,[],"Keywords mismatch.") - - print "Bulkio Helpers: SRI CREATE - part Due" - sri = bulkio.sri.create( "NEW-STREAM-ID" ) - self.assertEqual(sri.streamID,"NEW-STREAM-ID","Stream ID mismatch.") - - - def test_sri_compare(self): - - ## - ## test bulkio helper method - ## - print "Bulkio Helpers: SRI CREATE" - a_sri = bulkio.sri.create() - b_sri = bulkio.sri.create() - c_sri = bulkio.sri.create() - c_sri.streamID = "THIS_DOES_NOT_MATCH" - - self.assertEqual( bulkio.sri.compare( a_sri, b_sri ), True, " bulkio.sri.compare method - same.") - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - StreamID .") - - c_sri = bulkio.sri.create() - c_sri.hversion = 2 - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - hversion ") - - c_sri = bulkio.sri.create() - c_sri.xstart = 3 - self.assertEqual( 
bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - xstart ") - - c_sri = bulkio.sri.create() - c_sri.xdelta = 100.0 - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - xdelta ") - - c_sri = bulkio.sri.create() - c_sri.xunits = 100.0 - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - xunits ") - - c_sri = bulkio.sri.create() - c_sri.subsize = 100 - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - subsize ") - - c_sri = bulkio.sri.create() - c_sri.ystart = 3 - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - ystart ") - - c_sri = bulkio.sri.create() - c_sri.ydelta = 100.0 - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - ydelta ") - - c_sri = bulkio.sri.create() - c_sri.yunits = 100.0 - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - yunits ") - - c_sri = bulkio.sri.create() - c_sri.mode = 100 - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - mode ") - - kv = CF.DataType( id="key_one", value=any.to_any(1) ) - kv2 = CF.DataType( id="key_one", value=any.to_any(1) ) - a_sri.keywords = [kv] - c_sri = bulkio.sri.create() - c_sri.keywords = [kv2] - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), True, " bulkio.sri.compare method - same - keyword item ") - - kv2 = CF.DataType( id="key_one", value=any.to_any(100) ) - c_sri = bulkio.sri.create() - c_sri.keywords = [kv2] - self.assertEqual( bulkio.sri.compare( a_sri, c_sri ), False, " bulkio.sri.compare method - different - keywords value mismatch ") - - kv2 = CF.DataType( id="key_two", value=any.to_any(100) ) - c_sri = bulkio.sri.create() - c_sri.keywords = [kv2] - self.assertEqual( bulkio.sri.compare( a_sri, 
c_sri ), False, " bulkio.sri.compare method - different - keywords name mismatch ") - - - def test_timestamp_create(self): - ## - ## test bulkio helper methods - ## - print "Bulkio Helpers: TimeStamp CREATE" - ts = bulkio.timestamp.now() - self.assertEqual(ts.tcmode,BULKIO.TCM_CPU, msg=" tcmode mismatch.") - self.assertEqual(ts.tcstatus,BULKIO.TCS_VALID, msg=" tcstatus mismatch.") - self.assertAlmostEqual(ts.toff,0.00,3, msg=" tcoff mismatch.") - - print "Bulkio Helpers: TimeStamp CPU TimeStamp" - ts = bulkio.timestamp.cpuTimeStamp() - self.assertEqual(ts.tcmode,BULKIO.TCM_CPU, msg=" tcmode mismatch.") - self.assertEqual(ts.tcstatus,BULKIO.TCS_VALID, msg=" tcstatus mismatch.") - self.assertAlmostEqual(ts.toff,0.00,3, msg=" tcoff mismatch.") - - ## - ## test bulkio helper methods - ## - print "Bulkio Helpers: TimeStamp CREATE with time" - ts = bulkio.timestamp.create( 100.0, 0.125 ) - self.assertEqual(ts.tcmode,BULKIO.TCM_CPU, msg=" tcmode mismatch.") - self.assertEqual(ts.tcstatus,BULKIO.TCS_VALID, msg=" tcstatus mismatch.") - self.assertAlmostEqual(ts.twsec,100.0, 3, msg=" tcwsec mismatch.") - self.assertAlmostEqual(ts.tfsec,0.125, 3, msg=" tcfsec mismatch.") - - print "Bulkio Helpers: TimeStamp CREATE with time" - ts = bulkio.timestamp.create( 100.0, 0.125, BULKIO.TCM_SDDS) - self.assertEqual(ts.tcmode,BULKIO.TCM_SDDS, msg=" tcmode mismatch.") - self.assertEqual(ts.tcstatus,BULKIO.TCS_VALID, msg=" tcstatus mismatch.") - self.assertAlmostEqual(ts.twsec,100.0, 3, msg=" tcwsec mismatch.") - self.assertAlmostEqual(ts.tfsec,0.125, 3, msg=" tcfsec mismatch.") - - def assertTimeEqual(self, t1, t2, msg=''): - self.assertEqual(t1, t2, msg='%s expected:<%s> but was:<%s>' % (msg, t1, t2)) - - def test_timestamp_normalize(self): - # NOTE: All tests use fractional portions that are exact binary fractions to - # avoid potential roundoff issues - - # Already normalized, no change - tstamp = bulkio.timestamp.create(100.0, 0.5) - bulkio.timestamp.normalize(tstamp) - 
self.assertTimeEqual(bulkio.timestamp.create(100.0, 0.5), tstamp, msg="Already normalized time") - - # Whole seconds has fractional portion, should be moved to fractional seconds - tstamp.twsec = 100.25; - tstamp.tfsec = 0.25; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(100.0, 0.5), tstamp, msg="Normalizing whole") - - # Whole seconds has fractional portion, should be moved to fractional seconds - # leading to carry - tstamp.twsec = 100.75; - tstamp.tfsec = 0.75; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(101.0, 0.5), tstamp, msg="Normalizing whole with carry") - - # Fractional seconds contains whole portion, should be moved to whole seconds - tstamp.twsec = 100.0; - tstamp.tfsec = 2.5; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(102.0, 0.5), tstamp, msg="Normalizing fractional") - - # Both parts require normalization; fractional portion of whole seconds adds an - # additional carry - tstamp.twsec = 100.75; - tstamp.tfsec = 2.75; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(103.0, 0.5), tstamp, msg="Normalizing both") - - # Negative fractional value should borrow - tstamp.twsec = 100.0; - tstamp.tfsec = -0.25; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(99.0, 0.75), tstamp, msg="Normalizing negative fractional") - - # Negative fractional value with magnitude greater than one - tstamp.twsec = 100.0; - tstamp.tfsec = -3.125; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(96.0, 0.875), tstamp, msg="Normalizing negative fractional > 1") - - # Fractional portion of whole seconds greater than negative fractional seconds - tstamp.twsec = 100.5; - tstamp.tfsec = -.125; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(100.0, 0.375), tstamp, msg="Normalizing both with negative fractional") - - # Negative 
fractional seconds greater than fractional portion of whole seconds - tstamp.twsec = 100.125; - tstamp.tfsec = -.5; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(99.0, 0.625), tstamp, msg="Normalizing both with borrow") - - # Negative fractional seconds have whole portion, but seconds whole seconds have - # fractional portion with larger magnitude than remaining fractional seconds - tstamp.twsec = 100.75; - tstamp.tfsec = -2.5; - bulkio.timestamp.normalize(tstamp) - self.assertTimeEqual(bulkio.timestamp.create(98.0, 0.25), tstamp, msg="Normalizing both with negative fractional > 1") - - def test_timestamp_compare(self): - t1 = bulkio.timestamp.create(100.0, 0.5) - t2 = bulkio.timestamp.create(100.0, 0.5) - self.assertTimeEqual(t1, t2) - - # Only fractional seconds differ - t1 = bulkio.timestamp.create(100.0, 0.5) - t2 = bulkio.timestamp.create(100.0, 0.25) - self.assertTrue(t1 > t2, msg="Time with larger fractional did not compare greater") - self.assertTrue(t2 < t1, msg="Time with smaller fractional did not compare lesser") - - # Only whole seconds differ - t1 = bulkio.timestamp.create(100.0, 0.75) - t2 = bulkio.timestamp.create(101.0, 0.75) - self.assertTrue(t1 < t2, msg="Time with smaller whole did not compare lesser") - self.assertTrue(t2 > t1, msg="Time with larger whole did not compare greater") - - # Whole seconds differ, but fractional seconds have the opposite ordering (which has no effect) - t1 = bulkio.timestamp.create(100.0, 0.75) - t2 = bulkio.timestamp.create(5000.0, 0.25) - self.assertTrue(t1 < t2, msg="Time with smaller whole and larger fractional did not compare lesser") - self.assertTrue(t2 > t1, msg="Time with larger whole and smaller fractional did not compare greater") - - def test_timestamp_operators(self): - # Test that copy works as expected - reference = bulkio.timestamp.create(100.0, 0.5) - t1 = copy.copy(reference) - self.assertTimeEqual(reference, t1, msg="copy.copy() returned different values") - - 
# Add a positive offset - result = t1 + 1.75 - expected = bulkio.timestamp.create(102.0, 0.25) - self.assertTrue(result is not t1, msg="Add returned same object") - self.assertTimeEqual(reference, t1, msg="Add modified original value") - self.assertTimeEqual(expected, result, msg="Add positive offset") - - # Add a negative offset (i.e., subtract) - result = t1 + -1.75 - expected = bulkio.timestamp.create(98.0, 0.75) - self.assertTimeEqual(reference, t1, msg="Add modified original value") - self.assertTimeEqual(expected, result, msg="Add negative offset") - - # Increment by positive offset - t1 += 2.25 - expected = bulkio.timestamp.create(102.0, 0.75) - self.assertTimeEqual(expected, t1, msg="Increment by positive offset") - - # Increment by negative offset (i.e., decrement) - t1 += -3.875 - expected = bulkio.timestamp.create(98.0, 0.875) - self.assertTimeEqual(expected, t1, msg="Increment by negative offset") - - # Reset to reference time and subtract a positive offset - t1 = copy.copy(reference) - result = t1 - 1.25 - expected = bulkio.timestamp.create(99.0, 0.25) - self.assertTrue(result is not t1, msg="Subtract returned same object") - self.assertTimeEqual(reference, t1, msg="Subtract modified original value") - self.assertTimeEqual(expected, result, msg="Subtract positive offset") - - # Subtract a negative offset (i.e., add) - result = t1 - -4.875 - expected = bulkio.timestamp.create(105.0, 0.375) - self.assertTrue(result is not t1, msg="Subtract returned same object") - self.assertTimeEqual(reference, t1, msg="Subtract modified original value") - self.assertTimeEqual(expected, result, msg="Subtract negative offset") - - # Decrement by positive offset - t1 -= 2.75 - expected = bulkio.timestamp.create(97.0, 0.75) - self.assertTimeEqual(expected, t1, msg="Decrement by positive offset") - - # Decrement by negative offset (i.e., increment) - t1 -= -3.375 - expected = bulkio.timestamp.create(101.0, 0.125) - self.assertTimeEqual(expected, t1, msg="Decrement by 
negative offset") - - # Difference, both positive and negative (exact binary fractions used to allow - # exact comparison) - t1 = reference + 8.875 - self.assertEqual(t1 - reference, 8.875) - self.assertEqual(reference - t1, -8.875) - - def test_timestamp_str(self): - # Test the default epoch (Unix time) - tstamp = bulkio.timestamp.create(0.0, 0.0) - self.assertEqual("1970:01:01::00:00:00.000000", str(tstamp)) - - # Use a recent time with rounding at the microsecond level - tstamp = bulkio.timestamp.create(1451933967.0, 0.2893569) - self.assertEqual("2016:01:04::18:59:27.289357", str(tstamp)) - -if __name__ == '__main__': - suite = unittest.TestSuite() - for x in [ Test_PythonHelpers ]: - tests = unittest.TestLoader().loadTestsFromTestCase(x) - suite.addTests(tests) - try: - import xmlrunner - runner = xmlrunner.XMLTestRunner(verbosity=2) - except ImportError: - runner = unittest.TextTestRunner(verbosity=2) - runner.run(suite) - diff --git a/bulkioInterfaces/libsrc/testing/tests/test_python_multiout.py b/bulkioInterfaces/libsrc/testing/tests/test_python_multiout.py deleted file mode 100644 index d9b657db1..000000000 --- a/bulkioInterfaces/libsrc/testing/tests/test_python_multiout.py +++ /dev/null @@ -1,612 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK bulkioInterfaces. -# -# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -from bulkio import * -from ossie.cf import CF -from omniORB import any -from omniORB import CORBA -import logging -import random -import unittest -import sys -from ossie.utils import sb -import time -from bulkio.bulkioInterfaces.BULKIO import * - -# remove when sandbox support for relative path works -test_dir='../' - -def str_to_class(s): - if s in globals() and isinstance(globals()[s], types.ClassType): - return globals()[s] - return None - - -class BaseMultiOut(unittest.TestCase): - KEYS = ['c_name', 'c_inport', 'c_outport', 'sink_inport'] - PORT_FLOW = { - 'Int8' : [ 'dataCharIn', 'dataCharOut', 'charIn' ], - 'UInt8' : [ 'dataOctetIn', 'dataOctetOut', 'charIn' ], - 'Int16' : [ 'dataShortIn', 'dataShortOut', 'shortIn' ], - 'UInt16' : [ 'dataUShortIn', 'dataUShortOut', 'shortIn' ], - 'Int32' : [ 'dataLongIn', 'dataLongOut', 'longIn' ], - 'UInt32' : [ 'dataULongIn', 'dataULongOut', 'longIn' ], - 'Int64' : [ 'dataLongLongIn', 'dataLongLongOut', 'longlongIn' ], - 'UInt64' : [ 'dataULongLongIn', 'dataULongLongOut', 'longlongIn' ], - 'Float' : [ 'dataFloatIn', 'dataFloatOut', 'floatIn' ], - 'Double' : [ 'dataDoubleIn', 'dataDoubleOut', 'doubleIn' ], - 'File' : [ 'dataFileIn', 'dataFileOut', 'fileIn' ], - 'Xml' : [ 'dataXMLIn', 'dataXMLOut', 'xmlIn' ] - } - - def __init__( - self, - methodName='runTest', - ptype='Int8', - cname=None, - srcData=None, - cmpData=None, - bio_in_module=bulkio.InCharPort, - bio_out_module=bulkio.OutCharPort ): - unittest.TestCase.__init__(self, methodName) - self.c_dir = 'components' - self.c_name = cname - self.ptype = ptype - self.execparams = {} - self.c_inport = None - self.c_outport = None - self.sink_inport = None - self.srcData = srcData - self.cmpData = cmpData - self.ctx = dict().fromkeys(BaseMultiOut.KEYS) - self.bio_in_module = bio_in_module - self.bio_out_module = bio_out_module 
- - - def getPortFlow(self, ptype='Int8' ): - return BaseMultiOut.PORT_FLOW[ptype] - - def setContext(self, ctx=None): - self.ctx[ BaseMultiOut.KEYS[0] ] = self.c_name - self.ctx[ BaseMultiOut.KEYS[1] ] = BaseMultiOut.PORT_FLOW[self.ptype][0] - self.ctx[ BaseMultiOut.KEYS[2] ] = BaseMultiOut.PORT_FLOW[self.ptype][1] - self.ctx[ BaseMultiOut.KEYS[3] ] = BaseMultiOut.PORT_FLOW[self.ptype][2] - tmp=self.ctx - if ctx: - tmp = ctx - try: - self.c_inport = tmp['c_inport'] - self.c_outport = tmp['c_outport'] - self.sink_inport = tmp['sink_inport'] - except: - pass - - - def setUp(self): - self.setContext() - if self.srcData: - self.seq = self.srcData - else: - self.seq = range(100) - - self.orb = CORBA.ORB_init(); - self.rootPOA = self.orb.resolve_initial_references("RootPOA") - self.logger = logging.getLogger(self.ptype[0]) - self.logger.setLevel(logging.NOTSET) - self.logger.info( "Setup - Multiout Create Ports Table " ); - - self.ip1 = bulkio.InFloatPort("sink_1", self.logger ); - self.ip1_oid = self.rootPOA.activate_object(self.ip1); - self.ip2 = bulkio.InFloatPort("sink_2", self.logger ); - self.ip2_oid = self.rootPOA.activate_object(self.ip2); - self.ip3 = bulkio.InFloatPort("sink_3", self.logger ); - self.ip3_oid = self.rootPOA.activate_object(self.ip3); - self.ip4 = bulkio.InFloatPort("sink_4", self.logger ); - self.ip4_oid = self.rootPOA.activate_object(self.ip4); - self.port = bulkio.OutFloatPort("multiout_source", self.logger ); - self.port_oid = self.rootPOA.activate_object(self.port); - - self.desc_list=[]; - self.logger.info( "Setup - Multiout Connection Table " ); - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_1", stream_id="stream-1-1" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_1", stream_id="stream-1-2" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", 
connection_id="connection_1", stream_id="stream-1-3" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_2", stream_id="stream-2-1" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_2", stream_id="stream-2-2" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_2", stream_id="stream-2-3" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_3", stream_id="stream-3-1" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_3", stream_id="stream-3-2" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_3", stream_id="stream-3-3" ) ) - self.desc_list.append( bulkio.connection_descriptor_struct( port_name="multiout_source", connection_id="connection_4", stream_id="stream-4-1" ) ) - - - def tearDown(self): - self.rootPOA.deactivate_object(self.ip1_oid); - self.rootPOA.deactivate_object(self.ip2_oid); - self.rootPOA.deactivate_object(self.ip3_oid); - self.rootPOA.deactivate_object(self.ip4_oid); - self.rootPOA.deactivate_object(self.port_oid); - - - def test_multiout_sri_filtered(self): - self.logger.info( "Multiout SRI Filtered - BEGIN " ); - - clist = self.port._get_connections(); - self.assertEqual( clist != None, True, "Connection List Error" ) - - self.logger.info( "Multiout SRI Filtered - Create Connections and Filter list " ); - self.port.connectPort( self.ip1._this(), "connection_1"); - self.port.connectPort( self.ip2._this(), "connection_2"); - self.port.connectPort( self.ip3._this(), "connection_3"); - self.port.connectPort( self.ip4._this(), "connection_4"); - self.port.updateConnectionFilter( self.desc_list ); - - ## - ## Push SRI for IP1 - ## - - 
filter_stream_id = "stream-1-1" - srate=11.0 - xdelta = 1.0/srate - TS = bulkio.timestamp.now(); - sri = bulkio.sri.create( filter_stream_id, srate); - self.port.pushSRI( sri ); - - streams = self.ip1._get_activeSRIs(); - self.assertEqual( streams != None, True, "Active SRI List Error" ) - self.assertEqual( len(streams) == 1, True, "Active SRI List Length Error" ) - asri = streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "activeSRIs - Mode Mismatch") - - - streams = self.ip2._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI Filtered - Port 2 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI Filtered - Port 2 SRI was Received, Failed") - - streams = self.ip3._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI Filtered - Port 3 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI Filtered - Port 3 SRI was Received, Failed") - - streams = self.ip4._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI Filtered - Port 4 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI Filtered - Port 4 SRI was Received, Failed") - - self.port.updateConnectionFilter( None ); - - filter_stream_id = "stream-1-1" - srate=11.0 - xdelta = 1.0/srate - TS = bulkio.timestamp.now(); - sri = bulkio.sri.create( filter_stream_id, srate); - self.port.pushSRI( sri ); - - streams = self.ip1._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI Filtered - Port 1 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI Filtered - Port 1 SRI was Received, Failed") - asri=streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "Multiout SRI Filtered Port 1 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "Multiout SRI Filtered Port 1 activeSRIs - Mode Mismatch") - - - streams = 
self.ip2._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI Filtered - Port 2 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI Filtered - Port 2 SRI was Received, Failed") - asri=streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "Multiout SRI Filtered Port 2 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "Multiout SRI Filtered Port 2 activeSRIs - Mode Mismatch") - - streams = self.ip3._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI Filtered - Port 3 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI Filtered - Port 3 SRI was Received, Failed") - asri=streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "Multiout SRI Filtered Port 3 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "Multiout SRI Filtered Port 3 activeSRIs - Mode Mismatch") - - streams = self.ip4._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI Filtered - Port 4 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI Filtered - Port 4 SRI was Received, Failed") - asri=streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "Multiout SRI Filtered Port 4 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "Multiout SRI Filtered Port 4 activeSRIs - Mode Mismatch") - - - def test_multiout_sri_eos_filter(self): - self.logger.info( "Multiout SRI/EOS Filtered - BEGIN " ); - - clist = self.port._get_connections(); - self.assertEqual( clist != None, True, "Connection List Error" ) - - self.logger.info( "Multiout SRI/EOS Filtered - Create Connections and Filter list " ); - self.port.connectPort( self.ip1._this(), "connection_1"); - self.port.connectPort( self.ip2._this(), "connection_2"); - self.port.connectPort( self.ip3._this(), "connection_3"); - self.port.connectPort( self.ip4._this(), "connection_4"); - 
self.port.updateConnectionFilter( self.desc_list ); - - ## - ## Push SRI for IP1 - ## - filter_stream_id = "stream-1-1" - srate=11.0 - xdelta = 1.0/srate - TS = bulkio.timestamp.now(); - sri = bulkio.sri.create( filter_stream_id, srate); - self.port.pushSRI( sri ); - - streams = self.ip1._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 1 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI/EOS Filtered - Port 1 SRI was Received, Failed") - asri = streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "activeSRIs - Mode Mismatch") - - streams = self.ip2._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 2 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 2 SRI was Received, Failed") - - streams = self.ip3._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 3 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 3 SRI was Received, Failed") - - streams = self.ip4._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 4 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 4 SRI was Received, Failed") - - ## - ## Push SRI for IP2 - ## - filter_stream_id = "stream-2-1" - srate=22.0 - xdelta = 1.0/srate - sri = bulkio.sri.create( filter_stream_id, srate) - self.logger.info( "Multiout SRI/EOS Filter - sid:" + filter_stream_id ) - self.port.pushSRI( sri ); - - streams = self.ip1._get_activeSRIs(); - self.assertEqual( streams != None, True, "Active SRI List Error" ) - self.assertEqual( len(streams) == 1, True, "Active SRI List Length Error" ) - asri = streams[0] - self.assertEqual( asri.streamID == "stream-1-1", True, "IP1 activeSRIs - StreamID Mismatch") - 
self.assertEqual( asri.mode == 0, True, "IP1 activeSRIs - Mode Mismatch") - - streams = self.ip2._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 2 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI/EOS Filtered - Port 2 SRI was Received, Failed") - asri = streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "IP2 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "IP2 activeSRIs - Mode Mismatch") - - streams = self.ip3._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 3 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 3 SRI was Received, Failed") - - streams = self.ip4._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 4 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 4 SRI was Received, Failed") - - ## - ## Push SRI for IP3 - ## - filter_stream_id = "stream-3-1" - srate=33.0 - xdelta = 1.0/srate - sri = bulkio.sri.create( filter_stream_id, srate) - self.logger.info( "Multiout SRI/EOS Filter - sid:" + filter_stream_id ) - self.port.pushSRI( sri ); - - streams = self.ip1._get_activeSRIs(); - self.assertEqual( streams != None, True, "Active SRI List Error" ) - self.assertEqual( len(streams) == 1, True, "Active SRI List Length Error" ) - asri = streams[0] - self.assertEqual( asri.streamID == "stream-1-1", True, "IP1 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "IP1 activeSRIs - Mode Mismatch") - - streams = self.ip2._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 2 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI/EOS Filtered - Port 2 SRI was Received, Failed") - asri = streams[0] - self.assertEqual( asri.streamID == "stream-2-1", True, "IP2 activeSRIs - StreamID Mismatch") 
- self.assertEqual( asri.mode == 0, True, "IP2 activeSRIs - Mode Mismatch") - - streams = self.ip3._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 3 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI/EOS Filtered - Port 3 SRI was Received, Failed") - asri = streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "IP3 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "IP3 activeSRIs - Mode Mismatch") - - streams = self.ip4._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 4 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 4 SRI was Received, Failed") - - ## - ## Push SRI for IP4 - ## - filter_stream_id = "stream-4-1" - srate=44.0 - xdelta = 1.0/srate - sri = bulkio.sri.create( filter_stream_id, srate) - self.logger.info( "Multiout SRI/EOS Filter - sid:" + filter_stream_id ) - self.port.pushSRI( sri ); - - streams = self.ip1._get_activeSRIs(); - self.assertEqual( streams != None, True, "Active SRI List Error" ) - self.assertEqual( len(streams) == 1, True, "Active SRI List Length Error" ) - asri = streams[0] - self.assertEqual( asri.streamID == "stream-1-1", True, "IP1 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "IP1 activeSRIs - Mode Mismatch") - - streams = self.ip2._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 2 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI/EOS Filtered - Port 2 SRI was Received, Failed") - asri = streams[0] - self.assertEqual( asri.streamID == "stream-2-1", True, "IP2 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "IP2 activeSRIs - Mode Mismatch") - - streams = self.ip3._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 3 Stream Failed") - self.assertEqual( 
len(streams) == 1, True, "Multiout SRI/EOS Filtered - Port 3 SRI was Received, Failed") - asri = streams[0] - self.assertEqual( asri.streamID == "stream-3-1", True, "IP3 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "IP3 activeSRIs - Mode Mismatch") - - streams = self.ip4._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 4 Stream Failed") - self.assertEqual( len(streams) == 1, True, "Multiout SRI/EOS Filtered - Port 4 SRI was Received, Failed") - asri = streams[0] - self.assertEqual( asri.streamID == filter_stream_id, True, "IP4 activeSRIs - StreamID Mismatch") - self.assertEqual( asri.mode == 0, True, "IP4 activeSRIs - Mode Mismatch") - - - ## - ## Push EOS downstream and check SRI Lists - ## - filter_stream_id = "stream-1-1" - self.logger.info( "Multiout SRI/EOS Filter - EOS sid:" + filter_stream_id ) - self.port.pushPacket( [], TS, True, filter_stream_id ); - - pkt = self.ip1.getPacket() - self.assertEqual( pkt != None, True, "getPacket - IP1 PKT was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI] != None, True, "getPacket - IP1 SRI was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI].streamID == filter_stream_id, True, "getPacket - IP1 StreamID Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.END_OF_STREAM], True, "getPacket - IP1 EOS Mismatch" ) - - filter_stream_id = "stream-2-1" - self.logger.info( "Multiout SRI/EOS Filter - EOS sid:" + filter_stream_id ) - self.port.pushPacket( [], TS, True, filter_stream_id ); - - pkt = self.ip2.getPacket() - self.assertEqual( pkt != None, True, "getPacket - IP2 PKT was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI] != None, True, "getPacket - IP2 SRI was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI].streamID == filter_stream_id, True, "getPacket - IP2 StreamID Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.END_OF_STREAM], True, "getPacket - IP2 EOS Mismatch" ) - - filter_stream_id = "stream-3-1" - self.logger.info( "Multiout 
SRI/EOS Filter - EOS sid:" + filter_stream_id ) - self.port.pushPacket( [], TS, True, filter_stream_id ); - - pkt = self.ip3.getPacket() - self.assertEqual( pkt != None, True, "getPacket - IP3 PKT was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI] != None, True, "getPacket - IP3 SRI was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI].streamID == filter_stream_id, True, "getPacket - IP3 StreamID Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.END_OF_STREAM], True, "getPacket - IP3 EOS Mismatch" ) - - filter_stream_id = "stream-4-1" - self.logger.info( "Multiout SRI/EOS Filter - EOS sid:" + filter_stream_id ) - self.port.pushPacket( [], TS, True, filter_stream_id ); - - pkt = self.ip4.getPacket() - self.assertEqual( pkt != None, True, "getPacket - IP4 PKT was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI] != None, True, "getPacket - IP4 SRI was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI].streamID == filter_stream_id, True, "getPacket - IP4 StreamID Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.END_OF_STREAM], True, "getPacket - IP4 EOS Mismatch" ) - - streams = self.ip1._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 1 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 1 SRI was Received, Failed") - - streams = self.ip2._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 2 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 2 SRI was Received, Failed") - - streams = self.ip3._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 3 Stream Failed") - self.assertEqual( len(streams) == 0, True, "Multiout SRI/EOS Filtered - Port 3 SRI was Received, Failed") - - streams = self.ip4._get_activeSRIs(); - self.assertEqual( streams != None, True, "Multiout SRI/EOS Filtered - Port 4 Stream Failed") - self.assertEqual( len(streams) == 
0, True, "Multiout SRI/EOS Filtered - Port 4 SRI was Received, Failed") - - def test_multiout_data_filter(self): - self.logger.info( "Multiout DATA Filtered - BEGIN " ); - - clist = self.port._get_connections(); - self.assertEqual( clist != None, True, "Connection List Error" ) - - self.logger.info( "Multiout DATA Filtered - Create Connections and Filter list " ); - self.port.connectPort( self.ip1._this(), "connection_1"); - self.port.connectPort( self.ip2._this(), "connection_2"); - self.port.connectPort( self.ip3._this(), "connection_3"); - self.port.connectPort( self.ip4._this(), "connection_4"); - self.port.updateConnectionFilter( self.desc_list ); - - ## - ## Push SRI for IP1 - ## - filter_stream_id = "stream-1-1" - srate=11.0 - xdelta = 1.0/srate - TS = bulkio.timestamp.now(); - sri = bulkio.sri.create( filter_stream_id, srate); - self.port.pushSRI( sri ); - - self.logger.info( "Multiout DATA Filter - sid:" + filter_stream_id ) - self.port.pushPacket( self.seq, TS, False, filter_stream_id ); - pkt = self.ip1.getPacket() - self.assertEqual( pkt != None, True, "getPacket - IP1 PKT was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI] != None, True, "getPacket - IP1 SRI was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI].streamID == filter_stream_id, True, "getPacket - IP1 StreamID Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.SRI].mode == 0, True, "getPacket - IP1 Mode Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.END_OF_STREAM], False, "getPacket - IP1 EOS Mismatch" ) - self.assertEqual( len(pkt[bulkio.InPort.DATA_BUFFER])==len(self.seq), True, "getPacket - IP1 DataLength Mismatch" ) - - # - # make sure other ports did not receive a packet - # - pkt = self.ip2.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP2 PKT was NOT empty" ) - pkt = self.ip3.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP3 PKT was NOT empty" ) - pkt = self.ip4.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP4 
PKT was NOT empty" ) - - ## - ## Push SRI for IP2 - ## - filter_stream_id = "stream-2-1" - srate=22.0 - xdelta = 1.0/srate - TS = bulkio.timestamp.now(); - sri = bulkio.sri.create( filter_stream_id, srate); - self.port.pushSRI( sri ); - - self.logger.info( "Multiout DATA Filter - sid:" + filter_stream_id ) - self.port.pushPacket( self.seq, TS, False, filter_stream_id ); - pkt = self.ip2.getPacket() - self.assertEqual( pkt != None, True, "getPacket - IP2 PKT was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI] != None, True, "getPacket - IP2 SRI was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI].streamID == filter_stream_id, True, "getPacket - IP2 StreamID Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.SRI].mode == 0, True, "getPacket - IP2 Mode Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.END_OF_STREAM], False, "getPacket - IP2 EOS Mismatch" ) - self.assertEqual( len(pkt[bulkio.InPort.DATA_BUFFER])==len(self.seq), True, "getPacket - IP2 DataLength Mismatch" ) - - # - # make sure other ports did not receive a packet - # - pkt = self.ip1.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP1 PKT was NOT empty" ) - pkt = self.ip3.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP3 PKT was NOT empty" ) - pkt = self.ip4.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP4 PKT was NOT empty" ) - - - ## - ## Push SRI for IP3 - ## - filter_stream_id = "stream-3-1" - srate=33.0 - xdelta = 1.0/srate - TS = bulkio.timestamp.now(); - sri = bulkio.sri.create( filter_stream_id, srate); - self.port.pushSRI( sri ); - - self.logger.info( "Multiout DATA Filter - sid:" + filter_stream_id ) - self.port.pushPacket( self.seq, TS, False, filter_stream_id ); - pkt = self.ip3.getPacket() - self.assertEqual( pkt != None, True, "getPacket - IP3 PKT was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI] != None, True, "getPacket - IP3 SRI was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI].streamID == filter_stream_id, 
True, "getPacket - IP3 StreamID Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.SRI].mode == 0, True, "getPacket - IP3 Mode Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.END_OF_STREAM], False, "getPacket - IP3 EOS Mismatch" ) - self.assertEqual( len(pkt[bulkio.InPort.DATA_BUFFER])==len(self.seq), True, "getPacket - IP3 DataLength Mismatch" ) - - # - # make sure other ports did not receive a packet - # - pkt = self.ip1.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP1 PKT was NOT empty" ) - pkt = self.ip2.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP2 PKT was NOT empty" ) - pkt = self.ip4.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP4 PKT was NOT empty" ) - - ## - ## Push SRI for IP4 - ## - filter_stream_id = "stream-4-1" - srate=44.0 - xdelta = 1.0/srate - TS = bulkio.timestamp.now(); - sri = bulkio.sri.create( filter_stream_id, srate); - self.port.pushSRI( sri ); - - self.logger.info( "Multiout DATA Filter - sid:" + filter_stream_id ) - self.port.pushPacket( self.seq, TS, False, filter_stream_id ); - pkt = self.ip4.getPacket() - self.assertEqual( pkt != None, True, "getPacket - IP4 PKT was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI] != None, True, "getPacket - IP4 SRI was empty" ) - self.assertEqual( pkt[bulkio.InPort.SRI].streamID == filter_stream_id, True, "getPacket - IP4 StreamID Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.SRI].mode == 0, True, "getPacket - IP4 Mode Mismatch" ) - self.assertEqual( pkt[bulkio.InPort.END_OF_STREAM], False, "getPacket - IP4 EOS Mismatch" ) - self.assertEqual( len(pkt[bulkio.InPort.DATA_BUFFER])==len(self.seq), True, "getPacket - IP4 DataLength Mismatch" ) - - # - # make sure other ports did not receive a packet - # - pkt = self.ip1.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP1 PKT was NOT empty" ) - pkt = self.ip2.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP2 PKT was NOT empty" ) - pkt = 
self.ip3.getPacket() - self.assertEqual( pkt[0] == None, True, "getPacket - IP3 PKT was NOT empty" ) - - -class Test_Python_Int8(BaseMultiOut): - def __init__(self, methodName='runTest', ptype='Int8', cname='Python_Ports' ): - BaseMultiOut.__init__(self, methodName, ptype, cname ) - pass - -class Test_Python_Int16(BaseMultiOut): - def __init__(self, methodName='runTest', ptype='Int16', cname='Python_Ports' ): - BaseMultiOut.__init__(self, methodName, ptype, cname ) - pass - -class Test_Python_Int32(BaseMultiOut): - def __init__(self, methodName='runTest', ptype='Int32', cname='Python_Ports' ): - BaseMultiOut.__init__(self, methodName, ptype, cname ) - pass - -class Test_Python_Int64(BaseMultiOut): - def __init__(self, methodName='runTest', ptype='Int64', cname='Python_Ports' ): - BaseMultiOut.__init__(self, methodName, ptype, cname ) - pass - -class Test_Python_Float(BaseMultiOut): - def __init__(self, methodName='runTest', ptype='Float', cname='Python_Ports' ): - BaseMultiOut.__init__(self, methodName, ptype, cname ) - pass - -class Test_Python_Double(BaseMultiOut): - def __init__(self, methodName='runTest', ptype='Double', cname='Python_Ports' ): - BaseMultiOut.__init__(self, methodName, ptype, cname ) - pass - -if __name__ == '__main__': - suite = unittest.TestSuite() - for x in [ Test_Python_Int8, Test_Python_Int16, Test_Python_Int32, Test_Python_Int64, Test_Python_Float, Test_Python_Double ]: - tests = unittest.TestLoader().loadTestsFromTestCase(x) - suite.addTests(tests) - try: - import xmlrunner - runner = xmlrunner.XMLTestRunner(verbosity=2) - except ImportError: - runner = unittest.TextTestRunner(verbosity=2) - runner.run(suite) diff --git a/bulkioInterfaces/libsrc/testing/tests/test_python_vector.py b/bulkioInterfaces/libsrc/testing/tests/test_python_vector.py index df5f8ee60..43c10d40d 100644 --- a/bulkioInterfaces/libsrc/testing/tests/test_python_vector.py +++ b/bulkioInterfaces/libsrc/testing/tests/test_python_vector.py @@ -18,212 +18,33 @@ # along 
with this program. If not, see http://www.gnu.org/licenses/. # -from base_ports import * from bulkio import * -import time -import threading - -class SriListener(object): - def __init__(self): - self.sri = None - - def __call__(self, sri): - self.sri = sri - - def reset(self): - self.sri = None - -class TestPythonAPI(BaseVectorPort): - def test_inport_sri_changed(self): - """ - Tests that SRI changes are reported correctly from getPacket(). - """ - port = self.bio_in_module('test') - port.startPort() - - # Create a default SRI and push it - test_sri = sri.create('test-stream') - port.pushSRI(test_sri) - - # SRI should report changed for first packet - port.pushPacket([0], timestamp.now(), False, test_sri.streamID) - packet = port.getPacket(const.NON_BLOCKING) - self.assertNotEqual(packet[port.DATA_BUFFER], None) - self.assertEqual(packet[port.SRI_CHG], True) - - # No SRI change for second packet - port.pushPacket([1], timestamp.now(), False, test_sri.streamID) - packet = port.getPacket(const.NON_BLOCKING) - self.assertNotEqual(packet[port.DATA_BUFFER], None) - self.assertEqual(packet[port.SRI_CHG], False) - - # Reduce the queue size so we can force a flush - port.setMaxQueueDepth(2) - - # Push a packet, change the SRI, and push two more packets so that the - # packet with the associated SRI change gets flushed - port.pushPacket([2], timestamp.now(), False, test_sri.streamID) - test_sri.xdelta /= 2.0 - port.pushSRI(test_sri) - port.pushPacket([3], timestamp.now(), False, test_sri.streamID) - port.pushPacket([4], timestamp.now(), False, test_sri.streamID) - - # Get the last packet and verify that the queue has flushed, and the - # SRI change is still reported - packet = port.getPacket(const.NON_BLOCKING) - self.assertNotEqual(packet[port.DATA_BUFFER], None) - self.assertEqual(packet[port.QUEUE_FLUSH], True) - self.assertEqual(packet[port.SRI_CHG], True) - - # Push data without an SRI to check that the sriChanged flag is still - # set and the SRI callback gets called 
- listener = SriListener() - port.setNewSriListener(listener) - port.pushPacket([0], timestamp.now(), False, 'invalid_stream') - packet = port.getPacket(const.NON_BLOCKING) - self.assertTrue(packet.sriChanged) - self.assertFalse(listener.sri is None) - - # Push again to the same stream ID; sriChanged should now be false and - # the SRI callback should not get called - listener.reset() - port.pushPacket([0], timestamp.now(), False, 'invalid_stream') - packet = port.getPacket(const.NON_BLOCKING) - self.assertFalse(packet.sriChanged) - self.assertTrue(listener.sri is None) - - def test_inport_getPacket_timeout(self): - """ - Tests that timeout modes work as expected in getPacket(). - """ - port = self.bio_in_module('test') - port.startPort() - - # If non-blocking takes more than a millisecond, something is wrong - import timeit - def fn_getPacket(fn, timeout): - def _foo(): - fn(timeout) - return _foo - - timer_fn = timeit.Timer(fn_getPacket(port.getPacket, const.NON_BLOCKING)) - rettime = timer_fn.timeit(number=100) - self.assert_(rettime < 100e-3) - packet = port.getPacket(const.NON_BLOCKING) - self.assertEqual(packet[port.DATA_BUFFER], None) - - # Check that (at least) the timeout period elapses - timeout = 0.125 - number_iterations = 10 - timer_fn = timeit.Timer(fn_getPacket(port.getPacket, timeout)) - rettime = timer_fn.timeit(number=number_iterations) - self.assert_(rettime > timeout * number_iterations) - packet = port.getPacket(timeout) - self.assertEqual(packet[port.DATA_BUFFER], None) - - # Try a blocking getPacket() on another thread - results = [] - def get_packet(): - packet = port.getPacket(const.BLOCKING) - results.append(packet) - t = threading.Thread(target=get_packet) - t.setDaemon(True) - t.start() - - # Wait for a while to ensure that the thread has had a chance to enter - # getPacket(), then check that it has not returned - time.sleep(0.125) - self.assertEqual(len(results), 0) - - # Stop the port and make sure the thread exits - port.stopPort() - 
t.join(timeout=1.0) - self.failIf(t.isAlive()) - - def test_inport_statistics_streamIDs(self): - """ - Tests that the stream IDs reported in statistics are correct. - """ - port = self.bio_in_module('test') - port.startPort() - - # Create a few streams, push an SRI and packet for each, and test that - # the statistics report the correct stream IDs - stream_ids = set('sri%d' % ii for ii in xrange(3)) - for streamID in stream_ids: - stream_sri = sri.create(streamID) - port.pushSRI(stream_sri) - port.pushPacket([0], timestamp.now(), False, streamID) - self.assertEqual(stream_ids, set(port._get_statistics().streamIDs)) - - # Push an end-of-stream for one of the streams (doesn't matter which), - # and test that the stream ID has been removed from the stats - streamID = stream_ids.pop() - port.pushPacket([], timestamp.now(), True, streamID) - self.assertEqual(stream_ids, set(port._get_statistics().streamIDs)) - - def test_inport_blocking_deadlock(self): - """ - Tests that a blocking pushPacket does not prevent other threads from - interacting with the port. - """ - port = self.bio_in_module('test') - port.startPort() - - test_sri = sri.create('blocking-stream') - test_sri.blocking = True - port.pushSRI(test_sri) - - port.setMaxQueueDepth(1) - - # Push enough packets to block in one thread - def push_packet(): - for ii in range(2): - port.pushPacket([0], timestamp.now(), False, test_sri.streamID) - push_thread = threading.Thread(target=push_packet) - push_thread.setDaemon(True) - push_thread.start() - - # Get the queue depth in another thread, which used to lead to deadlock - # (well, mostly-dead-lock) - test_thread = threading.Thread(target=port.getCurrentQueueDepth) - test_thread.setDaemon(True) - test_thread.start() - - # Wait a while for the queue depth query to complete, which should happen - # quickly. 
If the thread is still alive, then deadlock must have occurred - test_thread.join(1.0) - deadlock = test_thread.isAlive() - - # Get packets to unblock the push thread, allows all threads to finish - port.getPacket() - port.getPacket() - self.failIf(deadlock) +from base_ports import * -class Test_Python_Int8(TestPythonAPI): - def __init__(self, methodName='runTest', ptype='Int8', cname='Python_Ports' ): - TestPythonAPI.__init__(self, methodName, ptype, cname ) +class Test_Python_Int8(BaseVectorPort): + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int8', 'Python_Ports', *args, **kwargs) -class Test_Python_Int16(TestPythonAPI): - def __init__(self, methodName='runTest', ptype='Int16', cname='Python_Ports' ): - TestPythonAPI.__init__(self, methodName, ptype, cname, bio_in_module=InShortPort, bio_out_module=OutShortPort ) +class Test_Python_Int16(BaseVectorPort): + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int16', 'Python_Ports', *args, **kwargs) -class Test_Python_Int32(TestPythonAPI): - def __init__(self, methodName='runTest', ptype='Int32', cname='Python_Ports' ): - TestPythonAPI.__init__(self, methodName, ptype, cname, bio_in_module=InLongPort, bio_out_module=OutLongPort ) +class Test_Python_Int32(BaseVectorPort): + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int32', 'Python_Ports', *args, **kwargs) -class Test_Python_Int64(TestPythonAPI): - def __init__(self, methodName='runTest', ptype='Int64', cname='Python_Ports' ): - TestPythonAPI.__init__(self, methodName, ptype, cname, bio_in_module=InLongLongPort, bio_out_module=OutLongLongPort ) +class Test_Python_Int64(BaseVectorPort): + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Int64', 'Python_Ports', *args, **kwargs) -class Test_Python_Float(TestPythonAPI): - def __init__(self, methodName='runTest', ptype='Float', cname='Python_Ports' ): - TestPythonAPI.__init__(self, methodName, ptype, cname, 
bio_in_module=InFloatPort, bio_out_module=OutFloatPort ) +class Test_Python_Float(BaseVectorPort): + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Float', 'Python_Ports', *args, **kwargs) -class Test_Python_Double(TestPythonAPI): - def __init__(self, methodName='runTest', ptype='Double', cname='Python_Ports' ): - TestPythonAPI.__init__(self, methodName, ptype, cname, bio_in_module=InDoublePort, bio_out_module=OutDoublePort ) +class Test_Python_Double(BaseVectorPort): + def __init__(self, *args, **kwargs): + BaseVectorPort.__init__(self, 'Double', 'Python_Ports', *args, **kwargs) if __name__ == '__main__': suite = unittest.TestSuite() diff --git a/bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/m4/.gitignore b/bulkioInterfaces/m4/.gitignore similarity index 100% rename from bulkioInterfaces/libsrc/testing/components/Oversized_framedata/cpp/m4/.gitignore rename to bulkioInterfaces/m4/.gitignore diff --git a/bulkioInterfaces/pom.xml b/bulkioInterfaces/pom.xml deleted file mode 100644 index a637d2dd3..000000000 --- a/bulkioInterfaces/pom.xml +++ /dev/null @@ -1,95 +0,0 @@ - - 4.0.0 - - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../pom.xml - - bulkio-interfaces - bundle - - - ${project.groupId} - cf-interfaces - ${project.version} - - - - idl - - - - - idl - - - src/java - - - org.codehaus.gmaven - gmaven-plugin - 1.3 - - - set-main-artifact - package - - execute - - - - project.artifact.setFile(new - File("${project.basedir}/BULKIOInterfaces.jar")) - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifacts - package - - attach-artifact - - - - - ${project.build.directory}/${project.artifactId}-${project.version}.jar - beta - jar - - - - - - - - - maven-assembly-plugin - 2.2-beta-5 - - - attach-idlzip - package - - single - - - - assembly.xml - - - - - - - - diff --git a/bulkioInterfaces/reconf b/bulkioInterfaces/reconf index e5cae955d..b9c1ef072 100755 --- a/bulkioInterfaces/reconf +++ 
b/bulkioInterfaces/reconf @@ -19,41 +19,4 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # - -rm -f config.cache - -# Setup the libtool stuff -if [ -e /usr/local/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/local/share/aclocal/libtool.m4 aclocal.d/acinclude.m4 -elif [ -e /usr/share/aclocal/libtool.m4 ]; then - /bin/cp /usr/share/aclocal/libtool.m4 acinclude.m4 -fi -libtoolize --force --automake - -# Search in expected locations for the OSSIE acincludes -# 1. Included with CF source -# 2. Using installed CF -if [ -d ../../common/acinclude ]; then - OSSIE_AC_INCLUDE=../../common/acinclude -elif [ -n ${OSSIEHOME} ] && [ -d ${OSSIEHOME}/share/aclocal/ossie ]; then - OSSIE_AC_INCLUDE=${OSSIEHOME}/share/aclocal/ossie -else - echo "Error: Cannot find the OSSIE aclocal files. This is not expected!" -fi - -if [ -n ${OSSIE_AC_INCLUDE} ]; then - aclocal -I ${OSSIE_AC_INCLUDE} -else - aclocal -fi - -autoconf -automake --foreign --add-missing - -# Due to strange autotools bootstrap issues, -# if ltmain.sh doesn't exists we have to run both again -if [ ! 
-f ltmain.sh ]; then - libtoolize --force --automake - automake --foreign --add-missing -fi - +autoreconf -i diff --git a/bulkioInterfaces/setup.py.in b/bulkioInterfaces/setup.py.in index 06266d4f1..59fcfe056 100644 --- a/bulkioInterfaces/setup.py.in +++ b/bulkioInterfaces/setup.py.in @@ -7,5 +7,7 @@ setup(name='bulkioInterfaces', packages=['bulkio', 'bulkio.bulkioInterfaces', 'bulkio.bulkioInterfaces.BULKIO', - 'bulkio.bulkioInterfaces.BULKIO__POA'] + 'bulkio.bulkioInterfaces.BULKIO.internal', + 'bulkio.bulkioInterfaces.BULKIO__POA', + 'bulkio.bulkioInterfaces.BULKIO__POA.internal'] ) diff --git a/burstioInterfaces/.gitignore b/burstioInterfaces/.gitignore index 76b426e3b..639a8c9d4 100644 --- a/burstioInterfaces/.gitignore +++ b/burstioInterfaces/.gitignore @@ -1,3 +1,14 @@ +*.class +*.o +*.la +*.lo +*.java +*.jar +*.pyc +.deps/ +.idlj/ +.idljni/ +.libs/ Makefile Makefile.in aclocal.m4 @@ -14,20 +25,10 @@ install-sh libtool ltmain.sh missing -*.class -*.idlj -*.o -*.la -*.lo -*.java -*.jar -*.omnijni -*.deps -*.libs -.idljni/ classlist.txt filelist.txt src/cpp/redhawk src/java/redhawk +src/python/build/ src/python/redhawk/burstioInterfaces src/python/setup.py diff --git a/burstioInterfaces/Makefile.am b/burstioInterfaces/Makefile.am index c61b1cb8f..a813fe517 100644 --- a/burstioInterfaces/Makefile.am +++ b/burstioInterfaces/Makefile.am @@ -24,6 +24,12 @@ SUBDIRS = src/idl src/cpp src/python if HAVE_JAVASUPPORT SUBDIRS += src/java endif +if ENABLE_TESTING + SUBDIRS += testing/tests/cpp +if HAVE_JAVASUPPORT + SUBDIRS += testing/tests/java +endif +endif pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = burstioInterfaces.pc burstio.pc diff --git a/burstioInterfaces/burstioInterfaces.spec b/burstioInterfaces/burstioInterfaces.spec index 7cd281ca1..154485fc9 100644 --- a/burstioInterfaces/burstioInterfaces.spec +++ b/burstioInterfaces/burstioInterfaces.spec @@ -28,8 +28,8 @@ Prefix: %{_prefix} %bcond_without java Name: burstioInterfaces -Version: 2.0.9 -Release: 
1%{?dist} +Version: 2.2.1 +Release: 2%{?dist} Summary: BURSTIO interfaces for REDHAWK Group: Applications/Engineering @@ -38,13 +38,13 @@ URL: http://redhawksdr.org/ Source: %{name}-%{version}.tar.gz Vendor: REDHAWK -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot +Requires: redhawk = %{version} +BuildRequires: redhawk-devel = %{version} -Requires: redhawk >= 2.0 -BuildRequires: redhawk-devel >= 2.0 +Requires: bulkioInterfaces = %{version} +BuildRequires: bulkioInterfaces = %{version} -Requires: bulkioInterfaces >= 2.0 -BuildRequires: bulkioInterfaces >= 2.0 +BuildRequires: cppunit-devel %description BURSTIO interfaces for REDHAWK @@ -107,5 +107,11 @@ rm -rf --preserve-root $RPM_BUILD_ROOT %changelog +* Wed Jun 28 2017 Ryan Bauman - 2.1.2-1 +- Update for 2.1.2-rc1 + +* Wed Jun 28 2017 Ryan Bauman - 2.1.1-2 +- Bump for 2.1.1-rc2 + * Thu Feb 20 2014 1.10.0 - Initial commit diff --git a/burstioInterfaces/configure.ac b/burstioInterfaces/configure.ac index 8968784bb..5c1d33dc5 100644 --- a/burstioInterfaces/configure.ac +++ b/burstioInterfaces/configure.ac @@ -18,7 +18,7 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# -AC_INIT(burstio, 2.0.9) +AC_INIT(burstio, 2.2.1) AC_CONFIG_SRCDIR([src/cpp/Makefile.am]) AC_CONFIG_MACRO_DIR([m4]) @@ -42,10 +42,10 @@ fi AC_LANG_PUSH([C++]) PKG_CHECK_MODULES([OMNIORB], [omniORB4 >= 4.1.0]) -PKG_CHECK_MODULES(OSSIE, ossie >= 2.0.9,,exit) +PKG_CHECK_MODULES(OSSIE, ossie >= 2.2.1,,exit) RH_PKG_IDLDIR([OSSIE], [ossie]) -PKG_CHECK_MODULES([BULKIO], [bulkioInterfaces >= 2.0]) +PKG_CHECK_MODULES([BULKIO], [bulkio >= 2.2]) RH_PKG_IDLDIR([BULKIO], [bulkioInterfaces]) AX_BOOST_BASE([1.41]) @@ -60,7 +60,7 @@ AC_ARG_ENABLE([java], AS_HELP_STRING([--disable-java], [Disable java support])) HAVE_JAVASUPPORT=no if test "x$enable_java" != "xno"; then # configure was run with java enabled - java_source_version=1.6 + java_source_version=1.8 RH_JAVA_HOME RH_PROG_JAVAC([$java_source_version]) @@ -72,7 +72,7 @@ if test "x$enable_java" != "xno"; then # Set up CLASSPATH for REDHAWK, CF and BULKIO RH_PKG_CLASSPATH([OSSIE], [ossie]) - RH_PKG_CLASSPATH([BULKIO], [bulkioInterfaces]) + RH_PKG_CLASSPATH([BULKIO], [bulkio]) if test -n "$HAVE_JNI_H"; then # The omnijni package must be available to generate JNI stubs and skeletons. 
@@ -132,6 +132,17 @@ AC_MSG_RESULT($HAVE_JAVASUPPORT) AM_CONDITIONAL(HAVE_JAVASUPPORT, test $HAVE_JAVASUPPORT = yes) # End optional java support +# Optionally disable unit tests +AC_ARG_ENABLE([testing], AS_HELP_STRING([--disable-testing], [disable build of unit tests])) +AS_IF([test "x$enable_testing" != "xno"], [ + AM_PATH_CPPUNIT(1.12.1) + AS_IF([test "x$HAVE_JAVASUPPORT" == "xyes"], [ + dnl Use RPM location hard-coded for now + AC_SUBST([JUNIT_CLASSPATH], "/usr/share/java/junit4.jar") + ]) +]) +AM_CONDITIONAL(ENABLE_TESTING, test "x$enable_testing" != "xno") + AC_SUBST(idldir, '${prefix}/share/idl') AC_CONFIG_FILES([Makefile \ @@ -141,6 +152,8 @@ AC_CONFIG_FILES([Makefile \ src/java/Makefile \ src/python/Makefile \ src/python/setup.py \ - src/idl/Makefile]) + src/idl/Makefile \ + testing/tests/cpp/Makefile \ + testing/tests/java/Makefile]) AC_OUTPUT diff --git a/burstioInterfaces/pom.xml b/burstioInterfaces/pom.xml deleted file mode 100644 index dc87a4dd2..000000000 --- a/burstioInterfaces/pom.xml +++ /dev/null @@ -1,100 +0,0 @@ - - 4.0.0 - - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../pom.xml - - burstio-interfaces - bundle - - - ${project.groupId} - bulkio-interfaces - ${project.version} - - - ${project.groupId} - cf-interfaces - ${project.version} - - - - src/idl/redhawk - - - - - {idl.dir} - - - src/java/src - - - org.codehaus.gmaven - gmaven-plugin - 1.3 - - - set-main-artifact - package - - execute - - - - project.artifact.setFile(new - File("${project.basedir}/src/java/BURSTIOInterfaces.jar")) - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifacts - package - - attach-artifact - - - - - ${project.build.directory}/${project.artifactId}-${project.version}.jar - beta - jar - - - - - - - - - maven-assembly-plugin - 2.2-beta-5 - - - attach-idlzip - package - - single - - - - assembly.xml - - - - - - - - diff --git a/burstioInterfaces/src/cpp/Makefile.am b/burstioInterfaces/src/cpp/Makefile.am index 
0df3c2ff0..0ce007752 100644 --- a/burstioInterfaces/src/cpp/Makefile.am +++ b/burstioInterfaces/src/cpp/Makefile.am @@ -56,6 +56,7 @@ nobase_nodist_include_HEADERS = redhawk/BURSTIO/burstioDataTypes.h \ redhawk/BURSTIO/burstio_burstUshort.h libburstioInterfaces_la_CPPFLAGS = -I . $(OSSIE_CFLAGS) +libburstioInterfaces_la_LDFLAGS = -lbulkioInterfaces BUILT_SOURCES = $(nobase_nodist_include_HEADERS) $(nodist_libburstioInterfaces_la_SOURCES) CLEANFILES = $(BUILT_SOURCES) @@ -100,8 +101,7 @@ libburstio_la_SOURCES += lib/OutPortImpl.h libburstio_la_SOURCES += lib/utils.cpp libburstio_la_CPPFLAGS = -I $(srcdir)/include -I redhawk $(OSSIE_CFLAGS) $(BOOST_CPPFLAGS) -libburstio_la_LIBADD = $(BOOST_LDFLAGS) -libburstio_la_LDFLAGS = -lburstioInterfaces +libburstio_la_LIBADD = $(BOOST_LDFLAGS) $(builddir)/libburstioInterfaces.la $(BULKIO_LIBS) libincludedir = $(includedir)/redhawk/burstio libinclude_HEADERS = include/burstio/burstio.h diff --git a/burstioInterfaces/src/cpp/include/burstio/ExecutorService.h b/burstioInterfaces/src/cpp/include/burstio/ExecutorService.h index fbe87f0e5..abcc3aeb5 100644 --- a/burstioInterfaces/src/cpp/include/burstio/ExecutorService.h +++ b/burstioInterfaces/src/cpp/include/burstio/ExecutorService.h @@ -20,138 +20,11 @@ #ifndef BURSTIO_EXECUTORSERVICE_H #define BURSTIO_EXECUTORSERVICE_H -#include - -#include -#include -#include +#include namespace burstio { - - class ExecutorService { - public: - ExecutorService() : - thread_(0), - running_(false) - { - } - - void start () - { - boost::mutex::scoped_lock lock(mutex_); - if (running_) { - return; - } - - running_ = true; - thread_ = new boost::thread(&ExecutorService::run, this); - } - - void stop () - { - boost::thread* old_thread = 0; - { - boost::mutex::scoped_lock lock(mutex_); - running_ = false; - old_thread = thread_; - thread_ = 0; - cond_.notify_all(); - } - if (old_thread) { - old_thread->join(); - delete old_thread; - } - } - - template - void execute (F func) - { - insert_sorted(func); 
- } - - template - void execute (F func, A1 arg1) - { - insert_sorted(boost::bind(func, arg1)); - } - - template - void schedule (boost::system_time when, F func) - { - insert_sorted(func, when); - } - - template - void schedule (boost::system_time when, F func, A1 arg1) - { - insert_sorted(boost::bind(func, arg1), when); - } - - void clear () - { - boost::mutex::scoped_lock lock(mutex_); - queue_.clear(); - cond_.notify_all(); - } - - private: - typedef boost::function func_type; - typedef std::pair task_type; - typedef std::list task_queue; - - void run () - { - boost::mutex::scoped_lock lock(mutex_); - while (running_) { - while (!queue_.empty()) { - // Start at the front of the queue every time--a task may - // have been added while the lock was released to service - // the last task - task_queue::iterator task = queue_.begin(); - if (task->first > boost::get_system_time()) { - // Head of queue is scheduled in the future - break; - } - - // Copy the task's function and remove it from the queue - func_type func = task->second; - queue_.erase(task); - - // Run task with the lock released - lock.unlock(); - func(); - lock.lock(); - } - - if (queue_.empty()) { - cond_.wait(lock); - } else { - boost::system_time when = queue_.front().first; - cond_.timed_wait(lock, when); - } - } - } - - void insert_sorted (func_type func, boost::system_time when=boost::get_system_time()) - { - boost::mutex::scoped_lock lock(mutex_); - task_queue::iterator pos = queue_.begin(); - while ((pos != queue_.end()) && (when > pos->first)) { - ++pos; - } - queue_.insert(pos, std::make_pair(when, func)); - cond_.notify_all(); - } - - - boost::mutex mutex_; - boost::condition_variable cond_; - - boost::thread* thread_; - task_queue queue_; - bool running_; - }; - + // Bring the core's ExecutorService into the burstio namespace + typedef redhawk::ExecutorService ExecutorService; } #endif // BURSTIO_EXECUTORSERVICE_H diff --git a/burstioInterfaces/src/cpp/include/burstio/InPortDecl.h 
b/burstioInterfaces/src/cpp/include/burstio/InPortDecl.h index d6b7bc01b..5cb593917 100644 --- a/burstioInterfaces/src/cpp/include/burstio/InPortDecl.h +++ b/burstioInterfaces/src/cpp/include/burstio/InPortDecl.h @@ -42,7 +42,7 @@ namespace burstio { ENABLE_INSTANCE_LOGGING; public: - typedef typename Traits::PortType PortType; + typedef typename Traits::PortType PortType; typedef typename Traits::BurstType BurstType; typedef typename Traits::BurstSequenceType BurstSequenceType; typedef typename Traits::ElementType ElementType; @@ -124,7 +124,7 @@ namespace burstio { // Support function for automatic component-managed stop. virtual void stopPort (); - std::string getRepid() const; + std::string getRepid() const; protected: // Wait timeout seconds for a burst to become available; the caller diff --git a/burstioInterfaces/src/cpp/include/burstio/OutPortDecl.h b/burstioInterfaces/src/cpp/include/burstio/OutPortDecl.h index 2fa0ed938..beef18b5a 100644 --- a/burstioInterfaces/src/cpp/include/burstio/OutPortDecl.h +++ b/burstioInterfaces/src/cpp/include/burstio/OutPortDecl.h @@ -29,27 +29,16 @@ #include -#include "UsesPort.h" +#include +#include + #include "BurstStatistics.h" #include "PortTraits.h" -#include "ExecutorService.h" #include "utils.h" #include "debug.h" namespace burstio { - struct PortStatus - { - PortStatus(const std::string& name, size_t bitsPerElement): - stats(name, bitsPerElement), - alive(true) - { - } - - SenderStatistics stats; - bool alive; - }; - enum RoutingModeType { ROUTE_ALL_INTERLEAVED, ROUTE_ALL_STREAMS, @@ -67,15 +56,14 @@ namespace burstio { virtual void flush () = 0; }; + template + class BurstTransport; + template - class OutPort : public UsesPort, - public virtual POA_BULKIO::UsesPortStatisticsProvider + class OutPort : public redhawk::UsesPort, public virtual POA_BULKIO::UsesPortStatisticsProvider { - ENABLE_INSTANCE_LOGGING; - public: - typedef UsesPort super; - typedef typename Traits::PortType PortType; + typedef typename 
Traits::PortType PortType; typedef typename Traits::BurstType BurstType; typedef typename Traits::BurstSequenceType BurstSequenceType; typedef typename Traits::ElementType ElementType; @@ -88,8 +76,6 @@ namespace burstio { OutPort(std::string port_name); ~OutPort(); - void setLogger (LoggerPtr logger); - // Sets how streams are routed to connections: // ROUTE_ALL_INTERLEAVED - All connections receive all streams; // streams are interleaved in one buffer @@ -190,7 +176,7 @@ namespace burstio { // Support function for automatic component-managed stop. virtual void stopPort (); - std::string getRepid() const; + std::string getRepid() const; protected: class Queue : public OutputPolicy @@ -229,7 +215,7 @@ namespace burstio { void sendBursts_ (); OutPort* port_; - LoggerPtr& __logger; + LoggerPtr& logger; mutable boost::mutex mutex_; @@ -244,17 +230,20 @@ namespace burstio { std::string streamID_; }; + class CorbaTransport; + class LocalTransport; + friend class Queue; - typedef typename super::ConnectionMap ConnectionMap; - typedef typename super::Connection Connection; + typedef BurstTransport TransportType; + typedef redhawk::UsesPort::TransportIteratorAdapter TransportIterator; typedef std::map QueueMap; typedef std::map > RouteTable; void sendBursts (const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth, const std::string& streamID); - void partitionBursts (const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth, const std::string& streamID, const Connection& connection); + // void partitionBursts (const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth, const std::string& streamID, const Connection& connection); void scheduleCheck (boost::system_time when); void checkQueues (); @@ -262,8 +251,7 @@ namespace burstio { void queueBurst (SequenceType& data, const BURSTIO::BurstSRI& sri, const BULKIO::PrecisionUTCTime& timestamp, bool eos, bool isComplex); - virtual void connectionAdded (const 
std::string& connectionId, Connection& connection); - virtual void connectionModified (const std::string& connectionId, Connection& connection); + virtual redhawk::UsesTransport* _createTransport(CORBA::Object_ptr object, const std::string& connectionId); const Queue& getQueueForStream (const std::string& streamID) const; Queue& getQueueForStream (const std::string& streamID); @@ -277,10 +265,7 @@ namespace burstio { RoutingModeType routingMode_; RouteTable routes_; - ExecutorService monitor_; - - using super::updatingPortsLock; - using super::connections_; + redhawk::ExecutorService monitor_; }; typedef OutPort BurstByteOut; diff --git a/burstioInterfaces/src/cpp/include/burstio/PortTraits.h b/burstioInterfaces/src/cpp/include/burstio/PortTraits.h index 3ec1b4afb..e098cad18 100644 --- a/burstioInterfaces/src/cpp/include/burstio/PortTraits.h +++ b/burstioInterfaces/src/cpp/include/burstio/PortTraits.h @@ -43,18 +43,21 @@ namespace burstio { typedef Native NativeType; }; -#define TYPEDEF_PORTTRAITS(T, CT, ST, NT) typedef PortTraits T##Traits; - TYPEDEF_PORTTRAITS(Byte, Octet, CF::OctetSequence, signed char); - TYPEDEF_PORTTRAITS(Double, Double, PortTypes::DoubleSequence, CORBA::Double); - TYPEDEF_PORTTRAITS(Float, Float, PortTypes::FloatSequence, CORBA::Float); - TYPEDEF_PORTTRAITS(Long, Long, PortTypes::LongSequence, CORBA::Long); - TYPEDEF_PORTTRAITS(LongLong, LongLong, PortTypes::LongLongSequence, CORBA::LongLong); - TYPEDEF_PORTTRAITS(Short, Short, PortTypes::ShortSequence, CORBA::Short); - TYPEDEF_PORTTRAITS(Ubyte, Octet, CF::OctetSequence, unsigned char); - TYPEDEF_PORTTRAITS(Ulong, ULong, PortTypes::UlongSequence, CORBA::ULong); - TYPEDEF_PORTTRAITS(UlongLong, ULongLong, PortTypes::UlongLongSequence, CORBA::ULongLong); - TYPEDEF_PORTTRAITS(Ushort, UShort, PortTypes::UshortSequence, CORBA::UShort); -#undef TYPEDEF_PORTTRAITS +#define DEFINE_PORTTRAITS(T, CT, ST, NT) \ + struct T##Traits : public PortTraits { \ + }; + + DEFINE_PORTTRAITS(Byte, Octet, 
CF::OctetSequence, signed char); + DEFINE_PORTTRAITS(Double, Double, PortTypes::DoubleSequence, CORBA::Double); + DEFINE_PORTTRAITS(Float, Float, PortTypes::FloatSequence, CORBA::Float); + DEFINE_PORTTRAITS(Long, Long, PortTypes::LongSequence, CORBA::Long); + DEFINE_PORTTRAITS(LongLong, LongLong, PortTypes::LongLongSequence, CORBA::LongLong); + DEFINE_PORTTRAITS(Short, Short, PortTypes::ShortSequence, CORBA::Short); + DEFINE_PORTTRAITS(Ubyte, Octet, CF::OctetSequence, unsigned char); + DEFINE_PORTTRAITS(Ulong, ULong, PortTypes::UlongSequence, CORBA::ULong); + DEFINE_PORTTRAITS(UlongLong, ULongLong, PortTypes::UlongLongSequence, CORBA::ULongLong); + DEFINE_PORTTRAITS(Ushort, UShort, PortTypes::UshortSequence, CORBA::UShort); +#undef DEFINE_PORTTRAITS } #endif // BURSTIO_PORTTRAITS_H diff --git a/burstioInterfaces/src/cpp/include/burstio/UsesPort.h b/burstioInterfaces/src/cpp/include/burstio/UsesPort.h index 6371181cc..c23d23688 100644 --- a/burstioInterfaces/src/cpp/include/burstio/UsesPort.h +++ b/burstioInterfaces/src/cpp/include/burstio/UsesPort.h @@ -33,14 +33,44 @@ namespace burstio { - template + template + class BasicTransport { + public: + typedef PortType port_type; + typedef typename port_type::_ptr_type ptr_type; + typedef typename port_type::_var_type var_type; + + BasicTransport(ptr_type port, const std::string& connectionId) : + port_(port_type::_duplicate(port)), + connectionId_(connectionId) + { + } + + virtual ~BasicTransport() { } + + const std::string& getConnectionId() const + { + return connectionId_; + } + + ptr_type objref() const + { + return port_type::_duplicate(port_); + } + + protected: + var_type port_; + const std::string connectionId_; + }; + + template > class UsesPort : public Port_Uses_base_impl, public virtual POA_ExtendedCF::QueryablePort { public: typedef PortType port_type; typedef typename port_type::_ptr_type ptr_type; typedef typename port_type::_var_type var_type; - typedef InfoType info_type; + typedef TransportType 
transport_type; typedef std::pair connection_type; typedef std::vector connection_list; @@ -121,16 +151,10 @@ namespace burstio { if (entry == connections_.end()) { // Store the new connection and pass the new entry along to // connectionAdded - entry = insertPort_(connectionId, port._retn()); - - // Allow subclasses to do additional bookkeeping - connectionAdded(entry->first, entry->second); + connections_[connectionId] = _createConnection(port, connectionId); } else { - // Replace the object reference - entry->second.port = port._retn(); - - // Allow subclasses to do additional bookkeeping - connectionModified(entry->first, entry->second); + // TODO: Replace the object reference + //entry->second.port = port._retn(); } } @@ -147,10 +171,7 @@ namespace burstio { throw CF::Port::InvalidPort(2, message.c_str()); } - // Allow subclasses to do additional cleanup - connectionRemoved(existing->first, existing->second); - delete existing->second.info; - + delete existing->second; connections_.erase(existing); } @@ -165,7 +186,7 @@ namespace burstio { CORBA::ULong index = 0; for (typename ConnectionMap::iterator ii = connections_.begin(); ii != connections_.end(); ++ii, ++index) { retval[index].connectionId = ii->first.c_str(); - retval[index].port = CORBA::Object::_duplicate(ii->second.port); + retval[index].port = CORBA::Object::_duplicate(ii->second->objref()); } return retval._retn(); } @@ -177,7 +198,7 @@ namespace burstio { if (existing == connections_.end()) { throw std::invalid_argument("No connection " + connectionId); } - return port_type::_duplicate(existing->second.port); + return existing->second->objref(); } connection_list getConnections() @@ -185,52 +206,23 @@ namespace burstio { boost::mutex::scoped_lock lock(updatingPortsLock); connection_list result; for (typename ConnectionMap::iterator ii = connections_.begin(); ii != connections_.end(); ++ii) { - result.push_back(std::make_pair(ii->first, port_type::_duplicate(ii->second.port))); + 
result.push_back(std::make_pair(ii->first, ii->second->objref())); } return result; } protected: - struct Connection { - Connection(ptr_type _port) : - port(_port), - info(0) - { } - - var_type port; - info_type* info; - }; - UsesPort (std::string port_name) : Port_Uses_base_impl(port_name) { } - virtual void connectionAdded (const std::string&, Connection&) - { - } + virtual transport_type* _createConnection(ptr_type port, const std::string& connectionId) = 0; - virtual void connectionRemoved (const std::string&, Connection&) - { - } - - virtual void connectionModified (const std::string&, Connection&) - { - } - - typedef std::map ConnectionMap; + typedef std::map ConnectionMap; ConnectionMap connections_; private: - inline typename ConnectionMap::iterator - insertPort_ (const std::string& connectionId, ptr_type port) - { - // Store the new connection (constructing in-place because there is - // no default constructor for Connection), returning an iterator to - // the new entry - return connections_.insert(std::make_pair(connectionId, Connection(port))).first; - } - ossie::notification connectListeners_; ossie::notification disconnectListeners_; }; diff --git a/burstioInterfaces/src/cpp/include/burstio/utils.h b/burstioInterfaces/src/cpp/include/burstio/utils.h index 4a1a3e44a..cd505bd97 100644 --- a/burstioInterfaces/src/cpp/include/burstio/utils.h +++ b/burstioInterfaces/src/cpp/include/burstio/utils.h @@ -144,9 +144,7 @@ namespace burstio { double elapsed (const BULKIO::PrecisionUTCTime& begin, const BULKIO::PrecisionUTCTime& end=now()); - BURSTIO::BurstSRI createSRI ( const std::string &streamID, double xdelta); - - BURSTIO::BurstSRI createSRI ( const std::string &streamID); + BURSTIO::BurstSRI createSRI (const std::string& streamID, double xdelta=1.0); } } diff --git a/burstioInterfaces/src/cpp/lib/InPortImpl.h b/burstioInterfaces/src/cpp/lib/InPortImpl.h index c1dad8387..466ada1e9 100644 --- a/burstioInterfaces/src/cpp/lib/InPortImpl.h +++ 
b/burstioInterfaces/src/cpp/lib/InPortImpl.h @@ -31,7 +31,6 @@ namespace burstio { template InPort::InPort(std::string port_name) : Port_Provides_base_impl(port_name), - __logger(__classlogger), queueOffset_(0), queuedBursts_(0), queueThreshold_(DEFAULT_QUEUE_THRESHOLD), @@ -46,13 +45,6 @@ namespace burstio { { } - template - void InPort::setLogger (LoggerPtr logger) - { - __logger = logger; - } - - template size_t InPort::getQueueThreshold () const { @@ -72,6 +64,12 @@ namespace burstio { queueThreshold_ = count; } + template + void InPort::setLogger (rh_logger::LoggerPtr newLogger) + { + _portLog = newLogger; + } + template void InPort::start () { @@ -153,8 +151,14 @@ namespace burstio { // Add bursts to queue, if there are any if (total_bursts > 0) { LOG_INSTANCE_TRACE("Queueing " << total_bursts << " bursts"); - queue_.push_back(new BurstSequenceType()); - ossie::corba::move(queue_.back(), const_cast(bursts)); + if (bursts.release()) { + // Steal the bursts + queue_.push_back(new BurstSequenceType()); + ossie::corba::move(queue_.back(), const_cast(bursts)); + } else { + // Someone else owns the bursts, make a copy + queue_.push_back(new BurstSequenceType(bursts)); + } queuedBursts_ += total_bursts; queueNotEmpty_.notify_all(); } else { diff --git a/burstioInterfaces/src/cpp/lib/OutPortImpl.h b/burstioInterfaces/src/cpp/lib/OutPortImpl.h index a5126e628..30668685c 100644 --- a/burstioInterfaces/src/cpp/lib/OutPortImpl.h +++ b/burstioInterfaces/src/cpp/lib/OutPortImpl.h @@ -20,16 +20,179 @@ #ifndef BURSTIO_OUTPORTIMPL_H #define BURSTIO_OUTPORTIMPL_H +#include + +#include + #include +#include #include "debug_impl.h" namespace burstio { + template + class BurstTransport : public redhawk::UsesTransport + { + public: + typedef typename Traits::PortType PortType; + typedef typename PortType::_ptr_type PtrType; + typedef typename Traits::BurstSequenceType BurstSequenceType; + typedef typename Traits::ElementType ElementType; + + BurstTransport(OutPort* port) : + 
redhawk::UsesTransport(port), + _port(port), + _stats(port->getName(), sizeof(ElementType) * 8) + { + } + + virtual void pushBursts(const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth) = 0; + + BULKIO::PortStatistics* getStatistics() const + { + return _stats.retrieve(); + } + + protected: + OutPort* _port; + SenderStatistics _stats; + }; + + template + class OutPort::CorbaTransport : public BurstTransport + { + public: + typedef BurstTransport super; + typedef typename Traits::PortType PortType; + typedef typename PortType::_var_type VarType; + typedef typename PortType::_ptr_type PtrType; + typedef typename Traits::BurstType BurstType; + typedef typename Traits::BurstSequenceType BurstSequenceType; + + CorbaTransport(OutPort* parent, PtrType objref) : + super(parent), + _objref(PortType::_duplicate(objref)) + { + } + + virtual std::string transportType() const + { + return "CORBA"; + } + + virtual CF::Properties transportInfo() const + { + return CF::Properties(); + } + + void pushBursts(const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth) + { + try { + sendBursts(bursts, startTime, queueDepth); + } catch (const CORBA::MARSHAL& ex) { + if (bursts.length() > 1) { + partitionBursts(bursts, startTime, queueDepth); + } else { + throw redhawk::TransportError("burst size is too long"); + } + } catch (const CORBA::Exception& ex) { + throw redhawk::FatalTransportError(ossie::corba::describeException(ex)); + } + } + + private: + void sendBursts(const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth) + { + // Record delay from queueing of first burst to now + boost::posix_time::time_duration delay = boost::get_system_time() - startTime; + + _objref->pushBursts(bursts); + this->setAlive(true); + + // Count up total elements + size_t total_elements = 0; + for (CORBA::ULong index = 0; index < bursts.length(); ++index) { + total_elements += bursts[index].data.length(); + } + 
this->_stats.record(bursts.length(), total_elements, queueDepth, delay.total_microseconds() * 1e-6); + } + + void partitionBursts(const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth) + { + // Split the input bursts in the middle, sending each half in a + // separate call (which may end up recursively partitioning); no + // copies are made, just non-owning sequences + CORBA::ULong middle = bursts.length() / 2; + BurstType* buffer = const_cast(bursts.get_buffer()); + BurstSequenceType left(middle, middle, buffer, false); + pushBursts(left, startTime, queueDepth); + + CORBA::ULong remain = bursts.length() - middle; + BurstSequenceType right(remain, remain, buffer + middle, false); + pushBursts(right, startTime, queueDepth); + } + + using super::_port; + VarType _objref; + }; + + template + class OutPort::LocalTransport : public BurstTransport + { + public: + typedef BurstTransport super; + typedef typename Traits::PortType PortType; + typedef typename PortType::_ptr_type PtrType; + typedef typename Traits::BurstType BurstType; + typedef typename Traits::BurstSequenceType BurstSequenceType; + + LocalTransport(OutPort* parent, InPort* localPort) : + super(parent), + localPort_(localPort) + { + } + + virtual std::string transportType() const + { + return "local"; + } + + virtual CF::Properties transportInfo() const + { + return CF::Properties(); + } + + void pushBursts(const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth) + { + try { + // Record delay from queueing of first burst to now + boost::posix_time::time_duration delay = boost::get_system_time() - startTime; + + // Count up total elements + size_t total_elements = 0; + size_t total_bursts = bursts.length(); + for (CORBA::ULong index = 0; index < total_bursts; ++index) { + total_elements += bursts[index].data.length(); + } + + localPort_->pushBursts(bursts); + + this->_stats.record(total_bursts, total_elements, queueDepth, delay.total_microseconds() * 1e-6); + 
} catch (...) { + throw redhawk::TransportError("pushBursts failed"); + } + } + + private: + using super::_port; + InPort* localPort_; + }; + template OutPort::Queue::Queue(OutPort* port, const std::string& streamID, size_t maxBursts, size_t thresholdBytes, long thresholdLatency) : port_(port), - __logger(port->__logger), + logger(port->_portLog), maxBursts_(maxBursts), thresholdBytes_(thresholdBytes), thresholdLatency_(boost::posix_time::microseconds(thresholdLatency)), @@ -51,7 +214,7 @@ namespace burstio { boost::mutex::scoped_lock lock(mutex_); maxBursts_ = count; if (bursts_.length() >= maxBursts_) { - LOG_INSTANCE_DEBUG("New max bursts " << maxBursts_ << " triggering push"); + RH_DEBUG(logger, "New max bursts " << maxBursts_ << " triggering push"); executeThreadedFlush(); } } @@ -69,7 +232,7 @@ namespace burstio { boost::mutex::scoped_lock lock(mutex_); thresholdBytes_ = bytes; if (bytes_ >= thresholdBytes_) { - LOG_INSTANCE_DEBUG("New byte threshold " << thresholdBytes_ << " triggering push"); + RH_DEBUG(logger, "New byte threshold " << thresholdBytes_ << " triggering push"); executeThreadedFlush(); } } @@ -100,7 +263,7 @@ namespace burstio { // If this is the first burst, mark the time for latency guarantees if (bursts_.length() == 0) { startTime_ = boost::get_system_time(); - LOG_INSTANCE_TRACE("Scheduling latency check on monitor thread after " << thresholdLatency_.total_microseconds() << " usec"); + RH_TRACE(logger, "Scheduling latency check on monitor thread after " << thresholdLatency_.total_microseconds() << " usec"); port_->scheduleCheck(startTime_ + thresholdLatency_); } @@ -114,10 +277,10 @@ namespace burstio { burst.EOS = eos; bytes_ += burst.data.length() * sizeof(ElementType); - LOG_INSTANCE_TRACE("Queue size: " << bursts_.length() << " bursts / " << bytes_ << " bytes"); + RH_TRACE(logger, "Queue size: " << bursts_.length() << " bursts / " << bytes_ << " bytes"); if (shouldFlush()) { - LOG_INSTANCE_DEBUG("Queued burst exceeded threshold, 
flushing queue"); + RH_DEBUG(logger, "Queued burst exceeded threshold, flushing queue"); sendBursts_(); } } @@ -163,7 +326,12 @@ namespace burstio { { if (bursts_.length() > 0) { port_->sendBursts(bursts_, startTime_, bursts_.length()/(float)maxBursts_, streamID_); - bursts_.length(0); + // Reset the burst queue to empty, reallocating if necessary + if (bursts_.maximum() < maxBursts_) { + bursts_.replace(maxBursts_, 0, BurstSequenceType::allocbuf(maxBursts_), true); + } else { + bursts_.length(0); + } bytes_ = 0; startTime_ = boost::posix_time::ptime(); } @@ -171,8 +339,7 @@ namespace burstio { template OutPort::OutPort(std::string port_name) : - super(port_name), - __logger(__classlogger), + UsesPort(port_name), defaultQueue_(this, "(default)", DEFAULT_MAX_BURSTS, omniORB::giopMaxMsgSize() * 0.9, DEFAULT_LATENCY_THRESHOLD), streamQueues_(), routingMode_(ROUTE_ALL_INTERLEAVED) @@ -190,12 +357,6 @@ namespace burstio { } } - template - void OutPort::setLogger (LoggerPtr logger) - { - __logger = logger; - } - template size_t OutPort::getMaxBursts () const { @@ -265,7 +426,7 @@ namespace burstio { template void OutPort::addConnectionFilter (const std::string& streamID, const std::string& connectionID) { - LOG_INSTANCE_DEBUG("Routing stream " << streamID << " to connection " << connectionID); + RH_DEBUG(_portLog, "Routing stream " << streamID << " to connection " << connectionID); boost::mutex::scoped_lock lock(updatingPortsLock); routes_[streamID].insert(connectionID); } @@ -273,7 +434,7 @@ namespace burstio { template void OutPort::removeConnectionFilter (const std::string& streamID, const std::string& connectionID) { - LOG_INSTANCE_DEBUG("Unrouting stream " << streamID << " from connection " << connectionID); + RH_DEBUG(_portLog, "Unrouting stream " << streamID << " from connection " << connectionID); boost::mutex::scoped_lock lock(updatingPortsLock); RouteTable::iterator route = routes_.find(streamID); if (route != routes_.end()) { @@ -310,17 +471,17 @@ namespace 
burstio { stop(); } - template + template std::string OutPort::getRepid () const { - return PortType::_PD_repoId;; + return PortType::_PD_repoId; } template BULKIO::PortUsageType OutPort::state () { boost::mutex::scoped_lock lock(updatingPortsLock); - if (connections_.empty()) { + if (_connections.empty()) { return BULKIO::IDLE; } else { return BULKIO::ACTIVE; @@ -355,105 +516,39 @@ namespace burstio { template void OutPort::sendBursts(const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth, const std::string& streamID) { - //LOG_INSTANCE_DEBUG("Sending " << bursts.length() << " bursts"); + RH_TRACE(_portLog, "Sending " << bursts.length() << " bursts"); - // Count up total elements - size_t total_elements = 0; - for (CORBA::ULong index = 0; index < bursts.length(); ++index) { - total_elements += bursts[index].data.length(); - } - - boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in - for (typename ConnectionMap::iterator ii = connections_.begin(); ii != connections_.end(); ++ii) { - const std::string& connectionId = ii->first; - Connection& connection = ii->second; + // Create a non-owning view to prevent local transport from stealing + // the burst buffer; for N local connections, this will lead to making + // N copies (optimal is N-1), but this approach is simpler and safe. 
+ BurstType* buffer = const_cast(bursts.get_buffer()); + BurstSequenceType const_bursts(bursts.length(), bursts.length(), buffer, false); - if (!isStreamRoutedToConnection(streamID, connectionId)) { + boost::mutex::scoped_lock lock(updatingPortsLock); + for (TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + TransportType* transport = connection.transport(); + // Skip ports known to be dead + if (!transport->isAlive()) { continue; } - // Record statistics - boost::posix_time::time_duration delay = boost::get_system_time() - startTime; + const std::string& connection_id = connection.connectionId(); + if (!isStreamRoutedToConnection(streamID, connection_id)) { + RH_TRACE(_portLog, "Stream " << streamID << " is not routed to connection " << connection_id); + continue; + } + RH_TRACE(_portLog, "Pushing " << const_bursts.length() << " bursts to connection " << connection_id); try { - connection.port->pushBursts(bursts); - connection.info->alive = true; - connection.info->stats.record(bursts.length(), total_elements, queueDepth, delay.total_microseconds() * 1e-6); - } catch (const std::exception& ex) { - if (connection.info->alive) { - LOG_INSTANCE_ERROR("pushBursts to " << ii->first << " failed: " << ex.what()); - } - connection.info->alive = false; - } catch (const CORBA::MARSHAL& ex) { - if (bursts.length() == 1) { - // the burst length is 1. There's nothing we can do - if (connection.info->alive) { - LOG_INSTANCE_ERROR("pushBursts to " << ii->first << " failed because the burst size is too long"); - } - connection.info->alive = false; - } else { - try { - partitionBursts(bursts, startTime, queueDepth, streamID, connection); - } catch (...) 
{ - if (connection.info->alive) { - LOG_INSTANCE_ERROR("Paritioned pushBursts to " << ii->first << " failed"); - } - connection.info->alive = false; - } - } - } catch (const CORBA::Exception& ex) { - if (connection.info->alive) { - LOG_INSTANCE_ERROR("pushBursts to " << ii->first << " failed: CORBA::" << ex._name()); - } - connection.info->alive = false; - } catch (...) { - if (connection.info->alive) { - LOG_INSTANCE_ERROR("pushBursts to " << ii->first << " failed"); - } - connection.info->alive = false; + transport->pushBursts(const_bursts, startTime, queueDepth); + } catch (const redhawk::FatalTransportError& exc) { + RH_ERROR(_portLog, "pushBursts to " << connection_id << " failed: " << exc.what()); + transport->setAlive(false); + } catch (const redhawk::TransportError& exc) { + RH_ERROR(_portLog, "pushBursts to " << connection_id << " failed: " << exc.what()); } } } - - template - void OutPort::partitionBursts(const BurstSequenceType& bursts, boost::system_time startTime, float queueDepth, const std::string& streamID, const Connection& connection) - { - // cut the burst length in half and try again.... 
- BurstSequenceType first_burst; - BurstSequenceType second_burst; - first_burst.length(bursts.length()/2); - second_burst.length(bursts.length()-first_burst.length()); - boost::posix_time::time_duration delay = boost::get_system_time() - startTime; - for (int i=0; ipushBursts(first_burst); - connection.info->alive = true; - connection.info->stats.record(first_burst.length(), total_elements, queueDepth, delay.total_microseconds() * 1e-6); - } catch (const CORBA::MARSHAL& ex) { - partitionBursts(first_burst, startTime, queueDepth, streamID, connection); - } - try { - size_t total_elements = 0; - for (CORBA::ULong index = 0; index < first_burst.length(); ++index) { - total_elements += first_burst[index].data.length(); - } - connection.port->pushBursts(second_burst); - connection.info->alive = true; - connection.info->stats.record(second_burst.length(), total_elements, queueDepth, delay.total_microseconds() * 1e-6); - } catch (const CORBA::MARSHAL& ex) { - partitionBursts(second_burst, startTime, queueDepth, streamID, connection); - } - } template void OutPort::queueBurst (SequenceType& data, const BURSTIO::BurstSRI& sri, @@ -465,7 +560,7 @@ namespace burstio { queue.queueBurst(data, sri, timestamp, eos, isComplex); if (eos) { if (ROUTE_ALL_INTERLEAVED != routingMode_) { - LOG_INSTANCE_DEBUG("Flushing '" << streamID << " on EOS"); + RH_DEBUG(_portLog, "Flushing '" << streamID << " on EOS"); queue.flush(); delete streamQueues_[streamID]; } @@ -478,14 +573,13 @@ namespace burstio { { boost::mutex::scoped_lock lock(updatingPortsLock); BULKIO::UsesPortStatisticsSequence_var retval = new BULKIO::UsesPortStatisticsSequence(); - retval->length(connections_.size()); + retval->length(_connections.size()); CORBA::ULong index = 0; - for (typename ConnectionMap::iterator ii = connections_.begin(); ii != connections_.end(); ++ii, ++index) { - retval[index].connectionId = ii->first.c_str(); - BULKIO::PortStatistics_var stats = ii->second.info->stats.retrieve(); + for 
(TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection, ++index) { + BULKIO::PortStatistics_var stats = connection.transport()->getStatistics(); for (typename QueueMap::iterator jj = streamQueues_.begin(); jj != streamQueues_.end(); ++jj) { const std::string& streamID = jj->first; - if (isStreamRoutedToConnection(streamID, ii->first)) { + if (isStreamRoutedToConnection(streamID, connection.connectionId())) { burstio::utils::push_back(stats->streamIDs, jj->first.c_str()); } } @@ -499,10 +593,10 @@ namespace burstio { { boost::mutex::scoped_lock lock(queueMutex_); if (ROUTE_ALL_INTERLEAVED == routingMode_) { - LOG_INSTANCE_DEBUG("Forcing flush of default queue"); + RH_DEBUG(_portLog, "Forcing flush of default queue"); defaultQueue_.flush(); } else { - LOG_INSTANCE_DEBUG("Forcing flush of all queues"); + RH_DEBUG(_portLog, "Forcing flush of all queues"); for (typename QueueMap::iterator queue = streamQueues_.begin(); queue != streamQueues_.end(); ++queue) { queue->second->flush(); } @@ -510,16 +604,17 @@ namespace burstio { } template - void OutPort::connectionAdded (const std::string& connectionId, Connection& connection) + redhawk::UsesTransport* OutPort::_createTransport (CORBA::Object_ptr object, + const std::string& connectionId) { - connection.info = new PortStatus(this->name, sizeof(ElementType)*8); - } - - template - void OutPort::connectionModified (const std::string& connectionId, Connection& connection) - { - // Assume that the updated connection is alive - connection.info->alive = true; + typedef typename PortType::_var_type var_type; + var_type port = ossie::corba::_narrowSafe(object); + InPort* local_port = ossie::corba::getLocalServant >(port); + if (local_port) { + return new LocalTransport(this, local_port); + } else { + return new CorbaTransport(this, port); + } } template @@ -564,7 +659,7 @@ namespace burstio { streamQueues_[streamID] = &defaultQueue_; return defaultQueue_; } else { - 
LOG_INSTANCE_TRACE("Creating new queue for stream " << streamID); + RH_TRACE(_portLog, "Creating new queue for stream " << streamID); // Propagate the default queue's settings size_t max_bursts = defaultQueue_.getMaxBursts(); size_t byte_threshold = defaultQueue_.getByteThreshold(); @@ -590,7 +685,6 @@ namespace burstio { } #define INSTANTIATE_TEMPLATE(traits, name) \ - PREPARE_CLASS_LOGGING(name); \ template class OutPort; #endif diff --git a/burstioInterfaces/src/cpp/lib/utils.cpp b/burstioInterfaces/src/cpp/lib/utils.cpp index 91f14cc10..3e4b4f505 100644 --- a/burstioInterfaces/src/cpp/lib/utils.cpp +++ b/burstioInterfaces/src/cpp/lib/utils.cpp @@ -19,6 +19,8 @@ */ #include +#include + #include namespace burstio { @@ -38,42 +40,38 @@ namespace burstio { double elapsed (const BULKIO::PrecisionUTCTime& begin, const BULKIO::PrecisionUTCTime& end) { - return (end.twsec - begin.twsec) + (end.tfsec - begin.tfsec); + return end - begin; } - - BURSTIO::BurstSRI createSRI ( const std::string &streamID, double xdelta) { - BURSTIO::BurstSRI sri; - sri.hversion = 1; - sri.streamID = streamID.c_str(); - sri.id = ""; - sri.xdelta = xdelta; - sri.mode = (short)0; - sri.flags = (short)0; - sri.tau = 0.0; - sri.theta = 0.0f; - sri.gain = 0.0f; - sri.uwlength = (short)0; - sri.bursttype = (short)0; - sri.burstLength = 0; - sri.CHAN_RF = 0.0; - sri.baudestimate = 0.0f; - sri.carrieroffset = 0.0; - sri.SNR = 0.0; - sri.modulation = ""; - sri.baudrate = 0.0; - sri.fec = ""; - sri.fecrate = ""; - sri.randomizer = ""; - sri.overhead = ""; - sri.expectedStartOfBurstTime = burstio::utils::now(); - sri.keywords.length(0); - return sri; + BURSTIO::BurstSRI createSRI (const std::string& streamID, double xdelta) + { + BURSTIO::BurstSRI sri; + sri.hversion = 1; + sri.streamID = streamID.c_str(); + sri.id = ""; + sri.xdelta = xdelta; + sri.mode = (short)0; + sri.flags = (short)0; + sri.tau = 0.0; + sri.theta = 0.0f; + sri.gain = 0.0f; + sri.uwlength = (short)0; + sri.bursttype = (short)0; + 
sri.burstLength = 0; + sri.CHAN_RF = 0.0; + sri.baudestimate = 0.0f; + sri.carrieroffset = 0.0; + sri.SNR = 0.0; + sri.modulation = ""; + sri.baudrate = 0.0; + sri.fec = ""; + sri.fecrate = ""; + sri.randomizer = ""; + sri.overhead = ""; + sri.expectedStartOfBurstTime = burstio::utils::now(); + sri.keywords.length(0); + return sri; } - BURSTIO::BurstSRI createSRI ( const std::string &streamID ) { - return createSRI( streamID, 1.0 ); - } - } } diff --git a/burstioInterfaces/src/java/burstio/BurstIn.java.template b/burstioInterfaces/src/java/burstio/BurstIn.java.template index 1929b8b76..e7c67dd0d 100644 --- a/burstioInterfaces/src/java/burstio/BurstIn.java.template +++ b/burstioInterfaces/src/java/burstio/BurstIn.java.template @@ -7,12 +7,15 @@ package burstio; import burstio.traits.@name@Traits; -import org.ossie.component.PortBase; +import org.ossie.component.StartablePort; +import org.ossie.component.RHLogger; -public class Burst@name@In extends BURSTIO.jni.burst@name@POA implements InPort, PortBase +public class Burst@name@In extends BURSTIO.jni.burst@name@POA implements InPort, StartablePort { private final InPortImpl impl_; + public RHLogger _portLog = null; + public Burst@name@In (final String name) { this.impl_ = new InPortImpl(name, new @name@Traits()); @@ -23,6 +26,16 @@ public class Burst@name@In extends BURSTIO.jni.burst@name@POA implements InPort< return this.impl_.getName(); } + public void startPort () + { + start(); + } + + public void stopPort () + { + stop(); + } + public void start () { this.impl_.start(); @@ -88,13 +101,18 @@ public class Burst@name@In extends BURSTIO.jni.burst@name@POA implements InPort< this.impl_.flush(); } - public String getRepid () - { - return BURSTIO.burst@name@Helper.id(); - } + public String getRepid () + { + return BURSTIO.burst@name@Helper.id(); + } - public String getDirection () - { - return "Provides"; - } + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + + public String getDirection () + 
{ + return CF.PortSet.DIRECTION_PROVIDES; + } } diff --git a/burstioInterfaces/src/java/burstio/InPort.java b/burstioInterfaces/src/java/burstio/InPort.java index cf20861f5..16c2f74e7 100644 --- a/burstioInterfaces/src/java/burstio/InPort.java +++ b/burstioInterfaces/src/java/burstio/InPort.java @@ -19,8 +19,6 @@ */ package burstio; -import org.ossie.component.PortBase; - public interface InPort { diff --git a/burstioInterfaces/src/java/burstio/OutPort.java b/burstioInterfaces/src/java/burstio/OutPort.java index f09d9aac2..515d3192a 100644 --- a/burstioInterfaces/src/java/burstio/OutPort.java +++ b/burstioInterfaces/src/java/burstio/OutPort.java @@ -33,18 +33,21 @@ import org.apache.log4j.Logger; +import org.ossie.component.StartablePort; +import org.ossie.component.RHLogger; import org.ossie.properties.IProperty; import org.ossie.properties.StructDef; import burstio.stats.SenderStatistics; import burstio.traits.BurstTraits; -import org.ossie.component.PortBase; -abstract class OutPort extends BULKIO.UsesPortStatisticsProviderPOA implements PortBase { +abstract class OutPort extends BULKIO.UsesPortStatisticsProviderPOA implements StartablePort { public static final int DEFAULT_MAX_BURSTS = 100; public static final int DEFAULT_LATENCY_THRESHOLD = 10000; // 10000 us = 10ms + public RHLogger _portLog = null; + // Basic port information static class Connection { @@ -86,7 +89,9 @@ public synchronized void setMaxBursts (int bursts) { this.maxBursts_ = bursts; if (this.queue_.size() >= this.maxBursts_) { - OutPort.this.logger_.debug("New max bursts " + this.maxBursts_ + " triggering push"); + if (OutPort.this._portLog != null) { + OutPort.this._portLog.debug("New max bursts " + this.maxBursts_ + " triggering push"); + } this.executeThreadedFlush(); } } @@ -113,7 +118,9 @@ public synchronized void setByteThreshold (int bytes) { this.thresholdBytes_ = bytes; if (this.queuedBytes_ >= this.thresholdBytes_) { - OutPort.this.logger_.debug("New byte threshold " + 
this.thresholdBytes_ + " triggering push"); + if (OutPort.this._portLog != null) { + OutPort.this._portLog.debug("New byte threshold " + this.thresholdBytes_ + " triggering push"); + } this.executeThreadedFlush(); } } @@ -129,16 +136,22 @@ protected synchronized void queueBurst (B burst) if (this.queue_.isEmpty()) { this.startTime_ = System.nanoTime(); // Wake up the monitor thread so it can set its timeout - OutPort.this.logger_.trace("Waking monitor thread on first queued burst"); + if (OutPort.this._portLog != null) { + OutPort.this._portLog.trace("Waking monitor thread on first queued burst"); + } OutPort.this.scheduleCheck(this.startTime_ + this.thresholdLatency_); } this.queue_.add(burst); this.queuedBytes_ += OutPort.this.traits_.burstLength(burst) * OutPort.this.bytesPerElement_; - OutPort.this.logger_.trace("Queue size: " + this.queue_.size() + " bursts / " + this.queuedBytes_ + " bytes"); + if (OutPort.this._portLog != null) { + OutPort.this._portLog.trace("Queue size: " + this.queue_.size() + " bursts / " + this.queuedBytes_ + " bytes"); + } if (this.shouldFlush()) { - OutPort.this.logger_.debug("Queued burst exceeded threshold, flushing queue"); + if (OutPort.this._portLog != null) { + OutPort.this._portLog.debug("Queued burst exceeded threshold, flushing queue"); + } this.flushQueue(); } } @@ -435,6 +448,11 @@ public void setLogger (Logger logger) this.logger_ = logger; } + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + public synchronized void start () { if (this.running_) { @@ -444,6 +462,11 @@ public synchronized void start () this.running_ = true; } + public void startPort () + { + start(); + } + public void stop () { synchronized (this) { @@ -456,6 +479,11 @@ public void stop () this.flush(); } + public void stopPort () + { + stop(); + } + public BULKIO.PortUsageType state () { synchronized (this.connections_) { @@ -503,7 +531,9 @@ public void pushBurst(A data, BURSTIO.BurstSRI sri, BULKIO.PrecisionUTCTime time 
queue.queueBurst(burst); if (eos) { if (!isInterleaved()) { - this.logger_.debug("Flushing " + sri.streamID + " on EOS"); + if (this._portLog != null) { + this._portLog.debug("Flushing " + sri.streamID + " on EOS"); + } queue.flush(); } this.streamQueues_.remove(sri.streamID); @@ -528,7 +558,7 @@ public String getRepid () public String getDirection () { - return "Uses"; + return CF.PortSet.DIRECTION_USES; } protected void sendBursts(B[] bursts, long startTime, float queueDepth, final String streamID) @@ -555,7 +585,9 @@ protected void sendBursts(B[] bursts, long startTime, float queueDepth, final St } catch (org.omg.CORBA.SystemException ex) { if (bursts.length == 1) { if (connection.alive) { - this.logger_.error("pushBursts to " + connectionId + " failed the burst size is too long"); + if (this._portLog != null) { + this._portLog.error("pushBursts to " + connectionId + " failed the burst size is too long"); + } connection.alive = false; } } else { @@ -563,7 +595,9 @@ protected void sendBursts(B[] bursts, long startTime, float queueDepth, final St } } catch (final Exception ex) { if (connection.alive) { - this.logger_.error("pushBursts to " + connectionId + " failed: " + ex); + if (this._portLog != null) { + this._portLog.error("pushBursts to " + connectionId + " failed: " + ex); + } connection.alive = false; } } @@ -659,7 +693,9 @@ private Queue getQueueForStream (final String streamID) if (isInterleaved()) { queue = this.defaultQueue_; } else { - this.logger_.trace("Creating new queue for stream " + streamID); + if (this._portLog != null) { + this._portLog.trace("Creating new queue for stream " + streamID); + } // Propagate the default queue's policy settings final int max_bursts = this.defaultQueue_.getMaxBursts(); final int byte_threshold = this.defaultQueue_.getByteThreshold(); diff --git a/burstioInterfaces/src/java/pom.xml b/burstioInterfaces/src/java/pom.xml deleted file mode 100644 index 5b923dc70..000000000 --- a/burstioInterfaces/src/java/pom.xml +++ 
/dev/null @@ -1,104 +0,0 @@ - - 4.0.0 - - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../../../pom.xml - - burstio - bundle - - - ${project.groupId} - burstio-interfaces - ${project.version} - - - ${project.groupId} - ossie - ${project.version} - - - ${project.groupId} - bulkio-interfaces - ${project.version} - - - ${project.groupId} - cf-interfaces - ${project.version} - - - - log4j - log4j - 1.2.15 - - - com.sun.jmx - jmxri - - - com.sun.jdmk - jmxtools - - - javax.jms - jms - - - - - - burstio - - - org.codehaus.gmaven - gmaven-plugin - 1.3 - - - set-main-artifact - package - - execute - - - - project.artifact.setFile(new - File("${project.basedir}/burstio.jar")) - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifacts - package - - attach-artifact - - - - - ${project.build.directory}/${project.artifactId}-${project.version}.jar - beta - jar - - - - - - - - - - diff --git a/burstioInterfaces/src/python/redhawk/burstio/outports.py b/burstioInterfaces/src/python/redhawk/burstio/outports.py index 828d6f984..e8d190652 100644 --- a/burstioInterfaces/src/python/redhawk/burstio/outports.py +++ b/burstioInterfaces/src/python/redhawk/burstio/outports.py @@ -73,7 +73,7 @@ def setMaxBursts(self, bursts): self._maxBursts = bursts if self._burstsExceeded(): # Wake up the monitor thread to do a push - self._log.debug('New max bursts %s triggering push', bursts); + self._portLog.debug('New max bursts %s triggering push', bursts); self._executeThreadedFlush() finally: self._queueMutex.release() @@ -90,7 +90,7 @@ def setByteThreshold(self, bytes): try: self._thresholdBytes = bytes if self._bytesExceeded(): - self._log.debug('New byte threshold %s triggering push', bytes); + self._portLog.debug('New byte threshold %s triggering push', bytes); # Wake up the monitor thread to do a push self._executeThreadedFlush() finally: @@ -130,7 +130,7 @@ def _queueBurst(self, burst): if self._startTime is None: self._startTime = time.time() # Wake the 
monitor thread so it can set its timeout - self._port._log.trace('Waking monitor thread on first queued burst') + self._port._portLog.trace('Waking monitor thread on first queued burst') self._port._scheduleCheck(self._startTime + self._thresholdLatency) # Add the burst to the queue, tracking the total number of data @@ -140,13 +140,13 @@ def _queueBurst(self, burst): # NB: Whether it is executed or not, this trace statement has a # significant impact on performance - #self._log.trace('Queue size: %s bursts / %s bytes', len(self._queue), self._queuedBytes) + #self._portLog.trace('Queue size: %s bursts / %s bytes', len(self._queue), self._queuedBytes) # Check whether the max bursts or byte threshold has been exceeded # NB: Function calls incur significant overhead in Python, so the # relevant checks are inlined versus calling a method, if len(self._queue) >= self._maxBursts or self._queuedBytes >= self._thresholdBytes: - self._port._log.debug('Queued burst exceeded threshold, flushing queue') + self._port._portLog.debug('Queued burst exceeded threshold, flushing queue') self._flushQueue() finally: self._queueMutex.release() @@ -202,7 +202,7 @@ def __init__(self, name, traits): self._bytesPerElement = traits.size() # Logging; default logger is the class name - self._log = logging.getLogger(self.__class__.__name__) + self._portLog = logging.getLogger(self.__class__.__name__) # Perform latency monitoring and deferred pushes on a separate thread self._monitor = ExecutorService() @@ -263,7 +263,7 @@ def setRoutingMode(self, mode): self._routingMode = mode def setLogger(self, logger): - self._log = logger + self._portLog = logger def updateConnectionFilter(self, filterTable): new_routes = {} @@ -320,7 +320,7 @@ def pushBurst(self, data, sri, timestamp=None, eos=False): queue._queueBurst(burst) if eos: if not self._isInterleaved(): - self._log.debug("Flushing '%s' on EOS", sri.streamID) + self._portLog.debug("Flushing '%s' on EOS", sri.streamID) queue.flush() del 
self._streamQueues[sri.streamID] finally: @@ -338,7 +338,7 @@ def flush(self): self._queueMutex.release() def _sendBursts(self, bursts, startTime, queueDepth, streamID=None): - self._log.debug('Pushing %d bursts', len(bursts)) + self._portLog.debug('Pushing %d bursts', len(bursts)) # Count up total elements total_elements = sum(len(burst.data) for burst in bursts) @@ -359,17 +359,17 @@ def _sendBursts(self, bursts, startTime, queueDepth, streamID=None): except CORBA.MARSHAL, e: if len(bursts) == 1: if connection.alive: - self._log.error('pushBursts to %s failed because the burst size is too long') + self._portLog.error('pushBursts to %s failed because the burst size is too long') connection.alive = False else: self._partitionBursts(bursts, startTime, queueDepth, connection) except Exception, e: if connection.alive: - self._log.error('pushBursts to %s failed: %s', connectionId, e) + self._portLog.error('pushBursts to %s failed: %s', connectionId, e) connection.alive = False except: if connection.alive: - self._log.error('pushBursts to %s failed', connectionId) + self._portLog.error('pushBursts to %s failed', connectionId) connection.alive = False finally: self._connectionMutex.release() @@ -442,7 +442,7 @@ def _getQueueForStream(self, streamID): elif self._isInterleaved(): queue = self._defaultQueue else: - self._log.trace('Creating new queue for stream %s', streamID) + self._portLog.trace('Creating new queue for stream %s', streamID) # Propogate the default queue's settings max_bursts = self._defaultQueue.getMaxBursts() byte_threshold = self._defaultQueue.getByteThreshold() diff --git a/burstioInterfaces/testing/tests/.gitignore b/burstioInterfaces/testing/tests/.gitignore index 27e8f841e..4cc14636b 100644 --- a/burstioInterfaces/testing/tests/.gitignore +++ b/burstioInterfaces/testing/tests/.gitignore @@ -1,2 +1,3 @@ TEST*.xml cppunit-results.xml +java/Burstio diff --git a/burstioInterfaces/testing/tests/buildtests b/burstioInterfaces/testing/tests/buildtests 
deleted file mode 100755 index d133f1854..000000000 --- a/burstioInterfaces/testing/tests/buildtests +++ /dev/null @@ -1,35 +0,0 @@ -# -# Build supporting components for burstio test framework -# - -burstio_top=../../../ -burstio_libsrc_top=$burstio_top/libsrc -export LD_LIBRARY_PATH=$burstio_libsrc_top/.libs:$burstio_top/.libs:${LD_LIBRARY_PATH} -export PYTHONPATH=$burstio_libsrc_top/build/lib:${PYTHONPATH} - -cd cpp -./reconf; ./configure; make -cd - - -# -# Build supporting components for burstio test framework -# - -# -# build java tests from template classes -# -sedir=../../../src/java/sed -cd java -for bt in Byte Double Float LongLong Long Short Ubyte UlongLong Ulong Ushort; do - sed -f $sedir/$bt.sed ./templates/InBurstPort_Test.template > InBurst${bt}Port_Test.java - sed -f $sedir/$bt.sed ./templates/OutBurstPort_Test.template > OutBurst${bt}Port_Test.java - sed -f $sedir/$bt.sed ./templates/BurstPush_Test.template > Burst${bt}Push_Test.java -done -sed -f $sedir/$bt.sed ./templates/Burstio_Utils_Test.template > Burstio_Utils_Test.java - -if command -v ant 2>/dev/null -then - ant compile -else - make build-all -fi diff --git a/burstioInterfaces/testing/tests/cpp/Burstio.cpp b/burstioInterfaces/testing/tests/cpp/Burstio.cpp index 651cd9b4d..74d1e7d83 100644 --- a/burstioInterfaces/testing/tests/cpp/Burstio.cpp +++ b/burstioInterfaces/testing/tests/cpp/Burstio.cpp @@ -18,6 +18,7 @@ * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ #include +#include #include #include #include @@ -28,14 +29,18 @@ #include "log4cxx/basicconfigurator.h" #include "log4cxx/propertyconfigurator.h" #include "log4cxx/helpers/exception.h" -using namespace std; - int main(int argc, char* argv[]) { + // Locate the logging configuration file relative to the source directory + std::string log_config = "log4j.props"; + char* srcdir = getenv("srcdir"); + if (srcdir) { + log_config = std::string(srcdir) + "/" + log_config; + } // Set up a simple configuration that logs on the console. - log4cxx::PropertyConfigurator::configure("log4j.props"); + log4cxx::PropertyConfigurator::configure(log_config); // Get the top level suite from the registry CppUnit::Test *suite = CppUnit::TestFactoryRegistry::getRegistry().makeTest(); @@ -47,7 +52,7 @@ int main(int argc, char* argv[]) controller.addListener ( &result ); CppUnit::TextUi::TestRunner *runner = new CppUnit::TextUi::TestRunner; - ofstream xmlout ( "../cppunit-results.xml" ); + std::ofstream xmlout ( "../cppunit-results.xml" ); CppUnit::XmlOutputter xmlOutputter ( &result, xmlout ); CppUnit::CompilerOutputter compilerOutputter ( &result, std::cerr ); diff --git a/burstioInterfaces/testing/tests/cpp/Burstio_InPort.cpp b/burstioInterfaces/testing/tests/cpp/Burstio_InPort.cpp index 4a534d543..f9aa2832d 100644 --- a/burstioInterfaces/testing/tests/cpp/Burstio_InPort.cpp +++ b/burstioInterfaces/testing/tests/cpp/Burstio_InPort.cpp @@ -18,8 +18,9 @@ * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ #include "Burstio_InPort.h" -#include "burstio.h" -#include "bulkio.h" + +#include +#include // Registers the fixture into the 'registry' CPPUNIT_TEST_SUITE_REGISTRATION( Burstio_InPort ); diff --git a/burstioInterfaces/testing/tests/cpp/Burstio_OutPort.cpp b/burstioInterfaces/testing/tests/cpp/Burstio_OutPort.cpp index be442028a..4078205a1 100644 --- a/burstioInterfaces/testing/tests/cpp/Burstio_OutPort.cpp +++ b/burstioInterfaces/testing/tests/cpp/Burstio_OutPort.cpp @@ -18,9 +18,9 @@ * along with this program. If not, see http://www.gnu.org/licenses/. */ #include "Burstio_OutPort.h" -#include "bulkio.h" -#include "burstio.h" +#include +#include // Registers the fixture into the 'registry' CPPUNIT_TEST_SUITE_REGISTRATION( Burstio_OutPort ); diff --git a/burstioInterfaces/testing/tests/cpp/Burstio_PushTest.cpp b/burstioInterfaces/testing/tests/cpp/Burstio_PushTest.cpp index d652770b3..880b04b61 100644 --- a/burstioInterfaces/testing/tests/cpp/Burstio_PushTest.cpp +++ b/burstioInterfaces/testing/tests/cpp/Burstio_PushTest.cpp @@ -18,8 +18,9 @@ * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ #include "Burstio_PushTest.h" -#include "bulkio.h" -#include "burstio.h" + +#include +#include template < typename OUT_PORT, typename IN_PORT > diff --git a/burstioInterfaces/testing/tests/cpp/Burstio_PushTest.h b/burstioInterfaces/testing/tests/cpp/Burstio_PushTest.h index 4d2f16e6d..228ca04be 100644 --- a/burstioInterfaces/testing/tests/cpp/Burstio_PushTest.h +++ b/burstioInterfaces/testing/tests/cpp/Burstio_PushTest.h @@ -21,9 +21,9 @@ #define BURSTIO_PUSHTEST_FIXTURE_H #include -#include -#include -#include +#include +#include +#include template< typename OUT_PORT, typename IN_PORT > class Burstio_PushBursts: public CppUnit::TestFixture diff --git a/burstioInterfaces/testing/tests/cpp/LocalTest.cpp b/burstioInterfaces/testing/tests/cpp/LocalTest.cpp new file mode 100644 index 000000000..62176d85e --- /dev/null +++ b/burstioInterfaces/testing/tests/cpp/LocalTest.cpp @@ -0,0 +1,214 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK burstioInterfaces. + * + * REDHAWK burstioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK burstioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "LocalTest.h" + +#include +#include + +template +void LocalTest::setUp() +{ + rootLogger = rh_logger::Logger::getLogger("Local"+getPortName()+"Test"); + + std::string name = "burst" + getPortName(); + outPort = new OutPort(name + "_out"); + outPort->setLogger(rh_logger::Logger::getLogger(rootLogger->getName() + "." + outPort->getName())); + inPort = new InPort(name + "_in"); + inPort->setLogger(rh_logger::Logger::getLogger(rootLogger->getName() + "." + inPort->getName())); + + _activatePort(inPort); + + CORBA::Object_var objref = inPort->_this(); + outPort->connectPort(objref, "local_connection"); + + inPort->start(); + outPort->start(); +} + +template +void LocalTest::tearDown() +{ + inPort->stop(); + outPort->stop(); + + ExtendedCF::UsesConnectionSequence_var connections = outPort->connections(); + for (CORBA::ULong index = 0; index < connections->length(); ++index) { + outPort->disconnectPort(connections[index].connectionId); + } + + for (typename std::vector::iterator servant = servants.begin(); servant != servants.end(); ++servant) { + _deactivatePort(*servant); + (*servant)->_remove_ref(); + } + + delete outPort; +} + +template +void LocalTest::_activatePort(InPort* port) +{ + PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->activate_object(port); + servants.push_back(port); +} + +template +void LocalTest::_deactivatePort(InPort* port) +{ + try { + PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->servant_to_id(port); + ossie::corba::RootPOA()->deactivate_object(oid); + } catch (...) 
{ + // Ignore CORBA exceptions + } +} + +template +void LocalTest::testPushBurst() +{ + // Queue up a bunch of bursts + const size_t BURST_COUNT = 16; + for (size_t ii = 0; ii < BURST_COUNT; ++ii) { + BurstType burst; + burst.SRI = burstio::utils::createSRI("test_stream"); + burst.data.length(50); + std::fill(burst.data.get_buffer(), burst.data.get_buffer() + burst.data.length(), ii); + burst.T = burstio::utils::now(); + burst.EOS = false; + outPort->pushBurst(burst); + } + + // Force the output port to send the bursts + outPort->flush(); + + // Read the bursts one at a time and check that they look reasonable + for (size_t ii = 0; ii < BURST_COUNT; ++ii) { + boost::scoped_ptr burst(inPort->getBurst(0.0)); + CPPUNIT_ASSERT(burst); + CPPUNIT_ASSERT_EQUAL(std::string("test_stream"), burst->getStreamID()); + CPPUNIT_ASSERT_EQUAL((size_t) 50, burst->getSize()); + CPPUNIT_ASSERT(*(burst->getData()) == ii); + } +} + +template +void LocalTest::testPushBursts() +{ + // Build up a sequence of bursts + BurstSequenceType bursts; + for (size_t ii = 0; ii < 24; ++ii) { + BurstType burst; + burst.SRI = burstio::utils::createSRI("test_stream"); + burst.data.length(100); + std::fill(burst.data.get_buffer(), burst.data.get_buffer() + burst.data.length(), ii); + burst.T = burstio::utils::now(); + burst.EOS = false; + ossie::corba::push_back(bursts, burst); + } + + // Push the entire sequence (skips buffering) and make sure the other port + // didn't steal the bursts + outPort->pushBursts(bursts); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 24, bursts.length()); + + // Read the bursts (in bulk) and check them against the originals + BurstSequenceVar results = inPort->getBursts(0.0); + CPPUNIT_ASSERT_EQUAL(bursts.length(), results->length()); + for (CORBA::ULong ii = 0; ii < results->length(); ++ii) { + CPPUNIT_ASSERT_EQUAL(std::string("test_stream"), std::string(results[ii].SRI.streamID)); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 100, results[ii].data.length()); + 
CPPUNIT_ASSERT(bursts[ii].data[0] == ii); + CPPUNIT_ASSERT_EQUAL(bursts[ii].T, results[ii].T); + CPPUNIT_ASSERT(!results[ii].EOS); + } +} + +template +void LocalTest::testFanOut() +{ + // Create a second input port to check 1:2 fan out + InPort* inPort2 = new InPort("burst" + getPortName() + "_in_2"); + inPort2->setLogger(rh_logger::Logger::getLogger(rootLogger->getName() + "." + inPort2->getName())); + _activatePort(inPort2); + inPort2->start(); + + CORBA::Object_var objref = inPort2->_this(); + outPort->connectPort(objref, "local_connection_2"); + + // Build up a sequence of bursts + BurstSequenceType bursts; + for (size_t ii = 0; ii < 24; ++ii) { + BurstType burst; + burst.SRI = burstio::utils::createSRI("test_stream"); + burst.data.length(100); + std::fill(burst.data.get_buffer(), burst.data.get_buffer() + burst.data.length(), ii); + burst.T = burstio::utils::now(); + burst.EOS = false; + ossie::corba::push_back(bursts, burst); + } + + // Push the entire sequence (skips buffering) and make sure the other ports + // didn't steal the bursts + outPort->pushBursts(bursts); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 24, bursts.length()); + + // Read the bursts (in bulk) and check them against the originals + BurstSequenceVar results = inPort->getBursts(0.0); + CPPUNIT_ASSERT_EQUAL(bursts.length(), results->length()); + for (CORBA::ULong ii = 0; ii < results->length(); ++ii) { + CPPUNIT_ASSERT_EQUAL(std::string("test_stream"), std::string(results[ii].SRI.streamID)); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 100, results[ii].data.length()); + CPPUNIT_ASSERT(bursts[ii].data[0] == ii); + CPPUNIT_ASSERT_EQUAL(bursts[ii].T, results[ii].T); + CPPUNIT_ASSERT(!results[ii].EOS); + } + + // Repeat with the second port; the results should be the same + BurstSequenceVar results2 = inPort2->getBursts(0.0); + CPPUNIT_ASSERT_EQUAL(bursts.length(), results2->length()); + for (CORBA::ULong ii = 0; ii < results2->length(); ++ii) { + CPPUNIT_ASSERT_EQUAL(std::string("test_stream"), 
std::string(results2[ii].SRI.streamID)); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 100, results2[ii].data.length()); + CPPUNIT_ASSERT(bursts[ii].data[0] == ii); + CPPUNIT_ASSERT_EQUAL(bursts[ii].T, results2[ii].T); + CPPUNIT_ASSERT(!results2[ii].EOS); + } +} + +#define CREATE_TEST(x) \ + class Local##x##Test : public LocalTest \ + { \ + typedef LocalTest TestBase; \ + CPPUNIT_TEST_SUB_SUITE(Local##x##Test, TestBase); \ + CPPUNIT_TEST_SUITE_END(); \ + virtual std::string getPortName() const { return #x; }; \ + }; \ + CPPUNIT_TEST_SUITE_REGISTRATION(Local##x##Test); + +CREATE_TEST(Byte); +CREATE_TEST(Ubyte); +CREATE_TEST(Short); +CREATE_TEST(Ushort); +CREATE_TEST(Long); +CREATE_TEST(Ulong); +CREATE_TEST(LongLong); +CREATE_TEST(UlongLong); +CREATE_TEST(Float); +CREATE_TEST(Double); diff --git a/burstioInterfaces/testing/tests/cpp/LocalTest.h b/burstioInterfaces/testing/tests/cpp/LocalTest.h new file mode 100644 index 000000000..adca8d2d1 --- /dev/null +++ b/burstioInterfaces/testing/tests/cpp/LocalTest.h @@ -0,0 +1,64 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK burstioInterfaces. + * + * REDHAWK burstioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK burstioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef BURSTIO_LOCALTEST_H +#define BURSTIO_LOCALTEST_H + +#include + +#include +#include + +template +class LocalTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(LocalTest); + CPPUNIT_TEST(testPushBurst); + CPPUNIT_TEST(testPushBursts); + CPPUNIT_TEST(testFanOut); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testPushBurst(); + void testPushBursts(); + void testFanOut(); + +protected: + typedef typename OutPort::BurstType BurstType; + typedef typename OutPort::BurstSequenceType BurstSequenceType; + typedef typename InPort::PacketType PacketType; + typedef typename InPort::BurstSequenceVar BurstSequenceVar; + + virtual std::string getPortName() const = 0; + + OutPort* outPort; + InPort* inPort; + + void _activatePort(InPort* port); + void _deactivatePort(InPort* port); + + std::vector servants; + + rh_logger::LoggerPtr rootLogger; +}; + +#endif // BURSTIO_LOCALTEST_H diff --git a/burstioInterfaces/testing/tests/cpp/Makefile.am b/burstioInterfaces/testing/tests/cpp/Makefile.am index 36d6b405b..2dd7e155f 100644 --- a/burstioInterfaces/testing/tests/cpp/Makefile.am +++ b/burstioInterfaces/testing/tests/cpp/Makefile.am @@ -18,17 +18,13 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # -ACLOCAL_AMFLAGS = -I m4 -I ${OSSIEHOME}/share/aclocal/ossie -I acinclude - -burstio_top=../../.. 
-burstio_cpplib_top=$(burstio_top)/src/cpp -Burstio_SOURCES = Burstio.cpp Burstio_InPort.cpp Burstio_OutPort.cpp Burstio_PushTest.cpp Burstio_Utils_Test.cpp -Burstio_INCLUDES = -I$(burstio_cpplib_top)/include -I$(burstio_cpplib_top)/include/burstio -I$(burstio_cpplib_top)/redhawk -I$(burstio_cpplib_top)/ -Burstio_CXXFLAGS = $(CPPUNIT_CFLAGS) $(Burstio_INCLUDES) $(BOOST_CPPFLAGS) $(BULKIO_CFLAGS) $(RH_DEPS_CFLAGS) -Burstio_LDADD = -L$(burstio_cpplib_top)/.libs -lburstio -lburstioInterfaces $(BULKIO_LIBS) $(RH_DEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_SYSTEM_LIB) $(CPPUNIT_LIBS) -llog4cxx -ldl - +burstio_include=$(top_srcdir)/src/cpp/include +Burstio_SOURCES = Burstio.cpp Burstio_InPort.cpp Burstio_OutPort.cpp Burstio_PushTest.cpp Burstio_Utils_Test.cpp +Burstio_SOURCES += LocalTest.h LocalTest.cpp +Burstio_INCLUDES = -I$(burstio_include) -I$(burstio_include)/burstio -I$(top_builddir)/src/cpp -I$(top_builddir)/src/cpp/redhawk +Burstio_CXXFLAGS = $(CPPUNIT_CFLAGS) $(Burstio_INCLUDES) $(BOOST_CPPFLAGS) $(BULKIO_CFLAGS) +Burstio_LDADD = $(BULKIO_LIBS) $(BOOST_LDFLAGS) $(BOOST_SYSTEM_LIB) $(CPPUNIT_LIBS) -llog4cxx -ldl +Burstio_LDADD += $(top_builddir)/src/cpp/libburstio.la $(top_builddir)/src/cpp/libburstioInterfaces.la TESTS = Burstio -noinst_PROGRAMS=$(TESTS) check_PROGRAMS=$(TESTS) - diff --git a/burstioInterfaces/testing/tests/cpp/configure.ac b/burstioInterfaces/testing/tests/cpp/configure.ac deleted file mode 100644 index 7bef998de..000000000 --- a/burstioInterfaces/testing/tests/cpp/configure.ac +++ /dev/null @@ -1,46 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK burstioInterfaces. 
- * - * REDHAWK burstioInterfaces is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK burstioInterfaces is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ - -dnl Process this file with autoconf to produce a configure script. -AC_INIT(Burstio_Test,0.1) -AC_CONFIG_SRCDIR([Burstio.cpp]) -AM_INIT_AUTOMAKE -AM_PATH_CPPUNIT(1.9.6) -AC_CONFIG_MACRO_DIR([m4]) -AC_PROG_CXX -AC_PROG_CC -AC_PROG_INSTALL -AC_PREFIX_DEFAULT(${OSSIEHOME}) -AC_CORBA_ORB - -OSSIE_CHECK_OSSIE -OSSIE_OSSIEHOME_AS_PREFIX -PKG_CHECK_MODULES([RH_DEPS], [ossie >= 1.7 omniORB4 >= 4.0.0]) -PKG_CHECK_MODULES([BULKIO], [ bulkio >= 1.0 bulkioInterfaces >= 1.9 ]) - -AX_BOOST_BASE([1.41]) -AX_BOOST_THREAD -AX_BOOST_SYSTEM - -# set PKG_CONFIG_PATH to look at local xxx.pc files -export PKG_CONFIG_PATH="../../../..:../../..":${PKG_CONFIG_PATH} -PKG_CHECK_MODULES([BIO], [ burstio >= 1.0 burstioInterfaces >= 1.8 ]) - -AC_OUTPUT(Makefile) diff --git a/burstioInterfaces/testing/tests/cpp/m4/libtool.m4 b/burstioInterfaces/testing/tests/cpp/m4/libtool.m4 deleted file mode 100644 index 671cde117..000000000 --- a/burstioInterfaces/testing/tests/cpp/m4/libtool.m4 +++ /dev/null @@ -1,7360 +0,0 @@ -# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- -# -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, -# 2006, 2007, 2008 Free Software Foundation, Inc. 
-# Written by Gordon Matzigkeit, 1996 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -m4_define([_LT_COPYING], [dnl -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, -# 2006, 2007, 2008 Free Software Foundation, Inc. -# Written by Gordon Matzigkeit, 1996 -# -# This file is part of GNU Libtool. -# -# GNU Libtool is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 2 of -# the License, or (at your option) any later version. -# -# As a special exception to the GNU General Public License, -# if you distribute this file as part of a program or library that -# is built using GNU Libtool, you may include this file under the -# same distribution terms that you use for the rest of that program. -# -# GNU Libtool is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with GNU Libtool; see the file COPYING. If not, a copy -# can be downloaded from http://www.gnu.org/licenses/gpl.html, or -# obtained by writing to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -]) - -# serial 56 LT_INIT - - -# LT_PREREQ(VERSION) -# ------------------ -# Complain and exit if this libtool version is less that VERSION. 
-m4_defun([LT_PREREQ], -[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, - [m4_default([$3], - [m4_fatal([Libtool version $1 or higher is required], - 63)])], - [$2])]) - - -# _LT_CHECK_BUILDDIR -# ------------------ -# Complain if the absolute build directory name contains unusual characters -m4_defun([_LT_CHECK_BUILDDIR], -[case `pwd` in - *\ * | *\ *) - AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; -esac -]) - - -# LT_INIT([OPTIONS]) -# ------------------ -AC_DEFUN([LT_INIT], -[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT -AC_BEFORE([$0], [LT_LANG])dnl -AC_BEFORE([$0], [LT_OUTPUT])dnl -AC_BEFORE([$0], [LTDL_INIT])dnl -m4_require([_LT_CHECK_BUILDDIR])dnl - -dnl Autoconf doesn't catch unexpanded LT_ macros by default: -m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl -m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl -dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 -dnl unless we require an AC_DEFUNed macro: -AC_REQUIRE([LTOPTIONS_VERSION])dnl -AC_REQUIRE([LTSUGAR_VERSION])dnl -AC_REQUIRE([LTVERSION_VERSION])dnl -AC_REQUIRE([LTOBSOLETE_VERSION])dnl -m4_require([_LT_PROG_LTMAIN])dnl - -dnl Parse OPTIONS -_LT_SET_OPTIONS([$0], [$1]) - -# This can be used to rebuild libtool when needed -LIBTOOL_DEPS="$ltmain" - -# Always use our own libtool. -LIBTOOL='$(SHELL) $(top_builddir)/libtool' -AC_SUBST(LIBTOOL)dnl - -_LT_SETUP - -# Only expand once: -m4_define([LT_INIT]) -])# LT_INIT - -# Old names: -AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) -AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_PROG_LIBTOOL], []) -dnl AC_DEFUN([AM_PROG_LIBTOOL], []) - - -# _LT_CC_BASENAME(CC) -# ------------------- -# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
-m4_defun([_LT_CC_BASENAME], -[for cc_temp in $1""; do - case $cc_temp in - compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; - distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`$ECHO "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` -]) - - -# _LT_FILEUTILS_DEFAULTS -# ---------------------- -# It is okay to use these file commands and assume they have been set -# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. -m4_defun([_LT_FILEUTILS_DEFAULTS], -[: ${CP="cp -f"} -: ${MV="mv -f"} -: ${RM="rm -f"} -])# _LT_FILEUTILS_DEFAULTS - - -# _LT_SETUP -# --------- -m4_defun([_LT_SETUP], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_CANONICAL_BUILD])dnl -_LT_DECL([], [host_alias], [0], [The host system])dnl -_LT_DECL([], [host], [0])dnl -_LT_DECL([], [host_os], [0])dnl -dnl -_LT_DECL([], [build_alias], [0], [The build system])dnl -_LT_DECL([], [build], [0])dnl -_LT_DECL([], [build_os], [0])dnl -dnl -AC_REQUIRE([AC_PROG_CC])dnl -AC_REQUIRE([LT_PATH_LD])dnl -AC_REQUIRE([LT_PATH_NM])dnl -dnl -AC_REQUIRE([AC_PROG_LN_S])dnl -test -z "$LN_S" && LN_S="ln -s" -_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl -dnl -AC_REQUIRE([LT_CMD_MAX_LEN])dnl -_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl -_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl -dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_CHECK_SHELL_FEATURES])dnl -m4_require([_LT_CMD_RELOAD])dnl -m4_require([_LT_CHECK_MAGIC_METHOD])dnl -m4_require([_LT_CMD_OLD_ARCHIVE])dnl -m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl - -_LT_CONFIG_LIBTOOL_INIT([ -# See if we are running on zsh, and set the options which allow our -# commands through without removal of \ escapes INIT. 
-if test -n "\${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST -fi -]) -if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST -fi - -_LT_CHECK_OBJDIR - -m4_require([_LT_TAG_COMPILER])dnl -_LT_PROG_ECHO_BACKSLASH - -case $host_os in -aix3*) - # AIX sometimes has problems with the GCC collect2 program. For some - # reason, if we set the COLLECT_NAMES environment variable, the problems - # vanish in a puff of smoke. - if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES - fi - ;; -esac - -# Sed substitution that helps us do robust quoting. It backslashifies -# metacharacters that are still active within double-quoted strings. -sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' - -# Same as above, but do not quote variable references. -double_quote_subst='s/\([["`\\]]\)/\\\1/g' - -# Sed substitution to delay expansion of an escaped shell variable in a -# double_quote_subst'ed string. -delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' - -# Sed substitution to delay expansion of an escaped single quote. -delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' - -# Sed substitution to avoid accidental globbing in evaled expressions -no_glob_subst='s/\*/\\\*/g' - -# Global variables: -ofile=libtool -can_build_shared=yes - -# All known linkers require a `.a' archive for static linking (except MSVC, -# which needs '.lib'). 
-libext=a - -with_gnu_ld="$lt_cv_prog_gnu_ld" - -old_CC="$CC" -old_CFLAGS="$CFLAGS" - -# Set sane defaults for various variables -test -z "$CC" && CC=cc -test -z "$LTCC" && LTCC=$CC -test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS -test -z "$LD" && LD=ld -test -z "$ac_objext" && ac_objext=o - -_LT_CC_BASENAME([$compiler]) - -# Only perform the check for file, if the check method requires it -test -z "$MAGIC_CMD" && MAGIC_CMD=file -case $deplibs_check_method in -file_magic*) - if test "$file_magic_cmd" = '$MAGIC_CMD'; then - _LT_PATH_MAGIC - fi - ;; -esac - -# Use C for the default configuration in the libtool script -LT_SUPPORTED_TAG([CC]) -_LT_LANG_C_CONFIG -_LT_LANG_DEFAULT_CONFIG -_LT_CONFIG_COMMANDS -])# _LT_SETUP - - -# _LT_PROG_LTMAIN -# --------------- -# Note that this code is called both from `configure', and `config.status' -# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, -# `config.status' has no value for ac_aux_dir unless we are using Automake, -# so we pass a copy along to make sure it has a sensible value anyway. -m4_defun([_LT_PROG_LTMAIN], -[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl -_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) -ltmain="$ac_aux_dir/ltmain.sh" -])# _LT_PROG_LTMAIN - - -## ------------------------------------- ## -## Accumulate code for creating libtool. ## -## ------------------------------------- ## - -# So that we can recreate a full libtool script including additional -# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS -# in macros and then make a single call at the end using the `libtool' -# label. - - -# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) -# ---------------------------------------- -# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. -m4_define([_LT_CONFIG_LIBTOOL_INIT], -[m4_ifval([$1], - [m4_append([_LT_OUTPUT_LIBTOOL_INIT], - [$1 -])])]) - -# Initialize. 
-m4_define([_LT_OUTPUT_LIBTOOL_INIT]) - - -# _LT_CONFIG_LIBTOOL([COMMANDS]) -# ------------------------------ -# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. -m4_define([_LT_CONFIG_LIBTOOL], -[m4_ifval([$1], - [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], - [$1 -])])]) - -# Initialize. -m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) - - -# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) -# ----------------------------------------------------- -m4_defun([_LT_CONFIG_SAVE_COMMANDS], -[_LT_CONFIG_LIBTOOL([$1]) -_LT_CONFIG_LIBTOOL_INIT([$2]) -]) - - -# _LT_FORMAT_COMMENT([COMMENT]) -# ----------------------------- -# Add leading comment marks to the start of each line, and a trailing -# full-stop to the whole comment if one is not present already. -m4_define([_LT_FORMAT_COMMENT], -[m4_ifval([$1], [ -m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], - [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) -)]) - - - -## ------------------------ ## -## FIXME: Eliminate VARNAME ## -## ------------------------ ## - - -# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) -# ------------------------------------------------------------------- -# CONFIGNAME is the name given to the value in the libtool script. -# VARNAME is the (base) name used in the configure script. -# VALUE may be 0, 1 or 2 for a computed quote escaped value based on -# VARNAME. Any other value will be used directly. 
-m4_define([_LT_DECL], -[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], - [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], - [m4_ifval([$1], [$1], [$2])]) - lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) - m4_ifval([$4], - [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) - lt_dict_add_subkey([lt_decl_dict], [$2], - [tagged?], [m4_ifval([$5], [yes], [no])])]) -]) - - -# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) -# -------------------------------------------------------- -m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) - - -# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) -# ------------------------------------------------ -m4_define([lt_decl_tag_varnames], -[_lt_decl_filter([tagged?], [yes], $@)]) - - -# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) -# --------------------------------------------------------- -m4_define([_lt_decl_filter], -[m4_case([$#], - [0], [m4_fatal([$0: too few arguments: $#])], - [1], [m4_fatal([$0: too few arguments: $#: $1])], - [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], - [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], - [lt_dict_filter([lt_decl_dict], $@)])[]dnl -]) - - -# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) -# -------------------------------------------------- -m4_define([lt_decl_quote_varnames], -[_lt_decl_filter([value], [1], $@)]) - - -# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) -# --------------------------------------------------- -m4_define([lt_decl_dquote_varnames], -[_lt_decl_filter([value], [2], $@)]) - - -# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) -# --------------------------------------------------- -m4_define([lt_decl_varnames_tagged], -[m4_assert([$# <= 2])dnl -_$0(m4_quote(m4_default([$1], [[, ]])), - m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), - m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) 
-m4_define([_lt_decl_varnames_tagged], -[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) - - -# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) -# ------------------------------------------------ -m4_define([lt_decl_all_varnames], -[_$0(m4_quote(m4_default([$1], [[, ]])), - m4_if([$2], [], - m4_quote(lt_decl_varnames), - m4_quote(m4_shift($@))))[]dnl -]) -m4_define([_lt_decl_all_varnames], -[lt_join($@, lt_decl_varnames_tagged([$1], - lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl -]) - - -# _LT_CONFIG_STATUS_DECLARE([VARNAME]) -# ------------------------------------ -# Quote a variable value, and forward it to `config.status' so that its -# declaration there will have the same value as in `configure'. VARNAME -# must have a single quote delimited value for this to work. -m4_define([_LT_CONFIG_STATUS_DECLARE], -[$1='`$ECHO "X$][$1" | $Xsed -e "$delay_single_quote_subst"`']) - - -# _LT_CONFIG_STATUS_DECLARATIONS -# ------------------------------ -# We delimit libtool config variables with single quotes, so when -# we write them to config.status, we have to be sure to quote all -# embedded single quotes properly. In configure, this macro expands -# each variable declared with _LT_DECL (and _LT_TAGDECL) into: -# -# ='`$ECHO "X$" | $Xsed -e "$delay_single_quote_subst"`' -m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], -[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), - [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) - - -# _LT_LIBTOOL_TAGS -# ---------------- -# Output comment and list of tags supported by the script -m4_defun([_LT_LIBTOOL_TAGS], -[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl -available_tags="_LT_TAGS"dnl -]) - - -# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) -# ----------------------------------- -# Extract the dictionary values for VARNAME (optionally with TAG) and -# expand to a commented shell variable setting: -# -# # Some comment about what VAR is for. 
-# visible_name=$lt_internal_name -m4_define([_LT_LIBTOOL_DECLARE], -[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], - [description])))[]dnl -m4_pushdef([_libtool_name], - m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl -m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), - [0], [_libtool_name=[$]$1], - [1], [_libtool_name=$lt_[]$1], - [2], [_libtool_name=$lt_[]$1], - [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl -m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl -]) - - -# _LT_LIBTOOL_CONFIG_VARS -# ----------------------- -# Produce commented declarations of non-tagged libtool config variables -# suitable for insertion in the LIBTOOL CONFIG section of the `libtool' -# script. Tagged libtool config variables (even for the LIBTOOL CONFIG -# section) are produced by _LT_LIBTOOL_TAG_VARS. -m4_defun([_LT_LIBTOOL_CONFIG_VARS], -[m4_foreach([_lt_var], - m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), - [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) - - -# _LT_LIBTOOL_TAG_VARS(TAG) -# ------------------------- -m4_define([_LT_LIBTOOL_TAG_VARS], -[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), - [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) - - -# _LT_TAGVAR(VARNAME, [TAGNAME]) -# ------------------------------ -m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) - - -# _LT_CONFIG_COMMANDS -# ------------------- -# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of -# variables for single and double quote escaping we saved from calls -# to _LT_DECL, we can put quote escaped variables declarations -# into `config.status', and then the shell code to quote escape them in -# for loops in `config.status'. Finally, any additional code accumulated -# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. 
-m4_defun([_LT_CONFIG_COMMANDS], -[AC_PROVIDE_IFELSE([LT_OUTPUT], - dnl If the libtool generation code has been placed in $CONFIG_LT, - dnl instead of duplicating it all over again into config.status, - dnl then we will have config.status run $CONFIG_LT later, so it - dnl needs to know what name is stored there: - [AC_CONFIG_COMMANDS([libtool], - [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], - dnl If the libtool generation code is destined for config.status, - dnl expand the accumulated commands and init code now: - [AC_CONFIG_COMMANDS([libtool], - [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) -])#_LT_CONFIG_COMMANDS - - -# Initialize. -m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], -[ - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -sed_quote_subst='$sed_quote_subst' -double_quote_subst='$double_quote_subst' -delay_variable_subst='$delay_variable_subst' -_LT_CONFIG_STATUS_DECLARATIONS -LTCC='$LTCC' -LTCFLAGS='$LTCFLAGS' -compiler='$compiler_DEFAULT' - -# Quote evaled strings. -for var in lt_decl_all_varnames([[ \ -]], lt_decl_quote_varnames); do - case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in - *[[\\\\\\\`\\"\\\$]]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" - ;; - *) - eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" - ;; - esac -done - -# Double-quote double-evaled strings. -for var in lt_decl_all_varnames([[ \ -]], lt_decl_dquote_varnames); do - case \`eval \\\\\$ECHO "X\\\\\$\$var"\` in - *[[\\\\\\\`\\"\\\$]]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"X\\\$\$var\\" | \\\$Xsed -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" - ;; - *) - eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" - ;; - esac -done - -# Fix-up fallback echo if it was mangled by the above quoting rules. 
-case \$lt_ECHO in -*'\\\[$]0 --fallback-echo"')dnl " - lt_ECHO=\`\$ECHO "X\$lt_ECHO" | \$Xsed -e 's/\\\\\\\\\\\\\\\[$]0 --fallback-echo"\[$]/\[$]0 --fallback-echo"/'\` - ;; -esac - -_LT_OUTPUT_LIBTOOL_INIT -]) - - -# LT_OUTPUT -# --------- -# This macro allows early generation of the libtool script (before -# AC_OUTPUT is called), incase it is used in configure for compilation -# tests. -AC_DEFUN([LT_OUTPUT], -[: ${CONFIG_LT=./config.lt} -AC_MSG_NOTICE([creating $CONFIG_LT]) -cat >"$CONFIG_LT" <<_LTEOF -#! $SHELL -# Generated by $as_me. -# Run this file to recreate a libtool stub with the current configuration. - -lt_cl_silent=false -SHELL=\${CONFIG_SHELL-$SHELL} -_LTEOF - -cat >>"$CONFIG_LT" <<\_LTEOF -AS_SHELL_SANITIZE -_AS_PREPARE - -exec AS_MESSAGE_FD>&1 -exec AS_MESSAGE_LOG_FD>>config.log -{ - echo - AS_BOX([Running $as_me.]) -} >&AS_MESSAGE_LOG_FD - -lt_cl_help="\ -\`$as_me' creates a local libtool stub from the current configuration, -for use in further configure time tests before the real libtool is -generated. - -Usage: $[0] [[OPTIONS]] - - -h, --help print this help, then exit - -V, --version print version number, then exit - -q, --quiet do not print progress messages - -d, --debug don't remove temporary files - -Report bugs to ." - -lt_cl_version="\ -m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl -m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) -configured by $[0], generated by m4_PACKAGE_STRING. - -Copyright (C) 2008 Free Software Foundation, Inc. -This config.lt script is free software; the Free Software Foundation -gives unlimited permision to copy, distribute and modify it." 
- -while test $[#] != 0 -do - case $[1] in - --version | --v* | -V ) - echo "$lt_cl_version"; exit 0 ;; - --help | --h* | -h ) - echo "$lt_cl_help"; exit 0 ;; - --debug | --d* | -d ) - debug=: ;; - --quiet | --q* | --silent | --s* | -q ) - lt_cl_silent=: ;; - - -*) AC_MSG_ERROR([unrecognized option: $[1] -Try \`$[0] --help' for more information.]) ;; - - *) AC_MSG_ERROR([unrecognized argument: $[1] -Try \`$[0] --help' for more information.]) ;; - esac - shift -done - -if $lt_cl_silent; then - exec AS_MESSAGE_FD>/dev/null -fi -_LTEOF - -cat >>"$CONFIG_LT" <<_LTEOF -_LT_OUTPUT_LIBTOOL_COMMANDS_INIT -_LTEOF - -cat >>"$CONFIG_LT" <<\_LTEOF -AC_MSG_NOTICE([creating $ofile]) -_LT_OUTPUT_LIBTOOL_COMMANDS -AS_EXIT(0) -_LTEOF -chmod +x "$CONFIG_LT" - -# configure is writing to config.log, but config.lt does its own redirection, -# appending to config.log, which fails on DOS, as config.log is still kept -# open by configure. Here we exec the FD to /dev/null, effectively closing -# config.log, so it can be properly (re)opened and appended to by config.lt. -if test "$no_create" != yes; then - lt_cl_success=: - test "$silent" = yes && - lt_config_lt_args="$lt_config_lt_args --quiet" - exec AS_MESSAGE_LOG_FD>/dev/null - $SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false - exec AS_MESSAGE_LOG_FD>>config.log - $lt_cl_success || AS_EXIT(1) -fi -])# LT_OUTPUT - - -# _LT_CONFIG(TAG) -# --------------- -# If TAG is the built-in tag, create an initial libtool script with a -# default configuration from the untagged config vars. Otherwise add code -# to config.status for appending the configuration named by TAG from the -# matching tagged config vars. -m4_defun([_LT_CONFIG], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -_LT_CONFIG_SAVE_COMMANDS([ - m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl - m4_if(_LT_TAG, [C], [ - # See if we are running on zsh, and set the options which allow our - # commands through without removal of \ escapes. 
- if test -n "${ZSH_VERSION+set}" ; then - setopt NO_GLOB_SUBST - fi - - cfgfile="${ofile}T" - trap "$RM \"$cfgfile\"; exit 1" 1 2 15 - $RM "$cfgfile" - - cat <<_LT_EOF >> "$cfgfile" -#! $SHELL - -# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. -# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION -# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: -# NOTE: Changes made to this file will be lost: look at ltmain.sh. -# -_LT_COPYING -_LT_LIBTOOL_TAGS - -# ### BEGIN LIBTOOL CONFIG -_LT_LIBTOOL_CONFIG_VARS -_LT_LIBTOOL_TAG_VARS -# ### END LIBTOOL CONFIG - -_LT_EOF - - case $host_os in - aix3*) - cat <<\_LT_EOF >> "$cfgfile" -# AIX sometimes has problems with the GCC collect2 program. For some -# reason, if we set the COLLECT_NAMES environment variable, the problems -# vanish in a puff of smoke. -if test "X${COLLECT_NAMES+set}" != Xset; then - COLLECT_NAMES= - export COLLECT_NAMES -fi -_LT_EOF - ;; - esac - - _LT_PROG_LTMAIN - - # We use sed instead of cat because bash on DJGPP gets confused if - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? - sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ - || (rm -f "$cfgfile"; exit 1) - - _LT_PROG_XSI_SHELLFNS - - sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ - || (rm -f "$cfgfile"; exit 1) - - mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" -], -[cat <<_LT_EOF >> "$ofile" - -dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded -dnl in a comment (ie after a #). 
-# ### BEGIN LIBTOOL TAG CONFIG: $1 -_LT_LIBTOOL_TAG_VARS(_LT_TAG) -# ### END LIBTOOL TAG CONFIG: $1 -_LT_EOF -])dnl /m4_if -], -[m4_if([$1], [], [ - PACKAGE='$PACKAGE' - VERSION='$VERSION' - TIMESTAMP='$TIMESTAMP' - RM='$RM' - ofile='$ofile'], []) -])dnl /_LT_CONFIG_SAVE_COMMANDS -])# _LT_CONFIG - - -# LT_SUPPORTED_TAG(TAG) -# --------------------- -# Trace this macro to discover what tags are supported by the libtool -# --tag option, using: -# autoconf --trace 'LT_SUPPORTED_TAG:$1' -AC_DEFUN([LT_SUPPORTED_TAG], []) - - -# C support is built-in for now -m4_define([_LT_LANG_C_enabled], []) -m4_define([_LT_TAGS], []) - - -# LT_LANG(LANG) -# ------------- -# Enable libtool support for the given language if not already enabled. -AC_DEFUN([LT_LANG], -[AC_BEFORE([$0], [LT_OUTPUT])dnl -m4_case([$1], - [C], [_LT_LANG(C)], - [C++], [_LT_LANG(CXX)], - [Java], [_LT_LANG(GCJ)], - [Fortran 77], [_LT_LANG(F77)], - [Fortran], [_LT_LANG(FC)], - [Windows Resource], [_LT_LANG(RC)], - [m4_ifdef([_LT_LANG_]$1[_CONFIG], - [_LT_LANG($1)], - [m4_fatal([$0: unsupported language: "$1"])])])dnl -])# LT_LANG - - -# _LT_LANG(LANGNAME) -# ------------------ -m4_defun([_LT_LANG], -[m4_ifdef([_LT_LANG_]$1[_enabled], [], - [LT_SUPPORTED_TAG([$1])dnl - m4_append([_LT_TAGS], [$1 ])dnl - m4_define([_LT_LANG_]$1[_enabled], [])dnl - _LT_LANG_$1_CONFIG($1)])dnl -])# _LT_LANG - - -# _LT_LANG_DEFAULT_CONFIG -# ----------------------- -m4_defun([_LT_LANG_DEFAULT_CONFIG], -[AC_PROVIDE_IFELSE([AC_PROG_CXX], - [LT_LANG(CXX)], - [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) - -AC_PROVIDE_IFELSE([AC_PROG_F77], - [LT_LANG(F77)], - [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) - -AC_PROVIDE_IFELSE([AC_PROG_FC], - [LT_LANG(FC)], - [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) - -dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal -dnl pulling things in needlessly. 
-AC_PROVIDE_IFELSE([AC_PROG_GCJ], - [LT_LANG(GCJ)], - [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], - [LT_LANG(GCJ)], - [AC_PROVIDE_IFELSE([LT_PROG_GCJ], - [LT_LANG(GCJ)], - [m4_ifdef([AC_PROG_GCJ], - [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) - m4_ifdef([A][M_PROG_GCJ], - [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) - m4_ifdef([LT_PROG_GCJ], - [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) - -AC_PROVIDE_IFELSE([LT_PROG_RC], - [LT_LANG(RC)], - [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) -])# _LT_LANG_DEFAULT_CONFIG - -# Obsolete macros: -AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) -AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) -AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) -AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_CXX], []) -dnl AC_DEFUN([AC_LIBTOOL_F77], []) -dnl AC_DEFUN([AC_LIBTOOL_FC], []) -dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) - - -# _LT_TAG_COMPILER -# ---------------- -m4_defun([_LT_TAG_COMPILER], -[AC_REQUIRE([AC_PROG_CC])dnl - -_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl -_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl -_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl -_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl - -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} - -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} - -# Allow CC to be a program name with arguments. -compiler=$CC -])# _LT_TAG_COMPILER - - -# _LT_COMPILER_BOILERPLATE -# ------------------------ -# Check for compiler boilerplate output or warnings with -# the simple compiler test code. 
-m4_defun([_LT_COMPILER_BOILERPLATE], -[m4_require([_LT_DECL_SED])dnl -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$RM conftest* -])# _LT_COMPILER_BOILERPLATE - - -# _LT_LINKER_BOILERPLATE -# ---------------------- -# Check for linker boilerplate output or warnings with -# the simple link test code. -m4_defun([_LT_LINKER_BOILERPLATE], -[m4_require([_LT_DECL_SED])dnl -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$RM -r conftest* -])# _LT_LINKER_BOILERPLATE - -# _LT_REQUIRED_DARWIN_CHECKS -# ------------------------- -m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ - case $host_os in - rhapsody* | darwin*) - AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) - AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) - AC_CHECK_TOOL([LIPO], [lipo], [:]) - AC_CHECK_TOOL([OTOOL], [otool], [:]) - AC_CHECK_TOOL([OTOOL64], [otool64], [:]) - _LT_DECL([], [DSYMUTIL], [1], - [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) - _LT_DECL([], [NMEDIT], [1], - [Tool to change global to local symbols on Mac OS X]) - _LT_DECL([], [LIPO], [1], - [Tool to manipulate fat objects and archives on Mac OS X]) - _LT_DECL([], [OTOOL], [1], - [ldd/readelf like tool for Mach-O binaries on Mac OS X]) - _LT_DECL([], [OTOOL64], [1], - [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) - - AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], - [lt_cv_apple_cc_single_mod=no - if test -z "${LT_MULTI_MODULE}"; then - # By default we will add the -single_module flag. You can override - # by either setting the environment variable LT_MULTI_MODULE - # non-empty at configure time, or by adding -multi_module to the - # link flags. 
- rm -rf libconftest.dylib* - echo "int foo(void){return 1;}" > conftest.c - echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ --dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD - $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ - -dynamiclib -Wl,-single_module conftest.c 2>conftest.err - _lt_result=$? - if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then - lt_cv_apple_cc_single_mod=yes - else - cat conftest.err >&AS_MESSAGE_LOG_FD - fi - rm -rf libconftest.dylib* - rm -f conftest.* - fi]) - AC_CACHE_CHECK([for -exported_symbols_list linker flag], - [lt_cv_ld_exported_symbols_list], - [lt_cv_ld_exported_symbols_list=no - save_LDFLAGS=$LDFLAGS - echo "_main" > conftest.sym - LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" - AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], - [lt_cv_ld_exported_symbols_list=yes], - [lt_cv_ld_exported_symbols_list=no]) - LDFLAGS="$save_LDFLAGS" - ]) - case $host_os in - rhapsody* | darwin1.[[012]]) - _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; - darwin1.*) - _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; - darwin*) # darwin 5.x on - # if running on 10.5 or later, the deployment target defaults - # to the OS version, if on x86, and 10.4, the deployment - # target defaults to 10.4. Don't you love it? 
- case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in - 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) - _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; - 10.[[012]]*) - _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; - 10.*) - _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; - esac - ;; - esac - if test "$lt_cv_apple_cc_single_mod" = "yes"; then - _lt_dar_single_mod='$single_module' - fi - if test "$lt_cv_ld_exported_symbols_list" = "yes"; then - _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' - else - _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - if test "$DSYMUTIL" != ":"; then - _lt_dsymutil='~$DSYMUTIL $lib || :' - else - _lt_dsymutil= - fi - ;; - esac -]) - - -# _LT_DARWIN_LINKER_FEATURES -# -------------------------- -# Checks for linker and compiler features on darwin -m4_defun([_LT_DARWIN_LINKER_FEATURES], -[ - m4_require([_LT_REQUIRED_DARWIN_CHECKS]) - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_automatic, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_TAGVAR(whole_archive_flag_spec, $1)='' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" - case $cc_basename in - ifort*) _lt_dar_can_shared=yes ;; - *) _lt_dar_can_shared=$GCC ;; - esac - if test "$_lt_dar_can_shared" = "yes"; then - output_verbose_link_cmd=echo - _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" - _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" - _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o 
\$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" - _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" - m4_if([$1], [CXX], -[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then - _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" - _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" - fi -],[]) - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi -]) - -# _LT_SYS_MODULE_PATH_AIX -# ----------------------- -# Links a minimal program and checks the executable -# for the system default hardcoded library path. In most cases, -# this is /usr/lib:/lib, but when the MPI compilers are used -# the location of the communication and MPI libs are included too. -# If we don't find anything, use the default library path according -# to the aix ld manual. -m4_defun([_LT_SYS_MODULE_PATH_AIX], -[m4_require([_LT_DECL_SED])dnl -AC_LINK_IFELSE(AC_LANG_PROGRAM,[ -lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\(.*\)$/\1/ - p - } - }' -aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -# Check for a 64-bit object if we didn't find anything. 
-if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -fi],[]) -if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -])# _LT_SYS_MODULE_PATH_AIX - - -# _LT_SHELL_INIT(ARG) -# ------------------- -m4_define([_LT_SHELL_INIT], -[ifdef([AC_DIVERSION_NOTICE], - [AC_DIVERT_PUSH(AC_DIVERSION_NOTICE)], - [AC_DIVERT_PUSH(NOTICE)]) -$1 -AC_DIVERT_POP -])# _LT_SHELL_INIT - - -# _LT_PROG_ECHO_BACKSLASH -# ----------------------- -# Add some code to the start of the generated configure script which -# will find an echo command which doesn't interpret backslashes. -m4_defun([_LT_PROG_ECHO_BACKSLASH], -[_LT_SHELL_INIT([ -# Check that we are running under the correct shell. -SHELL=${CONFIG_SHELL-/bin/sh} - -case X$lt_ECHO in -X*--fallback-echo) - # Remove one level of quotation (which was required for Make). - ECHO=`echo "$lt_ECHO" | sed 's,\\\\\[$]\\[$]0,'[$]0','` - ;; -esac - -ECHO=${lt_ECHO-echo} -if test "X[$]1" = X--no-reexec; then - # Discard the --no-reexec flag, and continue. - shift -elif test "X[$]1" = X--fallback-echo; then - # Avoid inline document here, it may be left over - : -elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' ; then - # Yippee, $ECHO works! - : -else - # Restart under the correct shell. - exec $SHELL "[$]0" --no-reexec ${1+"[$]@"} -fi - -if test "X[$]1" = X--fallback-echo; then - # used as fallback echo - shift - cat <<_LT_EOF -[$]* -_LT_EOF - exit 0 -fi - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -if test -z "$lt_ECHO"; then - if test "X${echo_test_string+set}" != Xset; then - # find a string as large as possible, as long as the shell can cope with it - for cmd in 'sed 50q "[$]0"' 'sed 20q "[$]0"' 'sed 10q "[$]0"' 'sed 2q "[$]0"' 'echo test'; do - # expected sizes: less than 2Kb, 1Kb, 512 bytes, 16 bytes, ... 
- if { echo_test_string=`eval $cmd`; } 2>/dev/null && - { test "X$echo_test_string" = "X$echo_test_string"; } 2>/dev/null - then - break - fi - done - fi - - if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && - echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - : - else - # The Solaris, AIX, and Digital Unix default echo programs unquote - # backslashes. This makes it impossible to quote backslashes using - # echo "$something" | sed 's/\\/\\\\/g' - # - # So, first we look for a working echo in the user's PATH. - - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for dir in $PATH /usr/ucb; do - IFS="$lt_save_ifs" - if (test -f $dir/echo || test -f $dir/echo$ac_exeext) && - test "X`($dir/echo '\t') 2>/dev/null`" = 'X\t' && - echo_testing_string=`($dir/echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - ECHO="$dir/echo" - break - fi - done - IFS="$lt_save_ifs" - - if test "X$ECHO" = Xecho; then - # We didn't find a better echo, so look for alternatives. - if test "X`{ print -r '\t'; } 2>/dev/null`" = 'X\t' && - echo_testing_string=`{ print -r "$echo_test_string"; } 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - # This shell has a builtin print -r that does the trick. - ECHO='print -r' - elif { test -f /bin/ksh || test -f /bin/ksh$ac_exeext; } && - test "X$CONFIG_SHELL" != X/bin/ksh; then - # If we have ksh, try running configure again with it. - ORIGINAL_CONFIG_SHELL=${CONFIG_SHELL-/bin/sh} - export ORIGINAL_CONFIG_SHELL - CONFIG_SHELL=/bin/ksh - export CONFIG_SHELL - exec $CONFIG_SHELL "[$]0" --no-reexec ${1+"[$]@"} - else - # Try using printf. 
- ECHO='printf %s\n' - if test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t' && - echo_testing_string=`{ $ECHO "$echo_test_string"; } 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - # Cool, printf works - : - elif echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && - test "X$echo_testing_string" = 'X\t' && - echo_testing_string=`($ORIGINAL_CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - CONFIG_SHELL=$ORIGINAL_CONFIG_SHELL - export CONFIG_SHELL - SHELL="$CONFIG_SHELL" - export SHELL - ECHO="$CONFIG_SHELL [$]0 --fallback-echo" - elif echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo '\t') 2>/dev/null` && - test "X$echo_testing_string" = 'X\t' && - echo_testing_string=`($CONFIG_SHELL "[$]0" --fallback-echo "$echo_test_string") 2>/dev/null` && - test "X$echo_testing_string" = "X$echo_test_string"; then - ECHO="$CONFIG_SHELL [$]0 --fallback-echo" - else - # maybe with a smaller string... - prev=: - - for cmd in 'echo test' 'sed 2q "[$]0"' 'sed 10q "[$]0"' 'sed 20q "[$]0"' 'sed 50q "[$]0"'; do - if { test "X$echo_test_string" = "X`eval $cmd`"; } 2>/dev/null - then - break - fi - prev="$cmd" - done - - if test "$prev" != 'sed 50q "[$]0"'; then - echo_test_string=`eval $prev` - export echo_test_string - exec ${ORIGINAL_CONFIG_SHELL-${CONFIG_SHELL-/bin/sh}} "[$]0" ${1+"[$]@"} - else - # Oops. We lost completely, so just stick with echo. - ECHO=echo - fi - fi - fi - fi - fi -fi - -# Copy echo and quote the copy suitably for passing to libtool from -# the Makefile, instead of quoting the original, which is used later. 
-lt_ECHO=$ECHO -if test "X$lt_ECHO" = "X$CONFIG_SHELL [$]0 --fallback-echo"; then - lt_ECHO="$CONFIG_SHELL \\\$\[$]0 --fallback-echo" -fi - -AC_SUBST(lt_ECHO) -]) -_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) -_LT_DECL([], [ECHO], [1], - [An echo program that does not interpret backslashes]) -])# _LT_PROG_ECHO_BACKSLASH - - -# _LT_ENABLE_LOCK -# --------------- -m4_defun([_LT_ENABLE_LOCK], -[AC_ARG_ENABLE([libtool-lock], - [AS_HELP_STRING([--disable-libtool-lock], - [avoid locking (might break parallel builds)])]) -test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes - -# Some flags need to be propagated to the compiler or linker for good -# libtool support. -case $host in -ia64-*-hpux*) - # Find out which ABI we are using. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.$ac_objext` in - *ELF-32*) - HPUX_IA64_MODE="32" - ;; - *ELF-64*) - HPUX_IA64_MODE="64" - ;; - esac - fi - rm -rf conftest* - ;; -*-*-irix6*) - # Find out which ABI we are using. - echo '[#]line __oline__ "configure"' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - if test "$lt_cv_prog_gnu_ld" = yes; then - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -melf32bsmip" - ;; - *N32*) - LD="${LD-ld} -melf32bmipn32" - ;; - *64-bit*) - LD="${LD-ld} -melf64bmip" - ;; - esac - else - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -32" - ;; - *N32*) - LD="${LD-ld} -n32" - ;; - *64-bit*) - LD="${LD-ld} -64" - ;; - esac - fi - fi - rm -rf conftest* - ;; - -x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ -s390*-*linux*|s390*-*tpf*|sparc*-*linux*) - # Find out which ABI we are using. 
- echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.o` in - *32-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_i386_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_i386" - ;; - ppc64-*linux*|powerpc64-*linux*) - LD="${LD-ld} -m elf32ppclinux" - ;; - s390x-*linux*) - LD="${LD-ld} -m elf_s390" - ;; - sparc64-*linux*) - LD="${LD-ld} -m elf32_sparc" - ;; - esac - ;; - *64-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_x86_64_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_x86_64" - ;; - ppc*-*linux*|powerpc*-*linux*) - LD="${LD-ld} -m elf64ppc" - ;; - s390*-*linux*|s390*-*tpf*) - LD="${LD-ld} -m elf64_s390" - ;; - sparc*-*linux*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; - -*-*-sco3.2v5*) - # On SCO OpenServer 5, we need -belf to get full-featured binaries. - SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -belf" - AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, - [AC_LANG_PUSH(C) - AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) - AC_LANG_POP]) - if test x"$lt_cv_cc_needs_belf" != x"yes"; then - # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf - CFLAGS="$SAVE_CFLAGS" - fi - ;; -sparc*-*solaris*) - # Find out which ABI we are using. 
- echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.o` in - *64-bit*) - case $lt_cv_prog_gnu_ld in - yes*) LD="${LD-ld} -m elf64_sparc" ;; - *) - if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then - LD="${LD-ld} -64" - fi - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; -esac - -need_locks="$enable_libtool_lock" -])# _LT_ENABLE_LOCK - - -# _LT_CMD_OLD_ARCHIVE -# ------------------- -m4_defun([_LT_CMD_OLD_ARCHIVE], -[AC_CHECK_TOOL(AR, ar, false) -test -z "$AR" && AR=ar -test -z "$AR_FLAGS" && AR_FLAGS=cru -_LT_DECL([], [AR], [1], [The archiver]) -_LT_DECL([], [AR_FLAGS], [1]) - -AC_CHECK_TOOL(STRIP, strip, :) -test -z "$STRIP" && STRIP=: -_LT_DECL([], [STRIP], [1], [A symbol stripping program]) - -AC_CHECK_TOOL(RANLIB, ranlib, :) -test -z "$RANLIB" && RANLIB=: -_LT_DECL([], [RANLIB], [1], - [Commands used to install an old-style archive]) - -# Determine commands to create old-style static archives. -old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' -old_postinstall_cmds='chmod 644 $oldlib' -old_postuninstall_cmds= - -if test -n "$RANLIB"; then - case $host_os in - openbsd*) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib" - ;; - *) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib" - ;; - esac - old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib" -fi -_LT_DECL([], [old_postinstall_cmds], [2]) -_LT_DECL([], [old_postuninstall_cmds], [2]) -_LT_TAGDECL([], [old_archive_cmds], [2], - [Commands used to build an old-style archive]) -])# _LT_CMD_OLD_ARCHIVE - - -# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, -# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) -# ---------------------------------------------------------------- -# Check whether the given compiler option works -AC_DEFUN([_LT_COMPILER_OPTION], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_SED])dnl -AC_CACHE_CHECK([$1], [$2], - [$2=no - m4_if([$4], , [ac_outfile=conftest.$ac_objext], 
[ac_outfile=$4]) - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$3" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&AS_MESSAGE_LOG_FD - echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - $2=yes - fi - fi - $RM conftest* -]) - -if test x"[$]$2" = xyes; then - m4_if([$5], , :, [$5]) -else - m4_if([$6], , :, [$6]) -fi -])# _LT_COMPILER_OPTION - -# Old name: -AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) - - -# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, -# [ACTION-SUCCESS], [ACTION-FAILURE]) -# ---------------------------------------------------- -# Check whether the given linker option works -AC_DEFUN([_LT_LINKER_OPTION], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_SED])dnl -AC_CACHE_CHECK([$1], [$2], - [$2=no - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS $3" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. 
- cat conftest.err 1>&AS_MESSAGE_LOG_FD - $ECHO "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - $2=yes - fi - else - $2=yes - fi - fi - $RM -r conftest* - LDFLAGS="$save_LDFLAGS" -]) - -if test x"[$]$2" = xyes; then - m4_if([$4], , :, [$4]) -else - m4_if([$5], , :, [$5]) -fi -])# _LT_LINKER_OPTION - -# Old name: -AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) - - -# LT_CMD_MAX_LEN -#--------------- -AC_DEFUN([LT_CMD_MAX_LEN], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -# find the maximum length of command line arguments -AC_MSG_CHECKING([the maximum length of command line arguments]) -AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl - i=0 - teststring="ABCD" - - case $build_os in - msdosdjgpp*) - # On DJGPP, this test can blow up pretty badly due to problems in libc - # (any single argument exceeding 2000 bytes causes a buffer overrun - # during glob expansion). Even if it were fixed, the result of this - # check would be larger than it should be. - lt_cv_sys_max_cmd_len=12288; # 12K is about right - ;; - - gnu*) - # Under GNU Hurd, this test is not required because there is - # no limit to the length of command line arguments. - # Libtool will interpret -1 as no limit whatsoever - lt_cv_sys_max_cmd_len=-1; - ;; - - cygwin* | mingw* | cegcc*) - # On Win9x/ME, this test blows up -- it succeeds, but takes - # about 5 minutes as the teststring grows exponentially. - # Worse, since 9x/ME are not pre-emptively multitasking, - # you end up with a "frozen" computer, even though with patience - # the test eventually succeeds (with a max line length of 256k). - # Instead, let's just punt: use the minimum linelength reported by - # all of the supported platforms: 8192 (on NT/2K/XP). 
- lt_cv_sys_max_cmd_len=8192; - ;; - - amigaos*) - # On AmigaOS with pdksh, this test takes hours, literally. - # So we just punt and use a minimum line length of 8192. - lt_cv_sys_max_cmd_len=8192; - ;; - - netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) - # This has been around since 386BSD, at least. Likely further. - if test -x /sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` - elif test -x /usr/sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` - else - lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs - fi - # And add a safety zone - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - ;; - - interix*) - # We know the value 262144 and hardcode it with a safety zone (like BSD) - lt_cv_sys_max_cmd_len=196608 - ;; - - osf*) - # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure - # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not - # nice to cause kernel panics so lets avoid the loop below. - # First set a reasonable default. - lt_cv_sys_max_cmd_len=16384 - # - if test -x /sbin/sysconfig; then - case `/sbin/sysconfig -q proc exec_disable_arg_limit` in - *1*) lt_cv_sys_max_cmd_len=-1 ;; - esac - fi - ;; - sco3.2v5*) - lt_cv_sys_max_cmd_len=102400 - ;; - sysv5* | sco5v6* | sysv4.2uw2*) - kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` - if test -n "$kargmax"; then - lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` - else - lt_cv_sys_max_cmd_len=32768 - fi - ;; - *) - lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` - if test -n "$lt_cv_sys_max_cmd_len"; then - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - else - # Make teststring a little bigger before we do anything with it. - # a 1K string should be a reasonable start. 
- for i in 1 2 3 4 5 6 7 8 ; do - teststring=$teststring$teststring - done - SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} - # If test is not a shell built-in, we'll probably end up computing a - # maximum length that is only half of the actual maximum length, but - # we can't tell. - while { test "X"`$SHELL [$]0 --fallback-echo "X$teststring$teststring" 2>/dev/null` \ - = "XX$teststring$teststring"; } >/dev/null 2>&1 && - test $i != 17 # 1/2 MB should be enough - do - i=`expr $i + 1` - teststring=$teststring$teststring - done - # Only check the string length outside the loop. - lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` - teststring= - # Add a significant safety factor because C++ compilers can tack on - # massive amounts of additional arguments before passing them to the - # linker. It appears as though 1/2 is a usable value. - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` - fi - ;; - esac -]) -if test -n $lt_cv_sys_max_cmd_len ; then - AC_MSG_RESULT($lt_cv_sys_max_cmd_len) -else - AC_MSG_RESULT(none) -fi -max_cmd_len=$lt_cv_sys_max_cmd_len -_LT_DECL([], [max_cmd_len], [0], - [What is the maximum length of a command?]) -])# LT_CMD_MAX_LEN - -# Old name: -AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) - - -# _LT_HEADER_DLFCN -# ---------------- -m4_defun([_LT_HEADER_DLFCN], -[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl -])# _LT_HEADER_DLFCN - - -# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, -# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) -# ---------------------------------------------------------------- -m4_defun([_LT_TRY_DLOPEN_SELF], -[m4_require([_LT_HEADER_DLFCN])dnl -if test "$cross_compiling" = yes; then : - [$4] -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF -[#line __oline__ "configure" -#include "confdefs.h" - -#if HAVE_DLFCN_H 
-#include -#endif - -#include - -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif - -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. */ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif - -void fnord() { int i=42;} -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - /* dlclose (self); */ - } - else - puts (dlerror ()); - - return status; -}] -_LT_EOF - if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then - (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null - lt_status=$? 
- case x$lt_status in - x$lt_dlno_uscore) $1 ;; - x$lt_dlneed_uscore) $2 ;; - x$lt_dlunknown|x*) $3 ;; - esac - else : - # compilation failed - $3 - fi -fi -rm -fr conftest* -])# _LT_TRY_DLOPEN_SELF - - -# LT_SYS_DLOPEN_SELF -# ------------------ -AC_DEFUN([LT_SYS_DLOPEN_SELF], -[m4_require([_LT_HEADER_DLFCN])dnl -if test "x$enable_dlopen" != xyes; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown -else - lt_cv_dlopen=no - lt_cv_dlopen_libs= - - case $host_os in - beos*) - lt_cv_dlopen="load_add_on" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ;; - - mingw* | pw32* | cegcc*) - lt_cv_dlopen="LoadLibrary" - lt_cv_dlopen_libs= - ;; - - cygwin*) - lt_cv_dlopen="dlopen" - lt_cv_dlopen_libs= - ;; - - darwin*) - # if libdl is installed we need to link against it - AC_CHECK_LIB([dl], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ - lt_cv_dlopen="dyld" - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ]) - ;; - - *) - AC_CHECK_FUNC([shl_load], - [lt_cv_dlopen="shl_load"], - [AC_CHECK_LIB([dld], [shl_load], - [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], - [AC_CHECK_FUNC([dlopen], - [lt_cv_dlopen="dlopen"], - [AC_CHECK_LIB([dl], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], - [AC_CHECK_LIB([svld], [dlopen], - [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], - [AC_CHECK_LIB([dld], [dld_link], - [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) - ]) - ]) - ]) - ]) - ]) - ;; - esac - - if test "x$lt_cv_dlopen" != xno; then - enable_dlopen=yes - else - enable_dlopen=no - fi - - case $lt_cv_dlopen in - dlopen) - save_CPPFLAGS="$CPPFLAGS" - test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" - - save_LDFLAGS="$LDFLAGS" - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" - - save_LIBS="$LIBS" - LIBS="$lt_cv_dlopen_libs $LIBS" - - AC_CACHE_CHECK([whether a program can dlopen itself], - lt_cv_dlopen_self, [dnl - _LT_TRY_DLOPEN_SELF( - 
lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, - lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) - ]) - - if test "x$lt_cv_dlopen_self" = xyes; then - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" - AC_CACHE_CHECK([whether a statically linked program can dlopen itself], - lt_cv_dlopen_self_static, [dnl - _LT_TRY_DLOPEN_SELF( - lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, - lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) - ]) - fi - - CPPFLAGS="$save_CPPFLAGS" - LDFLAGS="$save_LDFLAGS" - LIBS="$save_LIBS" - ;; - esac - - case $lt_cv_dlopen_self in - yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; - *) enable_dlopen_self=unknown ;; - esac - - case $lt_cv_dlopen_self_static in - yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; - *) enable_dlopen_self_static=unknown ;; - esac -fi -_LT_DECL([dlopen_support], [enable_dlopen], [0], - [Whether dlopen is supported]) -_LT_DECL([dlopen_self], [enable_dlopen_self], [0], - [Whether dlopen of programs is supported]) -_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], - [Whether dlopen of statically linked programs is supported]) -])# LT_SYS_DLOPEN_SELF - -# Old name: -AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) - - -# _LT_COMPILER_C_O([TAGNAME]) -# --------------------------- -# Check to see if options -c and -o are simultaneously supported by compiler. -# This macro does not hard code the compiler like AC_PROG_CC_C_O. 
-m4_defun([_LT_COMPILER_C_O], -[m4_require([_LT_DECL_SED])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_TAG_COMPILER])dnl -AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], - [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], - [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no - $RM -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:__oline__: $lt_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&AS_MESSAGE_LOG_FD - echo "$as_me:__oline__: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $ECHO "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes - fi - fi - chmod u+w . 2>&AS_MESSAGE_LOG_FD - $RM conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files - $RM out/* && rmdir out - cd .. 
- $RM -r conftest - $RM conftest* -]) -_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], - [Does compiler simultaneously support -c and -o options?]) -])# _LT_COMPILER_C_O - - -# _LT_COMPILER_FILE_LOCKS([TAGNAME]) -# ---------------------------------- -# Check to see if we can do hard links to lock some files if needed -m4_defun([_LT_COMPILER_FILE_LOCKS], -[m4_require([_LT_ENABLE_LOCK])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -_LT_COMPILER_C_O([$1]) - -hard_links="nottested" -if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then - # do not overwrite the value of need_locks provided by the user - AC_MSG_CHECKING([if we can lock with hard links]) - hard_links=yes - $RM conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - AC_MSG_RESULT([$hard_links]) - if test "$hard_links" = no; then - AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) - need_locks=warn - fi -else - need_locks=no -fi -_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) -])# _LT_COMPILER_FILE_LOCKS - - -# _LT_CHECK_OBJDIR -# ---------------- -m4_defun([_LT_CHECK_OBJDIR], -[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], -[rm -f .libs 2>/dev/null -mkdir .libs 2>/dev/null -if test -d .libs; then - lt_cv_objdir=.libs -else - # MS-DOS does not allow filenames that begin with a dot. - lt_cv_objdir=_libs -fi -rmdir .libs 2>/dev/null]) -objdir=$lt_cv_objdir -_LT_DECL([], [objdir], [0], - [The name of the directory that contains temporary libtool files])dnl -m4_pattern_allow([LT_OBJDIR])dnl -AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", - [Define to the sub-directory in which libtool stores uninstalled libraries.]) -])# _LT_CHECK_OBJDIR - - -# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) -# -------------------------------------- -# Check hardcoding attributes. 
-m4_defun([_LT_LINKER_HARDCODE_LIBPATH], -[AC_MSG_CHECKING([how to hardcode library paths into programs]) -_LT_TAGVAR(hardcode_action, $1)= -if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || - test -n "$_LT_TAGVAR(runpath_var, $1)" || - test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then - - # We can hardcode non-existent directories. - if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && - test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then - # Linking always hardcodes the temporary library directory. - _LT_TAGVAR(hardcode_action, $1)=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - _LT_TAGVAR(hardcode_action, $1)=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. 
- _LT_TAGVAR(hardcode_action, $1)=unsupported -fi -AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) - -if test "$_LT_TAGVAR(hardcode_action, $1)" = relink || - test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then - # Fast installation is not supported - enable_fast_install=no -elif test "$shlibpath_overrides_runpath" = yes || - test "$enable_shared" = no; then - # Fast installation is not necessary - enable_fast_install=needless -fi -_LT_TAGDECL([], [hardcode_action], [0], - [How to hardcode a shared library path into an executable]) -])# _LT_LINKER_HARDCODE_LIBPATH - - -# _LT_CMD_STRIPLIB -# ---------------- -m4_defun([_LT_CMD_STRIPLIB], -[m4_require([_LT_DECL_EGREP]) -striplib= -old_striplib= -AC_MSG_CHECKING([whether stripping libraries is possible]) -if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - AC_MSG_RESULT([yes]) -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP" ; then - striplib="$STRIP -x" - old_striplib="$STRIP -S" - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - fi - ;; - *) - AC_MSG_RESULT([no]) - ;; - esac -fi -_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) -_LT_DECL([], [striplib], [1]) -])# _LT_CMD_STRIPLIB - - -# _LT_SYS_DYNAMIC_LINKER([TAG]) -# ----------------------------- -# PORTME Fill in your ld.so characteristics -m4_defun([_LT_SYS_DYNAMIC_LINKER], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -m4_require([_LT_DECL_EGREP])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_OBJDUMP])dnl -m4_require([_LT_DECL_SED])dnl -AC_MSG_CHECKING([dynamic linker characteristics]) -m4_if([$1], - [], [ -if test "$GCC" = yes; then - case $host_os in - darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; - *) lt_awk_arg="/^libraries:/" ;; - esac - lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED 
-e "s/^libraries://" -e "s,=/,/,g"` - if $ECHO "$lt_search_path_spec" | $GREP ';' >/dev/null ; then - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e 's/;/ /g'` - else - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - # Ok, now we have the path, separated by spaces, we can step through it - # and add multilib dir if necessary. - lt_tmp_lt_search_path_spec= - lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` - for lt_sys_path in $lt_search_path_spec; do - if test -d "$lt_sys_path/$lt_multi_os_dir"; then - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" - else - test -d "$lt_sys_path" && \ - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" - fi - done - lt_search_path_spec=`$ECHO $lt_tmp_lt_search_path_spec | awk ' -BEGIN {RS=" "; FS="/|\n";} { - lt_foo=""; - lt_count=0; - for (lt_i = NF; lt_i > 0; lt_i--) { - if ($lt_i != "" && $lt_i != ".") { - if ($lt_i == "..") { - lt_count++; - } else { - if (lt_count == 0) { - lt_foo="/" $lt_i lt_foo; - } else { - lt_count--; - } - } - } - } - if (lt_foo != "") { lt_freq[[lt_foo]]++; } - if (lt_freq[[lt_foo]] == 1) { print lt_foo; } -}'` - sys_lib_search_path_spec=`$ECHO $lt_search_path_spec` -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi]) -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=".so" -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -need_lib_prefix=unknown 
-hardcode_into_libs=no - -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown - -case $host_os in -aix3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' - shlibpath_var=LIBPATH - - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='${libname}${release}${shared_ext}$major' - ;; - -aix[[4-9]]*) - version_type=linux - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test "$host_cpu" = ia64; then - # AIX 5 supports IA64 - library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line `#! .'. This would cause the generated library to - # depend on `.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. - case $host_os in - aix4 | aix4.[[01]] | aix4.[[01]].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # AIX (on Power*) has no versioning support, so currently we can not hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - if test "$aix_use_runtimelinking" = yes; then - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. 
- library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - else - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='${libname}${release}.a $libname.a' - soname_spec='${libname}${release}${shared_ext}$major' - fi - shlibpath_var=LIBPATH - fi - ;; - -amigaos*) - case $host_cpu in - powerpc) - # Since July 2007 AmigaOS4 officially supports .so libraries. - # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - ;; - m68k) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`$ECHO "X$lib" | $Xsed -e '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; - esac - ;; - -beos*) - library_names_spec='${libname}${shared_ext}' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; - -bsdi[[45]]*) - version_type=linux - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; - -cygwin* | mingw* | pw32* | 
cegcc*) - version_type=windows - shrext_cmds=".dll" - need_version=no - need_lib_prefix=no - - case $GCC,$host_os in - yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname~ - if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then - eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; - fi' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - shlibpath_overrides_runpath=yes - - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec="/usr/lib /lib/w32api /lib /usr/local/lib" - ;; - mingw* | cegcc*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - sys_lib_search_path_spec=`$CC -print-search-dirs | $GREP "^libraries:" | $SED -e "s/^libraries://" -e "s,=/,/,g"` - if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then - # It is most probably a Windows format PATH printed by - # mingw gcc, but we are running on Cygwin. Gcc prints its search - # path with ; separators, and with drive letters. We can handle the - # drive letters (cygwin fileutils understands them), so leave them, - # especially as we might pass files found there to a mingw objdump, - # which wouldn't understand a cygwinified path. Ahh. 
- sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - ;; - esac - ;; - - *) - library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' - ;; - esac - dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH - ;; - -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' - soname_spec='${libname}${release}${major}$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' -m4_if([$1], [],[ - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; - -dgux*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -freebsd1*) - dynamic_linker=no - ;; - -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. 
- if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[[123]]*) objformat=aout ;; - *) objformat=elf ;; - esac - fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[[01]]* | freebsdelf3.[[01]]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ - freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac - ;; - -gnu*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - hardcode_into_libs=yes - ;; - -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. - version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
- library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - if test "X$HPUX_IA64_MODE" = X32; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - fi - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555. 
- postinstall_cmds='chmod 555 $lib' - ;; - -interix[[3-9]]*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test "$lt_cv_prog_gnu_ld" = yes; then - version_type=linux - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" - sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" - hardcode_into_libs=yes - ;; - -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; - -# This must be Linux ELF. 
-linux* | k*bsd*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - # Some binutils ld are patched to set DT_RUNPATH - save_LDFLAGS=$LDFLAGS - save_libdir=$libdir - eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ - LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" - AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], - [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], - [shlibpath_overrides_runpath=yes])]) - LDFLAGS=$save_LDFLAGS - libdir=$save_libdir - - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. - hardcode_into_libs=yes - - # Add ABI-specific directories to the system library path. - sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib" - - # Append ld.so.conf contents to the search path - if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra" - fi - - # We used to test for /lib/ld.so.1 and disable shared libraries on - # powerpc, because MkLinux only supported shared libraries with the - # GNU dynamic linker. Since this was broken with cross compilers, - # most powerpc-linux boxes support dynamic linking these days and - # people can always --disable-shared, the test was removed, and we - # assume the GNU/Linux dynamic linker is in use. 
- dynamic_linker='GNU/Linux ld.so' - ;; - -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - -newsos6) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -*nto* | *qnx*) - version_type=qnx - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='ldqnx.so' - ;; - -openbsd*) - version_type=sunos - sys_lib_dlsearch_path_spec="/usr/lib" - need_lib_prefix=no - # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
- case $host_os in - openbsd3.3 | openbsd3.3.*) need_version=yes ;; - *) need_version=no ;; - esac - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - case $host_os in - openbsd2.[[89]] | openbsd2.[[89]].*) - shlibpath_overrides_runpath=no - ;; - *) - shlibpath_overrides_runpath=yes - ;; - esac - else - shlibpath_overrides_runpath=yes - fi - ;; - -os2*) - libname_spec='$name' - shrext_cmds=".dll" - need_lib_prefix=no - library_names_spec='$libname${shared_ext} $libname.a' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=LIBPATH - ;; - -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='${libname}${release}${shared_ext}$major' - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" - ;; - -rdos*) - dynamic_linker=no - ;; - -solaris*) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; - -sunos4*) - version_type=sunos - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if 
test "$with_gnu_ld" = yes; then - need_lib_prefix=no - fi - need_version=yes - ;; - -sysv4 | sysv4.3*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; - esac - ;; - -sysv4*MP*) - if test -d /usr/nec ;then - version_type=linux - library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' - soname_spec='$libname${shared_ext}.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - version_type=freebsd-elf - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - if test "$with_gnu_ld" = yes; then - sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - else - sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - case $host_os in - sco3.2v5*) - sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" - ;; - esac - fi - sys_lib_dlsearch_path_spec='/usr/lib' - ;; - -tpf*) - # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
- version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -uts4*) - version_type=linux - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -*) - dynamic_linker=no - ;; -esac -AC_MSG_RESULT([$dynamic_linker]) -test "$dynamic_linker" = no && can_build_shared=no - -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test "$GCC" = yes; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi - -if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then - sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" -fi -if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then - sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" -fi - -_LT_DECL([], [variables_saved_for_relink], [1], - [Variables whose values should be saved in libtool wrapper scripts and - restored at link time]) -_LT_DECL([], [need_lib_prefix], [0], - [Do we need the "lib" prefix for modules?]) -_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) -_LT_DECL([], [version_type], [0], [Library versioning type]) -_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) -_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) -_LT_DECL([], [shlibpath_overrides_runpath], [0], - [Is shlibpath searched before the hard-coded library search path?]) -_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) -_LT_DECL([], [library_names_spec], [1], - [[List of archive names. First name is the real one, the rest are links. 
- The last name is the one that the linker finds with -lNAME]]) -_LT_DECL([], [soname_spec], [1], - [[The coded name of the library, if different from the real name]]) -_LT_DECL([], [postinstall_cmds], [2], - [Command to use after installation of a shared archive]) -_LT_DECL([], [postuninstall_cmds], [2], - [Command to use after uninstallation of a shared archive]) -_LT_DECL([], [finish_cmds], [2], - [Commands used to finish a libtool library installation in a directory]) -_LT_DECL([], [finish_eval], [1], - [[As "finish_cmds", except a single script fragment to be evaled but - not shown]]) -_LT_DECL([], [hardcode_into_libs], [0], - [Whether we should hardcode library paths into libraries]) -_LT_DECL([], [sys_lib_search_path_spec], [2], - [Compile-time system search path for libraries]) -_LT_DECL([], [sys_lib_dlsearch_path_spec], [2], - [Run-time system search path for libraries]) -])# _LT_SYS_DYNAMIC_LINKER - - -# _LT_PATH_TOOL_PREFIX(TOOL) -# -------------------------- -# find a file program which can recognize shared library -AC_DEFUN([_LT_PATH_TOOL_PREFIX], -[m4_require([_LT_DECL_EGREP])dnl -AC_MSG_CHECKING([for $1]) -AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, -[case $MAGIC_CMD in -[[\\/*] | ?:[\\/]*]) - lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD="$MAGIC_CMD" - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR -dnl $ac_dummy forces splitting on constant user-supplied paths. -dnl POSIX.2 word splitting is done only on the output of word expansions, -dnl not every word. This closes a longstanding sh security hole. - ac_dummy="m4_if([$2], , $PATH, [$2])" - for ac_dir in $ac_dummy; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. 
- if test -f $ac_dir/$1; then - lt_cv_path_MAGIC_CMD="$ac_dir/$1" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD="$lt_cv_path_MAGIC_CMD" - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <<_LT_EOF 1>&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org - -_LT_EOF - fi ;; - esac - fi - break - fi - done - IFS="$lt_save_ifs" - MAGIC_CMD="$lt_save_MAGIC_CMD" - ;; -esac]) -MAGIC_CMD="$lt_cv_path_MAGIC_CMD" -if test -n "$MAGIC_CMD"; then - AC_MSG_RESULT($MAGIC_CMD) -else - AC_MSG_RESULT(no) -fi -_LT_DECL([], [MAGIC_CMD], [0], - [Used to examine libraries when file_magic_cmd begins with "file"])dnl -])# _LT_PATH_TOOL_PREFIX - -# Old name: -AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) - - -# _LT_PATH_MAGIC -# -------------- -# find a file program which can recognize a shared library -m4_defun([_LT_PATH_MAGIC], -[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) -if test -z "$lt_cv_path_MAGIC_CMD"; then - if test -n "$ac_tool_prefix"; then - _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) - else - MAGIC_CMD=: - fi -fi -])# _LT_PATH_MAGIC - - -# LT_PATH_LD -# ---------- -# find the pathname to the GNU or non-GNU linker -AC_DEFUN([LT_PATH_LD], -[AC_REQUIRE([AC_PROG_CC])dnl 
-AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_CANONICAL_BUILD])dnl -m4_require([_LT_DECL_SED])dnl -m4_require([_LT_DECL_EGREP])dnl - -AC_ARG_WITH([gnu-ld], - [AS_HELP_STRING([--with-gnu-ld], - [assume the C compiler uses GNU ld @<:@default=no@:>@])], - [test "$withval" = no || with_gnu_ld=yes], - [with_gnu_ld=no])dnl - -ac_prog=ld -if test "$GCC" = yes; then - # Check if gcc -print-prog-name=ld gives a path. - AC_MSG_CHECKING([for ld used by $CC]) - case $host in - *-*-mingw*) - # gcc leaves a trailing carriage return which upsets mingw - ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; - *) - ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; - esac - case $ac_prog in - # Accept absolute paths. - [[\\/]]* | ?:[[\\/]]*) - re_direlt='/[[^/]][[^/]]*/\.\./' - # Canonicalize the pathname of ld - ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` - while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do - ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` - done - test -z "$LD" && LD="$ac_prog" - ;; - "") - # If it fails, then pretend we aren't using GCC. - ac_prog=ld - ;; - *) - # If it is relative, then search for the first ld in PATH. - with_gnu_ld=unknown - ;; - esac -elif test "$with_gnu_ld" = yes; then - AC_MSG_CHECKING([for GNU ld]) -else - AC_MSG_CHECKING([for non-GNU ld]) -fi -AC_CACHE_VAL(lt_cv_path_LD, -[if test -z "$LD"; then - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in $PATH; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then - lt_cv_path_LD="$ac_dir/$ac_prog" - # Check to see if the program is GNU ld. I'd rather use --version, - # but apparently some variants of GNU ld only accept -v. - # Break only if it was the GNU/non-GNU ld that we prefer. 
- case `"$lt_cv_path_LD" -v 2>&1 &1 /dev/null 2>&1; then - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else - lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; - -cegcc) - # use the weaker test based on 'objdump'. See mingw*. - lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' - lt_cv_file_magic_cmd='$OBJDUMP -f' - ;; - -darwin* | rhapsody*) - lt_cv_deplibs_check_method=pass_all - ;; - -freebsd* | dragonfly*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - case $host_cpu in - i*86 ) - # Not sure whether the presence of OpenBSD here was a mistake. - # Let's accept both of them until this is cleared up. - lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` - ;; - esac - else - lt_cv_deplibs_check_method=pass_all - fi - ;; - -gnu*) - lt_cv_deplibs_check_method=pass_all - ;; - -hpux10.20* | hpux11*) - lt_cv_file_magic_cmd=/usr/bin/file - case $host_cpu in - ia64*) - lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' - lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so - ;; - hppa*64*) - [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - PA-RISC [0-9].[0-9]'] - lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl - ;; - *) - lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]].[[0-9]]) shared library' - lt_cv_file_magic_test_file=/usr/lib/libc.sl - ;; - esac - ;; - -interix[[3-9]]*) - # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' - ;; - -irix5* | irix6* | nonstopux*) - case $LD in - *-32|*"-32 
") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - lt_cv_deplibs_check_method=pass_all - ;; - -# This must be Linux ELF. -linux* | k*bsd*-gnu) - lt_cv_deplibs_check_method=pass_all - ;; - -netbsd*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' - fi - ;; - -newos6*) - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=/usr/lib/libnls.so - ;; - -*nto* | *qnx*) - lt_cv_deplibs_check_method=pass_all - ;; - -openbsd*) - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' - fi - ;; - -osf3* | osf4* | osf5*) - lt_cv_deplibs_check_method=pass_all - ;; - -rdos*) - lt_cv_deplibs_check_method=pass_all - ;; - -solaris*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv4 | sysv4.3*) - case $host_vendor in - motorola) - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` - ;; - ncr) - lt_cv_deplibs_check_method=pass_all - ;; - sequent) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' - ;; - sni) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" - 
lt_cv_file_magic_test_file=/lib/libc.so - ;; - siemens) - lt_cv_deplibs_check_method=pass_all - ;; - pc) - lt_cv_deplibs_check_method=pass_all - ;; - esac - ;; - -tpf*) - lt_cv_deplibs_check_method=pass_all - ;; -esac -]) -file_magic_cmd=$lt_cv_file_magic_cmd -deplibs_check_method=$lt_cv_deplibs_check_method -test -z "$deplibs_check_method" && deplibs_check_method=unknown - -_LT_DECL([], [deplibs_check_method], [1], - [Method to check whether dependent libraries are shared objects]) -_LT_DECL([], [file_magic_cmd], [1], - [Command to use when deplibs_check_method == "file_magic"]) -])# _LT_CHECK_MAGIC_METHOD - - -# LT_PATH_NM -# ---------- -# find the pathname to a BSD- or MS-compatible name lister -AC_DEFUN([LT_PATH_NM], -[AC_REQUIRE([AC_PROG_CC])dnl -AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, -[if test -n "$NM"; then - # Let the user override the test. - lt_cv_path_NM="$NM" -else - lt_nm_to_check="${ac_tool_prefix}nm" - if test -n "$ac_tool_prefix" && test "$build" = "$host"; then - lt_nm_to_check="$lt_nm_to_check nm" - fi - for lt_tmp_nm in $lt_nm_to_check; do - lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do - IFS="$lt_save_ifs" - test -z "$ac_dir" && ac_dir=. - tmp_nm="$ac_dir/$lt_tmp_nm" - if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then - # Check to see if the nm accepts a BSD-compat flag. 
- # Adding the `sed 1q' prevents false positives on HP-UX, which says: - # nm: unknown option "B" ignored - # Tru64's nm complains that /dev/null is an invalid object file - case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in - */dev/null* | *'Invalid file or object type'*) - lt_cv_path_NM="$tmp_nm -B" - break - ;; - *) - case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in - */dev/null*) - lt_cv_path_NM="$tmp_nm -p" - break - ;; - *) - lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but - continue # so that we can try to find one that supports BSD flags - ;; - esac - ;; - esac - fi - done - IFS="$lt_save_ifs" - done - : ${lt_cv_path_NM=no} -fi]) -if test "$lt_cv_path_NM" != "no"; then - NM="$lt_cv_path_NM" -else - # Didn't find any BSD compatible name lister, look for dumpbin. - AC_CHECK_TOOLS(DUMPBIN, ["dumpbin -symbols" "link -dump -symbols"], :) - AC_SUBST([DUMPBIN]) - if test "$DUMPBIN" != ":"; then - NM="$DUMPBIN" - fi -fi -test -z "$NM" && NM=nm -AC_SUBST([NM]) -_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl - -AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], - [lt_cv_nm_interface="BSD nm" - echo "int some_variable = 0;" > conftest.$ac_ext - (eval echo "\"\$as_me:__oline__: $ac_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$ac_compile" 2>conftest.err) - cat conftest.err >&AS_MESSAGE_LOG_FD - (eval echo "\"\$as_me:__oline__: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) - (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) - cat conftest.err >&AS_MESSAGE_LOG_FD - (eval echo "\"\$as_me:__oline__: output\"" >&AS_MESSAGE_LOG_FD) - cat conftest.out >&AS_MESSAGE_LOG_FD - if $GREP 'External.*some_variable' conftest.out > /dev/null; then - lt_cv_nm_interface="MS dumpbin" - fi - rm -f conftest*]) -])# LT_PATH_NM - -# Old names: -AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) -AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AM_PROG_NM], []) -dnl 
AC_DEFUN([AC_PROG_NM], []) - - -# LT_LIB_M -# -------- -# check for math library -AC_DEFUN([LT_LIB_M], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -LIBM= -case $host in -*-*-beos* | *-*-cygwin* | *-*-pw32* | *-*-darwin*) - # These system don't have libm, or don't need it - ;; -*-ncr-sysv4.3*) - AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw") - AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") - ;; -*) - AC_CHECK_LIB(m, cos, LIBM="-lm") - ;; -esac -AC_SUBST([LIBM]) -])# LT_LIB_M - -# Old name: -AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_CHECK_LIBM], []) - - -# _LT_COMPILER_NO_RTTI([TAGNAME]) -# ------------------------------- -m4_defun([_LT_COMPILER_NO_RTTI], -[m4_require([_LT_TAG_COMPILER])dnl - -_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= - -if test "$GCC" = yes; then - _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' - - _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], - lt_cv_prog_compiler_rtti_exceptions, - [-fno-rtti -fno-exceptions], [], - [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) -fi -_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], - [Compiler flag to turn off builtin functions]) -])# _LT_COMPILER_NO_RTTI - - -# _LT_CMD_GLOBAL_SYMBOLS -# ---------------------- -m4_defun([_LT_CMD_GLOBAL_SYMBOLS], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_PROG_CC])dnl -AC_REQUIRE([LT_PATH_NM])dnl -AC_REQUIRE([LT_PATH_LD])dnl -m4_require([_LT_DECL_SED])dnl -m4_require([_LT_DECL_EGREP])dnl -m4_require([_LT_TAG_COMPILER])dnl - -# Check for command to grab the raw symbol name followed by C symbol from nm. -AC_MSG_CHECKING([command to parse $NM output from $compiler object]) -AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], -[ -# These are sane defaults that work on at least a few old systems. -# [They come from Ultrix. What could be older than Ultrix?!! 
;)] - -# Character class describing NM global symbol codes. -symcode='[[BCDEGRST]]' - -# Regexp to match symbols that can be accessed directly from C. -sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' - -# Define system-specific variables. -case $host_os in -aix*) - symcode='[[BCDT]]' - ;; -cygwin* | mingw* | pw32* | cegcc*) - symcode='[[ABCDGISTW]]' - ;; -hpux*) - if test "$host_cpu" = ia64; then - symcode='[[ABCDEGRST]]' - fi - ;; -irix* | nonstopux*) - symcode='[[BCDEGRST]]' - ;; -osf*) - symcode='[[BCDEGQRST]]' - ;; -solaris*) - symcode='[[BDRT]]' - ;; -sco3.2v5*) - symcode='[[DT]]' - ;; -sysv4.2uw2*) - symcode='[[DT]]' - ;; -sysv5* | sco5v6* | unixware* | OpenUNIX*) - symcode='[[ABDT]]' - ;; -sysv4) - symcode='[[DFNSTU]]' - ;; -esac - -# If we're using GNU nm, then use its standard symbol codes. -case `$NM -V 2>&1` in -*GNU* | *'with BFD'*) - symcode='[[ABCDGIRSTW]]' ;; -esac - -# Transform an extracted symbol line into a proper C declaration. -# Some systems (esp. on ia64) link data and code symbols differently, -# so use this general approach. -lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - -# Transform an extracted symbol line into symbol name and symbol address -lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" -lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - -# Handle CRLF in mingw tool chain -opt_cr= -case $build_os in -mingw*) - opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp - ;; -esac - -# Try without a prefix underscore, then with it. 
-for ac_symprfx in "" "_"; do - - # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. - symxfrm="\\1 $ac_symprfx\\2 \\2" - - # Write the raw and C identifiers. - if test "$lt_cv_nm_interface" = "MS dumpbin"; then - # Fake it for dumpbin and say T for any non-static function - # and D for any global variable. - # Also find C++ and __fastcall symbols from MSVC++, - # which start with @ or ?. - lt_cv_sys_global_symbol_pipe="$AWK ['"\ -" {last_section=section; section=\$ 3};"\ -" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ -" \$ 0!~/External *\|/{next};"\ -" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ -" {if(hide[section]) next};"\ -" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\ -" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ -" s[1]~/^[@?]/{print s[1], s[1]; next};"\ -" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ -" ' prfx=^$ac_symprfx]" - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi - - # Check to see that the pipe works correctly. - pipe_works=no - - rm -f conftest* - cat > conftest.$ac_ext <<_LT_EOF -#ifdef __cplusplus -extern "C" { -#endif -char nm_test_var; -void nm_test_func(void); -void nm_test_func(void){} -#ifdef __cplusplus -} -#endif -int main(){nm_test_var='a';nm_test_func();return(0);} -_LT_EOF - - if AC_TRY_EVAL(ac_compile); then - # Now try to grab the symbols. - nlist=conftest.nm - if AC_TRY_EVAL(NM conftest.$ac_objext \| $lt_cv_sys_global_symbol_pipe \> $nlist) && test -s "$nlist"; then - # Try sorting and uniquifying the output. - if sort "$nlist" | uniq > "$nlist"T; then - mv -f "$nlist"T "$nlist" - else - rm -f "$nlist"T - fi - - # Make sure that we snagged all the symbols we need. 
- if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -#ifdef __cplusplus -extern "C" { -#endif - -_LT_EOF - # Now generate the symbol file. - eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' - - cat <<_LT_EOF >> conftest.$ac_ext - -/* The mapping between symbol names and symbols. */ -const struct { - const char *name; - void *address; -} -lt__PROGRAM__LTX_preloaded_symbols[[]] = -{ - { "@PROGRAM@", (void *) 0 }, -_LT_EOF - $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext - cat <<\_LT_EOF >> conftest.$ac_ext - {0, (void *) 0} -}; - -/* This works around a problem in FreeBSD linker */ -#ifdef FREEBSD_WORKAROUND -static const void *lt_preloaded_setup() { - return lt__PROGRAM__LTX_preloaded_symbols; -} -#endif - -#ifdef __cplusplus -} -#endif -_LT_EOF - # Now try linking the two files. - mv conftest.$ac_objext conftstm.$ac_objext - lt_save_LIBS="$LIBS" - lt_save_CFLAGS="$CFLAGS" - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" - if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then - pipe_works=yes - fi - LIBS="$lt_save_LIBS" - CFLAGS="$lt_save_CFLAGS" - else - echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD - fi - else - echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD - fi - else - echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD - fi - else - echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD - cat conftest.$ac_ext >&5 - fi - rm -rf conftest* conftst* - - # Do not use the global_symbol_pipe unless it works. 
- if test "$pipe_works" = yes; then - break - else - lt_cv_sys_global_symbol_pipe= - fi -done -]) -if test -z "$lt_cv_sys_global_symbol_pipe"; then - lt_cv_sys_global_symbol_to_cdecl= -fi -if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then - AC_MSG_RESULT(failed) -else - AC_MSG_RESULT(ok) -fi - -_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], - [Take the output of nm and produce a listing of raw symbols and C names]) -_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], - [Transform the output of nm in a proper C declaration]) -_LT_DECL([global_symbol_to_c_name_address], - [lt_cv_sys_global_symbol_to_c_name_address], [1], - [Transform the output of nm in a C name address pair]) -_LT_DECL([global_symbol_to_c_name_address_lib_prefix], - [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], - [Transform the output of nm in a C name address pair when lib prefix is needed]) -]) # _LT_CMD_GLOBAL_SYMBOLS - - -# _LT_COMPILER_PIC([TAGNAME]) -# --------------------------- -m4_defun([_LT_COMPILER_PIC], -[m4_require([_LT_TAG_COMPILER])dnl -_LT_TAGVAR(lt_prog_compiler_wl, $1)= -_LT_TAGVAR(lt_prog_compiler_pic, $1)= -_LT_TAGVAR(lt_prog_compiler_static, $1)= - -AC_MSG_CHECKING([for $compiler option to produce PIC]) -m4_if([$1], [CXX], [ - # C++ specific cases for pic, static, wl, etc. - if test "$GXX" = yes; then - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - m68k) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. 
- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' - ;; - esac - ;; - - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - mingw* | cygwin* | os2* | pw32* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - m4_if([$1], [GCJ], [], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) - ;; - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - ;; - *djgpp*) - # DJGPP does not support shared libraries at all - _LT_TAGVAR(lt_prog_compiler_pic, $1)= - ;; - interix[[3-9]]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; - sysv4*MP*) - if test -d /usr/nec; then - _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic - fi - ;; - hpux*) - # PIC is the default for 64-bit PA HP-UX, but not for 32-bit - # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag - # sets the default TLS model and affects inlining. - case $host_cpu in - hppa*64*) - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - ;; - *qnx* | *nto*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - else - case $host_os in - aix[[4-9]]*) - # All AIX code is PIC. 
- if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - else - _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' - fi - ;; - chorus*) - case $cc_basename in - cxch68*) - # Green Hills C++ Compiler - # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" - ;; - esac - ;; - dgux*) - case $cc_basename in - ec++*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - ;; - ghcx*) - # Green Hills C++ Compiler - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - *) - ;; - esac - ;; - freebsd* | dragonfly*) - # FreeBSD uses GNU C++ - ;; - hpux9* | hpux10* | hpux11*) - case $cc_basename in - CC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' - if test "$host_cpu" != ia64; then - _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - fi - ;; - aCC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - ;; - esac - ;; - *) - ;; - esac - ;; - interix*) - # This is c89, which is MS Visual C++ (no shared libs) - # Anyone wants to do a port? - ;; - irix5* | irix6* | nonstopux*) - case $cc_basename in - CC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - # CC pic flag -KPIC is the default. - ;; - *) - ;; - esac - ;; - linux* | k*bsd*-gnu) - case $cc_basename in - KCC*) - # KAI C++ Compiler - _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - ecpc* ) - # old Intel C++ for x86_64 which still supported -KPIC. 
- _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - icpc* ) - # Intel C++, used to be incompatible with GCC. - # ICC 10 doesn't accept -KPIC any more. - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - pgCC* | pgcpp*) - # Portland Group C++ compiler - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - cxx*) - # Compaq C++ - # Make sure the PIC flag is empty. It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. - _LT_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - xlc* | xlC*) - # IBM XL 8.0 on PPC - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - ;; - esac - ;; - esac - ;; - lynxos*) - ;; - m88k*) - ;; - mvs*) - case $cc_basename in - cxx*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' - ;; - *) - ;; - esac - ;; - netbsd*) - ;; - *qnx* | *nto*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' - ;; - osf3* | osf4* | osf5*) - case $cc_basename in - KCC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' - ;; - RCC*) - # Rational C++ 2.4.1 - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - cxx*) - # Digital/Compaq C++ - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # Make sure the PIC flag is empty. It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. 
- _LT_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - *) - ;; - esac - ;; - psos*) - ;; - solaris*) - case $cc_basename in - CC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - ;; - gcx*) - # Green Hills C++ Compiler - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - ;; - *) - ;; - esac - ;; - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - lcc*) - # Lucid - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - *) - ;; - esac - ;; - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - case $cc_basename in - CC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - esac - ;; - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - ;; - *) - ;; - esac - ;; - vxworks*) - ;; - *) - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - esac - fi -], -[ - if test "$GCC" = yes; then - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - m68k) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the `-m68020' flag to GCC prevents building anything better, - # like `-m68040'. 
- _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' - ;; - esac - ;; - - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - m4_if([$1], [GCJ], [], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) - ;; - - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - ;; - - hpux*) - # PIC is the default for 64-bit PA HP-UX, but not for 32-bit - # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag - # sets the default TLS model and affects inlining. - case $host_cpu in - hppa*64*) - # +Z the default - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - ;; - - interix[[3-9]]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; - - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - enable_shared=no - ;; - - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic - fi - ;; - - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. 
- case $host_os in - aix*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - if test "$host_cpu" = ia64; then - # AIX 5 now supports IA64 processor - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - else - _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' - fi - ;; - - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - m4_if([$1], [GCJ], [], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) - ;; - - hpux9* | hpux10* | hpux11*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' - ;; - - irix5* | irix6* | nonstopux*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # PIC (with -KPIC) is the default. - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - - linux* | k*bsd*-gnu) - case $cc_basename in - # old Intel for x86_64 which still supported -KPIC. - ecc*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - # icc used to be incompatible with GCC. - # ICC 10 doesn't accept -KPIC any more. - icc* | ifort*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - # Lahey Fortran 8.1. 
- lf95*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' - _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' - ;; - pgcc* | pgf77* | pgf90* | pgf95*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - ccc*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # All Alpha code is PIC. - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - xl*) - # IBM XL C 8.0/Fortran 10.1 on PPC - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C 5.9 - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - ;; - *Sun\ F*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='' - ;; - esac - ;; - esac - ;; - - newsos6) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' - ;; - - osf3* | osf4* | osf5*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # All OSF/1 code is PIC. 
- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - - rdos*) - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - - solaris*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - case $cc_basename in - f77* | f90* | f95*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; - *) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; - esac - ;; - - sunos4*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - sysv4 | sysv4.2uw2* | sysv4.3*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - sysv4*MP*) - if test -d /usr/nec ;then - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; - - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - unicos*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - - uts4*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - *) - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - esac - fi -]) -case $host_os in - # For platforms which do not support PIC, -DPIC is meaningless: - *djgpp*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)= - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" - ;; -esac -AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) -_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], - [How to pass a linker flag through the compiler]) - -# -# Check to make sure the PIC flag actually works. 
-# -if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then - _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], - [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], - [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], - [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in - "" | " "*) ;; - *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; - esac], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) -fi -_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], - [Additional compiler flags for building library objects]) - -# -# Check to make sure the static flag actually works. -# -wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" -_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], - _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), - $lt_tmp_static_flag, - [], - [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) -_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], - [Compiler flag to prevent dynamic linking]) -])# _LT_COMPILER_PIC - - -# _LT_LINKER_SHLIBS([TAGNAME]) -# ---------------------------- -# See if the linker supports building shared libraries. -m4_defun([_LT_LINKER_SHLIBS], -[AC_REQUIRE([LT_PATH_LD])dnl -AC_REQUIRE([LT_PATH_NM])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_EGREP])dnl -m4_require([_LT_DECL_SED])dnl -m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl -m4_require([_LT_TAG_COMPILER])dnl -AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) -m4_if([$1], [CXX], [ - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - case $host_os in - aix[[4-9]]*) - # If we're using GNU nm, then we don't want the "-C" option. 
- # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then - _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' - else - _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' - fi - ;; - pw32*) - _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" - ;; - cygwin* | mingw* | cegcc*) - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' - ;; - *) - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - ;; - esac - _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] -], [ - runpath_var= - _LT_TAGVAR(allow_undefined_flag, $1)= - _LT_TAGVAR(always_export_symbols, $1)=no - _LT_TAGVAR(archive_cmds, $1)= - _LT_TAGVAR(archive_expsym_cmds, $1)= - _LT_TAGVAR(compiler_needs_object, $1)=no - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no - _LT_TAGVAR(export_dynamic_flag_spec, $1)= - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - _LT_TAGVAR(hardcode_automatic, $1)=no - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_direct_absolute, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= - _LT_TAGVAR(hardcode_libdir_separator, $1)= - _LT_TAGVAR(hardcode_minus_L, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, 
$1)=unsupported - _LT_TAGVAR(inherit_rpath, $1)=no - _LT_TAGVAR(link_all_deplibs, $1)=unknown - _LT_TAGVAR(module_cmds, $1)= - _LT_TAGVAR(module_expsym_cmds, $1)= - _LT_TAGVAR(old_archive_from_new_cmds, $1)= - _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= - _LT_TAGVAR(thread_safe_flag_spec, $1)= - _LT_TAGVAR(whole_archive_flag_spec, $1)= - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - _LT_TAGVAR(include_expsyms, $1)= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ` (' and `)$', so one must not match beginning or - # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', - # as well as any symbol that contains `d'. - _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - # Exclude shared library initialization/finalization symbols. -dnl Note also adjust exclude_expsyms for C++ above. - extract_expsyms_cmds= - - case $host_os in - cygwin* | mingw* | pw32* | cegcc*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - if test "$GCC" != yes; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd*) - with_gnu_ld=no - ;; - esac - - _LT_TAGVAR(ld_shlibs, $1)=yes - if test "$with_gnu_ld" = yes; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='${wl}' - - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. 
Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. - if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then - _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - _LT_TAGVAR(whole_archive_flag_spec, $1)= - fi - supports_anon_versioning=no - case `$LD -v 2>&1` in - *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac - - # See if GNU ld supports shared libraries. - case $host_os in - aix[[3-9]]*) - # On AIX/PPC, the GNU linker is very broken - if test "$host_cpu" != ia64; then - _LT_TAGVAR(ld_shlibs, $1)=no - cat <<_LT_EOF 1>&2 - -*** Warning: the GNU linker, at least up to release 2.9.1, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to modify your PATH -*** so that a non-GNU linker is found, and then restart. 
- -_LT_EOF - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='' - ;; - m68k) - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_minus_L, $1)=yes - ;; - esac - ;; - - beos*) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - cygwin* | mingw* | pw32* | cegcc*) - # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, - # as there is no search path for DLLs. 
- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=no - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... - _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - interix[[3-9]]*) - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - - gnu* | linux* | tpf* | k*bsd*-gnu) - tmp_diet=no - if test "$host_os" = linux-dietlibc; then - case $cc_basename in - diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) - esac - fi - if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ - && test "$tmp_diet" = no - then - tmp_addflag= - tmp_sharedflag='-shared' - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95*) # Portland Group f77 and f90 compilers - _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - lf95*) # Lahey Fortran 8.1 - _LT_TAGVAR(whole_archive_flag_spec, $1)= - tmp_sharedflag='--shared' ;; - xl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) - tmp_sharedflag='-qmkshrobj' - tmp_addflag= ;; - esac - case `$CC -V 
2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' - _LT_TAGVAR(compiler_needs_object, $1)=yes - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - esac - _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - - if test "x$supports_anon_versioning" = xyes; then - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' - fi - - case $cc_basename in - xlf*) - # IBM XL Fortran 10.1 on PPC cannot create shared libs itself - _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' - _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - netbsd*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags 
-o $lib' - wlarc= - else - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - - solaris*) - if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then - _LT_TAGVAR(ld_shlibs, $1)=no - cat <<_LT_EOF 1>&2 - -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -_LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) - _LT_TAGVAR(ld_shlibs, $1)=no - cat <<_LT_EOF 1>&2 - -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -_LT_EOF - ;; - *) - # For security reasons, it is highly recommended that you always - # use absolute paths for naming shared libraries, and exclude the - # DT_RUNPATH tag from executables and libraries. 
But doing so - # requires that you compile everything twice, which is a pain. - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - - sunos4*) - _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - - if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then - runpath_var= - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_TAGVAR(export_dynamic_flag_spec, $1)= - _LT_TAGVAR(whole_archive_flag_spec, $1)= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=yes - _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. 
- _LT_TAGVAR(hardcode_minus_L, $1)=yes - if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - _LT_TAGVAR(hardcode_direct, $1)=unsupported - fi - ;; - - aix[[4-9]]*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to AIX nm, but means don't demangle with GNU nm - if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then - _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' - else - _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) - for ld_flag in $LDFLAGS; do - if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then - aix_use_runtimelinking=yes - break - fi - done - ;; - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
- - _LT_TAGVAR(archive_cmds, $1)='' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' - - if test "$GCC" = yes; then - case $host_os in aix4.[[012]]|aix4.[[012]].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && - strings "$collect2name" | $GREP resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - _LT_TAGVAR(hardcode_direct, $1)=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)= - fi - ;; - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - _LT_TAGVAR(always_export_symbols, $1)=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. 
- _LT_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. - _LT_SYS_MODULE_PATH_AIX - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' - _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" - _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an - # empty executable. - _LT_SYS_MODULE_PATH_AIX - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' - _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' - # Exported symbols can be pulled into shared objects from archives - _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' - _LT_TAGVAR(archive_cmds_need_lc, $1)=yes - # This is similar to how AIX traditionally builds its shared libraries. 
- _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='' - ;; - m68k) - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_minus_L, $1)=yes - ;; - esac - ;; - - bsdi[[45]]*) - _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic - ;; - - cygwin* | mingw* | pw32* | cegcc*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=".dll" - # FIXME: Setting linknames here is a bad hack. - _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `$ECHO "X$deplibs" | $Xsed -e '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' - # FIXME: Should let the user specify the lib program. 
- _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' - _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - ;; - - darwin* | rhapsody*) - _LT_DARWIN_LINKER_FEATURES($1) - ;; - - dgux*) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - freebsd1*) - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). - freebsd2.2*) - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2*) - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - hpux9*) - if test "$GCC" = yes; then - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(hardcode_direct, $1)=yes - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - ;; - - hpux10*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test "$with_gnu_ld" = no; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='+b $libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- _LT_TAGVAR(hardcode_minus_L, $1)=yes - fi - ;; - - hpux11*) - if test "$GCC" = yes -a "$with_gnu_ld" = no; then - case $host_cpu in - hppa*64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - fi - if test "$with_gnu_ld" = no; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - case $host_cpu in - hppa*64*|ia64*) - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - *) - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- _LT_TAGVAR(hardcode_minus_L, $1)=yes - ;; - esac - fi - ;; - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. - save_LDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" - AC_LINK_IFELSE(int foo(void) {}, - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' - ) - LDFLAGS="$save_LDFLAGS" - else - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' - fi - _LT_TAGVAR(archive_cmds_need_lc, $1)='no' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(inherit_rpath, $1)=yes - _LT_TAGVAR(link_all_deplibs, $1)=yes - ;; - - netbsd*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' 
# ELF - fi - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - newsos6) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - *nto* | *qnx*) - ;; - - openbsd*) - if test -f /usr/libexec/ld.so; then - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - else - case $host_os in - openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - ;; - esac - fi - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - os2*) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$ECHO DATA >> $output_objdir/$libname.def~$ECHO " SINGLE NONSHARED" 
>> $output_objdir/$libname.def~$ECHO EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' - _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' - ;; - - osf3*) - if test "$GCC" = yes; then - _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' - fi - _LT_TAGVAR(archive_cmds_need_lc, $1)='no' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - else - _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry 
${output_objdir}/so_locations -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ - $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' - - # Both c and cxx compiler support -rpath directly - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - fi - _LT_TAGVAR(archive_cmds_need_lc, $1)='no' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - - solaris*) - _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) - wlarc='' - _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' - ;; - *) - wlarc='${wl}' - _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o 
$lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - ;; - esac - fi - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. GCC discards it without `$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) - if test "$GCC" = yes; then - _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' - else - _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' - fi - ;; - esac - _LT_TAGVAR(link_all_deplibs, $1)=yes - ;; - - sunos4*) - if test "x$host_vendor" = xsequent; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - sysv4) - case $host_vendor in - sni) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. 
- _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' - _LT_TAGVAR(hardcode_direct, $1)=no - ;; - motorola) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - sysv4.3*) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - _LT_TAGVAR(ld_shlibs, $1)=yes - fi - ;; - - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) - _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var='LD_RUN_PATH' - - if test "$GCC" = yes; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link 
correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. - _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' - _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' - runpath_var='LD_RUN_PATH' - - if test "$GCC" = yes; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - uts4*) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - *) - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - - if test x$host_vendor = xsni; then - case $host in - sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' - ;; - esac - fi - fi -]) -AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) -test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no - -_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld - -_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl -_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl -_LT_DECL([], [extract_expsyms_cmds], [2], - [The commands to extract the exported symbol list from a shared archive]) - -# -# Do we need to explicitly link 
libc? -# -case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in -x|xyes) - # Assume -lc should be added - _LT_TAGVAR(archive_cmds_need_lc, $1)=yes - - if test "$enable_shared" = yes && test "$GCC" = yes; then - case $_LT_TAGVAR(archive_cmds, $1) in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - AC_MSG_CHECKING([whether -lc should be explicitly linked in]) - $RM conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - if AC_TRY_EVAL(ac_compile) 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) - pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) - _LT_TAGVAR(allow_undefined_flag, $1)= - if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) - then - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - else - _LT_TAGVAR(archive_cmds_need_lc, $1)=yes - fi - _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $RM conftest* - AC_MSG_RESULT([$_LT_TAGVAR(archive_cmds_need_lc, $1)]) - ;; - esac - fi - ;; -esac - -_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], - [Whether or not to add -lc for building shared libraries]) -_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], - [enable_shared_with_static_runtimes], [0], - [Whether or not to disallow shared libs when runtime libs are static]) -_LT_TAGDECL([], [export_dynamic_flag_spec], [1], - [Compiler flag to allow reflexive dlopens]) -_LT_TAGDECL([], [whole_archive_flag_spec], [1], - [Compiler flag to generate shared objects directly from archives]) -_LT_TAGDECL([], 
[compiler_needs_object], [1], - [Whether the compiler copes with passing no objects directly]) -_LT_TAGDECL([], [old_archive_from_new_cmds], [2], - [Create an old-style archive from a shared archive]) -_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], - [Create a temporary old-style archive to link instead of a shared archive]) -_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) -_LT_TAGDECL([], [archive_expsym_cmds], [2]) -_LT_TAGDECL([], [module_cmds], [2], - [Commands used to build a loadable module if different from building - a shared archive.]) -_LT_TAGDECL([], [module_expsym_cmds], [2]) -_LT_TAGDECL([], [with_gnu_ld], [1], - [Whether we are building with GNU ld or not]) -_LT_TAGDECL([], [allow_undefined_flag], [1], - [Flag that allows shared libraries with undefined symbols to be built]) -_LT_TAGDECL([], [no_undefined_flag], [1], - [Flag that enforces no undefined symbols]) -_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], - [Flag to hardcode $libdir into a binary during linking. - This must work even if $libdir does not exist]) -_LT_TAGDECL([], [hardcode_libdir_flag_spec_ld], [1], - [[If ld is used when linking, flag to hardcode $libdir into a binary - during linking. 
This must work even if $libdir does not exist]]) -_LT_TAGDECL([], [hardcode_libdir_separator], [1], - [Whether we need a single "-rpath" flag with a separated argument]) -_LT_TAGDECL([], [hardcode_direct], [0], - [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes - DIR into the resulting binary]) -_LT_TAGDECL([], [hardcode_direct_absolute], [0], - [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes - DIR into the resulting binary and the resulting library dependency is - "absolute", i.e impossible to change by setting ${shlibpath_var} if the - library is relocated]) -_LT_TAGDECL([], [hardcode_minus_L], [0], - [Set to "yes" if using the -LDIR flag during linking hardcodes DIR - into the resulting binary]) -_LT_TAGDECL([], [hardcode_shlibpath_var], [0], - [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR - into the resulting binary]) -_LT_TAGDECL([], [hardcode_automatic], [0], - [Set to "yes" if building a shared library automatically hardcodes DIR - into the library and all subsequent libraries and executables linked - against it]) -_LT_TAGDECL([], [inherit_rpath], [0], - [Set to yes if linker adds runtime paths of dependent libraries - to runtime path list]) -_LT_TAGDECL([], [link_all_deplibs], [0], - [Whether libtool must link a program against all its dependency libraries]) -_LT_TAGDECL([], [fix_srcfile_path], [1], - [Fix the shell variable $srcfile for the compiler]) -_LT_TAGDECL([], [always_export_symbols], [0], - [Set to "yes" if exported symbols are required]) -_LT_TAGDECL([], [export_symbols_cmds], [2], - [The commands to list exported symbols]) -_LT_TAGDECL([], [exclude_expsyms], [1], - [Symbols that should not be listed in the preloaded symbols]) -_LT_TAGDECL([], [include_expsyms], [1], - [Symbols that must always be exported]) -_LT_TAGDECL([], [prelink_cmds], [2], - [Commands necessary for linking programs (against libraries) with templates]) -_LT_TAGDECL([], [file_list_spec], [1], - [Specify 
filename containing input files]) -dnl FIXME: Not yet implemented -dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], -dnl [Compiler flag to generate thread safe objects]) -])# _LT_LINKER_SHLIBS - - -# _LT_LANG_C_CONFIG([TAG]) -# ------------------------ -# Ensure that the configuration variables for a C compiler are suitably -# defined. These variables are subsequently used by _LT_CONFIG to write -# the compiler configuration to `libtool'. -m4_defun([_LT_LANG_C_CONFIG], -[m4_require([_LT_DECL_EGREP])dnl -lt_save_CC="$CC" -AC_LANG_PUSH(C) - -# Source file extension for C test sources. -ac_ext=c - -# Object file extension for compiled C test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" - -# Code to be used in simple link tests -lt_simple_link_test_code='int main(){return(0);}' - -_LT_TAG_COMPILER -# Save the default compiler, since it gets overwritten when the other -# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. -compiler_DEFAULT=$CC - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -## CAVEAT EMPTOR: -## There is no encapsulation within the following macros, do not change -## the running order or otherwise move them around unless you know exactly -## what you are doing... -if test -n "$compiler"; then - _LT_COMPILER_NO_RTTI($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_SYS_DYNAMIC_LINKER($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - LT_SYS_DLOPEN_SELF - _LT_CMD_STRIPLIB - - # Report which library types will actually be built - AC_MSG_CHECKING([if libtool supports shared libraries]) - AC_MSG_RESULT([$can_build_shared]) - - AC_MSG_CHECKING([whether to build shared libraries]) - test "$can_build_shared" = "no" && enable_shared=no - - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. 
- case $host_os in - aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - - aix[[4-9]]*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no - fi - ;; - esac - AC_MSG_RESULT([$enable_shared]) - - AC_MSG_CHECKING([whether to build static libraries]) - # Make sure either enable_shared or enable_static is yes. - test "$enable_shared" = yes || enable_static=yes - AC_MSG_RESULT([$enable_static]) - - _LT_CONFIG($1) -fi -AC_LANG_POP -CC="$lt_save_CC" -])# _LT_LANG_C_CONFIG - - -# _LT_PROG_CXX -# ------------ -# Since AC_PROG_CXX is broken, in that it returns g++ if there is no c++ -# compiler, we have our own version here. -m4_defun([_LT_PROG_CXX], -[ -pushdef([AC_MSG_ERROR], [_lt_caught_CXX_error=yes]) -AC_PROG_CXX -if test -n "$CXX" && ( test "X$CXX" != "Xno" && - ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || - (test "X$CXX" != "Xg++"))) ; then - AC_PROG_CXXCPP -else - _lt_caught_CXX_error=yes -fi -popdef([AC_MSG_ERROR]) -])# _LT_PROG_CXX - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([_LT_PROG_CXX], []) - - -# _LT_LANG_CXX_CONFIG([TAG]) -# -------------------------- -# Ensure that the configuration variables for a C++ compiler are suitably -# defined. These variables are subsequently used by _LT_CONFIG to write -# the compiler configuration to `libtool'. 
-m4_defun([_LT_LANG_CXX_CONFIG], -[AC_REQUIRE([_LT_PROG_CXX])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_EGREP])dnl - -AC_LANG_PUSH(C++) -_LT_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_TAGVAR(allow_undefined_flag, $1)= -_LT_TAGVAR(always_export_symbols, $1)=no -_LT_TAGVAR(archive_expsym_cmds, $1)= -_LT_TAGVAR(compiler_needs_object, $1)=no -_LT_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_TAGVAR(hardcode_direct, $1)=no -_LT_TAGVAR(hardcode_direct_absolute, $1)=no -_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= -_LT_TAGVAR(hardcode_libdir_separator, $1)= -_LT_TAGVAR(hardcode_minus_L, $1)=no -_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported -_LT_TAGVAR(hardcode_automatic, $1)=no -_LT_TAGVAR(inherit_rpath, $1)=no -_LT_TAGVAR(module_cmds, $1)= -_LT_TAGVAR(module_expsym_cmds, $1)= -_LT_TAGVAR(link_all_deplibs, $1)=unknown -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_TAGVAR(no_undefined_flag, $1)= -_LT_TAGVAR(whole_archive_flag_spec, $1)= -_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no - -# Source file extension for C++ test sources. -ac_ext=cpp - -# Object file extension for compiled C++ test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# No sense in running all these tests if we already determined that -# the CXX compiler isn't working. Some variables (like enable_shared) -# are currently assumed to apply to all compilers on this platform, -# and will be corrupted by setting them based on a non-working compiler. -if test "$_lt_caught_CXX_error" != yes; then - # Code to be used in simple compile tests - lt_simple_compile_test_code="int some_variable = 0;" - - # Code to be used in simple link tests - lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' - - # ltmain only uses $CC for tagged configurations so make sure $CC is set. 
- _LT_TAG_COMPILER - - # save warnings/boilerplate of simple test code - _LT_COMPILER_BOILERPLATE - _LT_LINKER_BOILERPLATE - - # Allow CC to be a program name with arguments. - lt_save_CC=$CC - lt_save_LD=$LD - lt_save_GCC=$GCC - GCC=$GXX - lt_save_with_gnu_ld=$with_gnu_ld - lt_save_path_LD=$lt_cv_path_LD - if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then - lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx - else - $as_unset lt_cv_prog_gnu_ld - fi - if test -n "${lt_cv_path_LDCXX+set}"; then - lt_cv_path_LD=$lt_cv_path_LDCXX - else - $as_unset lt_cv_path_LD - fi - test -z "${LDCXX+set}" || LD=$LDCXX - CC=${CXX-"c++"} - compiler=$CC - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) - - if test -n "$compiler"; then - # We don't want -fno-exception when compiling C++ code, so set the - # no_builtin_flag separately - if test "$GXX" = yes; then - _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' - else - _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= - fi - - if test "$GXX" = yes; then - # Set up default GNU C++ configuration - - LT_PATH_LD - - # Check if GNU C++ uses GNU ld as the underlying linker, since the - # archiving commands below assume that GNU ld is being used. - if test "$with_gnu_ld" = yes; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - - # If archive_cmds runs LD, not CC, wlarc should be empty - # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to - # investigate it a little bit more. (MM) - wlarc='${wl}' - - # ancient GNU ld didn't support --whole-archive et. al. 
- if eval "`$CC -print-prog-name=ld` --help 2>&1" | - $GREP 'no-whole-archive' > /dev/null; then - _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - else - _LT_TAGVAR(whole_archive_flag_spec, $1)= - fi - else - with_gnu_ld=no - wlarc= - - # A generic and very simple default shared library creation - # command for GNU C++ for the case where it uses the native - # linker, instead of GNU ld. If possible, this setting should - # overridden to take advantage of the native linker features on - # the platform it is being used on. - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - fi - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' - - else - GXX=no - with_gnu_ld=no - wlarc= - fi - - # PORTME: fill in a description of your system's C++ link characteristics - AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) - _LT_TAGVAR(ld_shlibs, $1)=yes - case $host_os in - aix3*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - aix[[4-9]]*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag="" - else - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. 
- case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) - for ld_flag in $LDFLAGS; do - case $ld_flag in - *-brtl*) - aix_use_runtimelinking=yes - break - ;; - esac - done - ;; - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - _LT_TAGVAR(archive_cmds, $1)='' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' - - if test "$GXX" = yes; then - case $host_os in aix4.[[012]]|aix4.[[012]].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && - strings "$collect2name" | $GREP resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - _LT_TAGVAR(hardcode_direct, $1)=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)= - fi - esac - shared_flag='-shared' - if test "$aix_use_runtimelinking" = yes; then - shared_flag="$shared_flag "'${wl}-G' - fi - else - # not using gcc - if test "$host_cpu" = ia64; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. 
The following line is correct: - shared_flag='-G' - else - if test "$aix_use_runtimelinking" = yes; then - shared_flag='${wl}-G' - else - shared_flag='${wl}-bM:SRE' - fi - fi - fi - - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to - # export. - _LT_TAGVAR(always_export_symbols, $1)=yes - if test "$aix_use_runtimelinking" = yes; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - _LT_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an empty - # executable. - _LT_SYS_MODULE_PATH_AIX - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then $ECHO "X${wl}${allow_undefined_flag}" | $Xsed; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else - if test "$host_cpu" = ia64; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' - _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" - _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an - # empty executable. - _LT_SYS_MODULE_PATH_AIX - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. 
- _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' - _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' - # Exported symbols can be pulled into shared objects from archives - _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' - _LT_TAGVAR(archive_cmds_need_lc, $1)=yes - # This is similar to how AIX traditionally builds its shared - # libraries. - _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' - fi - fi - ;; - - beos*) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - chorus*) - case $cc_basename in - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - cygwin* | mingw* | pw32* | cegcc*) - # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, - # as there is no search path for DLLs. - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=no - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file (1st line - # is EXPORTS), use it as is; otherwise, prepend... 
- _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - darwin* | rhapsody*) - _LT_DARWIN_LINKER_FEATURES($1) - ;; - - dgux*) - case $cc_basename in - ec++*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - ghcx*) - # Green Hills C++ Compiler - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - freebsd[[12]]*) - # C++ shared libraries reported to be fairly broken before - # switch to ELF - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - freebsd-elf*) - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - ;; - - freebsd* | dragonfly*) - # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF - # conventions - _LT_TAGVAR(ld_shlibs, $1)=yes - ;; - - gnu*) - ;; - - hpux9*) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - # but as the default - # location of the library. 
- - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - aCC*) - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' - ;; - *) - if test "$GXX" = yes; then - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - - hpux10*|hpux11*) - if test $with_gnu_ld = no; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - case $host_cpu in - hppa*64*|ia64*) - ;; - *) - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - ;; - esac - fi - case $host_cpu in - hppa*64*|ia64*) - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - *) - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - 
# but as the default - # location of the library. - ;; - esac - - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - aCC*) - case $host_cpu in - hppa*64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' - ;; - *) - if test "$GXX" = yes; then - if test $with_gnu_ld = no; then - case $host_cpu in - hppa*64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - fi - else - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - - interix[[3-9]]*) - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - irix5* | irix6*) - case $cc_basename in - CC*) - # SGI C++ - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' - - # Archives containing C++ object files must be created using - # "CC -ar", where "CC" is the IRIX C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. 
- _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' - ;; - *) - if test "$GXX" = yes; then - if test "$with_gnu_ld" = no; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` -o $lib' - fi - fi - _LT_TAGVAR(link_all_deplibs, $1)=yes - ;; - esac - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(inherit_rpath, $1)=yes - ;; - - linux* | k*bsd*-gnu) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - - # Archives containing C++ object files must be created using - # "CC -Bstatic", where "CC" is the KAI C++ compiler. - _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' - ;; - icpc* | ecpc* ) - # Intel C++ - with_gnu_ld=yes - # version 8.0 and above of icpc choke on multiply defined symbols - # if we add $predep_objects and $postdep_objects, however 7.1 and - # earlier do not add the objects themselves. 
- case `$CC -V 2>&1` in - *"Version 7."*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - ;; - *) # Version 8.0 or newer - tmp_idyn= - case $host_cpu in - ia64*) tmp_idyn=' -i_dynamic';; - esac - _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - ;; - esac - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' - ;; - pgCC* | pgcpp*) - # Portland Group C++ compiler - case `$CC -V` in - *pgCC\ [[1-5]]* | *pgcpp\ [[1-5]]*) - _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ - compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' - _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ - $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ - $RANLIB $oldlib' - _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ - $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname 
${wl}$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ - $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' - ;; - *) # Version 6 will use weak symbols - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' - ;; - esac - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` ${wl}--no-whole-archive' - ;; - cxx*) - # Compaq C++ - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' - - runpath_var=LD_RUN_PATH - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' - ;; - xl*) - # IBM XL 8.0 on PPC, with GNU ld - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' - _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' - fi - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' - _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; $ECHO \"$new_convenience\"` 
${wl}--no-whole-archive' - _LT_TAGVAR(compiler_needs_object, $1)=yes - - # Not sure whether something based on - # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 - # would be better. - output_verbose_link_cmd='echo' - - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' - ;; - esac - ;; - esac - ;; - - lynxos*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - m88k*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - mvs*) - case $cc_basename in - cxx*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - netbsd*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' - wlarc= - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - fi - # Workaround some broken pre-1.5 toolchains - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' - ;; - - *nto* | *qnx*) - _LT_TAGVAR(ld_shlibs, $1)=yes - ;; - - openbsd2*) - # C++ shared libraries are fairly broken - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - openbsd*) - if test -f /usr/libexec/ld.so; then - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - 
if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' - _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' - fi - output_verbose_link_cmd=echo - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - osf3* | osf4* | osf5*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - # Archives containing C++ object files must be created using - # the KAI C++ compiler. 
- case $host in - osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; - *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; - esac - ;; - RCC*) - # Rational C++ 2.4.1 - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - cxx*) - case $host in - osf3*) - _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && $ECHO "X${wl}-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - ;; - *) - _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ - echo "-hidden">> $lib.exp~ - $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "X-set_version $verstring" | $Xsed` -update_registry ${output_objdir}/so_locations -o $lib~ - $RM $lib.exp' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - ;; - esac - - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`$ECHO "X$templist" | $Xsed -e "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; $ECHO "X$list" | $Xsed' - ;; - *) - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' - case $host in - osf3*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "X${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && $ECHO "${wl}-set_version ${wl}$verstring" | $Xsed` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - ;; - esac - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' - - else - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - - psos*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - lcc*) - # Lucid - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - solaris*) - case $cc_basename in - CC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_TAGVAR(archive_cmds_need_lc,$1)=yes - _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' - _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands `-z linker_flag'. - # Supported since Solaris 2.6 (maybe 2.5.1?) - _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' - ;; - esac - _LT_TAGVAR(link_all_deplibs, $1)=yes - - output_verbose_link_cmd='echo' - - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. 
- _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' - ;; - gcx*) - # Green Hills C++ Compiler - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - - # The C++ compiler must be used to create the archive. - _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' - ;; - *) - # GNU C++ compiler with Solaris linker - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' - if $CC --version | $GREP -v '^2\.7' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' - else - # g++ 2.7 appears to require `-G' NOT `-shared' on this - # platform. - _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP "\-L"' - fi - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' - ;; - esac - fi - ;; - esac - ;; - - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) - _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var='LD_RUN_PATH' - - case $cc_basename in - CC*) - _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; - - sysv5* | sco3.2v5* | sco5v6*) - # Note: We can NOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. 
- _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' - _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' - runpath_var='LD_RUN_PATH' - - case $cc_basename in - CC*) - _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; - - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - vxworks*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - - AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) - test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no - - _LT_TAGVAR(GCC, $1)="$GXX" - _LT_TAGVAR(LD, $1)="$LD" - - ## CAVEAT EMPTOR: - ## There is no encapsulation within the following macros, do not change - ## the running order or otherwise move them around unless you know exactly - ## what you are doing... 
- _LT_SYS_HIDDEN_LIBDEPS($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_SYS_DYNAMIC_LINKER($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) - fi # test -n "$compiler" - - CC=$lt_save_CC - LDCXX=$LD - LD=$lt_save_LD - GCC=$lt_save_GCC - with_gnu_ld=$lt_save_with_gnu_ld - lt_cv_path_LDCXX=$lt_cv_path_LD - lt_cv_path_LD=$lt_save_path_LD - lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld - lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld -fi # test "$_lt_caught_CXX_error" != yes - -AC_LANG_POP -])# _LT_LANG_CXX_CONFIG - - -# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) -# --------------------------------- -# Figure out "hidden" library dependencies from verbose -# compiler output when linking a shared library. -# Parse the compiler output and extract the necessary -# objects, libraries and library flags. -m4_defun([_LT_SYS_HIDDEN_LIBDEPS], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -# Dependencies to place before and after the object being linked: -_LT_TAGVAR(predep_objects, $1)= -_LT_TAGVAR(postdep_objects, $1)= -_LT_TAGVAR(predeps, $1)= -_LT_TAGVAR(postdeps, $1)= -_LT_TAGVAR(compiler_lib_search_path, $1)= - -dnl we can't use the lt_simple_compile_test_code here, -dnl because it contains code intended for an executable, -dnl not a library. It's possible we should let each -dnl tag define a new lt_????_link_test_code variable, -dnl but it's only used here... 
-m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF -int a; -void foo (void) { a = 0; } -_LT_EOF -], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF -class Foo -{ -public: - Foo (void) { a = 0; } -private: - int a; -}; -_LT_EOF -], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF - subroutine foo - implicit none - integer*4 a - a=0 - return - end -_LT_EOF -], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF - subroutine foo - implicit none - integer a - a=0 - return - end -_LT_EOF -], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF -public class foo { - private int a; - public void bar (void) { - a = 0; - } -}; -_LT_EOF -]) -dnl Parse the compiler output and extract the necessary -dnl objects, libraries and library flags. -if AC_TRY_EVAL(ac_compile); then - # Parse the compiler output and extract the necessary - # objects, libraries and library flags. - - # Sentinel used to keep track of whether or not we are before - # the conftest object file. - pre_test_object_deps_done=no - - for p in `eval "$output_verbose_link_cmd"`; do - case $p in - - -L* | -R* | -l*) - # Some compilers place space between "-{L,R}" and the path. - # Remove the space. - if test $p = "-L" || - test $p = "-R"; then - prev=$p - continue - else - prev= - fi - - if test "$pre_test_object_deps_done" = no; then - case $p in - -L* | -R*) - # Internal compiler library paths should come after those - # provided the user. The postdeps already come after the - # user supplied libs so there is no need to process them. - if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then - _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" - else - _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" - fi - ;; - # The "-l" case would never come before the object being - # linked, so don't bother handling this case. 
- esac - else - if test -z "$_LT_TAGVAR(postdeps, $1)"; then - _LT_TAGVAR(postdeps, $1)="${prev}${p}" - else - _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" - fi - fi - ;; - - *.$objext) - # This assumes that the test object file only shows up - # once in the compiler output. - if test "$p" = "conftest.$objext"; then - pre_test_object_deps_done=yes - continue - fi - - if test "$pre_test_object_deps_done" = no; then - if test -z "$_LT_TAGVAR(predep_objects, $1)"; then - _LT_TAGVAR(predep_objects, $1)="$p" - else - _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" - fi - else - if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then - _LT_TAGVAR(postdep_objects, $1)="$p" - else - _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" - fi - fi - ;; - - *) ;; # Ignore the rest. - - esac - done - - # Clean up. - rm -f a.out a.exe -else - echo "libtool.m4: error: problem compiling $1 test program" -fi - -$RM -f confest.$objext - -# PORTME: override above test on systems where it is broken -m4_if([$1], [CXX], -[case $host_os in -interix[[3-9]]*) - # Interix 3.5 installs completely hosed .la files for C++, so rather than - # hack all around it, let's just trust "g++" to DTRT. - _LT_TAGVAR(predep_objects,$1)= - _LT_TAGVAR(postdep_objects,$1)= - _LT_TAGVAR(postdeps,$1)= - ;; - -linux*) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - - # The more standards-conforming stlport4 library is - # incompatible with the Cstd library. Avoid specifying - # it if it's in CXXFLAGS. Ignore libCrun as - # -library=stlport4 depends on it. - case " $CXX $CXXFLAGS " in - *" -library=stlport4 "*) - solaris_use_stlport4=yes - ;; - esac - - if test "$solaris_use_stlport4" != yes; then - _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' - fi - ;; - esac - ;; - -solaris*) - case $cc_basename in - CC*) - # The more standards-conforming stlport4 library is - # incompatible with the Cstd library. 
Avoid specifying - # it if it's in CXXFLAGS. Ignore libCrun as - # -library=stlport4 depends on it. - case " $CXX $CXXFLAGS " in - *" -library=stlport4 "*) - solaris_use_stlport4=yes - ;; - esac - - # Adding this requires a known-good setup of shared libraries for - # Sun compiler versions before 5.6, else PIC objects from an old - # archive will be linked into the output, leading to subtle bugs. - if test "$solaris_use_stlport4" != yes; then - _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' - fi - ;; - esac - ;; -esac -]) - -case " $_LT_TAGVAR(postdeps, $1) " in -*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; -esac - _LT_TAGVAR(compiler_lib_search_dirs, $1)= -if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then - _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` -fi -_LT_TAGDECL([], [compiler_lib_search_dirs], [1], - [The directories searched by this compiler when creating a shared library]) -_LT_TAGDECL([], [predep_objects], [1], - [Dependencies to place before and after the objects being linked to - create a shared library]) -_LT_TAGDECL([], [postdep_objects], [1]) -_LT_TAGDECL([], [predeps], [1]) -_LT_TAGDECL([], [postdeps], [1]) -_LT_TAGDECL([], [compiler_lib_search_path], [1], - [The library search path used internally by the compiler when linking - a shared library]) -])# _LT_SYS_HIDDEN_LIBDEPS - - -# _LT_PROG_F77 -# ------------ -# Since AC_PROG_F77 is broken, in that it returns the empty string -# if there is no fortran compiler, we have our own version here. 
-m4_defun([_LT_PROG_F77], -[ -pushdef([AC_MSG_ERROR], [_lt_disable_F77=yes]) -AC_PROG_F77 -if test -z "$F77" || test "X$F77" = "Xno"; then - _lt_disable_F77=yes -fi -popdef([AC_MSG_ERROR]) -])# _LT_PROG_F77 - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([_LT_PROG_F77], []) - - -# _LT_LANG_F77_CONFIG([TAG]) -# -------------------------- -# Ensure that the configuration variables for a Fortran 77 compiler are -# suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to `libtool'. -m4_defun([_LT_LANG_F77_CONFIG], -[AC_REQUIRE([_LT_PROG_F77])dnl -AC_LANG_PUSH(Fortran 77) - -_LT_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_TAGVAR(allow_undefined_flag, $1)= -_LT_TAGVAR(always_export_symbols, $1)=no -_LT_TAGVAR(archive_expsym_cmds, $1)= -_LT_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_TAGVAR(hardcode_direct, $1)=no -_LT_TAGVAR(hardcode_direct_absolute, $1)=no -_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= -_LT_TAGVAR(hardcode_libdir_separator, $1)= -_LT_TAGVAR(hardcode_minus_L, $1)=no -_LT_TAGVAR(hardcode_automatic, $1)=no -_LT_TAGVAR(inherit_rpath, $1)=no -_LT_TAGVAR(module_cmds, $1)= -_LT_TAGVAR(module_expsym_cmds, $1)= -_LT_TAGVAR(link_all_deplibs, $1)=unknown -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_TAGVAR(no_undefined_flag, $1)= -_LT_TAGVAR(whole_archive_flag_spec, $1)= -_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no - -# Source file extension for f77 test sources. -ac_ext=f - -# Object file extension for compiled f77 test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# No sense in running all these tests if we already determined that -# the F77 compiler isn't working. Some variables (like enable_shared) -# are currently assumed to apply to all compilers on this platform, -# and will be corrupted by setting them based on a non-working compiler. 
-if test "$_lt_disable_F77" != yes; then - # Code to be used in simple compile tests - lt_simple_compile_test_code="\ - subroutine t - return - end -" - - # Code to be used in simple link tests - lt_simple_link_test_code="\ - program t - end -" - - # ltmain only uses $CC for tagged configurations so make sure $CC is set. - _LT_TAG_COMPILER - - # save warnings/boilerplate of simple test code - _LT_COMPILER_BOILERPLATE - _LT_LINKER_BOILERPLATE - - # Allow CC to be a program name with arguments. - lt_save_CC="$CC" - lt_save_GCC=$GCC - CC=${F77-"f77"} - compiler=$CC - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) - GCC=$G77 - if test -n "$compiler"; then - AC_MSG_CHECKING([if libtool supports shared libraries]) - AC_MSG_RESULT([$can_build_shared]) - - AC_MSG_CHECKING([whether to build shared libraries]) - test "$can_build_shared" = "no" && enable_shared=no - - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. - case $host_os in - aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - aix[[4-9]]*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no - fi - ;; - esac - AC_MSG_RESULT([$enable_shared]) - - AC_MSG_CHECKING([whether to build static libraries]) - # Make sure either enable_shared or enable_static is yes. - test "$enable_shared" = yes || enable_static=yes - AC_MSG_RESULT([$enable_static]) - - _LT_TAGVAR(GCC, $1)="$G77" - _LT_TAGVAR(LD, $1)="$LD" - - ## CAVEAT EMPTOR: - ## There is no encapsulation within the following macros, do not change - ## the running order or otherwise move them around unless you know exactly - ## what you are doing... 
- _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_SYS_DYNAMIC_LINKER($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) - fi # test -n "$compiler" - - GCC=$lt_save_GCC - CC="$lt_save_CC" -fi # test "$_lt_disable_F77" != yes - -AC_LANG_POP -])# _LT_LANG_F77_CONFIG - - -# _LT_PROG_FC -# ----------- -# Since AC_PROG_FC is broken, in that it returns the empty string -# if there is no fortran compiler, we have our own version here. -m4_defun([_LT_PROG_FC], -[ -pushdef([AC_MSG_ERROR], [_lt_disable_FC=yes]) -AC_PROG_FC -if test -z "$FC" || test "X$FC" = "Xno"; then - _lt_disable_FC=yes -fi -popdef([AC_MSG_ERROR]) -])# _LT_PROG_FC - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([_LT_PROG_FC], []) - - -# _LT_LANG_FC_CONFIG([TAG]) -# ------------------------- -# Ensure that the configuration variables for a Fortran compiler are -# suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to `libtool'. -m4_defun([_LT_LANG_FC_CONFIG], -[AC_REQUIRE([_LT_PROG_FC])dnl -AC_LANG_PUSH(Fortran) - -_LT_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_TAGVAR(allow_undefined_flag, $1)= -_LT_TAGVAR(always_export_symbols, $1)=no -_LT_TAGVAR(archive_expsym_cmds, $1)= -_LT_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_TAGVAR(hardcode_direct, $1)=no -_LT_TAGVAR(hardcode_direct_absolute, $1)=no -_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)= -_LT_TAGVAR(hardcode_libdir_separator, $1)= -_LT_TAGVAR(hardcode_minus_L, $1)=no -_LT_TAGVAR(hardcode_automatic, $1)=no -_LT_TAGVAR(inherit_rpath, $1)=no -_LT_TAGVAR(module_cmds, $1)= -_LT_TAGVAR(module_expsym_cmds, $1)= -_LT_TAGVAR(link_all_deplibs, $1)=unknown -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_TAGVAR(no_undefined_flag, $1)= -_LT_TAGVAR(whole_archive_flag_spec, $1)= -_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no - -# Source file extension for fc test sources. 
-ac_ext=${ac_fc_srcext-f} - -# Object file extension for compiled fc test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# No sense in running all these tests if we already determined that -# the FC compiler isn't working. Some variables (like enable_shared) -# are currently assumed to apply to all compilers on this platform, -# and will be corrupted by setting them based on a non-working compiler. -if test "$_lt_disable_FC" != yes; then - # Code to be used in simple compile tests - lt_simple_compile_test_code="\ - subroutine t - return - end -" - - # Code to be used in simple link tests - lt_simple_link_test_code="\ - program t - end -" - - # ltmain only uses $CC for tagged configurations so make sure $CC is set. - _LT_TAG_COMPILER - - # save warnings/boilerplate of simple test code - _LT_COMPILER_BOILERPLATE - _LT_LINKER_BOILERPLATE - - # Allow CC to be a program name with arguments. - lt_save_CC="$CC" - lt_save_GCC=$GCC - CC=${FC-"f95"} - compiler=$CC - GCC=$ac_cv_fc_compiler_gnu - - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) - - if test -n "$compiler"; then - AC_MSG_CHECKING([if libtool supports shared libraries]) - AC_MSG_RESULT([$can_build_shared]) - - AC_MSG_CHECKING([whether to build shared libraries]) - test "$can_build_shared" = "no" && enable_shared=no - - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. - case $host_os in - aix3*) - test "$enable_shared" = yes && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - aix[[4-9]]*) - if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then - test "$enable_shared" = yes && enable_static=no - fi - ;; - esac - AC_MSG_RESULT([$enable_shared]) - - AC_MSG_CHECKING([whether to build static libraries]) - # Make sure either enable_shared or enable_static is yes. 
- test "$enable_shared" = yes || enable_static=yes - AC_MSG_RESULT([$enable_static]) - - _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" - _LT_TAGVAR(LD, $1)="$LD" - - ## CAVEAT EMPTOR: - ## There is no encapsulation within the following macros, do not change - ## the running order or otherwise move them around unless you know exactly - ## what you are doing... - _LT_SYS_HIDDEN_LIBDEPS($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_SYS_DYNAMIC_LINKER($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) - fi # test -n "$compiler" - - GCC=$lt_save_GCC - CC="$lt_save_CC" -fi # test "$_lt_disable_FC" != yes - -AC_LANG_POP -])# _LT_LANG_FC_CONFIG - - -# _LT_LANG_GCJ_CONFIG([TAG]) -# -------------------------- -# Ensure that the configuration variables for the GNU Java Compiler compiler -# are suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to `libtool'. -m4_defun([_LT_LANG_GCJ_CONFIG], -[AC_REQUIRE([LT_PROG_GCJ])dnl -AC_LANG_SAVE - -# Source file extension for Java test sources. -ac_ext=java - -# Object file extension for compiled Java test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="class foo {}" - -# Code to be used in simple link tests -lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' - -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_TAG_COMPILER - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -# Allow CC to be a program name with arguments. -lt_save_CC="$CC" -lt_save_GCC=$GCC -GCC=yes -CC=${GCJ-"gcj"} -compiler=$CC -_LT_TAGVAR(compiler, $1)=$CC -_LT_TAGVAR(LD, $1)="$LD" -_LT_CC_BASENAME([$compiler]) - -# GCJ did not exist at the time GCC didn't implicitly link libc in. 
-_LT_TAGVAR(archive_cmds_need_lc, $1)=no - -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds - -## CAVEAT EMPTOR: -## There is no encapsulation within the following macros, do not change -## the running order or otherwise move them around unless you know exactly -## what you are doing... -if test -n "$compiler"; then - _LT_COMPILER_NO_RTTI($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) -fi - -AC_LANG_RESTORE - -GCC=$lt_save_GCC -CC="$lt_save_CC" -])# _LT_LANG_GCJ_CONFIG - - -# _LT_LANG_RC_CONFIG([TAG]) -# ------------------------- -# Ensure that the configuration variables for the Windows resource compiler -# are suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to `libtool'. -m4_defun([_LT_LANG_RC_CONFIG], -[AC_REQUIRE([LT_PROG_RC])dnl -AC_LANG_SAVE - -# Source file extension for RC test sources. -ac_ext=rc - -# Object file extension for compiled RC test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' - -# Code to be used in simple link tests -lt_simple_link_test_code="$lt_simple_compile_test_code" - -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_TAG_COMPILER - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -# Allow CC to be a program name with arguments. 
-lt_save_CC="$CC" -lt_save_GCC=$GCC -GCC= -CC=${RC-"windres"} -compiler=$CC -_LT_TAGVAR(compiler, $1)=$CC -_LT_CC_BASENAME([$compiler]) -_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes - -if test -n "$compiler"; then - : - _LT_CONFIG($1) -fi - -GCC=$lt_save_GCC -AC_LANG_RESTORE -CC="$lt_save_CC" -])# _LT_LANG_RC_CONFIG - - -# LT_PROG_GCJ -# ----------- -AC_DEFUN([LT_PROG_GCJ], -[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], - [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], - [AC_CHECK_TOOL(GCJ, gcj,) - test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" - AC_SUBST(GCJFLAGS)])])[]dnl -]) - -# Old name: -AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([LT_AC_PROG_GCJ], []) - - -# LT_PROG_RC -# ---------- -AC_DEFUN([LT_PROG_RC], -[AC_CHECK_TOOL(RC, windres,) -]) - -# Old name: -AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([LT_AC_PROG_RC], []) - - -# _LT_DECL_EGREP -# -------------- -# If we don't have a new enough Autoconf to choose the best grep -# available, choose the one first in the user's PATH. -m4_defun([_LT_DECL_EGREP], -[AC_REQUIRE([AC_PROG_EGREP])dnl -AC_REQUIRE([AC_PROG_FGREP])dnl -test -z "$GREP" && GREP=grep -_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) -_LT_DECL([], [EGREP], [1], [An ERE matcher]) -_LT_DECL([], [FGREP], [1], [A literal string matcher]) -dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too -AC_SUBST([GREP]) -]) - - -# _LT_DECL_OBJDUMP -# -------------- -# If we don't have a new enough Autoconf to choose the best objdump -# available, choose the one first in the user's PATH. -m4_defun([_LT_DECL_OBJDUMP], -[AC_CHECK_TOOL(OBJDUMP, objdump, false) -test -z "$OBJDUMP" && OBJDUMP=objdump -_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) -AC_SUBST([OBJDUMP]) -]) - - -# _LT_DECL_SED -# ------------ -# Check for a fully-functional sed program, that truncates -# as few characters as possible. Prefer GNU sed if found. 
-m4_defun([_LT_DECL_SED], -[AC_PROG_SED -test -z "$SED" && SED=sed -Xsed="$SED -e 1s/^X//" -_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) -_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], - [Sed that helps us avoid accidentally triggering echo(1) options like -n]) -])# _LT_DECL_SED - -m4_ifndef([AC_PROG_SED], [ -############################################################ -# NOTE: This macro has been submitted for inclusion into # -# GNU Autoconf as AC_PROG_SED. When it is available in # -# a released version of Autoconf we should remove this # -# macro and use it instead. # -############################################################ - -m4_defun([AC_PROG_SED], -[AC_MSG_CHECKING([for a sed that does not truncate output]) -AC_CACHE_VAL(lt_cv_path_SED, -[# Loop through the user's path and test for sed and gsed. -# Then use that list of sed's as ones to test for truncation. -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for lt_ac_prog in sed gsed; do - for ac_exec_ext in '' $ac_executable_extensions; do - if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then - lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" - fi - done - done -done -IFS=$as_save_IFS -lt_ac_max=0 -lt_ac_count=0 -# Add /usr/xpg4/bin/sed as it is typically found on Solaris -# along with /bin/sed that truncates output. -for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do - test ! -f $lt_ac_sed && continue - cat /dev/null > conftest.in - lt_ac_count=0 - echo $ECHO_N "0123456789$ECHO_C" >conftest.in - # Check for GNU sed and select it if it is found. 
- if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then - lt_cv_path_SED=$lt_ac_sed - break - fi - while true; do - cat conftest.in conftest.in >conftest.tmp - mv conftest.tmp conftest.in - cp conftest.in conftest.nl - echo >>conftest.nl - $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break - cmp -s conftest.out conftest.nl || break - # 10000 chars as input seems more than enough - test $lt_ac_count -gt 10 && break - lt_ac_count=`expr $lt_ac_count + 1` - if test $lt_ac_count -gt $lt_ac_max; then - lt_ac_max=$lt_ac_count - lt_cv_path_SED=$lt_ac_sed - fi - done -done -]) -SED=$lt_cv_path_SED -AC_SUBST([SED]) -AC_MSG_RESULT([$SED]) -])#AC_PROG_SED -])#m4_ifndef - -# Old name: -AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([LT_AC_PROG_SED], []) - - -# _LT_CHECK_SHELL_FEATURES -# ------------------------ -# Find out whether the shell is Bourne or XSI compatible, -# or has some other useful features. -m4_defun([_LT_CHECK_SHELL_FEATURES], -[AC_MSG_CHECKING([whether the shell understands some XSI constructs]) -# Try some XSI features -xsi_shell=no -( _lt_dummy="a/b/c" - test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ - = c,a/b,, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -AC_MSG_RESULT([$xsi_shell]) -_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) - -AC_MSG_CHECKING([whether the shell understands "+="]) -lt_shell_append=no -( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ - >/dev/null 2>&1 \ - && lt_shell_append=yes -AC_MSG_RESULT([$lt_shell_append]) -_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) - -if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then - lt_unset=unset -else - lt_unset=false -fi -_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl - -# test EBCDIC or ASCII -case `echo X|tr X '\101'` in - A) # ASCII based system - # \n is not 
interpreted correctly by Solaris 8 /usr/ucb/tr - lt_SP2NL='tr \040 \012' - lt_NL2SP='tr \015\012 \040\040' - ;; - *) # EBCDIC based system - lt_SP2NL='tr \100 \n' - lt_NL2SP='tr \r\n \100\100' - ;; -esac -_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl -_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl -])# _LT_CHECK_SHELL_FEATURES - - -# _LT_PROG_XSI_SHELLFNS -# --------------------- -# Bourne and XSI compatible variants of some useful shell functions. -m4_defun([_LT_PROG_XSI_SHELLFNS], -[case $xsi_shell in - yes) - cat << \_LT_EOF >> "$cfgfile" - -# func_dirname file append nondir_replacement -# Compute the dirname of FILE. If nonempty, add APPEND to the result, -# otherwise set result to NONDIR_REPLACEMENT. -func_dirname () -{ - case ${1} in - */*) func_dirname_result="${1%/*}${2}" ;; - * ) func_dirname_result="${3}" ;; - esac -} - -# func_basename file -func_basename () -{ - func_basename_result="${1##*/}" -} - -# func_dirname_and_basename file append nondir_replacement -# perform func_basename and func_dirname in a single function -# call: -# dirname: Compute the dirname of FILE. If nonempty, -# add APPEND to the result, otherwise set result -# to NONDIR_REPLACEMENT. -# value returned in "$func_dirname_result" -# basename: Compute filename of FILE. -# value retuned in "$func_basename_result" -# Implementation must be kept synchronized with func_dirname -# and func_basename. For efficiency, we do not delegate to -# those functions but instead duplicate the functionality here. -func_dirname_and_basename () -{ - case ${1} in - */*) func_dirname_result="${1%/*}${2}" ;; - * ) func_dirname_result="${3}" ;; - esac - func_basename_result="${1##*/}" -} - -# func_stripname prefix suffix name -# strip PREFIX and SUFFIX off of NAME. -# PREFIX and SUFFIX must not contain globbing or regex special -# characters, hashes, percent signs, but SUFFIX may contain a leading -# dot (in which case that matches only a dot). 
-func_stripname () -{ - # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are - # positional parameters, so assign one to ordinary parameter first. - func_stripname_result=${3} - func_stripname_result=${func_stripname_result#"${1}"} - func_stripname_result=${func_stripname_result%"${2}"} -} - -# func_opt_split -func_opt_split () -{ - func_opt_split_opt=${1%%=*} - func_opt_split_arg=${1#*=} -} - -# func_lo2o object -func_lo2o () -{ - case ${1} in - *.lo) func_lo2o_result=${1%.lo}.${objext} ;; - *) func_lo2o_result=${1} ;; - esac -} - -# func_xform libobj-or-source -func_xform () -{ - func_xform_result=${1%.*}.lo -} - -# func_arith arithmetic-term... -func_arith () -{ - func_arith_result=$(( $[*] )) -} - -# func_len string -# STRING may not start with a hyphen. -func_len () -{ - func_len_result=${#1} -} - -_LT_EOF - ;; - *) # Bourne compatible functions. - cat << \_LT_EOF >> "$cfgfile" - -# func_dirname file append nondir_replacement -# Compute the dirname of FILE. If nonempty, add APPEND to the result, -# otherwise set result to NONDIR_REPLACEMENT. -func_dirname () -{ - # Extract subdirectory from the argument. - func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"` - if test "X$func_dirname_result" = "X${1}"; then - func_dirname_result="${3}" - else - func_dirname_result="$func_dirname_result${2}" - fi -} - -# func_basename file -func_basename () -{ - func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"` -} - -dnl func_dirname_and_basename -dnl A portable version of this function is already defined in general.m4sh -dnl so there is no need for it here. - -# func_stripname prefix suffix name -# strip PREFIX and SUFFIX off of NAME. -# PREFIX and SUFFIX must not contain globbing or regex special -# characters, hashes, percent signs, but SUFFIX may contain a leading -# dot (in which case that matches only a dot). 
-# func_strip_suffix prefix name -func_stripname () -{ - case ${2} in - .*) func_stripname_result=`$ECHO "X${3}" \ - | $Xsed -e "s%^${1}%%" -e "s%\\\\${2}\$%%"`;; - *) func_stripname_result=`$ECHO "X${3}" \ - | $Xsed -e "s%^${1}%%" -e "s%${2}\$%%"`;; - esac -} - -# sed scripts: -my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q' -my_sed_long_arg='1s/^-[[^=]]*=//' - -# func_opt_split -func_opt_split () -{ - func_opt_split_opt=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_opt"` - func_opt_split_arg=`$ECHO "X${1}" | $Xsed -e "$my_sed_long_arg"` -} - -# func_lo2o object -func_lo2o () -{ - func_lo2o_result=`$ECHO "X${1}" | $Xsed -e "$lo2o"` -} - -# func_xform libobj-or-source -func_xform () -{ - func_xform_result=`$ECHO "X${1}" | $Xsed -e 's/\.[[^.]]*$/.lo/'` -} - -# func_arith arithmetic-term... -func_arith () -{ - func_arith_result=`expr "$[@]"` -} - -# func_len string -# STRING may not start with a hyphen. -func_len () -{ - func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len` -} - -_LT_EOF -esac - -case $lt_shell_append in - yes) - cat << \_LT_EOF >> "$cfgfile" - -# func_append var value -# Append VALUE to the end of shell variable VAR. -func_append () -{ - eval "$[1]+=\$[2]" -} -_LT_EOF - ;; - *) - cat << \_LT_EOF >> "$cfgfile" - -# func_append var value -# Append VALUE to the end of shell variable VAR. -func_append () -{ - eval "$[1]=\$$[1]\$[2]" -} - -_LT_EOF - ;; - esac -]) diff --git a/burstioInterfaces/testing/tests/cpp/m4/ltoptions.m4 b/burstioInterfaces/testing/tests/cpp/m4/ltoptions.m4 deleted file mode 100644 index 34151a3ba..000000000 --- a/burstioInterfaces/testing/tests/cpp/m4/ltoptions.m4 +++ /dev/null @@ -1,368 +0,0 @@ -# Helper functions for option handling. -*- Autoconf -*- -# -# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. -# Written by Gary V. 
Vaughan, 2004 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# serial 6 ltoptions.m4 - -# This is to help aclocal find these macros, as it can't see m4_define. -AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) - - -# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) -# ------------------------------------------ -m4_define([_LT_MANGLE_OPTION], -[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) - - -# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) -# --------------------------------------- -# Set option OPTION-NAME for macro MACRO-NAME, and if there is a -# matching handler defined, dispatch to it. Other OPTION-NAMEs are -# saved as a flag. -m4_define([_LT_SET_OPTION], -[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl -m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), - _LT_MANGLE_DEFUN([$1], [$2]), - [m4_warning([Unknown $1 option `$2'])])[]dnl -]) - - -# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) -# ------------------------------------------------------------ -# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. -m4_define([_LT_IF_OPTION], -[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) - - -# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) -# ------------------------------------------------------- -# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME -# are set. -m4_define([_LT_UNLESS_OPTIONS], -[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), - [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), - [m4_define([$0_found])])])[]dnl -m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 -])[]dnl -]) - - -# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) -# ---------------------------------------- -# OPTION-LIST is a space-separated list of Libtool options associated -# with MACRO-NAME. 
If any OPTION has a matching handler declared with -# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about -# the unknown option and exit. -m4_defun([_LT_SET_OPTIONS], -[# Set options -m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), - [_LT_SET_OPTION([$1], _LT_Option)]) - -m4_if([$1],[LT_INIT],[ - dnl - dnl Simply set some default values (i.e off) if boolean options were not - dnl specified: - _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no - ]) - _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no - ]) - dnl - dnl If no reference was made to various pairs of opposing options, then - dnl we run the default mode handler for the pair. For example, if neither - dnl `shared' nor `disable-shared' was passed, we enable building of shared - dnl archives by default: - _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) - _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) - _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) - _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], - [_LT_ENABLE_FAST_INSTALL]) - ]) -])# _LT_SET_OPTIONS - - -## --------------------------------- ## -## Macros to handle LT_INIT options. 
## -## --------------------------------- ## - -# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) -# ----------------------------------------- -m4_define([_LT_MANGLE_DEFUN], -[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) - - -# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) -# ----------------------------------------------- -m4_define([LT_OPTION_DEFINE], -[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl -])# LT_OPTION_DEFINE - - -# dlopen -# ------ -LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes -]) - -AU_DEFUN([AC_LIBTOOL_DLOPEN], -[_LT_SET_OPTION([LT_INIT], [dlopen]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you -put the `dlopen' option into LT_INIT's first parameter.]) -]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) - - -# win32-dll -# --------- -# Declare package support for building win32 dll's. -LT_OPTION_DEFINE([LT_INIT], [win32-dll], -[enable_win32_dll=yes - -case $host in -*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-cegcc*) - AC_CHECK_TOOL(AS, as, false) - AC_CHECK_TOOL(DLLTOOL, dlltool, false) - AC_CHECK_TOOL(OBJDUMP, objdump, false) - ;; -esac - -test -z "$AS" && AS=as -_LT_DECL([], [AS], [0], [Assembler program])dnl - -test -z "$DLLTOOL" && DLLTOOL=dlltool -_LT_DECL([], [DLLTOOL], [0], [DLL creation program])dnl - -test -z "$OBJDUMP" && OBJDUMP=objdump -_LT_DECL([], [OBJDUMP], [0], [Object dumper program])dnl -])# win32-dll - -AU_DEFUN([AC_LIBTOOL_WIN32_DLL], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -_LT_SET_OPTION([LT_INIT], [win32-dll]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you -put the `win32-dll' option into LT_INIT's first parameter.]) -]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) - - -# _LT_ENABLE_SHARED([DEFAULT]) -# ---------------------------- -# implement the --enable-shared flag, and supports the `shared' and -# `disable-shared' LT_INIT 
options. -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. -m4_define([_LT_ENABLE_SHARED], -[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl -AC_ARG_ENABLE([shared], - [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], - [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_shared=yes ;; - no) enable_shared=no ;; - *) - enable_shared=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_shared=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) - - _LT_DECL([build_libtool_libs], [enable_shared], [0], - [Whether or not to build shared libraries]) -])# _LT_ENABLE_SHARED - -LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) -LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) - -# Old names: -AC_DEFUN([AC_ENABLE_SHARED], -[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) -]) - -AC_DEFUN([AC_DISABLE_SHARED], -[_LT_SET_OPTION([LT_INIT], [disable-shared]) -]) - -AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) -AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AM_ENABLE_SHARED], []) -dnl AC_DEFUN([AM_DISABLE_SHARED], []) - - - -# _LT_ENABLE_STATIC([DEFAULT]) -# ---------------------------- -# implement the --enable-static flag, and support the `static' and -# `disable-static' LT_INIT options. -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
-m4_define([_LT_ENABLE_STATIC], -[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl -AC_ARG_ENABLE([static], - [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], - [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_static=yes ;; - no) enable_static=no ;; - *) - enable_static=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_static=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_static=]_LT_ENABLE_STATIC_DEFAULT) - - _LT_DECL([build_old_libs], [enable_static], [0], - [Whether or not to build static libraries]) -])# _LT_ENABLE_STATIC - -LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) -LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) - -# Old names: -AC_DEFUN([AC_ENABLE_STATIC], -[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) -]) - -AC_DEFUN([AC_DISABLE_STATIC], -[_LT_SET_OPTION([LT_INIT], [disable-static]) -]) - -AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) -AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AM_ENABLE_STATIC], []) -dnl AC_DEFUN([AM_DISABLE_STATIC], []) - - - -# _LT_ENABLE_FAST_INSTALL([DEFAULT]) -# ---------------------------------- -# implement the --enable-fast-install flag, and support the `fast-install' -# and `disable-fast-install' LT_INIT options. -# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. 
-m4_define([_LT_ENABLE_FAST_INSTALL], -[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl -AC_ARG_ENABLE([fast-install], - [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], - [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_fast_install=yes ;; - no) enable_fast_install=no ;; - *) - enable_fast_install=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," - for pkg in $enableval; do - IFS="$lt_save_ifs" - if test "X$pkg" = "X$p"; then - enable_fast_install=yes - fi - done - IFS="$lt_save_ifs" - ;; - esac], - [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) - -_LT_DECL([fast_install], [enable_fast_install], [0], - [Whether or not to optimize for fast installation])dnl -])# _LT_ENABLE_FAST_INSTALL - -LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) -LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) - -# Old names: -AU_DEFUN([AC_ENABLE_FAST_INSTALL], -[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you put -the `fast-install' option into LT_INIT's first parameter.]) -]) - -AU_DEFUN([AC_DISABLE_FAST_INSTALL], -[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you put -the `disable-fast-install' option into LT_INIT's first parameter.]) -]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) -dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) - - -# _LT_WITH_PIC([MODE]) -# -------------------- -# implement the --with-pic flag, and support the `pic-only' and `no-pic' -# LT_INIT options. -# MODE is either `yes' or `no'. If omitted, it defaults to `both'. 
-m4_define([_LT_WITH_PIC], -[AC_ARG_WITH([pic], - [AS_HELP_STRING([--with-pic], - [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], - [pic_mode="$withval"], - [pic_mode=default]) - -test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) - -_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl -])# _LT_WITH_PIC - -LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) -LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) - -# Old name: -AU_DEFUN([AC_LIBTOOL_PICMODE], -[_LT_SET_OPTION([LT_INIT], [pic-only]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you -put the `pic-only' option into LT_INIT's first parameter.]) -]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) - -## ----------------- ## -## LTDL_INIT Options ## -## ----------------- ## - -m4_define([_LTDL_MODE], []) -LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], - [m4_define([_LTDL_MODE], [nonrecursive])]) -LT_OPTION_DEFINE([LTDL_INIT], [recursive], - [m4_define([_LTDL_MODE], [recursive])]) -LT_OPTION_DEFINE([LTDL_INIT], [subproject], - [m4_define([_LTDL_MODE], [subproject])]) - -m4_define([_LTDL_TYPE], []) -LT_OPTION_DEFINE([LTDL_INIT], [installable], - [m4_define([_LTDL_TYPE], [installable])]) -LT_OPTION_DEFINE([LTDL_INIT], [convenience], - [m4_define([_LTDL_TYPE], [convenience])]) diff --git a/burstioInterfaces/testing/tests/cpp/m4/ltsugar.m4 b/burstioInterfaces/testing/tests/cpp/m4/ltsugar.m4 deleted file mode 100644 index 9000a057d..000000000 --- a/burstioInterfaces/testing/tests/cpp/m4/ltsugar.m4 +++ /dev/null @@ -1,123 +0,0 @@ -# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- -# -# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. -# Written by Gary V. Vaughan, 2004 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. 
- -# serial 6 ltsugar.m4 - -# This is to help aclocal find these macros, as it can't see m4_define. -AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) - - -# lt_join(SEP, ARG1, [ARG2...]) -# ----------------------------- -# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their -# associated separator. -# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier -# versions in m4sugar had bugs. -m4_define([lt_join], -[m4_if([$#], [1], [], - [$#], [2], [[$2]], - [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) -m4_define([_lt_join], -[m4_if([$#$2], [2], [], - [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) - - -# lt_car(LIST) -# lt_cdr(LIST) -# ------------ -# Manipulate m4 lists. -# These macros are necessary as long as will still need to support -# Autoconf-2.59 which quotes differently. -m4_define([lt_car], [[$1]]) -m4_define([lt_cdr], -[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], - [$#], 1, [], - [m4_dquote(m4_shift($@))])]) -m4_define([lt_unquote], $1) - - -# lt_append(MACRO-NAME, STRING, [SEPARATOR]) -# ------------------------------------------ -# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. -# Note that neither SEPARATOR nor STRING are expanded; they are appended -# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). -# No SEPARATOR is output if MACRO-NAME was previously undefined (different -# than defined and empty). -# -# This macro is needed until we can rely on Autoconf 2.62, since earlier -# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. -m4_define([lt_append], -[m4_define([$1], - m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) - - - -# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) -# ---------------------------------------------------------- -# Produce a SEP delimited list of all paired combinations of elements of -# PREFIX-LIST with SUFFIX1 through SUFFIXn. 
Each element of the list -# has the form PREFIXmINFIXSUFFIXn. -# Needed until we can rely on m4_combine added in Autoconf 2.62. -m4_define([lt_combine], -[m4_if(m4_eval([$# > 3]), [1], - [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl -[[m4_foreach([_Lt_prefix], [$2], - [m4_foreach([_Lt_suffix], - ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, - [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) - - -# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) -# ----------------------------------------------------------------------- -# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited -# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. -m4_define([lt_if_append_uniq], -[m4_ifdef([$1], - [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], - [lt_append([$1], [$2], [$3])$4], - [$5])], - [lt_append([$1], [$2], [$3])$4])]) - - -# lt_dict_add(DICT, KEY, VALUE) -# ----------------------------- -m4_define([lt_dict_add], -[m4_define([$1($2)], [$3])]) - - -# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) -# -------------------------------------------- -m4_define([lt_dict_add_subkey], -[m4_define([$1($2:$3)], [$4])]) - - -# lt_dict_fetch(DICT, KEY, [SUBKEY]) -# ---------------------------------- -m4_define([lt_dict_fetch], -[m4_ifval([$3], - m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), - m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) - - -# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) -# ----------------------------------------------------------------- -m4_define([lt_if_dict_fetch], -[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], - [$5], - [$6])]) - - -# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) -# -------------------------------------------------------------- -m4_define([lt_dict_filter], -[m4_if([$5], [], [], - [lt_join(m4_quote(m4_default([$4], [[, ]])), - lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), - 
[lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl -]) diff --git a/burstioInterfaces/testing/tests/cpp/m4/ltversion.m4 b/burstioInterfaces/testing/tests/cpp/m4/ltversion.m4 deleted file mode 100644 index f3c530980..000000000 --- a/burstioInterfaces/testing/tests/cpp/m4/ltversion.m4 +++ /dev/null @@ -1,23 +0,0 @@ -# ltversion.m4 -- version numbers -*- Autoconf -*- -# -# Copyright (C) 2004 Free Software Foundation, Inc. -# Written by Scott James Remnant, 2004 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# Generated from ltversion.in. - -# serial 3017 ltversion.m4 -# This file is part of GNU Libtool - -m4_define([LT_PACKAGE_VERSION], [2.2.6b]) -m4_define([LT_PACKAGE_REVISION], [1.3017]) - -AC_DEFUN([LTVERSION_VERSION], -[macro_version='2.2.6b' -macro_revision='1.3017' -_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) -_LT_DECL(, macro_revision, 0) -]) diff --git a/burstioInterfaces/testing/tests/cpp/m4/lt~obsolete.m4 b/burstioInterfaces/testing/tests/cpp/m4/lt~obsolete.m4 deleted file mode 100644 index 637bb2066..000000000 --- a/burstioInterfaces/testing/tests/cpp/m4/lt~obsolete.m4 +++ /dev/null @@ -1,92 +0,0 @@ -# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- -# -# Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. -# Written by Scott James Remnant, 2004. -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# serial 4 lt~obsolete.m4 - -# These exist entirely to fool aclocal when bootstrapping libtool. 
-# -# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) -# which have later been changed to m4_define as they aren't part of the -# exported API, or moved to Autoconf or Automake where they belong. -# -# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN -# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us -# using a macro with the same name in our local m4/libtool.m4 it'll -# pull the old libtool.m4 in (it doesn't see our shiny new m4_define -# and doesn't know about Autoconf macros at all.) -# -# So we provide this file, which has a silly filename so it's always -# included after everything else. This provides aclocal with the -# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything -# because those macros already exist, or will be overwritten later. -# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. -# -# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. -# Yes, that means every name once taken will need to remain here until -# we give up compatibility with versions before 1.7, at which point -# we need to keep only those names which we still refer to. - -# This is to help aclocal find these macros, as it can't see m4_define. 
-AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) - -m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) -m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) -m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) -m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) -m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) -m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) -m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) -m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) -m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) -m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) -m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) -m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) -m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) -m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) -m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) -m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) -m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) -m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) -m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) -m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) -m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) -m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) -m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) -m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) -m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) -m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) -m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], 
[AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) -m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) -m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) -m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) -m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) -m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) -m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) -m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) -m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) -m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) -m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) -m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) -m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) -m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) -m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) -m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) -m4_ifndef([AC_LIBTOOL_RC], [AC_DEFUN([AC_LIBTOOL_RC])]) -m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) -m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) -m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) -m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) -m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) -m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) -m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) -m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) -m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) -m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) -m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) -m4_ifndef([_LT_AC_FILE_LTDLL_C], 
[AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) diff --git a/burstioInterfaces/testing/tests/cpp/reconf b/burstioInterfaces/testing/tests/cpp/reconf deleted file mode 100755 index 59ffdc071..000000000 --- a/burstioInterfaces/testing/tests/cpp/reconf +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK burstioInterfaces. -# -# REDHAWK burstioInterfaces is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK burstioInterfaces is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -test -d m4 || mkdir m4 -test -d acinclude || mkdir acinclude -test -e NEWS || touch NEWS -test -e README || touch README -test -e AUTHORS || touch AUTHORS -test -e ChangeLog || touch ChangeLog -autoreconf -i diff --git a/burstioInterfaces/testing/tests/cpp/runtests b/burstioInterfaces/testing/tests/cpp/runtests deleted file mode 100755 index fac7b1c03..000000000 --- a/burstioInterfaces/testing/tests/cpp/runtests +++ /dev/null @@ -1,8 +0,0 @@ -# -# -burstio_top=../../.. 
-burstio_libsrc_top=$burstio_top/src -burstio_cpp_lib=$burstio_libsrc_top/cpp -export LD_LIBRARY_PATH=$burstio_cpp_lib/.libs:$burstio_libsrc_top/.libs:${LD_LIBRARY_PATH} -make check - diff --git a/burstioInterfaces/testing/tests/java/.gitignore b/burstioInterfaces/testing/tests/java/.gitignore deleted file mode 100644 index 96966d7c4..000000000 --- a/burstioInterfaces/testing/tests/java/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.class -!Makefile diff --git a/burstioInterfaces/testing/tests/java/templates/Burstio_Utils_Test.template b/burstioInterfaces/testing/tests/java/Burstio_Utils_Test.java similarity index 100% rename from burstioInterfaces/testing/tests/java/templates/Burstio_Utils_Test.template rename to burstioInterfaces/testing/tests/java/Burstio_Utils_Test.java diff --git a/burstioInterfaces/testing/tests/java/Makefile b/burstioInterfaces/testing/tests/java/Makefile deleted file mode 100644 index cfc17e1dc..000000000 --- a/burstioInterfaces/testing/tests/java/Makefile +++ /dev/null @@ -1,52 +0,0 @@ - -burstio_top_dir=../../../ -burstio_idl_dir=$(burstio_top_dir) -burstio_idl_java_dir=$(burstio_top_dir) -burstio_libsrc_dir=$(burstio_top_dir)/src -burstio_libsrc_java_dir=$(burstio_libsrc_dir)/java -OSSIE_HOME=$(shell echo $(OSSIEHOME)) -BURSTIO_JARS=$(OSSIE_HOME)/lib/CFInterfaces.jar:$(OSSIE_HOME)/lib/log4j-1.2.15.jar:$(OSSIE_HOME)/lib/ossie.jar:$(OSSIE_HOME)/lib/bulkio.jar:$(OSSIE_HOME)/lib/BULKIOInterfaces.jar:$(burstio_libsrc_java_dir)/burstio.jar:$(burstio_libsrc_java_dir)/BURSTIOInterfaces.jar - -JAVA_HOME = $(shell echo $(JAVAHOME)) -JAVA = $(JAVA_HOME)/bin/java -JDB = $(JAVA_HOME)/bin/jdb -JAVAC = $(JAVA_HOME)/bin/javac -JAVA_CP=$(CLASSPATH):.:/usr/share/java/junit4.jar:$(BURSTIO_JARS) - -.SUFFIXES: .java .class -.PHONEY: all check build-all clean tcheck - -IN_PORTS=$(patsubst %.java,%.class,$(wildcard InBurst*.java)) -OUT_PORTS=$(patsubst %.java,%.class,$(wildcard OutBurst*.java)) -PUSH_TESTS=$(patsubst %.java,%.class,$(wildcard Burst*Push*.java)) 
-UTILS=Burstio_Utils_Test.class -#MULTIOUT_PORTS=$(patsubst %.java,%.class,$(wildcard Multi*.java)) - -JTESTS=$(IN_PORTS:.class=) $(OUT_PORTS:.class=) $(PUSH_TESTS:.class=) $(MULTIOUT_PORTS:.class=) $(UTILS:.class=) -.java.class: - $(JAVAC) -cp $(JAVA_CP) -Xlint $^ - -all: build-all check - -build-all: $(IN_PORTS) $(OUT_PORTS) $(PUSH_TESTS) $(MULTIOUT_PORTS) $(UTILS) - -tcheck: - $(JAVA) -cp $(JAVA_CP) org.junit.runner.JUnitCore OutVectorPort_Test - -check: - @for jtest in "$(JTESTS)" ; do \ - $(JAVA) -cp $(JAVA_CP) -Dlog4j.configuration=file:log4j_config.txt org.junit.runner.JUnitCore $$jtest ; \ - done - -debug: - @for jtest in "$(JTESTS)" ; do \ - $(JDB) -sourcepath $(burstio_libsrc_java_dir)/BURSTIO:$(burstio_libsrc_java_dir)/burstio -classpath $(JAVA_CP) -Dlog4j.configuration=file:log4j_config.txt org.junit.runner.JUnitCore $$jtest ; \ - done - -clean: - -rm *.class - -rm InBurst*_Test.java - -rm OutBurst*_Test.java - -rm Burst*Push_Test.java - -rm Burstio_Utils_Test.java - diff --git a/burstioInterfaces/testing/tests/java/Makefile.am b/burstioInterfaces/testing/tests/java/Makefile.am new file mode 100644 index 000000000..1eeea7732 --- /dev/null +++ b/burstioInterfaces/testing/tests/java/Makefile.am @@ -0,0 +1,57 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK burstioInterfaces. +# +# REDHAWK burstioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK burstioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +burstio_libsrc_dir=$(top_srcdir)/src +burstio_libsrc_java_dir=$(burstio_libsrc_dir)/java +BURSTIO_JARS=$(top_builddir)/src/java/burstio.jar:$(top_builddir)/src/java/BURSTIOInterfaces.jar + +sed_dir = $(top_srcdir)/src/java/sed +sed_generate = $(AM_V_GEN)$(SED) -f $(sed_dir)/$*.sed $< > $@ + +InBurst%Port_Test.java : $(srcdir)/templates/InBurstPort_Test.template $(sed_dir)/%.sed + $(sed_generate) + +OutBurst%Port_Test.java : $(srcdir)/templates/OutBurstPort_Test.template $(sed_dir)/%.sed + $(sed_generate) + +Burst%Push_Test.java : $(srcdir)/templates/BurstPush_Test.template $(sed_dir)/%.sed + $(sed_generate) + +BUILT_SOURCES = $(patsubst $(sed_dir)/%.sed,InBurst%Port_Test.java,$(wildcard $(sed_dir)/*.sed)) +BUILT_SOURCES += $(patsubst $(sed_dir)/%.sed,OutBurst%Port_Test.java,$(wildcard $(sed_dir)/*.sed)) +BUILT_SOURCES += $(patsubst $(sed_dir)/%.sed,Burst%Push_Test.java,$(wildcard $(sed_dir)/*.sed)) + +Burstio_CLASSES = Burstio_Utils_Test.class $(patsubst %.java,%.class,$(BUILT_SOURCES)) +Burstio_CLASSPATH = $(BURSTIO_JARS):$(OSSIE_CLASSPATH):$(BULKIO_CLASSPATH):$(JUNIT_CLASSPATH) + +TEST_CLASSES = $(patsubst %.class,%,$(Burstio_CLASSES)) + +TESTS = Burstio +check_SCRIPTS = Burstio + +%.class : %.java + $(AM_V_at)$(JAVAC) -d $(builddir) -cp $(Burstio_CLASSPATH) -g -Xlint $< + +Burstio : $(Burstio_CLASSES) Makefile.am + @echo "#!/bin/sh" > $@ + @echo "export LD_LIBRARY_PATH=$(top_builddir)/src/java/.libs:${LD_LIBRARY_PATH}" >> $@ + @echo "exec java -cp $(Burstio_CLASSPATH):. 
-Dlog4j.configuration=file:$(srcdir)/log4j_config.txt org.junit.runner.JUnitCore $(TEST_CLASSES)" >> $@ + @chmod +x $@ + +CLEANFILES = Burstio $(BUILT_SOURCES) *.class diff --git a/burstioInterfaces/testing/tests/java/build.xml b/burstioInterfaces/testing/tests/java/build.xml index 7e5786c8b..56b4f41ff 100644 --- a/burstioInterfaces/testing/tests/java/build.xml +++ b/burstioInterfaces/testing/tests/java/build.xml @@ -40,7 +40,11 @@ with this program. If not, see http://www.gnu.org/licenses/. - + + + + + diff --git a/burstioInterfaces/testing/tests/python/runtests b/burstioInterfaces/testing/tests/python/runtests index 137008171..52b27ee8c 100755 --- a/burstioInterfaces/testing/tests/python/runtests +++ b/burstioInterfaces/testing/tests/python/runtests @@ -2,26 +2,30 @@ # Runs relative to burstio project # -burstio_top=../../../ +burstio_top=../../.. burstio_libsrc_top=$burstio_top/src/python -#export LD_LIBRARY_PATH=$burstio_libsrc_top/.libs:$burstio_top/.libs:${LD_LIBRARY_PATH} -export PYTHONPATH=$burstio_libsrc_top/build/lib:${PYTHONPATH} +export PYTHONPATH=$burstio_libsrc_top:${PYTHONPATH} -# -# Run Python based testing.. -# test_xxx_vector.py -- uses sandbox to load components and test data flow -# test_python_helpers.py -- test sri and time helpers -# +# work around for import issues with redhawk bitbuffer +bbuffer_link="../../../src/python/redhawk/bitbuffer.py" -# RESOLVE -# make sure there is link to burstioInterfaces in the build/burstio/ directory -# -#if [ ! 
-h $burstio_libsrc_top/build/lib/burstio/burstioInterfaces ]; -#then -# cd $burstio_libsrc_top/build/lib/burstio -# ln -s ../../../../build/lib/burstio/burstioInterfaces -# cd - -#fi +cleanup_redhawk_import() { + [ -L $bbuffer_link ] && rm "$bbuffer_link" + [ -f $bbuffer_link"c" ] && rm $bbuffer_link"c" +} + +setup_redhawk_import() { + + cleanup_redhawk_import + + pushd $burstio_libsrc_top/redhawk + ln -s $OSSIEHOME/lib/python/redhawk/bitbuffer.py bitbuffer.py + popd +} + + + +setup_redhawk_import if [ $# -gt 0 ] then @@ -33,4 +37,4 @@ else done fi - +cleanup_redhawk_import diff --git a/burstioInterfaces/testing/tests/runtests b/burstioInterfaces/testing/tests/runtests index ce32b24a0..3359cb949 100755 --- a/burstioInterfaces/testing/tests/runtests +++ b/burstioInterfaces/testing/tests/runtests @@ -5,11 +5,30 @@ burstio_top=../../.. burstio_libsrc_top=$burstio_top/src export LD_LIBRARY_PATH=$burstio_libsrc_top/cpp/.libs:$burstio_libsrc_top/java/.libs:${LD_LIBRARY_PATH} -export PYTHONPATH=$burstio_libsrc_top/python/redhawk:$burstio_libsrc_top/python:${PYTHONPATH} +export PYTHONPATH=$burstio_libsrc_top/python:${PYTHONPATH} # # Run Python based testing.. 
# + +# work around for import issues with redhawk bitbuffer +bbuffer_link="../../src/python/redhawk/bitbuffer.py" + +cleanup_redhawk_import() { + [ -L $bbuffer_link ] && rm "$bbuffer_link" + [ -f $bbuffer_link"c" ] && rm $bbuffer_link"c" +} + +setup_redhawk_import() { + + cleanup_redhawk_import + + pushd ../../src/python/redhawk + ln -s $OSSIEHOME/lib/python/redhawk/bitbuffer.py bitbuffer.py + popd +} + +setup_redhawk_import if [ $# -gt 0 ] then # run an associated test script @@ -24,7 +43,8 @@ else cd - fi - +# clean up +cleanup_redhawk_import # # Run Java based testing @@ -39,8 +59,5 @@ fi # # Run C++ based testing # -cd cpp -./runtests -cd - - +make -j 6 -C cpp check diff --git a/codegenTesting/.gitignore b/codegenTesting/.gitignore index 8d05759d8..8e4e92b89 100644 --- a/codegenTesting/.gitignore +++ b/codegenTesting/.gitignore @@ -1,54 +1,9 @@ -Makefile -build -Makefile.in -.deps -.libs +*.class +*.jar *.lo *.la *.o *.pyc *~ -aclocal.m4 -acinclude.m4 -autom4te.cache -config.guess -config.log -config.status -config.sub -configure -depcomp -install-sh -libtool -ltmain.sh -missing -ossie.pc -*.class -py-compile -reconf -configure.ac -Makefile.am -struct_props.h -*_base.cpp -*_base.h -build.sh -main.cpp -port_impl.h -port_impl.cpp -startJava.sh -*_base.py -*.spec -*.jar - -sdr/dom/components/basic/basic_cpp_impl1/basic_cpp_impl1 -sdr/dom/components/bulkio_ports/bulkio_ports_cpp_impl1/bulkio_ports_cpp_impl1 -sdr/dom/components/event_props/cpp/event_props -sdr/dom/components/props/props_cpp_impl1/props_cpp_impl1 -sdr/dom/components/sri/sri_cpp_impl1/sri_cpp_impl1 -sdr/dom/components/bulkio_ports/bulkio_ports_java_impl1/src/bulkio_ports_java_impl1/ports/ -sdr/dom/components/sri/sri_java_impl1/src/sri_java_impl1/ports/ -sdr/dev/devices/basic_device/cpp/basic_device -sdr/dev/devices/basic_loadable_device/cpp/basic_loadable_device -sdr/dev/devices/basic_executable_device/cpp/basic_executable_device -sdr/dev/devices/basic_aggregate_device/cpp/basic_aggregate_device 
-sdr/dev/services/basic_service/python/ -TEST-*.xml +build/ +sdr/dom/mgr diff --git a/codegenTesting/helpers/scatest.py b/codegenTesting/helpers/scatest.py index 060872883..89a6b0981 100755 --- a/codegenTesting/helpers/scatest.py +++ b/codegenTesting/helpers/scatest.py @@ -20,7 +20,8 @@ # from _unitTestHelpers.scatest import * import commands -from subprocess import Popen +import glob +import os from xml.dom.minidom import parse import common @@ -182,22 +183,26 @@ def runUnitTests(self): impl_id = impl['id'] impl_name = impl['name'] spd_file = os.path.join(self.build_dir, self.base_name+'.spd.xml') - if self.octave_test_dir: - # setup octave components to run from their test directory - start_dir = os.getcwd(); - spd_file = "../"+self.base_name+'.spd.xml' - os.chdir(self.test_dir) - - retval = ossie.utils.testing.ScaComponentTestProgram(spd_file, - module='test_'+self.base_name, - impl=impl_id) - - if self.octave_test_dir: - os.chdir(start_dir) - - for result in retval.results: - if result.errors or result.failures: - if not lang in failures: - failures.append(lang) + + for test_file in glob.glob(self.build_dir+'/tests/test_*.py'): + _file = os.path.basename(test_file) + + if self.octave_test_dir: + # setup octave components to run from their test directory + start_dir = os.getcwd(); + spd_file = "../"+self.base_name+'.spd.xml' + os.chdir(self.test_dir) + + retval = ossie.utils.testing.ScaComponentTestProgram(spd_file, + module=_file[:-3], + impl=impl_id) + + if self.octave_test_dir: + os.chdir(start_dir) + + for result in retval.results: + if result.errors or result.failures: + if not lang in failures: + failures.append(lang) self.assertEqual(len(failures), 0, msg='failed for ' + ', '.join(failures)) diff --git a/codegenTesting/runtests.py b/codegenTesting/runtests.py index 7cd598703..a8cc6dd22 100755 --- a/codegenTesting/runtests.py +++ b/codegenTesting/runtests.py @@ -25,8 +25,16 @@ from _unitTestHelpers import scatest from _unitTestHelpers import runtestHelpers 
+# Create a symbolic link to $SDRROOT/dom/mgr so that the sandbox can find +# ComponentHost for shared library components +sdrroot = os.path.join(os.getcwd(), "sdr") +mgrpath = os.path.join(sdrroot, 'dom/mgr') +if os.path.exists(mgrpath): + os.unlink(mgrpath) +os.symlink(os.path.join(os.environ['SDRROOT'], 'dom/mgr'), mgrpath) + # Point to the testing SDR folder -os.environ['SDRROOT'] = os.path.join(os.getcwd(), "sdr") +os.environ['SDRROOT'] = sdrroot os.environ['CODEGENTESTHOME'] = os.path.join(os.getcwd()) def removeAll( src, items): diff --git a/codegenTesting/sdr/dev/devices/ProgrammableDevice/ProgrammableDevice.prf.xml b/codegenTesting/sdr/dev/devices/ProgrammableDevice/ProgrammableDevice.prf.xml index fb709f704..7edde600c 100644 --- a/codegenTesting/sdr/dev/devices/ProgrammableDevice/ProgrammableDevice.prf.xml +++ b/codegenTesting/sdr/dev/devices/ProgrammableDevice/ProgrammableDevice.prf.xml @@ -94,6 +94,6 @@ with this program. If not, see http://www.gnu.org/licenses/. IE: "file://path/to/local/filesystem/load.bit" - + diff --git a/codegenTesting/sdr/dev/devices/ProgrammableDevice/tests/test_ProgrammableDevice.py b/codegenTesting/sdr/dev/devices/ProgrammableDevice/tests/test_ProgrammableDevice.py new file mode 100644 index 000000000..7d305e9ec --- /dev/null +++ b/codegenTesting/sdr/dev/devices/ProgrammableDevice/tests/test_ProgrammableDevice.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python + +import ossie.utils.testing +from ossie.utils import sb +import frontend +from omniORB import CORBA, any +from ossie.cf import CF +from ossie.properties import * +from ossie.utils import uuid + +class HwLoadRequest(object): + request_id = simple_property( + id_="hw_load_request::request_id", + name="request_id", + type_="string") + + requester_id = simple_property( + id_="hw_load_request::requester_id", + name="requester_id", + type_="string") + + hardware_id = simple_property( + id_="hw_load_request::hardware_id", + name="hardware_id", + type_="string") + + load_filepath = 
simple_property( + id_="hw_load_request::load_filepath", + name="load_filepath", + type_="string") + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["request_id"] = self.request_id + d["requester_id"] = self.requester_id + d["hardware_id"] = self.hardware_id + d["load_filepath"] = self.load_filepath + return str(d) + + @classmethod + def getId(cls): + return "hw_load_request" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("request_id",self.request_id),("requester_id",self.requester_id),("hardware_id",self.hardware_id),("load_filepath",self.load_filepath)] + +hw_load_request = struct_property(id_="hw_load_request", + name="hw_load_request", + structdef=HwLoadRequest, + configurationkind=("property",), + mode="readwrite") + + +class HwLoadStatus(object): + request_id = simple_property( + id_="hw_load_status::request_id", + name="request_id", + type_="string") + + requester_id = simple_property( + id_="hw_load_status::requester_id", + name="requester_id", + type_="string") + + hardware_id = simple_property( + id_="hw_load_status::hardware_id", + name="hardware_id", + type_="string") + + load_filepath = simple_property( + id_="hw_load_status::load_filepath", + name="load_filepath", + type_="string") + + state = simple_property( + id_="hw_load_status::state", + name="state", + type_="short") + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): 
+ """Return a string representation of this structure""" + d = {} + d["request_id"] = self.request_id + d["requester_id"] = self.requester_id + d["hardware_id"] = self.hardware_id + d["load_filepath"] = self.load_filepath + d["state"] = self.state + return str(d) + + @classmethod + def getId(cls): + return "hw_load_status" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("request_id",self.request_id),("requester_id",self.requester_id),("hardware_id",self.hardware_id),("load_filepath",self.load_filepath),("state",self.state)] + + +hw_load_status = struct_property(id_="hw_load_status", + name="hw_load_status", + structdef=HwLoadStatus, + configurationkind=("property",), + mode="readwrite") + +hw_load_requests = structseq_property(id_="hw_load_requests", + name="hw_load_requests", + structdef=HwLoadRequest, + defvalue=[], + configurationkind=("property",), + mode="readwrite") + + + +class DeviceTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the device. + SPD_FILE = '../ProgrammableDevice.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a device using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. 
Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the device, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl) + pass + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def test_hw_load_request(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + + my_request = HwLoadRequest() + my_request.request_id = str(uuid.uuid1()) + my_request.requestor_id = "PG_TESTER" + my_request.hardware_id = "PG_TESTER:1" + my_request.load_filepath = "/the/path/file/to/load.bin" + + my_request_any = CORBA.Any(CORBA.TypeCode("IDL:CF/Properties:1.0"), struct_to_props(my_request)) + + my_requests = CF.DataType(id='hw_load_requests', + value=CORBA.Any(CORBA.TypeCode("IDL:omg.org/CORBA/AnySeq:1.0"), + [ my_request_any ] )) + + + hw_load_requests = structseq_property(id_="hw_load_requests", + name="hw_load_requests", + structdef=HwLoadRequest, + defvalue=[], + configurationkind=("property",), + mode="readwrite") + + self.comp.start() + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/.RX_Digitizer_Sim.wavedev b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/.RX_Digitizer_Sim.wavedev new file mode 100644 index 000000000..d1ea167e5 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/.RX_Digitizer_Sim.wavedev @@ -0,0 +1,6 @@ + + + + + + diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/.md5sums b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/.md5sums new file mode 100644 index 000000000..54e1f9ba6 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/.md5sums @@ -0,0 +1,2 @@ +240cf9a66910ade2818854253acf38f6 build.sh 
+1ba61a4cc571613d3eb6e37e4685ec88 RX_Digitizer_Sim.spec diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.prf.xml b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.prf.xml new file mode 100644 index 000000000..a8de80de7 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.prf.xml @@ -0,0 +1,129 @@ + + + + + This specifies the device kind + FRONTEND::TUNER + + + + + This specifies the specific device + + + + + Status of each tuner, including entries for both allocated and un-allocated tuners. Each entry represents a single tuner. + + + Comma separated list of current Allocation IDs. + + + Available bandwidth (Hz) in range (XX-YY) or csv (X,Y,Z) format. Do not put units in field. + Hz + + + Available frequencies (Hz) in range (XX-YY) or csv (X,Y,Z) format. Do not put units in field. + Hz + + + Available gain (dB) in range (XX-YY) or csv (X,Y,Z) format. Do not put units in field. + dB + + + Available sample_rate (sps) in range (XX-YY) or csv (X,Y,Z) format. Do not put units in field. + sps + + + Current bandwidth in Hz + Hz + + + Current center frequency in Hz. + Hz + + + Current decimation of tuner. For DDC tuners, this is the ratio of input sample rate to output sample rate regardless of data format. + + + Indicates if tuner is enabled, in reference to the output state of the tuner. + + + Current gain in dB. + dB + + + Unique ID that specifies a group of Device. + + + Specifies a certain RF flow to allocate against. + + + Current sample rate in samples per second. + sps + + + Physical tuner ID. + + + Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + + + + + + + Frontend Interfaces v2 listener allocation structure + + + + + + Frontend Interfaces v2 main allocation structure + + Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + + + The allocation_id set by the caller. 
Used by the caller to reference the allocation uniquely + + + Requested center frequency + Hz + + + Requested bandwidth (+/- the tolerance) + Hz + + + Allowable Percent above requested bandwidth (ie - 100 would be up to twice) + percent + + + Requested sample rate (+/- the tolerance). This can be ignored for such devices as analog tuners + Hz + + + Allowable Percent above requested sample rate (ie - 100 would be up to twice) + percent + + + True: Has control over the device to make changes +False: Does not need control and can just attach to any currently tasked device that satisfies the parameters (essentually a listener) + + + Unique identifier that specifies the group a device must be in. Must match group_id on the device + + + Optional. Specifies the RF flow of a specific input source to allocate against. If left empty, it will match all FrontEnd devices. + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.scd.xml b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.scd.xml new file mode 100644 index 000000000..1b932fdc5 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.scd.xml @@ -0,0 +1,73 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.spd.xml b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.spd.xml new file mode 100644 index 000000000..4eeb6887a --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/RX_Digitizer_Sim.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/RX_Digitizer_Sim.py + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/.md5sums b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/.md5sums new file mode 100644 index 000000000..cacf81c03 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/.md5sums @@ -0,0 +1,6 @@ +b1139cb052c7e855dc8e0d9daeb9ae41 RX_Digitizer_Sim.py +8bfcd22353c3a57fee561ad86ee2a56b reconf +1dc23ee4edb38a3ac1e4bea610d4334e configure.ac +77735ac9d958acdca4cf52727c42a652 RX_Digitizer_Sim_base.py +9506fe5bbdeb899b120123613b655f93 Makefile.am.ide +0184c25415022e8035f6618a80392374 Makefile.am diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/RX_Digitizer_Sim.py b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/RX_Digitizer_Sim.py new file mode 100755 index 000000000..0a167c020 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/RX_Digitizer_Sim.py @@ -0,0 +1,389 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: RX_Digitizer_Sim.spd.xml +from ossie.device import start_device +import logging +from frontend.tuner_device import validateRequestSingle, floatingPointCompare, validateRequestVsRFInfo +from redhawk.frontendInterfaces import FRONTEND + +from data_generator import DataGenerator + +from RX_Digitizer_Sim_base import * + +class RX_Digitizer_Sim_i(RX_Digitizer_Sim_base): + """""" + MINFREQ = 50000000.0 + MAXFREQ = 3000000000.0 + AVAILABLE_BW_SR = ((2000000,2500000.0,8),(4000000,5000000.0,4),(8000000,10000000.0,2)) + + + def constructor(self): + """ + This is called by the framework immediately after your device registers with the system. + + In general, you should add customization here and not in the __init__ constructor. 
If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + For a tuner device, the structure frontend_tuner_status needs to match the number + of tuners that this device controls and what kind of device it is. + The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER + + For example, if this device has 5 physical + tuners, each an RX_DIGITIZER, then the code in the construct function should look like this: + + self.setNumChannels(5, "RX_DIGITIZER"); + + The incoming request for tuning contains a string describing the requested tuner + type. The string for the request must match the string in the tuner status. + """ + # TODO add customization here. + self.setNumChannels(2, "RX_DIGITIZER"); + + self.datagenerators =[] + self.datagenerators.append(DataGenerator(self.port_dataShort_out)) + self.datagenerators.append(DataGenerator(self.port_dataShort_out)) + + self.rfinfo = self._createEmptyRFInfo() + + for datagenerator in self.datagenerators: + datagenerator.keyword_dict['FRONTEND::DEVICE_ID'] = self._get_identifier() + datagenerator.keyword_dict['FRONTEND::RF_FLOW_ID'] = self.rfinfo.rf_flow_id + datagenerator.waveform_type = "Sine" + + def process(self): + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + ''' + ************************************************************* + Functions supporting tuning allocation + *************************************************************''' + def deviceEnable(self, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + Make sure to set the 'enabled' member of fts to indicate that tuner as enabled + ************************************************************''' + print "deviceEnable(): Enable the given tuner *********" 
+ try: + self.datagenerators[tuner_id].enableDataFlow() + fts.enabled = True + + except Exception, e: + self._log.exception("Got exception % s" %str(e)) + return False + + if fts.center_frequency == 112e6: + print self.getTunerStatus(fts.allocation_id_csv) + + return True + + def deviceDisable(self,fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + Make sure to reset the 'enabled' member of fts to indicate that tuner as disabled + ************************************************************''' + self._log.debug( "deviceDisable(): Disable the given tuner *********") + self.datagenerators[tuner_id].disableDataFlow() + fts.enabled = False + + return + + def deviceSetTuning(self,request, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets the tolerance requirement. 
For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth = request.bandwidth + fts.center_frequency = request.center_frequency + fts.sample_rate = request.sample_rate + + return True if the tuning succeeded, and False if it failed + ************************************************************''' + self._log.debug( "deviceSetTuning(): Evaluate whether or not a tuner is added *********") + + self._log.debug( "allocating tuner_id %s" % tuner_id) + + if fts.center_frequency == 111e6: + raise Exception('bad center frequency') + + # Check that allocation can be satisfied + if self.rfinfo.rf_flow_id !="": + try: + validateRequestVsRFInfo(request,self.rfinfo,1) + except FRONTEND.BadParameterException , e: + self._log.info("ValidateRequestVsRFInfo Failed: %s" %(str(e))) + return False + + + #Convert CF based on RFInfo. + tuneFreq = self.convert_rf_to_if(request.center_frequency) + + # Check the CF + + if not(validateRequestSingle(self.MINFREQ,self.MAXFREQ,tuneFreq)): + self._log.debug( "Center Freq Does not fit %s, %s, %s" %(tuneFreq, self.MINFREQ , self.MAXFREQ)) + return False + # Check the BW/SR + + + bw,sr,decimation = self.findBestBWSR(request.bandwidth,request.sample_rate) + if not bw: + self._log.debug( "Can't Satisfy BW and SR request") + return False + + # Update Tuner Status + fts.bandwidth = bw + fts.center_frequency = request.center_frequency + fts.sample_rate = sr + fts.decimation = decimation + print "deviceSetTuning(): 5" + #Update output multiPort to add this allocation. 
Make Allocation ID the same as StreamID + self.matchAllocationIdToStreamId(request.allocation_id, request.allocation_id,"dataShort_out") + + # Setup data Generator and start data for that tuner + self.datagenerators[tuner_id].stream_id = request.allocation_id + self.datagenerators[tuner_id].sr = sr + self.datagenerators[tuner_id].cf = tuneFreq + self.datagenerators[tuner_id].keyword_dict['FRONTEND::BANDWIDTH'] = bw + self.datagenerators[tuner_id].keyword_dict['COL_RF'] = request.center_frequency + self.datagenerators[tuner_id].keyword_dict['CHAN_RF'] = request.center_frequency + self.datagenerators[tuner_id].start() + + print "Done with deviceSetTuning():" + return True + + + def deviceDeleteTuning(self, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + return True if the tune deletion succeeded, and False if it failed + ************************************************************''' + + self._log.debug( "deviceDeleteTuning(): Deallocate an allocated tuner *********") + self.datagenerators[tuner_id].stop() + controlAllocationID = fts.allocation_id_csv.split(',')[0] + self.removeStreamIdRouting(controlAllocationID, controlAllocationID) + return True + + ''' + ************************************************************* + Functions servicing the tuner control port + *************************************************************''' + def getTunerType(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].tuner_type + + def getTunerDeviceControl(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if self.getControlAllocationId(idx) == allocation_id: + return True + return False + + def getTunerGroupId(self,allocation_id): + idx = 
self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].group_id + + def getTunerRfFlowId(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].rf_flow_id + + + def setTunerCenterFrequency(self,allocation_id, freq): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if freq<0: raise FRONTEND.BadParameterException("Bad CF") + # set hardware to new value. Raise an exception if it's not possible + + #Check Frequency again min/max Range + #Convert CF based on RFInfo. + tuneFreq = self.convert_rf_to_if(freq) + + # Check the CF + if not(validateRequestSingle(self.MINFREQ,self.MAXFREQ,tuneFreq)): + self._log.debug( "Center Freq Does not fit %s, %s, %s" %(tuneFreq, self.MINFREQ , self.MAXFREQ)) + raise FRONTEND.BadParameterException("Radio Center Freq of %s Does not fit in %s, %s" %(tuneFreq, self.MINFREQ , self.MAXFREQ)) + + #set tuner new freq + self.datagenerators[idx].cf = tuneFreq + self.datagenerators[idx].keyword_dict['COL_RF'] = freq + self.datagenerators[idx].keyword_dict['CHAN_RF'] = freq + self.datagenerators[idx].updateandPushSRI() + self.frontend_tuner_status[idx].center_frequency = freq + + def getTunerCenterFrequency(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].center_frequency + + def setTunerBandwidth(self,allocation_id, bw): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != 
self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if bw<0: raise FRONTEND.BadParameterException("Invalid BW") + + newbw,newsr,decimation = self.findBestBWSR(bw,0) + if not newbw: + self._log.debug( "Can't Satisfy BW and SR request") + raise FRONTEND.BadParameterException("Can't Satisfy BW and SR request") + + # set hardware to new value. Raise an exception if it's not possible + self.datagenerators[idx].keyword_dict['FRONTEND::BANDWIDTH'] = newbw + self.datagenerators[idx].updateandPushSRI() + self.frontend_tuner_status[idx].bandwidth = newsr + self.frontend_tuner_status[idx].bandwidth = newbw + self.frontend_tuner_status[idx].decimation = decimation + + def getTunerBandwidth(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].bandwidth + + def setTunerAgcEnable(self,allocation_id, enable): + raise FRONTEND.NotSupportedException("setTunerAgcEnable not supported") + + def getTunerAgcEnable(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerAgcEnable not supported") + + def setTunerGain(self,allocation_id, gain): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if (gain<0 or gain>10) : raise FRONTEND.BadParameterException("Invalid Gain") + gain = round(gain,1) + # magnitude on data generators is 100+gain*10 + self.datagenerators[idx].magnitude = 100+gain*10 + self.frontend_tuner_status[idx].gain = gain + + def getTunerGain(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return 
self.frontend_tuner_status[idx].gain + def setTunerReferenceSource(self,allocation_id, source): + raise FRONTEND.NotSupportedException("setTunerReferenceSource not supported") + + def getTunerReferenceSource(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerReferenceSource not supported") + + def setTunerEnable(self,allocation_id, enable): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + # set hardware to new value. Raise an exception if it's not possible + self.frontend_tuner_status[idx].enabled = enable + + def getTunerEnable(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].enabled + + + def setTunerOutputSampleRate(self,allocation_id, sr): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if sr<0: raise FRONTEND.BadParameterException("Invalid SR") + + newbw,newsr,decimation = self.findBestBWSR(0,sr) + if not newbw: + self._log.debug( "Can't Satisfy BW and SR request") + raise FRONTEND.BadParameterException( "Can't Satisfy BW and SR request") + + self._log.debug("Setting BW and Sample Rate %s, %s " %(newbw,newsr)) + #set new SR + self.datagenerators[idx].sr = sr + self.datagenerators[idx].keyword_dict['FRONTEND::BANDWIDTH'] = newbw + self.datagenerators[idx].updateandPushSRI() + self.frontend_tuner_status[idx].sample_rate = newsr + self.frontend_tuner_status[idx].bandwidth = newbw + self.frontend_tuner_status[idx].decimation = decimation + + def 
getTunerOutputSampleRate(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].sample_rate + + ''' + ************************************************************* + Functions servicing the RFInfo port(s) + - port_name is the port over which the call was received + *************************************************************''' + def get_rf_flow_id(self,port_name): + return self.rfinfo.rf_flow_id + + def set_rf_flow_id(self,port_name, id): + pass + + def get_rfinfo_pkt(self,port_name): + return self.rfinfo + + def _createEmptyRFInfo(self): + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + _rfinfopkt=FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + return _rfinfopkt + + def set_rfinfo_pkt(self,port_name, pkt): + self.rfinfo = pkt + for tuner in self.frontend_tuner_status: + tuner.rf_flow_id=self.rfinfo.rf_flow_id + for datagenerator in self.datagenerators: + datagenerator.keyword_dict['FRONTEND::RF_FLOW_ID'] = self.rfinfo.rf_flow_id + + ''' + ************************************************************* + Helper Functions + *************************************************************''' + + def convert_rf_to_if(self,rf_freq): + #Convert freq based on RF/IF of Analog Tuner + + if self.rfinfo.if_center_freq > 0: + ifoffset = rf_freq - self.rfinfo.rf_center_freq + if_freq =self.rfinfo.if_center_freq+ifoffset + else: + if_freq = rf_freq + + self._log.debug("Converted RF Freq of %s to IF Freq %s based on Input RF of %s, IF of %s, and spectral inversion %s" %(rf_freq,if_freq,self.rfinfo.rf_center_freq,self.rfinfo.if_center_freq,self.rfinfo.spectrum_inverted)) + return float(if_freq) + + #Start with 
smallest possible and see if that can satisfy request. + # Sending 0 for don't care should pass because we are looking for requested to be less than available + def findBestBWSR(self,requestedBW,requestedSR): + self._log.debug("findBestBWSR") + for bw,sr,decimation in self.AVAILABLE_BW_SR: + self._log.debug("findBestBWSR. Requested: " + str(requestedBW) +" "+ str(requestedSR) + " evaluating: " + str(bw)+" "+ str(sr)) + if bw>= requestedBW and sr>=requestedSR: + return (bw,sr,decimation) + return (False,False,False) + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + start_device(RX_Digitizer_Sim_i) + diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/Waveform.py b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/Waveform.py new file mode 100644 index 000000000..d620dd174 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/python/Waveform.py @@ -0,0 +1,297 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file distributed with this +# source distribution. +# +# This file is part of REDHAWK Basic Components SigGen. +# +# REDHAWK Basic Components SigGen is free software: you can redistribute it and/or modify it under the terms of +# the GNU Lesser General Public License as published by the Free Software Foundation, either +# version 3 of the License, or (at your option) any later version. +# +# REDHAWK Basic Components SigGen is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +# PURPOSE. See the GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License along with this +# program. If not, see http://www.gnu.org/licenses/. 
class Waveform:
    """Test-signal generator.

    Produces sample buffers for the following waveform types:
    whitenoise, sincos, square, triangle, sawtooth, pulse, constant,
    lrs (linear recursive sequence noise) and ramp.

    Every generator returns a new list of floats of length n * |spa|,
    where spa ("scalars per atom") is 1 for real data and 2 for complex
    interleaved data (sincos additionally accepts -1/-2; see sincos()).

    BUG FIX applied throughout: output buffers were created with
    "range(n*spa)", which is only a mutable list on Python 2 and yields an
    *empty* list when spa is negative (making the spa=-1/-2 sincos
    variants raise IndexError).  Buffers are now pre-sized float lists,
    which is behaviorally identical where the old code worked and also
    correct on Python 3 and for negative spa.
    """

    # Linear congruential generator constants used by whitenoise()
    A = 67081293.0
    B = 14181771.0
    T26 = 67108864.0        # 2^26
    BI = B / T26
    seed = 123456789        # default noise seed; see setSeed()

    TWOPI = math.pi * 2.0
    HALFPI = math.pi / 2.0

    # Binary variant of a Giga. Note that this differs from the typical
    # (decimal) version of Giga which is 10^9.  Value: 1G = 2^30
    B1G = 1073741824.

    def setSeed(self, value):
        """Set the noise-generator seed; non-positive values are ignored."""
        if value > 0:
            self.seed = value

    def whitenoise(self, sdev, n, spa=1):
        """Create a white noise buffer of the given magnitude.

        sdev -- standard deviation of the noise
        n    -- number of atoms
        spa  -- scalars per atom, 2 for complex
        Returns the new data buffer (list of floats).  The generator state
        is persisted in self.seed so successive calls continue the stream.
        """
        outbuff = [0.0] * (n * abs(spa))
        v1 = 0.0; v2 = 0.0; sum1 = 0.0
        fdev = float(sdev)
        # NOTE(review): a Box-Muller-style transform, but scaled with
        # log base 10 (math.log(10.0)) rather than the textbook natural
        # log -- preserved exactly as the original.
        factor = -2.0 / math.log(10.0)
        sis = float(self.seed) / self.T26

        maxIndex = n * spa
        # A while loop (not for) because an invalid sum1 forces a retry of
        # the same output index; we cannot know in advance how many draws
        # will be rejected.
        i = 0
        while i < maxIndex:
            sis = sis * self.A + self.BI
            sis = sis - float(int(sis))
            v1 = float(sis)
            v1 = v1 + v1 - 1

            sis = sis * self.A + self.BI
            sis = sis - float(int(sis))
            v2 = float(sis)
            v2 = v2 + v2 - 1

            sum1 = v1 * v1 + v2 * v2
            if sum1 >= 1.0 or sum1 < 1e-20:
                continue    # rejected draw: redo this pair
            sum1 = fdev * float(math.sqrt(factor * math.log(sum1) / sum1))
            outbuff[i] = float(v1 * sum1)
            if (i + 1) < maxIndex:
                outbuff[i + 1] = float(v2 * sum1)
            i += 2

        # Persist the generator state for the next call
        self.seed = int(sis * self.T26)

        return outbuff

    # Fast rotator algorithm based on the angle-sum identities:
    #   sin(x+dp) = sin(x)*cos(dp) + cos(x)*sin(dp)
    #   cos(x+dp) = cos(x)*cos(dp) - sin(x)*sin(dp)
    def sincos(self, amp, p, dp, n, spa):
        """Create a SIN or COSINE buffer of the given magnitude.

        amp -- amplitude
        p   -- starting phase, in cycles
        dp  -- phase increment per atom, in cycles
        n   -- number of atoms
        spa --  2: complex via rotator,   1: real (sin) via rotator,
               -1: real via direct math.sin, -2: complex via direct sin/cos
        Returns the new data buffer.
        """
        outbuff = [0.0] * (n * abs(spa))
        cxr = amp * math.cos(p * self.TWOPI)
        cxi = amp * math.sin(p * self.TWOPI)
        dxr = math.cos(dp * self.TWOPI)
        dxi = math.sin(dp * self.TWOPI)
        if spa == 2:
            for i in range(0, n * 2, 2):
                outbuff[i] = float(cxr)
                outbuff[i + 1] = float(cxi)
                axr = (cxr * dxr) - (cxi * dxi)
                axi = (cxr * dxi) + (cxi * dxr)
                cxr = axr
                cxi = axi
        elif spa == 1:
            for i in range(n):
                outbuff[i] = float(cxi)
                axr = (cxr * dxr) - (cxi * dxi)
                axi = (cxr * dxi) + (cxi * dxr)
                cxr = axr
                cxi = axi
        elif spa == -1:
            # Direct (slower, drift-free) form
            for i in range(n):
                outbuff[i] = float(amp * math.sin(p * self.TWOPI))
                p += dp
        elif spa == -2:
            for i in range(0, n * 2, 2):
                outbuff[i] = float(amp * math.cos(p * self.TWOPI))
                outbuff[i + 1] = float(amp * math.sin(p * self.TWOPI))
                p += dp

        return outbuff

    def square(self, amp, p, dp, n, spa):
        """Create a SQUARE wave buffer: -amp for phase in [0, 0.5),
        +amp for [0.5, 1.0).  p/dp are in cycles; spa is 2 for complex.
        Returns the new data buffer.
        """
        outbuff = [0.0] * (n * abs(spa))
        value = 0.0
        famp = float(amp)
        famp2 = -famp
        for i in range(0, n * spa, spa):
            value = famp2
            if p >= 1.0:
                p -= 1.0        # wrap phase; this atom stays low
            elif p >= 0.5:
                value = famp
            outbuff[i] = float(value)
            if spa == 2:
                outbuff[i + 1] = float(value)
            p += dp

        return outbuff

    def triangle(self, amp, p, dp, n, spa):
        """Create a TRIANGLE wave buffer of the given amplitude.
        p/dp are in cycles; spa is 2 for complex.  Returns the buffer.
        """
        outbuff = [0.0] * (n * abs(spa))
        value = 0.0
        famp = float(amp)
        famp2 = 4 * famp        # slope: full swing per half cycle
        fp = float(p) - 0.5     # center the phase around the peak
        for i in range(0, n * spa, spa):
            if fp >= 0.5:
                fp -= 1.0
            if fp > 0:
                value = float(famp - fp * famp2)
            else:
                value = float(famp + fp * famp2)
            outbuff[i] = float(value)
            if spa == 2:
                outbuff[i + 1] = float(value)
            fp += dp

        return outbuff

    def sawtooth(self, amp, p, dp, n, spa):
        """Create a SAWTOOTH wave buffer of the given amplitude.
        p/dp are in cycles; spa is 2 for complex.  Returns the buffer.
        """
        outbuff = [0.0] * (n * abs(spa))
        value = 0.0
        famp = float(amp)
        famp2 = 2 * famp
        fp = float(p) - 0.5
        for i in range(0, n * spa, spa):
            if fp >= 0.5:
                fp -= 1.0
            value = float(fp * famp2)
            outbuff[i] = float(value)
            if spa == 2:
                outbuff[i + 1] = float(value)
            fp += dp

        return outbuff

    def pulse(self, amp, p, dp, n, spa):
        """Create a PULSE buffer: amp for the single atom on which the
        phase wraps past 1.0, zero elsewhere.  Returns the buffer.
        """
        outbuff = [0.0] * (n * abs(spa))
        value = 0.0
        famp = float(amp)
        for i in range(0, n * spa, spa):
            if p >= 1.0:
                value = famp
                p -= 1.0
            else:
                value = 0
            outbuff[i] = float(value)
            if spa == 2:
                outbuff[i + 1] = float(value)
            p += dp

        return outbuff

    def constant(self, amp, n, spa):
        """Create a buffer of n*spa copies of the constant value amp."""
        return [float(amp)] * (n * spa)

    def lrs(self, amp, n, spa, lrs):
        """Create an LRS (linear recursive sequence) noise buffer.

        amp -- amplitude scale
        n   -- number of atoms
        spa -- scalars per atom, 2 for complex
        lrs -- 32-bit shift-register state from a previous call
        Returns the new data buffer.
        NOTE(review): unlike ramp(), this does not return the updated lrs
        state even though the docapi implies callers need it to continue
        the sequence; preserved as-is since changing the return type would
        break existing callers.
        """
        outbuff = [0.0] * (n * abs(spa))
        factor = (amp / 2.0 / self.B1G)
        for i in range(0, n * spa, spa):
            data = (factor * lrs)
            outbuff[i] = float(data)
            if spa == 2:
                outbuff[i + 1] = float(data)

            # Feedback from register taps 0,1,5,25 into bit 0
            bit0 = (~(lrs ^ (lrs >> 1) ^ (lrs >> 5) ^ (lrs >> 25))) & 0x1
            lrs <<= 1
            lrs |= bit0
            # Correct for python not overflowing int_32s
            lrs &= 0xffffffff
            if lrs >= 2 ** 31 - 1:
                lrs &= 0x7fffffff
                lrs -= 2 ** 31

        return outbuff

    def ramp(self, amp, n, spa, data):
        """Create a RAMP buffer that counts up by one per atom, wrapping to
        -amp once the value reaches amp.

        data -- ramp seed carried over from the previous call
        Returns (buffer, data) where data seeds the next call.
        """
        outbuff = [0.0] * (n * abs(spa))
        for i in range(0, n * spa, spa):
            outbuff[i] = float(data)
            if spa == 2:
                outbuff[i + 1] = float(data)
            data = data + 1
            if data >= amp:
                data = int(-amp)

        return outbuff, data
BULKIO.UNITS_TIME, 0, 0.0, 0.0, BULKIO.UNITS_NONE, 0, self.stream_id, False, []) + + + def start(self): + + if not self.thread: + print "Starting Thread" + self.thread = threading.Thread(target = self._run) + self.thread.start() + + + + def stop(self): + if self.thread: + print "Stopping Thread" + self.terminate = True + self.thread.join(2) + else: + print "No Thread to Stop" + + + def _run(self): + while True: + if self.terminate: + return + self._push_data() + + def enableDataFlow(self): + self.next_time = bulkio.timestamp.now() + self.updateandPushSRI() + self.enable = True + + def disableDataFlow(self): + print "Disable Data Flow" + self.enable = False + self.outputPort.pushPacket([], self.next_time, True, self.stream_id) + + def _push_data(self): + if self.enable: + + self.sample_time_delta = 1.0/self.sr + + self.delta_phase = self.cf * self.sample_time_delta + self.delta_phase_offset = self.chirp * self.sample_time_delta * self.sample_time_delta + + data = self._waveform.sincos(self.magnitude, self.phase, self.delta_phase, self.last_xfer_len, 1) + data = [int(i) for i in data] + self.phase += self.delta_phase*self.last_xfer_len # increment phase + self.phase -= math.floor(self.phase) # module 1.0 + + self.outputPort.pushPacket(data, self.next_time, False, self.stream_id) + + # Advance time + self.next_time.tfsec += self.last_xfer_len * self.sample_time_delta + if self.next_time.tfsec > 1.0: + self.next_time.tfsec -= 1.0 + self.next_time.twsec += 1.0 + + # If we are throttling, wait...otherwise run at full speed + if self.throttle: + wait_amt = self.last_xfer_len * self.sample_time_delta + try: + time.sleep(wait_amt) + finally: + return + return + def updateandPushSRI(self): + self.sri.xdelta =1.0/self.sr + self.sri.streamID = self.stream_id + keywords = [] + for keyword in self.keyword_dict.keys(): + keywords.append(CF.DataType(keyword, any.to_any(self.keyword_dict[keyword]))) + self.sri.keywords = keywords + self.sri.mode = 1 + try: + 
class TestPort(object):
    """Minimal stand-in for a BULKIO output port, used only by the
    __main__ smoke test below; it just reports each pushed packet's size.
    """

    def pushPacket(self, data, sometime, EOS, stream_id):
        # Fixed typo in the message ("Pusing" -> "Pushing") and switched to
        # the print() form with a single %-formatted argument so the stub
        # prints identically on Python 2 and Python 3.
        print("Pushing Data of length %d" % len(data))
# ---------------------------------------------------------------------------
# Module-level test configuration.  External scripts configure the module
# through these set/get pairs before the unittest cases are collected.
# ---------------------------------------------------------------------------

DEBUG_LEVEL = 0

def set_debug_level(lvl=0):
    """Set the module-wide debug verbosity (0 disables debug output)."""
    global DEBUG_LEVEL
    DEBUG_LEVEL = lvl

def get_debug_level():
    """Return the module-wide debug verbosity."""
    return DEBUG_LEVEL

# Define device under test below
DEVICE_INFO = {'SPD': None}

def set_device_info(dev_info):
    """Replace the device-under-test description dict (expects at least an
    'SPD' key pointing at the device's SPD file)."""
    global DEVICE_INFO
    DEVICE_INFO = dev_info

def get_device_info():
    """Return the device-under-test description dict."""
    return DEVICE_INFO

IMPL_ID = None

def set_impl_id(id):
    """Select a specific SPD implementation id to launch (None = default)."""
    global IMPL_ID
    IMPL_ID = id

def get_impl_id():
    """Return the selected SPD implementation id."""
    return IMPL_ID

# execparams {'prop_name':'value',...}
DEVICE_INFO['execparams'] = {}
{'FRONTEND::tuner_status::tuner_type':[str], + 'FRONTEND::tuner_status::allocation_id_csv':[str], + 'FRONTEND::tuner_status::center_frequency':[float], + 'FRONTEND::tuner_status::bandwidth':[float], + 'FRONTEND::tuner_status::sample_rate':[float], + 'FRONTEND::tuner_status::group_id':[str], + 'FRONTEND::tuner_status::rf_flow_id':[str], + 'FRONTEND::tuner_status::enabled':[bool]} + FE_tuner_status_fields_opt = {'FRONTEND::tuner_status::bandwidth_tolerance':[float], + 'FRONTEND::tuner_status::sample_rate_tolerance':[float], + 'FRONTEND::tuner_status::complex':[bool], + 'FRONTEND::tuner_status::gain':[float], + 'FRONTEND::tuner_status::agc':[bool], + 'FRONTEND::tuner_status::valid':[bool], + 'FRONTEND::tuner_status::available_frequency':[str], + 'FRONTEND::tuner_status::available_bandwidth':[str], + 'FRONTEND::tuner_status::available_gain':[str], + 'FRONTEND::tuner_status::available_sample_rate':[str], + 'FRONTEND::tuner_status::reference_source':[int,long], + 'FRONTEND::tuner_status::output_format':[str], + 'FRONTEND::tuner_status::output_multicast':[str], + 'FRONTEND::tuner_status::output_vlan':[int,long], + 'FRONTEND::tuner_status::output_port':[int,long], + 'FRONTEND::tuner_status::decimation':[int,long], + 'FRONTEND::tuner_status::tuner_number':[int,long]} + + # get lists of all methods/functions defined in digital tuner idl + digital_tuner_idl = filter(lambda x: x[0]!='_', dir(TunerControl_idl._0_FRONTEND._objref_DigitalTuner)) + # In future, could also do this: + #import frontend + #digital_tuner_idl = filter(lambda x: x[0]!='_', dir(frontend.InDigitalTunerPort)) + + # map data types to DataSink port names + port_map = {'dataShort':'shortIn', + 'dataFloat':'floatIn', + 'dataUlong':'uLongIn', + 'dataDouble':'doubleIn', + 'dataUshort':'ushortIn', + 'dataLong':'longIn', + 'dataUlongLong':'ulonglongIn', + 'dataLongLong':'longlongIn', + 'dataOctet':'octetIn', + 'dataXML':'xmlIn', + 'dataChar':'charIn', + 'dataFile':'fileIn'} + + @classmethod + def 
    @classmethod
    def devicePostLaunch(self):
        # Hook: override in a subclass with device-specific commands to run
        # immediately after the device is launched (no-op by default).
        pass

    @classmethod
    def devicePreRelease(self):
        # Hook: override in a subclass with device-specific commands to run
        # immediately before the device is released (no-op by default).
        pass
    @classmethod
    def devicePostRelease(self):
        # Hook: override in a subclass with device-specific commands to run
        # immediately after the device is released (no-op by default).
        pass
    @classmethod
    def getToShutdownState(self):
        ''' Function used to release device after each test case
            Add special shut-down commands for your device to deviceShutdown() function
        '''

        ### device-specific pre-release commands
        self.devicePreRelease()

        # Drop the narrowed CF.Device reference first, then release the
        # launched component itself.
        if self.dut_ref:
            self.dut_ref = None
        if self.dut:
            self.dut.releaseObject()
            self.dut = None

        ### device-specific post-release commands
        self.devicePostRelease()
    @classmethod
    def tearDownClass(self):
        # Dump the accumulated per-check report lines, then a summary table
        # of check counts, to stderr once the whole class has finished.
        self.testReport.append('\nFRONTEND Test - Completed')
        for line in self.testReport:
            print >> sys.stderr, line

        print >> sys.stderr, '\nReport Statistics:'
        # Left-hand labels are truncated/padded so the dotted leaders align.
        MAX_LHS_WIDTH=40
        MIN_SEPARATION=5
        total_nonsilent_checks=0
        for key in sorted(self.testReportStats.keys()):
            if key == 'Total checks made':
                continue
            total_nonsilent_checks+=self.testReportStats[key]
            print >> sys.stderr, ' ',key[:MAX_LHS_WIDTH], '.'*(MIN_SEPARATION+MAX_LHS_WIDTH-len(key[:MAX_LHS_WIDTH])), self.testReportStats[key]
        if 'Total checks made' not in self.testReportStats:
            self.testReportStats['Total checks made'] = 0
        # Checks run with silentSuccess/silentFailure don't appear in the
        # per-result buckets; derive their count from the overall total.
        key='Checks with silent results'
        total_silent_checks=self.testReportStats['Total checks made']-total_nonsilent_checks
        print >> sys.stderr, ' ',key[:MAX_LHS_WIDTH], '.'*(MIN_SEPARATION+MAX_LHS_WIDTH-len(key[:MAX_LHS_WIDTH])), total_silent_checks
        key='Total checks made'
        print >> sys.stderr, ' ',key[:MAX_LHS_WIDTH], '.'*(MIN_SEPARATION+MAX_LHS_WIDTH-len(key[:MAX_LHS_WIDTH])), self.testReportStats[key]

        #self.printTestReport()
self.check(props.has_key('DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d'), True, 'Has device_kind property') + self.check(props['DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d'], 'FRONTEND::TUNER', 'device_kind = FRONTEND::TUNER') + + def testFRONTEND_1_2(self): + ''' ALL 1.2 Verify that there is a device_model property + ''' + props = self.dut.query([]) + props = properties.props_to_dict(props) + self.check(props.has_key('DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb'), True, 'Has device_model property') + + def testFRONTEND_1_3(self): + ''' ALL 1.3 Verify that there is a FRONTEND Status property + ''' + props = self.dut.query([]) + props = properties.props_to_dict(props) + self.check(props.has_key('FRONTEND::tuner_status'), True, 'Has tuner_status property') + # check for required fields + #pp(props['FRONTEND::tuner_status']) + if (len(props['FRONTEND::tuner_status']) == 0): + print '\nERROR - tuner_status is empty. Check that the unit test is configured to reach the target device hardware.\n' + self.check(False,True,'\nERROR - tuner_status is empty. 
Check that the unit test is configured to reach the target device hardware.') + + success = True + for field in self.FE_tuner_status_fields_req: + if not self.check(props['FRONTEND::tuner_status'][-1].has_key(field), True, 'tuner_status has %s required field'%field): + success = False + if not success: + self.check(False,True,'\nERROR - tuner_status does not have all required fields.') + + + def testFRONTEND_1_4(self): + ''' ALL 1.4 Verify there is a tuner port + ''' + #Attempt to get both ports and compare if None, then xor (^) the boolean result + reason = 'both' + try: + + DigitalTuner = self.dut.getPort('DigitalTuner_in') + print "&&&&&&&&", DigitalTuner + except: + print "%%%%%%%%%%%" + DigitalTuner= None + reason = 'analog' + try: + AnalogTuner = self.dut.getPort('AnalogTuner_in') + print "&&&&&&&&", AnalogTuner + except: + print "%%%%%%%%%%%" + AnalogTuner = None + reason = 'digital' + if (DigitalTuner==None) and (AnalogTuner==None): + reason = 'none' + self.check( (DigitalTuner== None)^(AnalogTuner== None), True, 'Has an analog or digital tuner input port (%s)'%reason) + + + + def testFRONTEND_3_1_1(self): + ''' RX_DIG 1.1 Allocate a single tuner + ''' + t1 = self._generateRD() + t1Alloc = self._generateAlloc(t1) + if not self.check(self.dut_ref.allocateCapacity(t1Alloc), True, 'Can allocate single RX_DIGITIZER') and DEBUG_LEVEL >= 4: + # Do some DEBUG + print 'RX_DIG 1.1 FAILURE - Can allocate single RX_DIGITIZER' + pp(t1) + pp(t1Alloc) + + # Deallocate the tuner + self.dut_ref.deallocateCapacity(t1Alloc) + self.check(True, True, 'Deallocated RX_DIGITIZER without error') + + def testFRONTEND_3_1_2(self): + ''' RX_DIG 1.2 Allocate to max tuners + ''' + ts = [] + for t in range(0,self.device_discovery['RX_DIGITIZER']): + ts.append(self._generateRD()) + tAlloc = self._generateAlloc(ts[-1]) + if not self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocating RX_DIGITIZER number: %s'%(t), silentSuccess=True) and DEBUG_LEVEL >= 4: + # Do some DEBUG + 
    def testFRONTEND_3_1_3(self):
        ''' RX_DIG 1.3 Verify over-allocation failure
        '''

        # Allocate to max tuners
        ts = []
        for t in range(0,self.device_discovery['RX_DIGITIZER']):
            ts.append(self._generateRD())
            tAlloc = self._generateAlloc(ts[-1])
            if not self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocating RX_DIGITIZER number: %s'%(t), silentSuccess=True) and DEBUG_LEVEL >= 4:
                # Do some DEBUG
                print 'RX_DIG 1.3 FAILURE - Allocating RX_DIGITIZER number: %s'%(t)
                pp(ts)
                pp(tAlloc)
        self.check(True, True, 'Allocated to max RX_DIGITIZERs')

        # Verify over-allocation failure: with every channel occupied, one
        # more allocation request must return False.
        over_t = self._generateRD()
        over_tAlloc = self._generateAlloc(over_t)
        if not self.check(self.dut_ref.allocateCapacity(over_tAlloc), False, 'Over-allocate RX_DIGITIZER check') and DEBUG_LEVEL >= 4:
            # Do some DEBUG
            print 'RX_DIG 1.3 FAILURE - Over-allocate RX_DIGITIZER check'
            pp(ts)
            pp(over_t)
            pp(over_tAlloc)
        try:
            self.dut_ref.deallocateCapacity(over_tAlloc)
        except CF.Device.InvalidCapacity, e:
            # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid
            pass

        # deallocate everything
        for t in ts:
            tAlloc = self._generateAlloc(t)
            self.dut_ref.deallocateCapacity(tAlloc)
        self.check(True, True, 'Deallocated all RX_DIGITIZER tuners')
    def testFRONTEND_3_2_02(self):
        ''' RX_DIG 2.2 Verify InvalidCapacityException on malformed request (empty-string alloc ID)
        '''
        ttype = 'RX_DIGITIZER'
        tuner = self._generateRD()
        # First, check empty string (the None case is covered by 2.3)
        tuner['ALLOC_ID'] = ''
        tAlloc = self._generateAlloc(tuner)
        # Expected outcome is the InvalidCapacity branch; any other
        # exception or a plain return value is recorded as a failure.
        try:
            retval = self.dut_ref.allocateCapacity(tAlloc)
        except CF.Device.InvalidCapacity:
            self.check(True, True, 'Allocate %s with malformed request (alloc_id="") check (produces InvalidCapcity exception)'%(ttype))
        except Exception, e:
            self.check(False, True, 'Allocate %s with malformed request (alloc_id="") check (produces %s exception, should produce InvalidCapacity exception)'%(ttype,e.__class__.__name__))
        else:
            self.check(False, True, 'Allocate %s with malformed request (alloc_id="") check (returns %s, should produce InvalidCapacity exception)'%(ttype,retval))
        try:
            self.dut_ref.deallocateCapacity(tAlloc)
        except CF.Device.InvalidCapacity, e:
            # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid
            pass
    def testFRONTEND_3_2_04(self):
        ''' RX_DIG 2.4 Verify failure on alloc with invalid group id (generate new uuid)
        '''
        ttype = 'RX_DIGITIZER'
        tuner = self._generateRD()
        # A freshly generated uuid cannot match any group id the device
        # advertises, so the allocation must return False (not raise).
        tuner['GROUP_ID'] = str(uuid.uuid4())
        tAlloc = self._generateAlloc(tuner)
        try:
            retval = self.dut_ref.allocateCapacity(tAlloc)
        except Exception, e:
            self.check(False, True, 'Allocate %s with invalid GROUP_ID check (produces %s exception, should return False)'%(ttype,e.__class__.__name__))
        else:
            self.check(False, retval, 'Allocate %s with invalid GROUP_ID check'%(ttype))
        try:
            self.dut_ref.deallocateCapacity(tAlloc)
        except CF.Device.InvalidCapacity, e:
            # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid
            pass
    def testFRONTEND_3_2_06(self):
        ''' RX_DIG 2.6 Allocate Listener via listener struct
        '''
        # Allocate a controlling tuner first; the listener allocation below
        # attaches to it by allocation id.
        tuner = self._generateRD()
        ttype='RX_DIGITIZER'
        tAlloc = self._generateAlloc(tuner)
        #self.dut_ref.allocateCapacity(tAlloc)
        if not self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocate controller %s'%(ttype)) and DEBUG_LEVEL >= 4:
            # Do some DEBUG
            print 'RX_DIG 2.6 FAILURE - Allocate controller %s'%(ttype)
            pp(tuner)
            pp(tAlloc)

        tListener = self._generateListener(tuner)
        tListenerAlloc = self._generateListenerAlloc(tListener)
        if not self.check(self.dut_ref.allocateCapacity(tListenerAlloc), True, 'Allocate listener %s using listener allocation struct'%(ttype)) and DEBUG_LEVEL >= 4:
            # Do some DEBUG
            print 'RX_DIG 2.6 FAILURE - Allocate listener %s using listener allocation struct'%(ttype)
            pp(tuner)
            pp(tAlloc)
            pp(tListener)
            pp(tListenerAlloc)

        print "DEBUG -- done with allocations, now time to deallocate"

        # Deallocate listener using listener allocation struct
        try:
            self.dut_ref.deallocateCapacity(tListenerAlloc)
        except Exception,e:
            self.check(False, True, 'Deallocated listener %s using listener allocation struct without error'%(ttype))
        else:
            self.check(True, True, 'Deallocated listener %s using listener allocation struct without error'%(ttype))

        print "DEBUG -- done with deallocation of listener, now time to deallocate the controller"
        self.dut_ref.deallocateCapacity(tAlloc)
    def testFRONTEND_3_2_08(self):
        ''' RX_DIG 2.8 Verify failure on listener alloc w/o matching existing alloc id
        '''
        tuner = self._generateRD()
        ttype='RX_DIGITIZER'
        tAlloc = self._generateAlloc(tuner)
        if not self.dut_ref.allocateCapacity(tAlloc) and DEBUG_LEVEL >= 4:
            # Do some DEBUG
            print 'RX_DIG 2.8 FAILURE - Controller allocation'
            pp(tuner)
            pp(tAlloc)
        # Point the listener at a freshly generated uuid instead of the
        # controller's allocation id, so no existing allocation matches and
        # the listener allocation must fail (return False).
        tListener = self._generateListener(tuner)
        tListener['ALLOC_ID'] = str(uuid.uuid4())
        tListenerAlloc = self._generateListenerAlloc(tListener)
        if not self.check(self.dut_ref.allocateCapacity(tListenerAlloc), False, 'Allocate listener %s using listener allocation struct with bad allocation id check'%(ttype)) and DEBUG_LEVEL >= 4:
            # Do some DEBUG
            print 'RX_DIG 2.8 FAILURE - Allocate listener %s using listener allocation struct with bad allocation id check'%(ttype)
            pp(tuner)
            pp(tAlloc)
            pp(tListener)
            pp(tListenerAlloc)
        try:
            self.dut_ref.deallocateCapacity(tListenerAlloc)
        except CF.Device.InvalidCapacity, e:
            # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid
            pass

        self.dut_ref.deallocateCapacity(tAlloc)
self._generateListener(tuner) + tListenerAlloc = self._generateListenerAlloc(tListener) + self.check(self.dut_ref.allocateCapacity(tListenerAlloc), True, 'Allocate listener %s using listener allocation struct'%(ttype)) + try: + self.dut_ref.deallocateCapacity(tAlloc) + except Exception, e: + self.check(False, True, 'Deallocated controller %s which has a listener allocation'%(ttype)) + else: + self.check(True, True, 'Deallocated controller %s which has a listener allocation'%(ttype)) + has_listener = self._tunerStatusHasAllocId(tListener['LISTENER_ID']) + self.check(has_listener, False, 'Listener %s deallocated as result of controller %s deallocation'%(ttype,ttype)) + try: + self.dut_ref.deallocateCapacity(tListenerAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_11(self): + ''' RX_DIG 2.11 allocate below minimum center frequency + ''' + tuner = self._generateRD() + ttype='RX_DIGITIZER' + low=DEVICE_INFO['RX_DIGITIZER']['CF_MIN'] + + tuner['CF'] = float(int(low / 2.0)) + tAlloc = self._generateAlloc(tuner) + self.check(self.dut_ref.allocateCapacity(tAlloc), False, 'Allocate %s below lowest frequency in range(%s < %s)'%(ttype,tuner['CF'],low)) + try: + self.dut_ref.deallocateCapacity(tAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_12(self): + ''' RX_DIG 2.12 allocate above maximum center frequency + ''' + tuner = self._generateRD() + ttype='RX_DIGITIZER' + high=DEVICE_INFO['RX_DIGITIZER']['CF_MAX'] + + tuner['CF'] = float(high * 2.0) + tAlloc = self._generateAlloc(tuner) + self.check(self.dut_ref.allocateCapacity(tAlloc), False, 'Allocate %s above highest frequency in range(%s > %s)'%(ttype,tuner['CF'],high)) + try: + self.dut_ref.deallocateCapacity(tAlloc) + except 
CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_13a(self): + ''' RX_DIG 2.13a allocate at minimum center frequency + ''' + tuner = self._generateRD() + ttype='RX_DIGITIZER' + low=DEVICE_INFO['RX_DIGITIZER']['CF_MIN'] + + tuner['CF'] = float(low) + tAlloc = self._generateAlloc(tuner) + if not self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocate %s at lowest center frequency in range (%s)'%(ttype,low)) and DEBUG_LEVEL >= 4: + # Do some DEBUG + print 'RX_DIG 2.13a FAILURE' + pp(tuner) + pp(tAlloc) + try: + self.dut_ref.deallocateCapacity(tAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_13b(self): + ''' RX_DIG 2.13b allocate just below minimum center frequency (partial coverage, should fail) + ''' + tuner = self._generateRD() + ttype='RX_DIGITIZER' + low=DEVICE_INFO['RX_DIGITIZER']['CF_MIN'] + bw=DEVICE_INFO['RX_DIGITIZER']['BW_MIN'] + sr=DEVICE_INFO['RX_DIGITIZER']['SR_MIN'] + bw_sr = max(bw,sr) + print low,bw,sr + tuner['CF'] = float(low-bw_sr/2.0) + tuner['BW'] = float(bw) + tuner['SR'] = float(sr) + tAlloc = self._generateAlloc(tuner) + if not self.check(self.dut_ref.allocateCapacity(tAlloc), False, 'Check failure when allocating partially covered %s channel at lowest frequency in range (%s)'%(ttype,low)) and DEBUG_LEVEL >= 4: + # Do some DEBUG + print 'RX_DIG 2.13b FAILURE' + pp(tuner) + pp(tAlloc) + try: + self.dut_ref.deallocateCapacity(tAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_14a(self): + ''' RX_DIG 2.14a allocate at maximum center frequency + ''' + tuner = self._generateRD() + ttype='RX_DIGITIZER' + 
high=DEVICE_INFO['RX_DIGITIZER']['CF_MAX'] + + tuner['CF'] = float(high) + tAlloc = self._generateAlloc(tuner) + if not self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocate %s at highest center frequency in range(%s)'%(ttype,high)) and DEBUG_LEVEL >= 4: + # Do some DEBUG + print 'RX_DIG 2.14a FAILURE' + pp(tuner) + pp(tAlloc) + self.dut_ref.deallocateCapacity(tAlloc) + + def testFRONTEND_3_2_14b(self): + ''' RX_DIG 2.14b allocate just above maximum center frequency (partial coverage, should fail) + ''' + tuner = self._generateRD() + ttype='RX_DIGITIZER' + high=DEVICE_INFO['RX_DIGITIZER']['CF_MAX'] + bw=DEVICE_INFO['RX_DIGITIZER']['BW_MIN'] + sr=DEVICE_INFO['RX_DIGITIZER']['SR_MIN'] + bw_sr = max(bw,sr) + + tuner['CF'] = float(high+bw_sr/2.0) + tuner['BW'] = float(bw) + tuner['SR'] = float(sr) + tAlloc = self._generateAlloc(tuner) + if not self.check(self.dut_ref.allocateCapacity(tAlloc), False, 'Check failure when allocating partially covered %s channel at highest frequency in range (%s)'%(ttype,high)) and DEBUG_LEVEL >= 4: + # Do some DEBUG + print 'RX_DIG 2.14b FAILURE' + pp(tuner) + pp(tAlloc) + try: + self.dut_ref.deallocateCapacity(tAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_15(self): + ''' RX_DIG 2.15 allocate with bandwidth = 0 (succeed) + ''' + ttype='RX_DIGITIZER' + tuner = self._generateRD() + + tuner['BW'] = float(0.0) + tAlloc = self._generateAlloc(tuner) + self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocate %s without specifying bandwidth (BW=0)'%(ttype)) + self.dut_ref.deallocateCapacity(tAlloc) + + def testFRONTEND_3_2_16(self): + ''' RX_DIG 2.16 allocate with sample rate = 0 (succeed) + ''' + ttype='RX_DIGITIZER' + tuner = self._generateRD() + + tuner['SR'] = float(0.0) + tAlloc = self._generateAlloc(tuner) + self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocate 
%s without specifying sample rate (SR=0)'%(ttype)) + self.dut_ref.deallocateCapacity(tAlloc) + + def testFRONTEND_3_2_17(self): + ''' RX_DIG 2.17 allocate below minimum bandwidth capable (succeed) + ''' + ttype='RX_DIGITIZER' + tuner = self._generateRD() + low=DEVICE_INFO['RX_DIGITIZER']['BW_MIN'] + + tuner['BW'] = float(int(low / 1.333333333)) + tuner['SR'] = float(0) + tAlloc = self._generateAlloc(tuner) + self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocate %s below lowest bandwidth in range(%s < %s)'%(ttype,tuner['BW'],low)) + self.dut_ref.deallocateCapacity(tAlloc) + + def testFRONTEND_3_2_18(self): + ''' RX_DIG 2.18 allocate above maximum bandwidth capable (fail) + ''' + ttype='RX_DIGITIZER' + tuner = self._generateRD() + high=DEVICE_INFO['RX_DIGITIZER']['BW_MAX'] + + if self.check(high, 0, 'Upper bandwidth range set to 0, cannot test above highest bandwidth', silentFailure=True, successMsg='info'): + return + + tuner['BW'] = float(high * 2.0) + tuner['SR'] = float(0) + tAlloc = self._generateAlloc(tuner) + failed = not self.check(self.dut_ref.allocateCapacity(tAlloc), False, 'Allocate %s above highest bandwidth in range(%s > %s)'%(ttype,tuner['BW'],high)) + # DEBUG + ''' + if failed: + print 'DEBUG - failed max bw alloc test' + print 'alloc request:' + pp(tuner) + print 'tuner status:' + pp(self._getTunerStatusProp(tuner['ALLOC_ID'])) + print 'END DEBUG - failed max bw alloc test' + ''' + try: + self.dut_ref.deallocateCapacity(tAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_19(self): + ''' RX_DIG 2.19 allocate outside of bandwidth tolerance (fail) + ''' + ttype='RX_DIGITIZER' + tuner = self._generateRD() + low=DEVICE_INFO['RX_DIGITIZER']['BW_MIN'] + + if self.check(low, 0, 'Lower bandwidth range set to 0, cannot test tolerance below lowest bandwidth', silentFailure=True, successMsg='info'): + return + 
+ tuner['BW'] = float(int(low / 2.0)) + tuner['BW_TOLERANCE'] = float(10.0) + tuner['SR'] = float(0) + tAlloc = self._generateAlloc(tuner) + failed = not self.check(self.dut_ref.allocateCapacity(tAlloc), False, 'Allocate %s outside of bandwidth tolerance (%s + %s%% < %s'%(ttype,tuner['BW'],tuner['BW_TOLERANCE'],low)) + # DEBUG + ''' + if failed: + print 'DEBUG - failed outside bw tolerance test' + print 'alloc request:' + pp(tuner) + print 'tuner status:' + pp(self._getTunerStatusProp(tuner['ALLOC_ID'])) + print 'END DEBUG - failed outside bw tolerance test' + ''' + try: + self.dut_ref.deallocateCapacity(tAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_20(self): + ''' RX_DIG 2.20 allocate below minimum sample rate capable (succeed) + ''' + ttype='RX_DIGITIZER' + tuner = self._generateRD() + low=DEVICE_INFO['RX_DIGITIZER']['SR_MIN'] + + tuner['SR'] = float(int(low / 1.333333333)) + tuner['BW'] = float(0) + tAlloc = self._generateAlloc(tuner) + self.check(self.dut_ref.allocateCapacity(tAlloc), True, 'Allocate %s below lowest sample rate in range(%s < %s)'%(ttype,tuner['SR'],low)) + self.dut_ref.deallocateCapacity(tAlloc) + + def testFRONTEND_3_2_21(self): + ''' RX_DIG 2.21 allocate above maximum sample rate capable (fail) + ''' + ttype='RX_DIGITIZER' + tuner = self._generateRD() + high=DEVICE_INFO['RX_DIGITIZER']['SR_MAX'] + + tuner['SR'] = float(high * 2.0) + tuner['BW'] = float(0) + tAlloc = self._generateAlloc(tuner) + self.check(self.dut_ref.allocateCapacity(tAlloc), False, 'Allocate %s above highest sample rate in range(%s > %s)'%(ttype,tuner['SR'],high)) + try: + self.dut_ref.deallocateCapacity(tAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_2_22(self): + ''' RX_DIG 2.22 
allocate outside of sample rate tolerance (fail) + ''' + ttype='RX_DIGITIZER' + tuner = self._generateRD() + low=DEVICE_INFO['RX_DIGITIZER']['SR_MIN'] + + tuner['SR'] = float(int(low / 2.0)) + tuner['SR_TOLERANCE'] = float(10.0) + tuner['BW'] = float(0) + tAlloc = self._generateAlloc(tuner) + failed = not self.check(self.dut_ref.allocateCapacity(tAlloc), False, 'Allocate %s outside of sample rate tolerance (%s + %s%% < %s'%(ttype,tuner['SR'],tuner['SR_TOLERANCE'],low)) + # DEBUG + ''' + if failed: + print 'DEBUG - failed outside sr tolerance test' + print 'alloc request:' + pp(tuner) + print 'tuner status:' + pp(self._getTunerStatusProp(tuner['ALLOC_ID'])) + print 'END DEBUG - failed outside sr tolerance test' + ''' + try: + self.dut_ref.deallocateCapacity(tAlloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testFRONTEND_3_3_01(self): + ''' RX_DIG 3.1 Verify connection to Tuner port + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + + # Verify connection to Tuner port + self.check(tuner_control != None, True, 'Can get %s port'%(port_name)) + self.check(CORBA.is_nil(tuner_control), False, 'Port reference is not nil') + + def testFRONTEND_3_3_02(self): + ''' RX_DIG 3.2 Verify digital tuner port functions exist + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + function_list = self.digital_tuner_idl + + for attr in function_list: + try: + self.check(callable(getattr(tuner_control,attr)), True, '%s port has function %s'%(port_name,attr)) + except AttributeError, e: + self.check(False, True, '%s port has function %s'%(port_name,attr)) + + def testFRONTEND_3_3_03(self): + ''' RX_DIG 3.3 Verify digital tuner port getTunerType function + ''' + port_name = 'DigitalTuner_in' 
+ tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::tuner_type') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerType(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerType produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerType produces exception %s'%(port_name,e)) + else: + self.check(type(resp), str, '%s.getTunerType has correct return type'%(port_name)) + self.check(resp in ['RX','TX','RX_DIGITIZER','CHANNELIZER','RX_DIGITIZER_CHANNELIZER','DDC'], True, '%s.getTunerType return value is within expected results'%(port_name)) + self.check(resp, 'RX_DIGITIZER', '%s.getTunerType return value is correct for RX_DIGITIZER'%(port_name)) + if status_val!=None: + self.check(resp, status_val, '%s.getTunerType matches frontend tuner status prop'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_04(self): + ''' RX_DIG 3.4 Verify digital tuner port getTunerDeviceControl function w/ controller + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'%s.getTunerDeviceControl(controller_id) ERROR -- could not allocate controller'%(port_name),throwOnFailure=True,silentSuccess=True) + + try: + 
resp = tuner_control.getTunerDeviceControl(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerDeviceControl(controller_id) produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerDeviceControl(controller_id) produces exception %s'%(port_name,e)) + else: + self.check(type(resp), bool, '%s.getTunerDeviceControl(controller_id) has correct return type'%(port_name)) + self.check(resp in [True,False], True, '%s.getTunerDeviceControl(controller_id) return value is within expected results'%(port_name)) + self.check(resp, True, '%s.getTunerDeviceControl(controller_id) return True for controller alloc_id'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_05(self): + ''' RX_DIG 3.5 Verify digital tuner port getTunerDeviceControl function w/ listener + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + + controller = self._generateRD() + pp(controller) + listener = self._generateListener(controller) + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'%s.getTunerDeviceControl(listener_id) ERROR -- could not allocate controller'%(port_name),throwOnFailure=True,silentSuccess=True) + listener_id = listener['LISTENER_ID'] + listener_alloc = self._generateListenerAlloc(listener) + self.check(self.dut_ref.allocateCapacity(listener_alloc),True,'%s.getTunerDeviceControl(listener_id) ERROR -- could not allocate listener'%(port_name),throwOnFailure=True,silentSuccess=True) + + try: + resp = tuner_control.getTunerDeviceControl(listener_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerDeviceControl(listener_id) produces NotSupportedException'%(port_name)) + except Exception, e: + 
self.check(True,False,'%s.getTunerDeviceControl(listener_id) produces exception %s'%(port_name,e)) + else: + self.check(type(resp), bool, '%s.getTunerDeviceControl(listener_id) has correct return type'%(port_name)) + self.check(resp in [True,False], True, '%s.getTunerDeviceControl(listener_id) return value is within expected results'%(port_name)) + self.check(resp, False, '%s.getTunerDeviceControl(listener_id) returns False for listener alloc_id'%(port_name)) + + self.dut_ref.deallocateCapacity(listener_alloc) + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_06(self): + ''' RX_DIG 3.6 Verify digital tuner port getTunerGroupId function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::group_id') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerGroupId(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerGroupId produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerGroupId produces exception %s'%(port_name,e)) + else: + self.check(type(resp), str, '%s.getTunerGroupId has correct return type'%(port_name)) + self.check(type(resp), str, '%s.getTunerGroupId return value is within expected results'%(port_name)) + if status_val!=None: + print "###################################" + print resp + print status_val + self.check(resp, status_val, '%s.getTunerGroupId matches frontend tuner status prop'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) 
+ + def testFRONTEND_3_3_07(self): + ''' RX_DIG 3.7 Verify digital tuner port getTunerRfFlowId function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::rf_flow_id') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerRfFlowId(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerRfFlowId produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerRfFlowId produces exception %s'%(port_name,e)) + else: + self.check(type(resp), str, '%s.getTunerRfFlowId has correct return type'%(port_name)) + self.check(type(resp), str, '%s.getTunerRfFlowId return value is within expected results'%(port_name)) + if status_val!=None: + print "###################################" + print resp + print status_val + self.check(resp, status_val, '%s.getTunerRfFlowId matches frontend tuner status prop'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_08(self): + ''' RX_DIG 3.8 Verify digital tuner port getTunerCenterFrequency function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: 
+ status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::center_frequency') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerCenterFrequency(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerCenterFrequency produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerCenterFrequency produces exception %s'%(port_name,e)) + else: + self.check(type(resp), float, '%s.getTunerCenterFrequency has correct return type'%(port_name)) + self.check(resp >= 0.0, True, '%s.getTunerCenterFrequency return value is within expected results'%(port_name)) + if status_val!=None: + print "###################################" + print resp + print status_val + self.checkAlmostEqual(resp, status_val, '%s.getTunerCenterFrequency matches frontend tuner status prop'%(port_name),places=0) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_09(self): + ''' RX_DIG 3.9 Verify digital tuner port getTunerBandwidth function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::bandwidth') + except KeyError: + status_val = None + + # getTunerBandwidth + # double: >= 0? 
+ try: + resp = tuner_control.getTunerBandwidth(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerBandwidth produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerBandwidth produces exception %s'%(port_name,e)) + else: + self.check(type(resp), float, '%s.getTunerBandwidth has correct return type'%(port_name)) + self.check(resp >= 0.0, True, '%s.getTunerBandwidth return value is within expected results'%(port_name)) + if status_val!=None: + self.checkAlmostEqual(resp, status_val, '%s.getTunerBandwidth matches frontend tuner status prop'%(port_name),places=0) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_10(self): + ''' RX_DIG 3.10 Verify digital tuner port getTunerOutputSampleRate function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::sample_rate') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerOutputSampleRate(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerOutputSampleRate produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerOutputSampleRate produces exception %s'%(port_name,e)) + else: + self.check(type(resp), float, '%s.getTunerOutputSampleRate has correct return type'%(port_name)) + self.check(resp >= 0.0, True, '%s.getTunerOutputSampleRate return value is within expected results'%(port_name)) + if status_val!=None: + self.checkAlmostEqual(resp, 
status_val, '%s.getTunerOutputSampleRate matches frontend tuner status prop'%(port_name),places=0) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_11(self): + ''' RX_DIG 3.11 Verify digital tuner port getTunerAgcEnable function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::agc') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerAgcEnable(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerAgcEnable produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerAgcEnable produces exception %s'%(port_name,e)) + else: + self.check(type(resp), bool, '%s.getTunerAgcEnable has correct return type'%(port_name)) + self.check(resp in [True,False], True, '%s.getTunerAgcEnable return value is within expected results'%(port_name)) + if status_val!=None: + self.check(resp, status_val, '%s.getTunerAgcEnable matches frontend tuner status prop'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_12(self): + ''' RX_DIG 3.12 Verify digital tuner port getTunerGain function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- 
could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::gain') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerGain(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerGain produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerGain produces exception %s'%(port_name,e)) + else: + self.check(type(resp), float, '%s.getTunerGain has correct return type'%(port_name)) + self.check(type(resp), float, '%s.getTunerGain return value is within expected results'%(port_name)) + if status_val!=None: + self.checkAlmostEqual(resp, status_val, '%s.getTunerGain matches frontend tuner status prop'%(port_name),places=2) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_13(self): + ''' RX_DIG 3.13 Verify digital tuner port getTunerReferenceSource function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::reference_source') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerReferenceSource(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerReferenceSource produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerReferenceSource produces exception %s'%(port_name,e)) + else: + self.check(type(resp) in [int,long], True, '%s.getTunerReferenceSource returns 
correct type'%(port_name)) + self.check(resp in [0,1], True, '%s.getTunerReferenceSource return value within expected results'%(port_name)) + if status_val!=None: + self.check(resp, status_val, '%s.getTunerReferenceSource matches frontend tuner status prop'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_14(self): + ''' RX_DIG 3.14 Verify digital tuner port getTunerEnable function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id, 'FRONTEND::tuner_status::enabled') + except KeyError: + status_val = None + + try: + resp = tuner_control.getTunerEnable(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerEnable produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerEnable produces exception %s'%(port_name,e)) + else: + self.check(type(resp), bool, '%s.getTunerEnable has correct return type'%(port_name)) + self.check(resp in [True,False], True, '%s.getTunerEnable return value is within expected results'%(port_name)) + if status_val!=None: + self.check(resp, status_val, '%s.getTunerEnable matches frontend tuner status prop'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_15(self): + ''' RX_DIG 3.15 Verify digital tuner port getTunerStatus function + ''' + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id 
= controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + try: + status_val = self._getTunerStatusProp(controller_id) + except KeyError: + status_val = None + props_type = type(properties.props_from_dict({})) + + try: + resp = tuner_control.getTunerStatus(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerStatus produces NotSupportedException'%(port_name)) + except Exception, e: + self.check(True,False,'%s.getTunerStatus produces exception %s'%(port_name,e)) + else: + self.check(type(resp), props_type, '%s.getTunerStatus has correct return type'%(port_name)) + self.check(type(resp), props_type, '%s.getTunerStatus return value is within expected results'%(port_name)) + resp = properties.props_to_dict(resp) + #pp(resp) + self.check(controller_id in resp['FRONTEND::tuner_status::allocation_id_csv'].split(','), True, '%s.getTunerStatus return value has correct tuner status for allocation ID requested'%(port_name)) + if status_val!=None: + print "###################################" + print resp + print status_val + self.check(resp, status_val, '%s.getTunerStatus matches frontend tuner status prop'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + # Verify setter functions + # for each of the following, do bounds checking in addition to simple setter checking + # setTunerCenterFrequency + # setTunerBandwidth + # setTunerOutputSampleRate + # setTunerGain + + # Verify in-bounds retune + + def testFRONTEND_3_3_16(self): + ''' RX_DIG 3.16 Verify digital tuner port setTunerCenterFrequency function in-bounds retune + ''' + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations 
+ controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + #check Center Freq: tune to min, max, then orig + try: + cf = tuner_control.getTunerCenterFrequency(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerCenterFrequency produces NotSupportedException -- cannot verify setTunerCenterFrequency function'%(port_name), successMsg='info') + try: + tuner_control.setTunerCenterFrequency(controller_id, tuner_info['CF_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerCenterFrequency produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerCenterFrequency executes without throwing exception'%(port_name)) + except Exception, e: + self.check(False, True,'%s.getTunerCenterFrequency produces Exception -- cannot verify setTunerCenterFrequency function',failureMsg='WARN') + try: + tuner_control.setTunerCenterFrequency(controller_id, tuner_info['CF_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerCenterFrequency produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerCenterFrequency executes without throwing exception'%(port_name)) + else: + try: + tuner_control.setTunerCenterFrequency(controller_id, tuner_info['CF_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerCenterFrequency produces NotSupportedException'%(port_name)) + except FRONTEND.BadParameterException, e: + self.check(False, True,'In-bounds setting of frequency - set to minimum CF (%s) produces BadParameterException'%tuner_info['CF_MIN'] ) + raise + except FRONTEND.FrontendException, e: + self.check(False, True,'In-bounds setting of frequency - set to minimum CF (%s) produces FrontendException'%tuner_info['CF_MIN'] ) + raise + except 
Exception, e: + self.check(False, True,'In-bounds setting of frequency - set to minimum CF (%s) produces Exception'%tuner_info['CF_MIN']) + raise + else: + self.checkAlmostEqual(tuner_info['CF_MIN'],tuner_control.getTunerCenterFrequency(controller_id),'In-bounds re-tune of frequency - tuned to minimum CF (%s)'%(tuner_info['CF_MIN']),places=0) + tuner_control.setTunerCenterFrequency(controller_id, tuner_info['CF_MAX']) + self.checkAlmostEqual(tuner_info['CF_MAX'],tuner_control.getTunerCenterFrequency(controller_id),'In-bounds re-tune of frequency - tuned to maximum CF (%s)'%(tuner_info['CF_MAX']),places=0) + tuner_control.setTunerCenterFrequency(controller_id, cf) + self.checkAlmostEqual(cf,tuner_control.getTunerCenterFrequency(controller_id),'In-bounds re-tune of frequency - tuned back to original CF (%s)'%(cf),places=0) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_17(self): + ''' RX_DIG 3.17 Verify digital tuner port setTunerBandwidth function in-bounds retune + ''' + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + #Check Bandwidth: tune to min, max, then orig + try: + bw = tuner_control.getTunerBandwidth(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerBandwidth produces NotSupportedException -- cannot verify setTunerBandwidth function'%(port_name), successMsg='info') + try: + tuner_control.setTunerBandwidth(controller_id, tuner_info['BW_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerBandwidth produces 
NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerBandwidth executes without throwing exception'%(port_name)) + except Exception, e: + self.check(False, True,'%s.getTunerBandwidth produces Exception -- cannot verify setTunerBandwidth function',failureMsg='WARN') + try: + tuner_control.setTunerBandwidth(controller_id, tuner_info['BW_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerBandwidth produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerBandwidth executes without throwing exception'%(port_name)) + else: + try: + tuner_control.setTunerBandwidth(controller_id, tuner_info['BW_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerBandwidth produces NotSupportedException'%(port_name)) + except FRONTEND.BadParameterException, e: + self.check(False, True,'In-bounds setting of bandwidth - set to minimum BW (%s) produces BadParameterException'%tuner_info['BW_MIN'] ) + raise + except FRONTEND.FrontendException, e: + self.check(False, True,'In-bounds setting of bandwidth - set to minimum BW (%s) produces FrontendException'%tuner_info['BW_MIN'] ) + raise + except Exception, e: + self.check(False, True,'In-bounds setting of bandwidth - set to minimum BW (%s) produces Exception'%tuner_info['BW_MIN']) + raise + else: + self.checkAlmostEqual(tuner_info['BW_MIN'],tuner_control.getTunerBandwidth(controller_id),'In-bounds re-tune of bandwidth - set to minimum BW (%s)'%tuner_info['BW_MIN'],places=0) + tuner_control.setTunerBandwidth(controller_id, tuner_info['BW_MAX']) + self.checkAlmostEqual(tuner_info['BW_MAX'],tuner_control.getTunerBandwidth(controller_id),'In-bounds re-tune of bandwidth - set to maximum BW (%s)'%tuner_info['BW_MAX'],places=0) + tuner_control.setTunerBandwidth(controller_id, bw) + self.checkAlmostEqual(bw,tuner_control.getTunerBandwidth(controller_id),'In-bounds re-tune of bandwidth - set to original BW (%s)'%bw,places=0) + + 
self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_18(self): + ''' RX_DIG 3.18 Verify digital tuner port setTunerOutputSampleRate function in-bounds retune + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + #Check SR: tune to min, max, then orig + try: + sr = tuner_control.getTunerOutputSampleRate(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerOutputSampleRate produces NotSupportedException -- cannot verify setTunerOutputSampleRate function'%(port_name), successMsg='info') + try: + tuner_control.setTunerOutputSampleRate(controller_id, tuner_info['SR_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerOutputSampleRate produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerOutputSampleRate executes without throwing exception'%(port_name)) + except Exception, e: + self.check(False, True,'%s.getTunerOutputSampleRate produces Exception -- cannot verify setTunerOutputSampleRate function',failureMsg='WARN') + try: + tuner_control.setTunerOutputSampleRate(controller_id, tuner_info['SR_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerOutputSampleRate produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerOutputSampleRate executes without throwing exception'%(port_name)) + else: + try: + tuner_control.setTunerOutputSampleRate(controller_id, tuner_info['SR_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerOutputSampleRate 
produces NotSupportedException'%(port_name)) + except FRONTEND.BadParameterException, e: + self.check(False, True,'In-bounds setting of sample rate - set to minimum SR (%s) produces BadParameterException'%tuner_info['SR_MIN'] ) + raise + except FRONTEND.FrontendException, e: + self.check(False, True,'In-bounds setting of sample rate - set to minimum SR (%s) produces FrontendException'%tuner_info['SR_MIN'] ) + raise + except Exception, e: + self.check(False, True,'In-bounds setting of sample rate - set to minimum SR (%s) produces Exception'%tuner_info['SR_MIN']) + raise + else: + self.checkAlmostEqual(tuner_info['SR_MIN'],tuner_control.getTunerOutputSampleRate(controller_id),'In-bounds re-tune of sample rate - set to minimum SR (%s)'%tuner_info['SR_MIN'],places=0) + tuner_control.setTunerOutputSampleRate(controller_id, tuner_info['SR_MAX']) + self.checkAlmostEqual(tuner_info['SR_MAX'],tuner_control.getTunerOutputSampleRate(controller_id),'In-bounds re-tune of sample rate - set to maximum SR (%s)'%tuner_info['SR_MAX'],places=0) + tuner_control.setTunerOutputSampleRate(controller_id, sr) + self.checkAlmostEqual(sr,tuner_control.getTunerOutputSampleRate(controller_id),'In-bounds re-tune of sample rate - set to original SR (%s)'%sr,places=0) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_19(self): + ''' RX_DIG 3.19 Verify digital tuner port setTunerGain function in-bounds retune + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # check gain: set to min, max, then orig + try: + gain = 
tuner_control.getTunerGain(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerGain produces NotSupportedException -- cannot verify setTunerGain function'%(port_name), successMsg='info') + try: + tuner_control.setTunerGain(controller_id, tuner_info['GAIN_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerGain produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerGain executes without throwing exception'%(port_name)) + except Exception, e: + self.check(False, True,'%s.getTunerGain produces Exception -- cannot verify setTunerGain function',failureMsg='WARN') + try: + tuner_control.setTunerGain(controller_id, tuner_info['GAIN_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerGain produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerGain executes without throwing exception'%(port_name)) + else: + try: + tuner_control.setTunerGain(controller_id, tuner_info['GAIN_MIN']) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerGain produces NotSupportedException'%(port_name)) + except FRONTEND.BadParameterException, e: + self.check(False, True,'In-bounds setting of gain - set to minimum gain (%s) produces BadParameterException'%tuner_info['GAIN_MIN']) + raise + except FRONTEND.FrontendException, e: + self.check(False, True,'In-bounds setting of gain - set to minimum gain (%s) produces FrontendException'%tuner_info['GAIN_MIN']) + raise + except Exception, e: + self.check(False, True,'In-bounds setting of gain - set to minimum gain (%s) produces Exception'%tuner_info['GAIN_MIN']) + raise + else: + self.checkAlmostEqual(tuner_info['GAIN_MIN'],tuner_control.getTunerGain(controller_id),'In-bounds setting of gain - set to minimum gain (%s)'%tuner_info['GAIN_MIN'],places=2) + tuner_control.setTunerGain(controller_id, tuner_info['GAIN_MAX']) + 
self.checkAlmostEqual(tuner_info['GAIN_MAX'],tuner_control.getTunerGain(controller_id),'In-bounds setting of gain - set to maximum gain (%s)'%tuner_info['GAIN_MAX'],places=2) + tuner_control.setTunerGain(controller_id, gain) + self.checkAlmostEqual(gain,tuner_control.getTunerGain(controller_id),'In-bounds setting of gain - set to original gain (%s)'%gain,places=2) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_20(self): + ''' RX_DIG 3.20 Verify digital tuner port setTunerCenterFrequency function out of bounds retune + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + #Verify outside-bounds retune + #check Center Freq: + try: + cf = tuner_control.getTunerCenterFrequency(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerCenterFrequency produces NotSupportedException -- cannot verify out-of-bounds frequency tuning'%(port_name), successMsg='info') + except Exception, e: + self.check(False, True,'%s.getTunerCenterFrequency produces Exception -- cannot verify out-of-bounds frequency tuning',failureMsg='WARN') + else: + try: + tuner_control.setTunerCenterFrequency(controller_id, tuner_info['CF_MAX'] + cf) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerCenterFrequency produces NotSupportedException -- cannot verify out-of-bounds frequency tuning'%(port_name)) + except FRONTEND.BadParameterException, e: + self.check(True, True,'Out-of-bounds re-tune of frequency produces BadParameterException') + except FRONTEND.FrontendException, e: + 
self.check(False, True,'Out-of-bounds re-tune of frequency produces BadParameterException (produces FrontendException instead)') + raise + except Exception, e: + self.check(False, True,'Out-of-bounds re-tune of frequency produces BadParameterException (produces another Exception instead)') + raise + else: + self.check(False, True,'Out-of-bounds re-tune of frequency produces BadParameterException') + if not self.checkAlmostEqual(cf, tuner_control.getTunerCenterFrequency(controller_id),'Out-of-bounds re-tune of frequency - CF unchanged',places=0): + try: + tuner_control.setTunerCenterFrequency(controller_id, cf) + except: + pass + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_21(self): + ''' RX_DIG 3.21 Verify digital tuner port setTunerBandwidth function out of bounds retune + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + #Check Bandwidth + try: + bw = tuner_control.getTunerBandwidth(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerBandwidth produces NotSupportedException -- cannot verify out-of-bounds bandwidth tuning'%(port_name), successMsg='info') + except Exception, e: + self.check(False, True,'%s.getTunerBandwidth produces Exception -- cannot verify out-of-bounds bandwidth tuning',failureMsg='WARN') + else: + try: + tuner_control.setTunerBandwidth(controller_id, tuner_info['BW_MAX'] + bw) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerBandwidth produces NotSupportedException -- cannot verify out-of-bounds bandwidth 
tuning'%(port_name)) + except FRONTEND.BadParameterException, e: + self.check(True, True,'Out-of-bounds re-tune of bandwidth produces BadParameterException') + except FRONTEND.FrontendException, e: + self.check(False, True,'Out-of-bounds re-tune of bandwidth produces BadParameterException (produces FrontendException instead)') + raise + except Exception, e: + self.check(False, True,'Out-of-bounds re-tune of bandwidth produces BadParameterException (produces another Exception instead)') + raise + else: + self.check(False, True,'Out-of-bounds re-tune of bandwidth produces BadParameterException') + # DEBUG + ''' + print 'DEBUG - out of bounds retune of bw did not produce exception' + print 'DEBUG - tuned bw: %s'%(tuner_info['BW_MAX'] + bw) + print 'DEBUG - tuner status:' + pp(self._getTunerStatusProp(controller_id)) + ''' + new_bw = tuner_control.getTunerBandwidth(controller_id) + if not self.checkAlmostEqual(bw, new_bw,'Out-of-bounds re-tune of bandwidth - BW unchanged',places=0): + # DEBUG + ''' + print 'DEBUG - out of bounds retune of bw incorrectly caused change in bw' + print 'DEBUG - orig bw: %s new bw: %s tuned bw: %s'%(bw,new_bw,tuner_info['BW_MAX'] + bw) + # end DEBUG + ''' + try: + tuner_control.setTunerBandwidth(controller_id, bw) + except: + pass + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_22(self): + ''' RX_DIG 3.22 Verify digital tuner port setTunerOutputSampleRate function out of bounds retune + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + #Check SR + try: + sr = 
tuner_control.getTunerOutputSampleRate(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerOutputSampleRate produces NotSupportedException -- cannot verify out-of-bounds sample rate tuning'%(port_name), successMsg='info') + except Exception, e: + self.check(False, True,'%s.getTunerOutputSampleRate produces Exception -- cannot verify out-of-bounds sample rate tuning',failureMsg='WARN') + else: + try: + tuner_control.setTunerOutputSampleRate(controller_id, tuner_info['SR_MAX'] + sr) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerOutputSampleRate produces NotSupportedException -- cannot verify out-of-bounds sample rate tuning'%(port_name)) + except FRONTEND.BadParameterException, e: + self.check(True, True,'Out-of-bounds re-tune of sample rate produces BadParameterException') + except FRONTEND.FrontendException, e: + self.check(False, True,'Out-of-bounds re-tune of sample rate produces BadParameterException (produces FrontendException instead)') + raise + except Exception, e: + self.check(False, True,'Out-of-bounds re-tune of sample rate produces BadParameterException (produces another Exception instead)') + raise + else: + self.check(False, True,'Out-of-bounds re-tune of sample rate produces BadParameterException') + new_sr = tuner_control.getTunerOutputSampleRate(controller_id) + if not self.checkAlmostEqual(sr, new_sr,'Out-of-bounds re-tune of sample rate - SR unchanged',places=0): + # DEBUG + ''' + print 'DEBUG - out of bounds retune of sr incorrectly caused change in sr' + print 'DEBUG - orig sr: %s new sr: %s tuned sr: %s'%(sr,new_sr,DEVICE_INFO['RX_DIGITIZER']['SR_MAX'] + sr) + # end DEBUG + ''' + try: + tuner_control.setTunerOutputSampleRate(controller_id, sr) + except: + pass + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_23(self): + ''' RX_DIG 3.23 Verify digital tuner port setTunerGain function out of bounds retune + ''' + + 
tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + #Check gain + try: + gain = tuner_control.getTunerGain(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerGain produces NotSupportedException -- cannot verify out-of-bounds gain setting'%(port_name), successMsg='info') + except Exception, e: + self.check(False, True,'%s.getTunerGain produces Exception -- cannot verify out-of-bounds gain tuning',failureMsg='WARN') + else: + try: + tuner_control.setTunerGain(controller_id, tuner_info['GAIN_MAX'] + abs(tuner_info['GAIN_MAX']-tuner_info['GAIN_MIN']) + 1) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerGain produces NotSupportedException -- cannot verify out-of-bounds gain setting'%(port_name)) + except FRONTEND.BadParameterException, e: + self.check(True, True,'Out-of-bounds setting of gain produces BadParameterException') + except FRONTEND.FrontendException, e: + self.check(False, True,'Out-of-bounds setting of gain produces BadParameterException (produces FrontendException instead)') + raise + except Exception, e: + self.check(False, True,'Out-of-bounds re-tune of gain produces BadParameterException (produces another Exception instead)') + raise + else: + self.check(False, True,'Out-of-bounds setting of gain produces BadParameterException') + new_gain = tuner_control.getTunerGain(controller_id) + if not self.checkAlmostEqual(gain, new_gain,'Out-of-bounds setting of gain - gain unchanged',places=2): + + # DEBUG + print 'DEBUG - out of bounds retune of gain incorrectly caused change in 
gain' + print 'DEBUG - orig gain: %s new gain: %s tuned gain: %s'%(gain,new_gain,tuner_info['GAIN_MAX'] + abs(tuner_info['GAIN_MAX']-tuner_info['GAIN_MIN']) + 1) + # end DEBUG + + try: + tuner_control.setTunerGain(controller_id, gain) + except: + pass + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_24(self): + ''' RX_DIG 3.24 Verify digital tuner port setTunerAgcEnable function + ''' + + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # test changing values for the rest + # setTunerAgcEnable + try: + orig = tuner_control.getTunerAgcEnable(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerAgcEnable produces NotSupportedException -- cannot test setTunerAgcEnable function'%(port_name)) + try: + tuner_control.setTunerAgcEnable(controller_id, False) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerAgcEnable produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerAgcEnable executes without throwing exception'%(port_name)) + else: + try: + tuner_control.setTunerAgcEnable(controller_id, not orig) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerAgcEnable produces NotSupportedException'%(port_name)) + else: + self.check(not orig,tuner_control.getTunerAgcEnable(controller_id),'setting agc enable -- set to new value') + tuner_control.setTunerAgcEnable(controller_id, orig) + self.check(orig,tuner_control.getTunerAgcEnable(controller_id),'setting agc enable -- set back to original value') + + self.dut_ref.deallocateCapacity(controller_alloc) + 
+ def testFRONTEND_3_3_25(self): + ''' RX_DIG 3.25 Verify digital tuner port setTunerReferenceSource function + ''' + + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # setTunerReferenceSource + try: + orig = tuner_control.getTunerReferenceSource(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerReferenceSource produces NotSupportedException -- cannot test setTunerReferenceSource function'%(port_name)) + try: + tuner_control.setTunerReferenceSource(controller_id, False) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerReferenceSource produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerReferenceSource executes without throwing exception'%(port_name)) + else: + try: + tuner_control.setTunerReferenceSource(controller_id, int(not orig)) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerReferenceSource produces NotSupportedException'%(port_name)) + else: + self.check(int(not orig),tuner_control.getTunerReferenceSource(controller_id),'setting tuner reference source -- set to new value') + tuner_control.setTunerReferenceSource(controller_id, orig) + self.check(orig,tuner_control.getTunerReferenceSource(controller_id),'setting tuner reference source -- set back to original value') + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_26(self): + ''' RX_DIG 3.26 Verify digital tuner port setTunerEnable function + ''' + + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + 
tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # setTunerEnable + try: + orig = tuner_control.getTunerEnable(controller_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.getTunerEnable produces NotSupportedException -- cannot test setTunerEnable function'%(port_name)) + try: + tuner_control.setTunerEnable(controller_id, True) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerEnable produces NotSupportedException'%(port_name)) + else: + self.check(True,True,'%s.setTunerEnable executes without throwing exception'%(port_name)) + else: + try: + tuner_control.setTunerEnable(controller_id, not orig) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerEnable produces NotSupportedException'%(port_name)) + else: + self.check(not orig,tuner_control.getTunerEnable(controller_id),'setting tuner enable -- set to new value') + tuner_control.setTunerEnable(controller_id, orig) + self.check(orig,tuner_control.getTunerEnable(controller_id),'setting tuner enable -- set back to original value') + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_27(self): + ''' RX_DIG 3.27 Verify digital tuner port getter functions w/ bad alloc id + ''' + + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + function_list = self.digital_tuner_idl + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate 
controller',throwOnFailure=True,silentSuccess=True) + + # verify invalid alloc_id -> FrontendException + bad_id = str(uuid.uuid4()) + for attr in filter(lambda x: x.startswith('get'),function_list): + f = getattr(tuner_control,attr) + try: + resp = f(bad_id) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.%s called with bad alloc_id produces NotSupportedException'%(port_name,attr)) + except FRONTEND.FrontendException: + self.check(True,True,'%s.%s called with bad alloc_id (should produce FrontendException)'%(port_name,attr)) + except Exception, e: + self.check(False,True,'%s.%s called with bad alloc_id (produces %s exception, should produce FrontendException)'%(port_name,attr,e.__class__.__name__)) + else: + self.check(False,True,'%s.%s called with bad alloc_id (does not produce exception, should produce FrontendException)'%(port_name,attr)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_28(self): + ''' RX_DIG 3.28 Verify digital tuner port setTunerCenterFrequency function w/ bad alloc id + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # setTunerCenterFrequency + bad_id = str(uuid.uuid4()) + try: + tuner_control.setTunerCenterFrequency(bad_id, float(tuner_info['CF_MIN'])) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerCenterFrequency called with bad alloc_id produces NotSupportedException'%(port_name)) + except FRONTEND.FrontendException: + self.check(True,True,'%s.setTunerCenterFrequency called with bad alloc_id produces FrontendException'%(port_name)) + 
except Exception, e: + self.check(False,True,'%s.setTunerCenterFrequency called with bad alloc_id (produces %s exception, should produce FrontendException)'%(port_name,e.__class__.__name__)) + else: + self.check(False,True,'%s.setTunerCenterFrequency called with bad alloc_id produces FrontendException (no exception)'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_29(self): + ''' RX_DIG 3.29 Verify digital tuner port setTunerBandwidth function w/ bad alloc id + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # setTunerBandwidth + bad_id = str(uuid.uuid4()) + try: + tuner_control.setTunerBandwidth(bad_id, float(tuner_info['BW_MIN'])) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerBandwidth called with bad alloc_id produces NotSupportedException'%(port_name)) + except FRONTEND.FrontendException: + self.check(True,True,'%s.setTunerBandwidth called with bad alloc_id produces FrontendException'%(port_name)) + except Exception, e: + self.check(False,True,'%s.setTunerBandwidth called with bad alloc_id (produces %s exception, should produce FrontendException)'%(port_name,e.__class__.__name__)) + else: + self.check(False,True,'%s.setTunerBandwidth called with bad alloc_id produces FrontendException (no exception)'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_30(self): + ''' RX_DIG 3.30 Verify digital tuner port setTunerOutputSampleRate function w/ bad alloc id + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 
'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # setTunerOutputSampleRate + bad_id = str(uuid.uuid4()) + try: + tuner_control.setTunerOutputSampleRate(bad_id, float(tuner_info['SR_MIN'])) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerOutputSampleRate called with bad alloc_id produces NotSupportedException'%(port_name)) + except FRONTEND.FrontendException: + self.check(True,True,'%s.setTunerOutputSampleRate called with bad alloc_id produces FrontendException'%(port_name)) + except Exception, e: + self.check(False,True,'%s.setTunerOutputSampleRate called with bad alloc_id (produces %s exception, should produce FrontendException)'%(port_name,e.__class__.__name__)) + else: + self.check(False,True,'%s.setTunerOutputSampleRate called with bad alloc_id produces FrontendException (no exception)'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_31(self): + ''' RX_DIG 3.31 Verify digital tuner port setTunerGain function w/ bad alloc id + ''' + + tuner_info=DEVICE_INFO['RX_DIGITIZER'] + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # setTunerGain + bad_id = str(uuid.uuid4()) + try: + tuner_control.setTunerGain(bad_id, float(tuner_info['GAIN_MIN'])) + 
except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerGain called with bad alloc_id produces NotSupportedException'%(port_name)) + except FRONTEND.FrontendException: + self.check(True,True,'%s.setTunerGain called with bad alloc_id produces FrontendException'%(port_name)) + except Exception, e: + self.check(False,True,'%s.setTunerGain called with bad alloc_id (produces %s exception, should produce FrontendException)'%(port_name,e.__class__.__name__)) + else: + self.check(False,True,'%s.setTunerGain called with bad alloc_id produces FrontendException (no exception)'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_32(self): + ''' RX_DIG 3.32 Verify digital tuner port setTunerAgcEnable function w/ bad alloc id + ''' + + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # setTunerAgcEnable + bad_id = str(uuid.uuid4()) + try: + tuner_control.setTunerAgcEnable(bad_id, False) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerAgcEnable called with bad alloc_id produces NotSupportedException'%(port_name)) + except FRONTEND.FrontendException: + self.check(True,True,'%s.setTunerAgcEnable called with bad alloc_id produces FrontendException'%(port_name)) + except Exception, e: + self.check(False,True,'%s.setTunerAgcEnable called with bad alloc_id (produces %s exception, should produce FrontendException)'%(port_name,e.__class__.__name__)) + else: + self.check(False,True,'%s.setTunerAgcEnable called with bad alloc_id produces FrontendException (no exception)'%(port_name)) + + 
self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_33(self): + ''' RX_DIG 3.33 Verify digital tuner port setTunerReferenceSource function w/ bad alloc id + ''' + + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + # setTunerReferenceSource + bad_id = str(uuid.uuid4()) + try: + tuner_control.setTunerReferenceSource(bad_id, 0) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerReferenceSource called with bad alloc_id produces NotSupportedException'%(port_name)) + except FRONTEND.FrontendException: + self.check(True,True,'%s.setTunerReferenceSource called with bad alloc_id produces FrontendException'%(port_name)) + except Exception, e: + self.check(False,True,'%s.setTunerReferenceSource called with bad alloc_id (produces %s exception, should produce FrontendException)'%(port_name,e.__class__.__name__)) + else: + self.check(False,True,'%s.setTunerReferenceSource called with bad alloc_id produces FrontendException (no exception)'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + def testFRONTEND_3_3_34(self): + ''' RX_DIG 3.34 Verify digital tuner port setTunerEnable function w/ bad alloc id + ''' + + port_name = 'DigitalTuner_in' + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + controller = self._generateRD() + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + 
+ # setTunerEnable + bad_id = str(uuid.uuid4()) + try: + tuner_control.setTunerEnable(bad_id, False) + except FRONTEND.NotSupportedException: + self.check(True,True,'%s.setTunerEnable called with bad alloc_id produces NotSupportedException'%(port_name)) + except FRONTEND.FrontendException: + self.check(True,True,'%s.setTunerEnable called with bad alloc_id produces FrontendException'%(port_name)) + except Exception, e: + self.check(False,True,'%s.setTunerEnable called with bad alloc_id (produces %s exception, should produce FrontendException)'%(port_name,e.__class__.__name__)) + else: + self.check(False,True,'%s.setTunerEnable called with bad alloc_id produces FrontendException (no exception)'%(port_name)) + + self.dut_ref.deallocateCapacity(controller_alloc) + + # TODO - noseify + def testFRONTEND_3_4_DataFlow(self): + ''' RX_DIG 4 DataFlow + ''' + + ttype='RX_DIGITIZER' + controller = self._generateRD() + #controller['CF'] = float(DEVICE_INFO[ttype]['CF_MIN'] + max(DEVICE_INFO[ttype]['BW_MIN'],DEVICE_INFO[ttype]['SR_MIN'])) + controller['BW'] = float(DEVICE_INFO[ttype]['BW_MIN']) + controller['SR'] = float(DEVICE_INFO[ttype]['SR_MIN']) + listener1 = self._generateListener(controller) + listener2 = self._generateListener(controller) + + tuner_control = self.dut.getPort('DigitalTuner_in') + for port in self.dut.ports: + if port._direction == 'Uses': + comp_port_name = port.name + comp_port_type = port._using.name + self._testBULKIO(tuner_control,comp_port_name,comp_port_type,ttype,controller,listener1,listener2) + + def _testBULKIO(self,tuner_control,comp_port_name,comp_port_type,ttype,controller,listener1=None,listener2=None): + if comp_port_type == 'dataSDDS': + print 'WARNING - dataSDDS output port testing not supported' + return + print 'Testing data flow on port:',comp_port_type,comp_port_name + pp(controller) + comp_port_obj = self.dut.getPort(str(comp_port_name)) + dataSink1 = sb.DataSink() + dataSink2 = sb.DataSink() + dataSink3 = sb.DataSink() + dataSink4 = 
sb.DataSink() + dataSink1_port_obj = dataSink1.getPort(self.port_map[comp_port_type]) + dataSink2_port_obj = dataSink2.getPort(self.port_map[comp_port_type]) + dataSink3_port_obj = dataSink3.getPort(self.port_map[comp_port_type]) + dataSink4_port_obj = dataSink4.getPort(self.port_map[comp_port_type]) + + #sb.start() + + # alloc a tuner + controller['ALLOC_ID'] = "control:"+str(uuid.uuid4()) # unique for each loop + tAlloc = self._generateAlloc(controller) + pp(controller) + pp(tAlloc) + comp_port_obj.connectPort(dataSink1_port_obj, controller['ALLOC_ID']) + self.dut_ref.allocateCapacity(tAlloc) + + # verify basic data flow + print >> sys.stderr,'attempting to get data from tuner' + for attempt in xrange(10): + time.sleep(1.0) + data1 = dataSink1.getData() + print >> sys.stderr,'attempt',attempt,'len(data1)=',len(data1) + if len(data1)>0: + break + self.check(len(data1)>0,True,'%s: Received data from tuner allocation'%(comp_port_name)) + + # verify SRI + #try: + # status = properties.props_to_dict(tuner_control.getTunerStatus(controller['ALLOC_ID'])) + #except FRONTEND.NotSupportedException, e: + status = self._getTunerStatusProp(controller['ALLOC_ID']) + pp(status) + if ttype=='DDC': + # get tuner status of parent CHAN/RDC... 
may be ambiguous + chan_props = {'FRONTEND::tuner_status::group_id':status['FRONTEND::tuner_status::group_id'], + 'FRONTEND::tuner_status::rf_flow_id':status['FRONTEND::tuner_status::rf_flow_id']} + ddc_props = {'FRONTEND::tuner_status::tuner_type':'DDC'} + try: + chan_status = self._findTunerStatusProps(match=chan_props,notmatch=ddc_props) + except KeyError: + chan_status = None + else: + if len(chan_status) != 1: + # ambiguous or no match found, can't be sure we're checking correct COL_RF + chan_status = None + else: + chan_status = chan_status[0] + + sri1 = dataSink1.sri() + print 'sri1',sri1 + self.checkAlmostEqual(status['FRONTEND::tuner_status::sample_rate'], 1.0/sri1.xdelta, '%s: SRI xdelta has correct value'%(comp_port_name),places=0) + + #complex is an optional property but if it is present check that it matches sri. + if 'FRONTEND::tuner_status::complex' in status: + self.check(status['FRONTEND::tuner_status::complex'],sri1.mode,'%s: SRI mode has correct value'%(comp_port_name)) + + # verify SRI keywords + keywords = properties.props_to_dict(sri1.keywords) + if 'COL_RF' in keywords: + self.check(True,True,'%s: SRI has COL_RF keyword'%(comp_port_name)) + if ttype == 'DDC': + if chan_status != None: + self.checkAlmostEqual(chan_status['FRONTEND::tuner_status::center_frequency'],keywords['COL_RF'],'%s: SRI keyword COL_RF has correct value'%(comp_port_name),places=0) + else: + print 'WARNING - could not determine center frequency of collector to compare with COL_RF keyword' + else: + self.checkAlmostEqual(status['FRONTEND::tuner_status::center_frequency'],keywords['COL_RF'],'%s: SRI keyword COL_RF has correct value'%(comp_port_name),places=0) + else: + self.check(False,True,'%s: SRI has COL_RF keyword'%(comp_port_name)) + + if 'CHAN_RF' in keywords: + self.check(True,True,'%s: SRI has CHAN_RF keyword'%(comp_port_name)) + self.checkAlmostEqual(status['FRONTEND::tuner_status::center_frequency'],keywords['CHAN_RF'],'%s: SRI keyword CHAN_RF has correct 
value'%(comp_port_name),places=0) + else: + self.check(False,True,'%s: SRI has CHAN_RF keyword'%(comp_port_name)) + + if 'FRONTEND::BANDWIDTH' in keywords: + self.check(True,True,'%s: SRI has FRONTEND::BANDWIDTH keyword'%(comp_port_name)) + if not self.checkAlmostEqual(status['FRONTEND::tuner_status::bandwidth'],keywords['FRONTEND::BANDWIDTH'],'%s: SRI keyword FRONTEND::BANDWIDTH has correct value'%(comp_port_name),places=0): + self.checkAlmostEqual(status['FRONTEND::tuner_status::sample_rate'],keywords['FRONTEND::BANDWIDTH'],'%s: SRI keyword FRONTEND::BANDWIDTH has sample rate value'%(comp_port_name),places=0, silentFailure=True, successMsg='WARN') + else: + self.check(False,True,'%s: SRI has FRONTEND::BANDWIDTH keyword'%(comp_port_name)) + + if 'FRONTEND::RF_FLOW_ID' in keywords: + self.check(True,True,'%s: SRI has FRONTEND::RF_FLOW_ID keyword'%(comp_port_name)) + self.check(status['FRONTEND::tuner_status::rf_flow_id'],keywords['FRONTEND::RF_FLOW_ID'],'%s: SRI keyword FRONTEND::RF_FLOW_ID has correct value'%(comp_port_name)) + else: + self.check(False,True,'%s: SRI has FRONTEND::RF_FLOW_ID keyword'%(comp_port_name)) + + if 'FRONTEND::DEVICE_ID' in keywords: + self.check(True,True,'%s: SRI has FRONTEND::DEVICE_ID keyword'%(comp_port_name)) + #self.check(1,keywords['FRONTEND::DEVICE_ID'],'SRI keyword FRONTEND::DEVICE_ID has correct value') + else: + self.check(False,True,'%s: SRI has FRONTEND::DEVICE_ID keyword'%(comp_port_name)) + + # verify multi-out port + bad_conn_id = "bad:"+str(uuid.uuid4()) + comp_port_obj.connectPort(dataSink2_port_obj, bad_conn_id) + for attempt in xrange(5): + time.sleep(1.0) + data2 = dataSink2.getData() + #print >> sys.stderr,'attempt',attempt,'len(data2)=',len(data2) + if len(data2)>0: + break + #print 'data2',len(data2) + self.check(len(data2)>0,False,'%s: Did not receive data from tuner allocation with wrong alloc_id (multiport test)'%(comp_port_name)) + sri1 = dataSink1.sri() + sri2 = dataSink2.sri() + print 'sri2',sri2 + 
self.check(sri1.streamID==sri2.streamID,False,'%s: Did not receive correct SRI from tuner allocation with wrong alloc_id (multiport test)'%(comp_port_name)) + + if self.device_discovery[ttype] < 2: + self.check(True,True,'%s: Cannot fully test multiport because only single %s tuner capability'%(comp_port_name,ttype),successMsg='info') + else: + pass # TODO - additional multiport tests here + + if listener1: + # verify listener + listener1 = self._generateListener(controller) # unique for each loop + listener1['LISTENER_ID'] = "listener1:"+listener1['LISTENER_ID'] + listenerAlloc1 = self._generateListenerAlloc(listener1) + comp_port_obj.connectPort(dataSink3_port_obj, listener1['LISTENER_ID']) + self.dut_ref.allocateCapacity(listenerAlloc1) + + for attempt in xrange(5): + time.sleep(1.0) + data3 = dataSink3.getData() + #print >> sys.stderr,'attempt',attempt,'len(data3)=',len(data3) + if len(data3)>0: + break + #print 'data3',len(data3) + self.check(len(data3)>0,True,'%s: Received data from listener allocation'%(comp_port_name)) + sri1 = dataSink1.sri() + sri3 = dataSink3.sri() + print 'sri3',sri3 + self.check(sri1.streamID==sri3.streamID,True,'%s: Received correct SRI from listener allocation'%(comp_port_name)) + + # verify EOS + if listener2: + listener2 = self._generateListener(controller) # unique for each loop + listener2['LISTENER_ID'] = "listener2:"+listener2['LISTENER_ID'] + listenerAlloc2 = self._generateListenerAlloc(listener2) + comp_port_obj.connectPort(dataSink4_port_obj, listener2['LISTENER_ID']) + self.dut_ref.allocateCapacity(listenerAlloc2) + time.sleep(1.0) + #for port_dict in port_list: + #data4 = dataSink4.getData() + self.dut_ref.deallocateCapacity(listenerAlloc1) + self.check(dataSink3.eos(),True,'%s: Listener received EOS after deallocation of listener'%(comp_port_name)) + self.check(dataSink1.eos(),False,'%s: Controller did not receive EOS after deallocation of listener'%(comp_port_name)) + self.dut_ref.deallocateCapacity(tAlloc) + 
self.check(dataSink1.eos(),True,'%s: Controller did receive EOS after deallocation of tuner'%(comp_port_name)) + if listener2: + self.check(dataSink4.eos(),True,'%s: Listener received EOS after deallocation of tuner'%(comp_port_name)) + + # TODO - noseify + def testFRONTEND_3_5_TunerStatusProperties(self): + ''' RX_DIG 5 TunerStatusProperties + ''' + #self.testReport.append('\nTest 3.5 - Tuner Status Properties') + #self.getToBasicState() + + tuner_control = self.dut.getPort('DigitalTuner_in') + tuner_control._narrow(FRONTEND.FrontendTuner) + + controller = self._generateRD() + listener1 = self._generateListener(controller) + listener2 = self._generateListener(controller) + + # make allocations + controller_id = controller['ALLOC_ID'] + controller_alloc = self._generateAlloc(controller) + self.check(self.dut_ref.allocateCapacity(controller_alloc),True,'ERROR -- could not allocate controller',throwOnFailure=True,silentSuccess=True) + + listener1_id = listener1['LISTENER_ID'] + listener1_alloc = self._generateListenerAlloc(listener1) + retval = self.dut_ref.allocateCapacity(listener1_alloc) + if not retval: + self.testReport.append('Could not allocate listener1 -- limited test') + listener1 = None + else: + listener2_id = listener2['LISTENER_ID'] + listener2_alloc = self._generateListenerAlloc(listener2) + retval = self.dut_ref.allocateCapacity(listener2_alloc) + if not retval: + self.testReport.append('Could not allocate listener2 -- limited test') + listener2 = None + + # Verify correct tuner status structure (fields, types) + # check presence of tuner status property + # check that it contains the required fields of correct data type + # check which optional fields it contains, and that they are of correct data type + # check for unknown/undefined fields + try: + status = self._getTunerStatusProp(controller_id) + except KeyError: + self.check(False, True, 'Device has FRONTEND::tuner_status property (failure, cannot complete test)') + else: + if status == None: + 
self.check(False, True, 'Device has FRONTEND::tuner_status property (failure, cannot complete test)') + else: + self.check(True, True, 'Device has FRONTEND::tuner_status property') + for name,dtype in self.FE_tuner_status_fields_req.items(): + if status.has_key(name): + self.check(True, True, 'tuner_status has required field %s'%name) + self.check(type(status[name]) in dtype, True, 'value has correct data type for %s'%(name)) + else: + self.check(False, True, 'tuner_status has required field %s'%name) + for name,dtype in self.FE_tuner_status_fields_opt.items(): + if status.has_key(name): + self.check(True, True, 'tuner_status has OPTIONAL field %s'%name)#, successMsg='yes') + self.check(type(status[name]) in dtype, True, 'value has correct data type for %s'%(name)) + else: + self.check(False, True, 'tuner_status has OPTIONAL field %s'%name, failureMsg='no') + all_names = self.FE_tuner_status_fields_req.keys()+self.FE_tuner_status_fields_opt.keys() + for name in filter(lambda x: x not in all_names,status.keys()): + self.check(False, True, 'tuner_status has UNKNOWN field %s'%name, failureMsg='WARN') + + # Verify alloc_id_csv is populated after controller allocation + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::allocation_id_csv') + except KeyError: + pass + else: + if status_val == None: + self.check(True, False, 'controller allocation id added to tuner status after allocation of controller (could not get tuner status prop)') + else: + self.check(controller_id, status_val.split(',')[0], 'controller allocation id added to tuner status after allocation of controller (must be first in CSV list)') + + # Verify tuner is enabled following allocation + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::enabled') + except KeyError: + pass + else: + if status_val == None: + self.check(True, False, 'Tuner is enabled in tuner status after tuner allocation (could not get tuner status prop)') + else: + 
self.check(True, status_val, 'Tuner is enabled in tuner status after tuner allocation') + + if listener1: + # Verify listener allocation id is added after allocation of listener + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::allocation_id_csv') + except KeyError: + pass + else: + if status_val == None: + self.check(True, False, 'listener allocation id added to tuner status after allocation of listener (could not get tuner status prop)') + else: + self.check(listener1_id in status_val.split(',')[1:], True, 'listener allocation id added to tuner status after allocation of listener (must not be first in CSV list)') + + if tuner_control: + # Verify frequency prop + try: + val = tuner_control.getTunerCenterFrequency(controller_id) + except FRONTEND.NotSupportedException, e: + pass + else: + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::center_frequency') + except KeyError: + pass + else: + self.checkAlmostEqual(status_val, val, 'correct value for FRONTEND::tuner_status::center_frequency property',places=0) + #setTunerCenterFrequency + + # Verify bandwidth prop + try: + val = tuner_control.getTunerBandwidth(controller_id) + except FRONTEND.NotSupportedException, e: + pass + else: + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::bandwidth') + except KeyError: + pass + else: + self.checkAlmostEqual(status_val, val, 'correct value for FRONTEND::tuner_status::bandwidth property',places=0) + #setTunerBandwidth + + # Verify sample rate prop + try: + val = tuner_control.getTunerOutputSampleRate(controller_id) + except FRONTEND.NotSupportedException, e: + pass + else: + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::sample_rate') + except KeyError: + pass + else: + self.checkAlmostEqual(status_val, val, 'correct value for FRONTEND::tuner_status::sample_rate property',places=0) + #setTunerOutputSampleRate + + # Verify group id prop 
+ try: + val = tuner_control.getTunerGroupId(controller_id) + except FRONTEND.NotSupportedException, e: + pass + else: + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::group_id') + except KeyError: + pass + else: + self.check(status_val, val, 'correct value for FRONTEND::tuner_status::group_id property') + + # Verify rf flow id prop + try: + val = tuner_control.getTunerRfFlowId(controller_id) + except FRONTEND.NotSupportedException, e: + pass + else: + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::rf_flow_id') + except KeyError: + pass + else: + self.check(status_val, val, 'correct value for FRONTEND::tuner_status::rf_flow_id property') + + if listener1: + # Verify listener allocation id is removed after deallocation of listener + self.dut_ref.deallocateCapacity(listener1_alloc) + try: + status_val = self._getTunerStatusProp(controller_id,'FRONTEND::tuner_status::allocation_id_csv') + except KeyError: + pass + else: + self.check(listener1_id in status_val, False, 'listener allocation id removed from tuner status after deallocation of listener') + + # Verify controller allocation id is removed after deallocation of controller + self.dut_ref.deallocateCapacity(controller_alloc) + try: + status = self._getTunerStatusProp(controller_id) + except KeyError: + pass + else: + self.check(None, status, 'controller allocation id removed from tuner status after deallocation of controller') + + if listener2: + # Verify listener allocation id is removed after deallocation of controller + #self.dut_ref.deallocateCapacity(controllerAlloc) + try: + status = self._getTunerStatusProp(listener2_id) + except KeyError: + pass + else: + self.check(None, status, 'listener allocation id removed from tuner status after deallocation of controller') + + @classmethod + def printTestReport(self): + sys.stderr.writelines(self.testReport) + + #Helpers + def check(self, A, B, message, throwOnFailure=False, 
silentFailure=False, silentSuccess=False, indent_width=0, failureMsg='FAILURE', successMsg='ok'): + # successMsg suggestions: PASS, YES, ok, u'\u2714' (check mark) + # failureMsg suggestions: FAIL, NO, u'\u2718' (x mark) + if 'Total checks made' in self.testReportStats: self.testReportStats['Total checks made']+=1 + else: self.testReportStats['Total checks made']=1 + if A == B: + if not silentSuccess: + self.testReport.append(self._buildRow(message,successMsg,indent_width)) + tmp = 'Checks that returned "%s"'%successMsg[:4] + if tmp in self.testReportStats: self.testReportStats[tmp]+=1 + else: self.testReportStats[tmp]=1 + return True # success! + else: + if not silentFailure: + self.testReport.append(self._buildRow(message,failureMsg,indent_width)) + tmp = 'Checks that returned "%s"'%failureMsg[:4] + if tmp in self.testReportStats: self.testReportStats[tmp]+=1 + else: self.testReportStats[tmp]=1 + if throwOnFailure: + #self.testReport.append('Terminal error, stopping current test...') + self.getToShutdownState() + self.assertFalse(failureMsg+'::'+message) + return False # failure! + + def checkAlmostEqual(self, A, B, message, throwOnFailure=False, silentFailure=False, silentSuccess=False, indent_width=0, failureMsg='FAILURE', successMsg='ok', places=7): + # successMsg suggestions: PASS, YES, ok, u'\u2714' (check mark) + # failureMsg suggestions: FAIL, NO, u'\u2718' (x mark) + #print "DEBUG","A=",A,"B=",B + if 'Total checks made' in self.testReportStats: self.testReportStats['Total checks made']+=1 + else: self.testReportStats['Total checks made']=1 + if round(B-A, places) == 0: + if not silentSuccess: + self.testReport.append(self._buildRow(message,successMsg,indent_width)) + tmp = 'Checks that returned "%s"'%successMsg[:4] + if tmp in self.testReportStats: self.testReportStats[tmp]+=1 + else: self.testReportStats[tmp]=1 + return True # success! 
+ else: + if not silentFailure: + self.testReport.append(self._buildRow(message,failureMsg,indent_width)) + tmp = 'Checks that returned "%s"'%failureMsg[:4] + if tmp in self.testReportStats: self.testReportStats[tmp]+=1 + else: self.testReportStats[tmp]=1 + if throwOnFailure: + #self.testReport.append('Terminal error, stopping current test...') + self.getToShutdownState() + self.assertFalse(failureMsg+'::'+message) + return False # failure! + + def _buildRow(self, lhs, rhs, indent_width=0, filler='.', len_total=80, rhs_width=4, depth=1): + ''' builds a row (or multiple rows, if required) that fit within len_total columns + format: + -will be split over multiple lines if necessary + -pads rhs text to number of characters specified by rhs_width using spaces + -truncates rhs text to number of characters specified by rhs_width + ''' + min_filler = 3 + max_lines = 5 + filler_width = len_total - (indent_width + len(lhs) + rhs_width) + if filler_width >= min_filler: + return (' '*indent_width + lhs + filler*filler_width + rhs)[0:len_total] + else: + lhs1_width = len_total - (indent_width + min_filler + rhs_width) + idx = lhs.rfind(' ',0,lhs1_width) # try to split on a space + if idx == -1: + idx = (lhs+' ').find(' ') # split at first space, if any, or take the whole string + line1 = ' '*indent_width + lhs[:idx] + if depth==1: + indent_width += 4 + if depth >= max_lines: + return line1 + else: + line2 = self._buildRow(lhs[idx:], rhs, indent_width, filler, len_total, rhs_width, depth=depth+1) + return line1 + '\n' + line2 + + def _tunerStatusHasAllocId(self,alloc_id): + props = self.dut.query([]) + props = properties.props_to_dict(props) + for tuner in props['FRONTEND::tuner_status']: + if alloc_id in tuner['FRONTEND::tuner_status::allocation_id_csv'].split(','): + return True + return False + + def _findTunerStatusProps(match={},notmatch={}): + ''' query latest props, find tuner status associated with key/value pairs + in "match" dict where the key/value pairs of 
"notmatch" dict don't match + return a list of tuner status prop dicts + return empty list no tuner status satisfies the criteria + if FRONTEND::tuner_status prop not found, raises KeyError + if any key in match or notmatch not found, raises KeyError + ''' + props = self.dut.query([]) + props = properties.props_to_dict(props) + tuners = copy.deepcopy(props['FRONTEND::tuner_status']) + for k,v in match.items(): + bad = [] + for tuner in tuners: + if tuner[k] != v: + bad.append(tuner) + tuners = [x for x in tuners if x not in bad] + #tuners = filter(lambda x: x not in bad, tuners) + for k,v in notmatch.items(): + bad = [] + for tuner in tuners: + if tuner[k] == v: + bad.append(tuner) + tuners = [x for x in tuners if x not in bad] + #tuners = filter(lambda x: x not in bad, tuners) + return tuners + + def _getTunerStatusProp(self,alloc_id,name=None): + ''' query latest props, find tuner status associated with alloc_id + if name arg is specified, return the tuner status property of that name + otherwise, return the tuner status prop as a dict + return None if either alloc_id or name not found + if FRONTEND::tuner_status prop not found, raises KeyError + ''' + props = self.dut.query([]) + props = properties.props_to_dict(props) + for tuner in props['FRONTEND::tuner_status']: + if alloc_id in tuner['FRONTEND::tuner_status::allocation_id_csv'].split(','): + break + else: + return None + + if name!=None: + try: + return tuner[name] + except KeyError: + return None + else: + return tuner + + def _generateRD(self): + #Pick a random set for CF,BW,SR and return + value = {} + value['ALLOC_ID'] = str(uuid.uuid4()) + value['TYPE'] = 'RX_DIGITIZER' + value['BW_TOLERANCE'] = 100.0 + value['SR_TOLERANCE'] = 100.0 + value['RF_FLOW_ID'] = '' + value['GROUP_ID'] = '' + value['CONTROL'] = True + + if (DEVICE_INFO['RX_DIGITIZER']['CF_MIN'] != DEVICE_INFO['RX_DIGITIZER']['CF_MAX']): + #value['CF'] = float(random.randrange(DEVICE_INFO['RX_DIGITIZER']['CF_MIN'], 
DEVICE_INFO['RX_DIGITIZER']['CF_MAX'], 1.0e3)) + value['CF'] = float(int(random.uniform(DEVICE_INFO['RX_DIGITIZER']['CF_MIN'], DEVICE_INFO['RX_DIGITIZER']['CF_MAX']))) + else: + value['CF'] = float(DEVICE_INFO['RX_DIGITIZER']['CF_MIN']) + + if (DEVICE_INFO['RX_DIGITIZER']['SR_MIN'] != DEVICE_INFO['RX_DIGITIZER']['SR_MAX']): + #value['SR'] = float(random.randrange(DEVICE_INFO['RX_DIGITIZER']['SR_MIN'], DEVICE_INFO['RX_DIGITIZER']['SR_MAX'], 1.0e3)) + value['SR'] = float(random.uniform(DEVICE_INFO['RX_DIGITIZER']['SR_MIN'], DEVICE_INFO['RX_DIGITIZER']['SR_MAX'])) + else: + value['SR'] = float(DEVICE_INFO['RX_DIGITIZER']['SR_MIN']) + + + if (DEVICE_INFO['RX_DIGITIZER']['BW_MIN'] != DEVICE_INFO['RX_DIGITIZER']['BW_MAX']): + #value['BW'] = float(random.randrange(DEVICE_INFO['RX_DIGITIZER']['BW_MIN'], DEVICE_INFO['RX_DIGITIZER']['BW_MAX'], 1.0e3)) + #value['BW'] = float(random.uniform(DEVICE_INFO['RX_DIGITIZER']['BW_MIN'], DEVICE_INFO['RX_DIGITIZER']['BW_MAX'])) + # calculate a random BW value that is + # a) within bandwidth limits of device + # b) within 100% tolerance of the usable bandwidth given the SR chosen above (usable BW = SR if complex, else usable BW = SR*0.5) + if DEVICE_INFO['RX_DIGITIZER']['COMPLEX']: + bw_min = max(value['SR']*0.8,DEVICE_INFO['RX_DIGITIZER']['BW_MIN']) + bw_max = min(value['SR']*0.8,DEVICE_INFO['RX_DIGITIZER']['BW_MAX']) + #value['BW'] = float(value['SR']) + else: + bw_min = max(value['SR']*0.5*0.5,DEVICE_INFO['RX_DIGITIZER']['BW_MIN']) + bw_max = min(value['SR']*0.5,DEVICE_INFO['RX_DIGITIZER']['BW_MAX']) + #value['BW'] = float(value['SR']*0.5) + value['BW'] = float(random.uniform(bw_min, bw_max)) + else: + value['BW'] = float(DEVICE_INFO['RX_DIGITIZER']['BW_MIN']) + + return value + + def _generateListener(self, c): + value = {} + value['LISTENER_ID'] = str(uuid.uuid4()) + value['ALLOC_ID'] = c['ALLOC_ID'] + return value + + def _generateListenerAlloc(self, value): + allocationPropDict = {'FRONTEND::listener_allocation':{ + 
'FRONTEND::listener_allocation::existing_allocation_id': value['ALLOC_ID'], + 'FRONTEND::listener_allocation::listener_allocation_id': value['LISTENER_ID'], + }} + return properties.props_from_dict(allocationPropDict) + + def _generateAlloc(self, value): + #generate the allocation + allocationPropDict = {'FRONTEND::tuner_allocation':{ + 'FRONTEND::tuner_allocation::tuner_type': value['TYPE'], + 'FRONTEND::tuner_allocation::allocation_id': value['ALLOC_ID'], + 'FRONTEND::tuner_allocation::center_frequency': float(value['CF']), + 'FRONTEND::tuner_allocation::bandwidth': float(value['BW']), + 'FRONTEND::tuner_allocation::bandwidth_tolerance': float(value['BW_TOLERANCE']), + 'FRONTEND::tuner_allocation::sample_rate': float(value['SR']), + 'FRONTEND::tuner_allocation::sample_rate_tolerance': float(value['SR_TOLERANCE']), + 'FRONTEND::tuner_allocation::device_control': value['CONTROL'], + 'FRONTEND::tuner_allocation::group_id': value['GROUP_ID'], + 'FRONTEND::tuner_allocation::rf_flow_id': value['RF_FLOW_ID'], + }} + return properties.props_from_dict(allocationPropDict) + + +######################################################### +## CODE FROM unit_test_helpers with @classmethod added ## +######################################################### + +def isMatch(prop, modes, kinds, actions): + if prop.get_mode() == None: + m = "readwrite" + else: + m = prop.get_mode() + matchMode = (m in modes) + if prop.__class__ in (PRFParser.simple, PRFParser.simpleSequence): + if prop.get_action() == None: + a = "external" + else: + a = prop.get_action().get_type() + matchAction = (a in actions) + + matchKind = False + if prop.get_kind() == None: + k = ["configure"] + else: + k = prop.get_kind() + for kind in k: + if kind.get_kindtype() in kinds: + matchKind = True + + elif prop.__class__ in (PRFParser.struct, PRFParser.structSequence): + matchAction = True # There is no action, so always match + + matchKind = False + if prop.get_configurationkind() == None: + k = ["configure"] + 
else: + k = prop.get_configurationkind() + for kind in k: + if kind.get_kindtype() in kinds: + matchKind = True + + if k in kinds: + matchKind = True + + + return matchMode and matchKind and matchAction + +def getPropertySet(spd_file, kinds=("configure",), \ + modes=("readwrite", "writeonly", "readonly"), \ + action="external", \ + includeNil=True): + """ + A useful utility function that extracts specified property types from + the PRF file and turns them into a CF.PropertySet + """ + propertySet = [] + + spd = SPDParser.parse(spd_file) + prf_file = spd.get_propertyfile().get_localfile().get_name() + if (prf_file[0] != '/'): + prf_file = os.path.join(os.path.dirname(spd_file), prf_file) + prf = PRFParser.parse(prf_file) + + # Simples + for prop in prf.get_simple(): + if isMatch(prop, modes, kinds, (action,)): + if prop.get_value() is not None: + dt = properties.to_tc_value(prop.get_value(), prop.get_type()) + elif not includeNil: + continue + else: + dt = any.to_any(None) + p = CF.DataType(id=str(prop.get_id()), value=dt) + propertySet.append(p) + + # Simple Sequences + for prop in prf.get_simplesequence(): + if isMatch(prop, modes, kinds, (action,)): + if prop.get_values() is not None: + seq = [] + for v in prop.get_values().get_value(): + seq.append(properties.to_pyvalue(v, prop.get_type())) + dt = any.to_any(seq) + elif not includeNil: + continue + else: + dt = any.to_any(None) + p = CF.DataType(id=str(prop.get_id()), value=dt) + propertySet.append(p) + + # Structures + for prop in prf.get_struct(): + if isMatch(prop, modes, kinds, (action,)): + if prop.get_simple() is not None: + fields = [] + hasValue = False + for s in prop.get_simple(): + if s.get_value() is not None: + hasValue = True + dt = properties.to_tc_value(s.get_value(), s.get_type()) + fields.append(CF.DataType(id=str(s.get_id()), value=dt)) + if not hasValue and not includeNil: + continue + dt = any.to_any(fields) + else: + dt = any.to_any(None) + p = CF.DataType(id=str(prop.get_id()), value=dt) + 
propertySet.append(p) + # Structures + + for prop in prf.get_structsequence(): + if isMatch(prop, modes, kinds, (action,)): + baseProp = [] + if prop.get_struct() != None: + fields = [] + for internal_prop in prop.get_struct().get_simple(): + fields.append(CF.DataType(id=str(internal_prop.get_id()), value=any.to_any(None))) + for val in prop.get_structvalue(): + baseProp.append(copy.deepcopy(fields)) + for entry in val.get_simpleref(): + val_type = None + for internal_prop in prop.get_struct().get_simple(): + if str(internal_prop.get_id()) == entry.refid: + val_type = internal_prop.get_type() + for subfield in baseProp[-1]: + if subfield.id == entry.refid: + subfield.value = properties.to_tc_value(entry.get_value(), val_type) + anybp = [] + for bp in baseProp: + anybp.append(properties.props_to_any(bp)) + p = CF.DataType(id=str(prop.get_id()), value=any.to_any(anybp)) + propertySet.append(p) + # Struct Sequence + + return propertySet diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/tests/test_RX_Digitizer_Sim.py b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/tests/test_RX_Digitizer_Sim.py new file mode 100644 index 000000000..ae8065877 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/tests/test_RX_Digitizer_Sim.py @@ -0,0 +1,437 @@ +#!/usr/bin/env python + +import ossie.utils.testing +from ossie.utils import sb +from ossie.cf import CF +from omniORB import any +from omniORB import CORBA +from ossie.utils.bulkio.bulkio_data_helpers import SDDSSink +from redhawk.frontendInterfaces import FRONTEND +from ossie.utils import uuid +from ossie import properties +import time +from ossie.utils.bluefile.bluefile_helpers import sri_to_hdr +import frontend + +DEBUG_LEVEL = 2 + +class DeviceTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the device. 
+ SPD_FILE = '../RX_Digitizer_Sim.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a device using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the device, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl,execparams={'DEBUG_LEVEL': DEBUG_LEVEL}) + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def testBasicBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + self.comp.start() + self.comp.stop() + + def testSingleTunerAllocation(self): + + #self.comp.start() + + sink = sb.DataSink() + + + alloc = self._generateAlloc(cf=110e6,sr=2.5e6,bw=2e6) + allocationID = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.comp.connect(sink,connectionId=allocationID) + sink.start() + + try: + retval = self.comp.allocateCapacity(alloc) + except Exception, e: + print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" + print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" + print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%" + print str(e) + self.assertFalse("Exception thrown on allocateCapactiy %s" % 
str(e)) + if not retval: + self.assertFalse("Allocation Failed") + + time.sleep(1) + + sri = sink.sri() + self.assertEqual(sri.streamID, allocationID) + self.assertAlmostEqual(sri.xdelta, 1.0/2.5e6,5) + #time.sleep(1) + + data= sink.getData() + self.assertTrue(len(data)>0) + + self.comp.deallocateCapacity(alloc) + time.sleep(1) + + sri = sink.sri() + self.assertTrue( sink.eos()) + + def testDoubleTunerAllocation(self): + + #self.comp.start() + + #Create Allocation and Sink for Tuner 1 + sink = sb.StreamSink() + alloc = self._generateAlloc(cf=110e6,sr=2.5e6,bw=2e6) + allocationID = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.comp.connect(sink,connectionId=allocationID) + sink.start() + + + + #Allocate a Tuner + try: + retval = self.comp.allocateCapacity(alloc) + except Exception, e: + self.assertFalse("Exception thrown on allocateCapactiy %s" % str(e)) + if not retval: + self.assertFalse("Allocation Failed") + + #Check Tuner 1 SRI, letting data and SRI flow for up to 1s + data = sink.read(timeout=1) + self.failIf(data is None) + print "SRI 1 1st Time" , data.sri + self.assertEqual(data.sri.streamID, allocationID, "SRI 1 Did not Match") + self.assertAlmostEqual(data.sri.xdelta, 1.0/2.5e6,5) + + #Create Allocation and Sink for Tuner 2 + sink2 = sb.StreamSink() + alloc2 = self._generateAlloc(cf=110e6,sr=5e6,bw=4e6) + allocationID2 = properties.props_to_dict(alloc2)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.comp.connect(sink2,connectionId=allocationID2) + sink2.start() + + #Allocate a Second Tuner + try: + retval = self.comp.allocateCapacity(alloc2) + except Exception, e: + print str(e) + self.assertFalse("Exception thrown on allocateCapactiy on second Tuner %s" % str(e)) + if not retval: + self.assertFalse("Allocation Failed on second Tuner") + + #Sleep and let data and SRI flow + time.sleep(1) + + #Check Tuner 1 SRI Again (should not change) + data = 
sink.read(timeout=0) + self.failIf(data is None) + print "SRI 1 2nd Time" , data.sri + self.assertEqual(data.sri.streamID, allocationID, "SRI 1 Did not Match Second Time") + self.assertAlmostEqual(data.sri.xdelta, 1.0/2.5e6,5) + + #Check Tuner 2 SRI + data2 = sink2.read(timeout=0) + print "SRI 2 " , data2.sri + print "allocationID2", allocationID2 + self.assertEqual(data2.sri.streamID, allocationID2,"SRI 2 Did not MAtch") + self.assertAlmostEqual(data2.sri.xdelta, 1.0/5.0e6,5) + + #Check Tuner 1 Data + self.assertTrue(len(data.data)>0) + + #Check Tuner 2 Data + self.assertTrue(len(data2.data)>0) + + #Deallocate Tuners + self.comp.deallocateCapacity(alloc) + self.comp.deallocateCapacity(alloc2) + + #Check that they sent EOS + data = sink.read(timeout=1, eos=True) + self.failIf(data is None) + self.failUnless(data.eos) + + data2 = sink2.read(timeout=1, eos=True) + self.failIf(data2 is None) + self.failUnless(data2.eos) + + def testValidRFInfoPacket(self): + + #Create Allocation and Sink for Tuner 1 + sink = sb.DataSink() + alloc = self._generateAlloc(cf=110e6,sr=2.5e6,bw=2e6,rf_flow_id="testRFInfoPacket_FlowID") + allocationID = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.comp.connect(sink,connectionId=allocationID) + sink.start() + + #Send an RF Info Packet + rfInfo_port = self.comp.getPort("RFInfo_in") + rf_info_pkt = self._generateRFInfoPkt(rf_freq=100e6,rf_bw=100e6,if_freq=0,rf_flow_id="testRFInfoPacket_FlowID") + rfInfo_port._set_rfinfo_pkt(rf_info_pkt) + + + #Allocate a Tuner + try: + retval = self.comp.allocateCapacity(alloc) + except Exception, e: + self.assertFalse("Exception thrown on allocateCapactiy %s" % str(e)) + if not retval: + self.assertFalse("Allocation Failed") + + self.comp.deallocateCapacity(alloc) + + def testInValidRFInfoPacket(self): + + #Create Allocation and Sink for Tuner 1 + sink = sb.DataSink() + alloc = 
self._generateAlloc(cf=110e6,sr=2.5e6,bw=2e6,rf_flow_id="invalid_FlowID") + allocationID = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.comp.connect(sink,connectionId=allocationID) + sink.start() + + #Send an RF Info Packet + rfInfo_port = self.comp.getPort("RFInfo_in") + rf_info_pkt = self._generateRFInfoPkt(rf_freq=100e6,rf_bw=100e6,if_freq=0,rf_flow_id="testRFInfoPacket_FlowID") + rfInfo_port._set_rfinfo_pkt(rf_info_pkt) + + + #Allocate a Tuner + try: + retval = self.comp.allocateCapacity(alloc) + except Exception, e: + self.assertFalse("Exception thrown on allocateCapactiy %s" % str(e)) + if retval: + self.assertFalse("Allocation Succeeded but should have failed") + + try: + self.comp.deallocateCapacity(alloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testValidRFInfoFreq(self): + """ Ask for a Allocation outside the range of the REceiver but should still work because the RFInfo packet tells the receiver a frequency down conversation has already occured""" + + #Create Allocation and Sink for Tuner 1 + sink = sb.DataSink() + alloc = self._generateAlloc(cf=9001e6,sr=2.5e6,bw=2e6,rf_flow_id="testRFInfoPacket_FlowID") + allocationID = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.comp.connect(sink,connectionId=allocationID) + sink.start() + + #Send an RF Info Packet + rfInfo_port = self.comp.getPort("RFInfo_in") + rf_info_pkt = self._generateRFInfoPkt(rf_freq=9000e6,rf_bw=100e6,if_freq=100e6,rf_flow_id="testRFInfoPacket_FlowID") + rfInfo_port._set_rfinfo_pkt(rf_info_pkt) + + + #Allocate a Tuner + try: + retval = self.comp.allocateCapacity(alloc) + except Exception, e: + self.assertFalse("Exception thrown on allocateCapactiy %s" % str(e)) + if not retval: + self.assertFalse("Allocation Failed") + + try: + 
self.comp.deallocateCapacity(alloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testInValidRFInfoFreq(self): + """ Ask for a Allocation outside the range of the REceiver but should still work because the RFInfo packet tells the receiver a frequency down conversation has already occured""" + + #Create Allocation and Sink for Tuner 1 + sink = sb.DataSink() + alloc = self._generateAlloc(cf=100e6,sr=2.5e6,bw=2e6,rf_flow_id="testRFInfoPacket_FlowID") + allocationID = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.comp.connect(sink,connectionId=allocationID) + sink.start() + + #Send an RF Info Packet + rfInfo_port = self.comp.getPort("RFInfo_in") + rf_info_pkt = self._generateRFInfoPkt(rf_freq=9000e6,rf_bw=100e6,if_freq=100e6,rf_flow_id="testRFInfoPacket_FlowID") + rfInfo_port._set_rfinfo_pkt(rf_info_pkt) + + + #Allocate a Tuner + try: + retval = self.comp.allocateCapacity(alloc) + except Exception, e: + self.assertFalse("Exception thrown on allocateCapactiy %s" % str(e)) + if retval: + self.assertFalse("Allocation Succeeded but should have failed") + + try: + self.comp.deallocateCapacity(alloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testInValidRFInfoBW(self): + """ Ask for a Allocation outside the range of the REceiver but should still work because the RFInfo packet tells the receiver a frequency down conversation has already occured""" + + #Create Allocation and Sink for Tuner 1 + sink = sb.DataSink() + alloc = self._generateAlloc(cf=9050.1e6,sr=0,bw=2e6,rf_flow_id="testRFInfoPacket_FlowID") + allocationID = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + 
self.comp.connect(sink,connectionId=allocationID) + sink.start() + + #Send an RF Info Packet + rfInfo_port = self.comp.getPort("RFInfo_in") + rf_info_pkt = self._generateRFInfoPkt(rf_freq=9000e6,rf_bw=100e6,if_freq=100e6,rf_flow_id="testRFInfoPacket_FlowID") + rfInfo_port._set_rfinfo_pkt(rf_info_pkt) + + + #Allocate a Tuner + try: + retval = self.comp.allocateCapacity(alloc) + except Exception, e: + self.assertFalse("Exception thrown on allocateCapactiy %s" % str(e)) + if retval: + self.assertFalse("Allocation Succeeded but should have failed") + + try: + self.comp.deallocateCapacity(alloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation to be invalid + pass + + def testInValidRFInfoSR(self): + """ Ask for a Allocation outside the range of the REceiver but should still work because the RFInfo packet tells the receiver a frequency down conversation has already occured""" + + #Create Allocation and Sink for Tuner 1 + sink = sb.DataSink() + alloc = self._generateAlloc(cf=9050.1e6,sr=2.5e6,bw=0,rf_flow_id="testRFInfoPacket_FlowID") + allocationID = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.comp.connect(sink,connectionId=allocationID) + sink.start() + + #Send an RF Info Packet + rfInfo_port = self.comp.getPort("RFInfo_in") + rf_info_pkt = self._generateRFInfoPkt(rf_freq=9000e6,rf_bw=100e6,if_freq=100e6,rf_flow_id="testRFInfoPacket_FlowID") + rfInfo_port._set_rfinfo_pkt(rf_info_pkt) + + + #Allocate a Tuner + try: + retval = self.comp.allocateCapacity(alloc) + except Exception, e: + self.assertFalse("Exception thrown on allocateCapactiy %s" % str(e)) + if retval: + self.assertFalse("Allocation Succeeded but should have failed") + + try: + self.comp.deallocateCapacity(alloc) + except CF.Device.InvalidCapacity, e: + # Deallocating shouldn't be required if the allocation failed so we would expect this deallocation 
to be invalid + pass + + def testInvalidAllocation(self): + alloc = self._generateAlloc(cf=110e6,sr=2.5e6,bw=2e6) + retval = self.comp.allocateCapacity(alloc) + self.assertRaises(CF.Device.InvalidCapacity, self.comp.allocateCapacity,alloc) + + def testTuningException(self): + alloc = self._generateAlloc(cf=111e6,sr=2.5e6,bw=2e6) + self.assertEquals(self.comp.allocateCapacity(alloc), False) + + def testBasicOkAllocation(self): + alloc = self._generateAlloc(cf=112e6,sr=2.5e6,bw=2e6) + self.assertEquals(self.comp.allocateCapacity(alloc), True) + + def testTypoErrorAllocation(self): + center_frequency = 110e6 + sample_rate = 2.5e6 + bandwidth = 2e6 + alloc = self._generateAlloc(cf=center_frequency,sr=sample_rate,bw=bandwidth) + retval = self.comp.allocateCapacity(alloc) + self.assertEquals(retval, True) + _type = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::tuner_type'] + _alloc_id = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + listen_alloc = [frontend.createTunerListenerAllocation(_alloc_id, listener_allocation_id='foo',returnDict=False)] + retval = self.comp.allocateCapacity(listen_alloc) + self.assertEquals(retval, True) + self.assertRaises(CF.Device.InvalidCapacity, self.comp.allocateCapacity, listen_alloc) + self.comp.deallocateCapacity(listen_alloc) + self.comp.deallocateCapacity(alloc) + + def testFalseControl(self): + center_frequency = 110e6 + sample_rate = 2.5e6 + bandwidth = 2e6 + alloc = self._generateAlloc(cf=center_frequency,sr=sample_rate,bw=bandwidth) + retval = self.comp.allocateCapacity(alloc) + self.assertEquals(retval, True) + _type = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::tuner_type'] + _alloc_id = properties.props_to_dict(alloc)['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + listen_alloc = [frontend.createTunerGenericListenerAllocation(_type, 
allocation_id='foo', center_frequency=center_frequency, bandwidth=bandwidth, sample_rate=sample_rate,returnDict=False)] + retval = self.comp.allocateCapacity(listen_alloc) + self.assertEquals(retval, True) + self.comp.deallocateCapacity(listen_alloc) + self.comp.deallocateCapacity(alloc) + + def _generateAlloc(self,cf=100e6,sr=25e6,bw=20e6,rf_flow_id=''): + + value = {} + value['ALLOC_ID'] = str(uuid.uuid4()) + value['TYPE'] = 'RX_DIGITIZER' + value['BW_TOLERANCE'] = 100.0 + value['SR_TOLERANCE'] = 100.0 + value['RF_FLOW_ID'] = rf_flow_id + value['GROUP_ID'] = '' + value['CONTROL'] = True + value['CF'] = cf + value['SR'] = sr + value['BW'] = bw + + #generate the allocation + allocationPropDict = {'FRONTEND::tuner_allocation':{ + 'FRONTEND::tuner_allocation::tuner_type': value['TYPE'], + 'FRONTEND::tuner_allocation::allocation_id': value['ALLOC_ID'], + 'FRONTEND::tuner_allocation::center_frequency': float(value['CF']), + 'FRONTEND::tuner_allocation::bandwidth': float(value['BW']), + 'FRONTEND::tuner_allocation::bandwidth_tolerance': float(value['BW_TOLERANCE']), + 'FRONTEND::tuner_allocation::sample_rate': float(value['SR']), + 'FRONTEND::tuner_allocation::sample_rate_tolerance': float(value['SR_TOLERANCE']), + 'FRONTEND::tuner_allocation::device_control': value['CONTROL'], + 'FRONTEND::tuner_allocation::group_id': value['GROUP_ID'], + 'FRONTEND::tuner_allocation::rf_flow_id': value['RF_FLOW_ID'], + }} + return properties.props_from_dict(allocationPropDict) + + def _generateRFInfoPkt(self,rf_freq=1e9,rf_bw=1e9,if_freq=0,spec_inverted=False,rf_flow_id="testflowID"): + antenna_info = FRONTEND.AntennaInfo("antenna_name","antenna_type","antenna.size","description") + freqRange= FRONTEND.FreqRange(0,1e12,[] ) + feed_info = FRONTEND.FeedInfo("feed_name", "polarization",freqRange) + sensor_info = FRONTEND.SensorInfo("msn_name", "collector_name", "receiver_name",antenna_info,feed_info) + delays = []; + cap = FRONTEND.RFCapabilities(freqRange,freqRange); + add_props = []; + 
rf_info_pkt = FRONTEND.RFInfoPkt(rf_flow_id,rf_freq, rf_bw, if_freq, spec_inverted, sensor_info, delays, cap, add_props) + + return rf_info_pkt + + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/tests/test_RX_Digitizer_Sim_FEI.py b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/tests/test_RX_Digitizer_Sim_FEI.py new file mode 100644 index 000000000..e0075359a --- /dev/null +++ b/codegenTesting/sdr/dev/devices/RX_Digitizer_Sim/tests/test_RX_Digitizer_Sim_FEI.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python + + +import sys, os +script_dir = os.path.dirname(os.path.abspath(__file__)) +project_dir = os.path.abspath(os.path.join(script_dir, '..')) +lib_dir = os.path.join(script_dir, 'fei_base') +sys.path.append(lib_dir) +import frontend_tuner_unit_test_base as fe + +''' TODO: + 1) set the desired DEBUG_LEVEL (typical values include 0, 1, 2, 3, 4 and 5) + 2) set DUT correctly to specify the USRP under test + 3) set IMPL_ID to the implementation that should be tested. + 4) Optional: set dut_execparams if it is necessary to specify a particular + USRP. Default behavior is to target the first device of the type specified. + 5) Optional: set dut_capabilities to reflect the hardware capabilities of + the dut if different from what is provided below. 
+''' + +DEBUG_LEVEL = 4 +dut_name = 'RX_Digitizer_Sim' +IMPL_ID = 'python' +dut_execparams = {} +dut_configure = {} +dut_capabilities = {'RX_DIGITIZER':{'COMPLEX': True, + 'CF_MAX': 3000e6, + 'CF_MIN': 50e6, + 'BW_MAX': 8000000.0, + 'BW_MIN': 2000000.0, + 'SR_MAX': 10000000.0, + 'SR_MIN': 2500000.0, + 'GAIN_MIN' :0, + 'GAIN_MAX' :10}} + + + +DEVICE_INFO = {} +DEVICE_INFO[dut_name] = dut_capabilities +DEVICE_INFO[dut_name]['SPD'] = os.path.join(project_dir, 'RX_Digitizer_Sim.spd.xml') +DEVICE_INFO[dut_name]['execparams'] = dut_execparams +DEVICE_INFO[dut_name]['configure'] = dut_configure +#******* DO NOT MODIFY ABOVE **********# + + + +class FrontendTunerTests(fe.FrontendTunerTests): + + def __init__(self,*args,**kwargs): + import ossie.utils.testing + super(FrontendTunerTests,self).__init__(*args,**kwargs) + fe.set_debug_level(DEBUG_LEVEL) + fe.set_device_info(DEVICE_INFO[dut_name]) + fe.set_impl_id(IMPL_ID) + + # Use functions below to add pre-/post-launch commands if your device has special startup requirements + @classmethod + def devicePreLaunch(self): + pass + @classmethod + def devicePostLaunch(self): + pass + + # Use functions below to add pre-/post-release commands if your device has special shutdown requirements + @classmethod + def devicePreRelease(self): + pass + @classmethod + def devicePostRelease(self): + pass + + +if __name__ == '__main__': + fe.set_debug_level(DEBUG_LEVEL) + fe.set_device_info(DEVICE_INFO[dut_name]) + fe.set_impl_id(IMPL_ID) + + # run using nose + import nose + nose.main(defaultTest=__name__) diff --git a/codegenTesting/sdr/dev/devices/basic_fei_device/basic_fei_device.prf.xml b/codegenTesting/sdr/dev/devices/basic_fei_device/basic_fei_device.prf.xml index 229fadcf2..3ad243f7a 100644 --- a/codegenTesting/sdr/dev/devices/basic_fei_device/basic_fei_device.prf.xml +++ b/codegenTesting/sdr/dev/devices/basic_fei_device/basic_fei_device.prf.xml @@ -68,7 +68,7 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- + Allocates a listener (subscriber) based off a previous allocation @@ -78,7 +78,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + Frontend Interfaces v2 main allocation structure Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER diff --git a/codegenTesting/sdr/dev/devices/basic_fei_device/tests/test_basic_fei_device.py b/codegenTesting/sdr/dev/devices/basic_fei_device/tests/test_basic_fei_device.py index 8429418f0..ff6151ca1 100644 --- a/codegenTesting/sdr/dev/devices/basic_fei_device/tests/test_basic_fei_device.py +++ b/codegenTesting/sdr/dev/devices/basic_fei_device/tests/test_basic_fei_device.py @@ -23,6 +23,7 @@ import os from omniORB import any from ossie.utils import sb +from ossie.cf import CF class ResourceTests(ossie.utils.testing.RHComponentTestCase): # setUp is run before every function preceded by "test" is executed @@ -65,6 +66,10 @@ def testBasicProperties(self): # Check the that tuner status exists and contains the extra "agc" field self.assertEqual(len(self.comp.frontend_tuner_status), 1) self.assertTrue('FRONTEND::tuner_status::agc' in self.comp.frontend_tuner_status[0]) + tuner_alloc = CF.DataType(id='FRONTEND::tuner_allocation', value=any.to_any(None)) + listen_alloc = CF.DataType(id='FRONTEND::listener_allocation', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.comp.query, [tuner_alloc]) + self.assertRaises(CF.UnknownProperties, self.comp.query, [listen_alloc]) if __name__ == "__main__": ossie.utils.testing.main("../basic_fei_device.spd.xml") # By default tests all implementations diff --git a/codegenTesting/sdr/dev/devices/fei_dev_python/tests/test_fei_dev_python.py b/codegenTesting/sdr/dev/devices/fei_dev_python/tests/test_fei_dev_python.py index 43a9b79e7..5a82197db 100644 --- a/codegenTesting/sdr/dev/devices/fei_dev_python/tests/test_fei_dev_python.py +++ b/codegenTesting/sdr/dev/devices/fei_dev_python/tests/test_fei_dev_python.py @@ -113,7 +113,7 @@ def 
testBasicBehavior(self): retval = port.ref.getTunerEnable('hello') self.assertEquals(retval, True) sample_rate = port.ref.getTunerOutputSampleRate('hello') - self.assertEquals(sample_rate, 1) + self.assertEquals(sample_rate, 0.0) port.ref.setTunerOutputSampleRate('hello',5) sample_rate = port.ref.getTunerOutputSampleRate('hello') self.assertEquals(sample_rate, 5) diff --git a/codegenTesting/sdr/dev/devices/fei_exception_through/cpp/fei_exception_through.cpp b/codegenTesting/sdr/dev/devices/fei_exception_through/cpp/fei_exception_through.cpp index 4cb0a00f2..afd732425 100644 --- a/codegenTesting/sdr/dev/devices/fei_exception_through/cpp/fei_exception_through.cpp +++ b/codegenTesting/sdr/dev/devices/fei_exception_through/cpp/fei_exception_through.cpp @@ -313,6 +313,23 @@ void fei_exception_through_i::constructor() int fei_exception_through_i::serviceFunction() { LOG_DEBUG(fei_exception_through_i, "serviceFunction() example log message"); + + // test out RFSource output port... + if ( RFSource_out ) { + frontend::RFInfoPktSequence ret =get_available_rf_inputs(""); + if ( ret.size() == 10 ) { + frontend::RFInfoPktSequence::iterator i = ret.begin(); + int cnt=0; + // check freqs.. 0 == 100, 1 == 200 ... 
+ for (; i != ret.end(); i++, cnt++ ) { + double freq=(cnt+1)*100.0; + if ( freq != i->rf_center_freq ) { + LOG_ERROR(fei_exception_through_i, "Match was bad, index = " << cnt); + } + } + + } + } return NOOP; } @@ -517,35 +534,26 @@ void fei_exception_through_i::set_rfinfo_pkt(const std::string& port_name, const std::vector fei_exception_through_i::get_available_rf_inputs(const std::string& port_name) { - std::vector inputs; - FRONTEND::RFInfoPktSequence_var _inputs = this->RFSource_out->available_rf_inputs(); - for (unsigned int i=0; i<_inputs->length(); i++) { - inputs.push_back(frontend::returnRFInfoPkt(_inputs[i])); - } - return inputs; + return this->RFSource_out->available_rf_inputs(); + } void fei_exception_through_i::set_available_rf_inputs(const std::string& port_name, const std::vector &inputs) { - FRONTEND::RFInfoPktSequence_var _inputs = new FRONTEND::RFInfoPktSequence(); - _inputs->length(inputs.size()); - for (unsigned int i=0; iRFSource_out->available_rf_inputs(_inputs); + this->RFSource_out->available_rf_inputs(inputs); } frontend::RFInfoPkt fei_exception_through_i::get_current_rf_input(const std::string& port_name) { frontend::RFInfoPkt pkt; - pkt = frontend::returnRFInfoPkt(*this->RFSource_out->current_rf_input()); + frontend::RFInfoPkt *_ret=0; + _ret = this->RFSource_out->current_rf_input(); + if ( _ret ) { pkt = *_ret; delete _ret; } return pkt; } void fei_exception_through_i::set_current_rf_input(const std::string& port_name, const frontend::RFInfoPkt &pkt) { - FRONTEND::RFInfoPkt_var _pkt = frontend::returnRFInfoPkt(pkt); - this->RFSource_out->current_rf_input(_pkt); + this->RFSource_out->current_rf_input(pkt); } diff --git a/codegenTesting/sdr/dev/devices/fei_exception_through/java/src/fei_exception_through/java/fei_exception_through.java b/codegenTesting/sdr/dev/devices/fei_exception_through/java/src/fei_exception_through/java/fei_exception_through.java index c2603b731..b771fa5a2 100644 --- 
a/codegenTesting/sdr/dev/devices/fei_exception_through/java/src/fei_exception_through/java/fei_exception_through.java +++ b/codegenTesting/sdr/dev/devices/fei_exception_through/java/src/fei_exception_through/java/fei_exception_through.java @@ -31,6 +31,7 @@ import FRONTEND.NotSupportedException; import CF.DevicePackage.InvalidCapacity; import CF.InvalidObjectReference; +import org.ossie.redhawk.PortCallError; /** * This is the device code. This file contains the derived class where custom @@ -376,123 +377,218 @@ public boolean deviceDeleteTuning(frontend_tuner_status_struct_struct fts, int t public String getTunerType(final String allocation_id) { - return this.port_DigitalTuner_out.getTunerType(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerType(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public boolean getTunerDeviceControl(final String allocation_id) { - return this.port_DigitalTuner_out.getTunerDeviceControl(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerDeviceControl(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public String getTunerGroupId(final String allocation_id) { - return this.port_DigitalTuner_out.getTunerGroupId(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerGroupId(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public String getTunerRfFlowId(final String allocation_id) throws FRONTEND.FrontendException, BadParameterException, NotSupportedException { - return this.port_DigitalTuner_out.getTunerRfFlowId(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerRfFlowId(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void setTunerCenterFrequency(final String allocation_id, double freq) throws 
FRONTEND.FrontendException, FRONTEND.BadParameterException, NotSupportedException { - this.port_DigitalTuner_out.setTunerCenterFrequency(allocation_id, freq); + try { + this.port_DigitalTuner_out.setTunerCenterFrequency(allocation_id, freq); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public double getTunerCenterFrequency(final String allocation_id) throws FRONTEND.FrontendException, BadParameterException, NotSupportedException { - return this.port_DigitalTuner_out.getTunerCenterFrequency(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerCenterFrequency(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void setTunerBandwidth(final String allocation_id, double bw) throws FRONTEND.FrontendException, FRONTEND.BadParameterException, NotSupportedException { - this.port_DigitalTuner_out.setTunerBandwidth(allocation_id, bw); + try { + this.port_DigitalTuner_out.setTunerBandwidth(allocation_id, bw); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public double getTunerBandwidth(final String allocation_id) throws FRONTEND.FrontendException, BadParameterException, NotSupportedException { - return this.port_DigitalTuner_out.getTunerBandwidth(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerBandwidth(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void setTunerAgcEnable(final String allocation_id, boolean enable) throws FRONTEND.NotSupportedException, FrontendException, BadParameterException { - this.port_DigitalTuner_out.setTunerAgcEnable(allocation_id, enable); + try { + this.port_DigitalTuner_out.setTunerAgcEnable(allocation_id, enable); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public boolean getTunerAgcEnable(final 
String allocation_id) throws FRONTEND.NotSupportedException, FrontendException, BadParameterException { - return this.port_DigitalTuner_out.getTunerAgcEnable(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerAgcEnable(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void setTunerGain(final String allocation_id, float gain) throws FRONTEND.NotSupportedException, FrontendException, BadParameterException { - this.port_DigitalTuner_out.setTunerGain(allocation_id, gain); + try { + this.port_DigitalTuner_out.setTunerGain(allocation_id, gain); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public float getTunerGain(final String allocation_id) throws FRONTEND.NotSupportedException, FrontendException, BadParameterException { - return this.port_DigitalTuner_out.getTunerGain(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerGain(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void setTunerReferenceSource(final String allocation_id, int source) throws FRONTEND.NotSupportedException, FrontendException, BadParameterException { - this.port_DigitalTuner_out.setTunerReferenceSource(allocation_id, source); + try { + this.port_DigitalTuner_out.setTunerReferenceSource(allocation_id, source); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public int getTunerReferenceSource(final String allocation_id) throws FRONTEND.NotSupportedException, FrontendException, BadParameterException { - return this.port_DigitalTuner_out.getTunerReferenceSource(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerReferenceSource(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void setTunerEnable(final String allocation_id, 
boolean enable) throws FRONTEND.FrontendException, BadParameterException, NotSupportedException { - this.port_DigitalTuner_out.setTunerEnable(allocation_id, enable); + try { + this.port_DigitalTuner_out.setTunerEnable(allocation_id, enable); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public boolean getTunerEnable(final String allocation_id) throws FRONTEND.FrontendException, BadParameterException, NotSupportedException { - return this.port_DigitalTuner_out.getTunerEnable(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerEnable(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void setTunerOutputSampleRate(final String allocation_id, double sr) throws FRONTEND.FrontendException, FRONTEND.BadParameterException, NotSupportedException { - this.port_DigitalTuner_out.setTunerOutputSampleRate(allocation_id, sr); + try { + this.port_DigitalTuner_out.setTunerOutputSampleRate(allocation_id, sr); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public double getTunerOutputSampleRate(final String allocation_id) throws FRONTEND.FrontendException, BadParameterException, NotSupportedException { - return this.port_DigitalTuner_out.getTunerOutputSampleRate(allocation_id); + try { + return this.port_DigitalTuner_out.getTunerOutputSampleRate(allocation_id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public FRONTEND.GPSInfo get_gps_info(final String port_name) { - return this.port_GPS_out.gps_info(); + try { + return this.port_GPS_out.gps_info(); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void set_gps_info(final String port_name, final FRONTEND.GPSInfo gps_info) { - - this.port_GPS_out.gps_info(gps_info); + try { + this.port_GPS_out.gps_info(gps_info); + } 
catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public FRONTEND.GpsTimePos get_gps_time_pos(final String port_name) { - return this.port_GPS_out.gps_time_pos(); + try { + return this.port_GPS_out.gps_time_pos(); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void set_gps_time_pos(final String port_name, final FRONTEND.GpsTimePos gps_time_pos) { - this.port_GPS_out.gps_time_pos(gps_time_pos); + try { + this.port_GPS_out.gps_time_pos(gps_time_pos); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public FRONTEND.NavigationPacket get_nav_packet(final String port_name) { - return this.port_NavData_out.nav_packet(); + try { + return this.port_NavData_out.nav_packet(); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void set_nav_packet(final String port_name, final FRONTEND.NavigationPacket nav_info) { - this.port_NavData_out.nav_packet(nav_info); + try { + this.port_NavData_out.nav_packet(nav_info); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } /************************************************************* @@ -501,41 +597,73 @@ Functions servicing the RFInfo port(s) *************************************************************/ public String get_rf_flow_id(final String port_name) { - return this.port_RFInfo_out.rf_flow_id(); + try { + return this.port_RFInfo_out.rf_flow_id(); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void set_rf_flow_id(final String port_name, final String id) { - this.port_RFInfo_out.rf_flow_id(id); + try { + this.port_RFInfo_out.rf_flow_id(id); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public FRONTEND.RFInfoPkt get_rfinfo_pkt(final String port_name) 
{ - return this.port_RFInfo_out.rfinfo_pkt(); + try { + return this.port_RFInfo_out.rfinfo_pkt(); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void set_rfinfo_pkt(final String port_name, final FRONTEND.RFInfoPkt pkt) { - this.port_RFInfo_out.rfinfo_pkt(pkt); + try { + this.port_RFInfo_out.rfinfo_pkt(pkt); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public FRONTEND.RFInfoPkt[] get_available_rf_inputs(final String port_name) { - return this.port_RFSource_out.available_rf_inputs(); + try { + return this.port_RFSource_out.available_rf_inputs(); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void set_available_rf_inputs(final String port_name, final FRONTEND.RFInfoPkt[] inputs) { - this.port_RFSource_out.available_rf_inputs(inputs); + try { + this.port_RFSource_out.available_rf_inputs(inputs); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public FRONTEND.RFInfoPkt get_current_rf_input(final String port_name) { - return this.port_RFSource_out.current_rf_input(); + try { + return this.port_RFSource_out.current_rf_input(); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } public void set_current_rf_input(final String port_name, final FRONTEND.RFInfoPkt pkt) { - this.port_RFSource_out.current_rf_input(pkt); + try { + this.port_RFSource_out.current_rf_input(pkt); + } catch (PortCallError e) { + throw new RuntimeException("Port call error: "+e.getMessage()); + } } } diff --git a/codegenTesting/sdr/dev/devices/fei_exception_through/tests/fei_exc_src/python/fei_exc_src.py b/codegenTesting/sdr/dev/devices/fei_exception_through/tests/fei_exc_src/python/fei_exc_src.py index 0c8ac37aa..334e8a248 100755 --- 
a/codegenTesting/sdr/dev/devices/fei_exception_through/tests/fei_exc_src/python/fei_exc_src.py +++ b/codegenTesting/sdr/dev/devices/fei_exception_through/tests/fei_exc_src/python/fei_exc_src.py @@ -53,6 +53,8 @@ def constructor(self): """ # TODO add customization here. self.setNumChannels(1, "RX_DIGITIZER"); + self.rf_info_list=None + self.rf_idx=0 def process(self): """ @@ -375,6 +377,44 @@ def get_current_rf_input(self,port_name): def set_current_rf_input(self, port_name, pkt): pass + + ''' + ************************************************************* + Functions servicing the RFInfo port(s) + - port_name is the port over which the call was received + ************************************************************* + ''' + def make_rfinfo(self,freq): + _antennainfo = FRONTEND.AntennaInfo('','','','') + _freqrange = FRONTEND.FreqRange(0,0,[]) + _feedinfo = FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo = FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities = FRONTEND.RFCapabilities(_freqrange,_freqrange) + _rfinfopkt = FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + _rfinfopkt.rf_center_freq=freq; + _rfinfopkt.rf_bandwidth=freq/2.0; + return _rfinfopkt + + def get_available_rf_inputs(self,port_name): + if not self.rf_info_list: + self.rf_info_list=[] + for x in range(10): + rf=self.make_rfinfo(( (x+1)*100.0)) + self.rf_info_list.append( rf ) + + return self.rf_info_list + + def set_available_rf_inputs(self,port_name, inputs): + pass + + def get_current_rf_input(self,port_name): + _ret=self.rf_info_list[ self.rf_idx ] + self.rf_idx = ( self.rf_idx + 1) % size(self.rf_info_list) + return _ret + + def set_current_rf_input(self, port_name, pkt): + pass + if __name__ == '__main__': logging.getLogger().setLevel(logging.INFO) diff --git a/codegenTesting/sdr/dev/devices/fei_exception_through/tests/test_fei_exception_through.py b/codegenTesting/sdr/dev/devices/fei_exception_through/tests/test_fei_exception_through.py index 
652ca50a3..90681bd1d 100644 --- a/codegenTesting/sdr/dev/devices/fei_exception_through/tests/test_fei_exception_through.py +++ b/codegenTesting/sdr/dev/devices/fei_exception_through/tests/test_fei_exception_through.py @@ -79,7 +79,7 @@ def tearDown(self): def testAllocation(self): frontend_alloc = frontend.createTunerAllocation(returnDict=False) retval = self.comp.allocateCapacity([frontend_alloc]) - self.assertEquals(retval, False) + self.assertEquals(retval, True) def testBasicBehavior(self): self.assertEquals(self.got_logmsg, False) @@ -188,5 +188,17 @@ def testBasicBehavior(self): self.assertRaises(exception, RFInfo_in.ref._get_rfinfo_pkt) self.assertRaises(exception, RFInfo_in.ref._set_rfinfo_pkt, _rfinfopkt) + + def test_RFSource_Out(self): + self.assertEquals(self.got_logmsg, False) + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + exc_src = sb.launch('./build/fei_exception_through/tests/fei_exc_src/fei_exc_src.spd.xml') + self.comp.connect(exc_src,usesPortName='RFSource_out') + self.comp.start(); + self.comp.stop(); + sb.release() + + if __name__ == "__main__": ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dev/devices/fei_src/.fei_src.wavedev b/codegenTesting/sdr/dev/devices/fei_src/.fei_src.wavedev new file mode 100644 index 000000000..d1ea167e5 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/.fei_src.wavedev @@ -0,0 +1,6 @@ + + + + + + diff --git a/codegenTesting/sdr/dev/devices/fei_src/.md5sums b/codegenTesting/sdr/dev/devices/fei_src/.md5sums new file mode 100644 index 000000000..169d75b51 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/.md5sums @@ -0,0 +1,2 @@ +f6f2a29cc59001538ca0f857cc33799d fei_src.spec +7e35e10f3e5ecc313d95333317184d9f build.sh diff --git a/codegenTesting/sdr/dev/devices/fei_src/fei_src.prf.xml b/codegenTesting/sdr/dev/devices/fei_src/fei_src.prf.xml new file mode 
100644 index 000000000..4fc24f7e2 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/fei_src.prf.xml @@ -0,0 +1,102 @@ + + + + + This specifies the device kind + FRONTEND::TUNER + + + + + This specifies the specific device + + + + + Status of each tuner, including entries for both allocated and un-allocated tuners. Each entry represents a single tuner. + + + Comma separated list of current Allocation IDs. + + + Current bandwidth in Hz + Hz + + + Current center frequency in Hz. + Hz + + + Indicates if tuner is enabled, in reference to the output state of the tuner. + + + Unique ID that specifies a group of Device. + + + Specifies a certain RF flow to allocate against. + + + Current sample rate in samples per second. + sps + + + Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + + + + + + Frontend Interfaces v2 listener allocation structure + + + + + + Frontend Interfaces v2 main allocation structure + + Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + + + The allocation_id set by the caller. Used by the caller to reference the allocation uniquely + + + Requested center frequency + Hz + + + Requested bandwidth (+/- the tolerance) + Hz + + + Allowable Percent above requested bandwidth (ie - 100 would be up to twice) + percent + + + Requested sample rate (+/- the tolerance). This can be ignored for such devices as analog tuners + Hz + + + Allowable Percent above requested sample rate (ie - 100 would be up to twice) + percent + + + True: Has control over the device to make changes +False: Does not need control and can just attach to any currently tasked device that satisfies the parameters (essentually a listener) + + + Unique identifier that specifies the group a device must be in. Must match group_id on the device + + + Optional. Specifies the RF flow of a specific input source to allocate against. If left empty, it will match all FrontEnd devices. 
+ + + + + + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/fei_src/fei_src.scd.xml b/codegenTesting/sdr/dev/devices/fei_src/fei_src.scd.xml new file mode 100644 index 000000000..fa312e702 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/fei_src.scd.xml @@ -0,0 +1,73 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/fei_src/fei_src.spd.xml b/codegenTesting/sdr/dev/devices/fei_src/fei_src.spd.xml new file mode 100644 index 000000000..14c6529dc --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/fei_src.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/fei_src.py + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/fei_src/python/.md5sums b/codegenTesting/sdr/dev/devices/fei_src/python/.md5sums new file mode 100644 index 000000000..4b6c40b00 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/python/.md5sums @@ -0,0 +1,6 @@ +8bfcd22353c3a57fee561ad86ee2a56b reconf +158196b688b5b0360e37ef98481028a1 fei_src.py +144eaa191497c9563235f7a811f7076e fei_src_base.py +2eddc7677137b4c8c6a0100cc06b7a6f configure.ac +fbb2dec4590f0c6f0855de1d8e5329f2 Makefile.am +fcb710dca24448d72c477de9580f395a Makefile.am.ide diff --git a/codegenTesting/sdr/dev/devices/fei_src/python/fei_src.py b/codegenTesting/sdr/dev/devices/fei_src/python/fei_src.py new file mode 100755 index 000000000..025f8e016 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/python/fei_src.py @@ -0,0 +1,383 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: fei_src.spd.xml +from ossie.device import start_device +import logging + +from fei_src_base import * + +class fei_src_i(fei_src_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your device 
registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + For a tuner device, the structure frontend_tuner_status needs to match the number + of tuners that this device controls and what kind of device it is. + The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER + + For example, if this device has 5 physical + tuners, 3 RX_DIGITIZER and 2 CHANNELIZER, then the code in the construct function + should look like this: + + self.addChannels(3, "RX_DIGITIZER"); + self.addChannels(2, "CHANNELIZER"); + + The incoming request for tuning contains a string describing the requested tuner + type. The string for the request must match the string in the tuner status. + """ + # TODO add customization here. + self.addChannels(1, "RX_DIGITIZER"); + self._stream_started = False + self._H = None + self.data_count = 0 + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the device. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + To create a StreamSRI object based on tuner status structure index 'idx' and collector center frequency of 100: + sri = frontend.sri.create("my_stream_id", self.frontend_tuner_status[idx], self._id, collector_frequency=100) + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the device developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", fei_src_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = fei_src_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Application: + app = self.getApplication().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + Allocation: + + Allocation callbacks are available to customize a Device's response to an allocation request. 
+ Callback allocation/deallocation functions are registered using the setAllocationImpl function, + usually in the initialize() function + For example, allocation property "my_alloc" can be registered with allocation function + my_alloc_fn and deallocation function my_dealloc_fn as follows: + + self.setAllocationImpl("my_alloc", self.my_alloc_fn, self.my_dealloc_fn) + + def my_alloc_fn(self, value): + # perform logic + return True # successful allocation + + def my_dealloc_fn(self, value): + # perform logic + pass + + Example: + + # This example assumes that the device has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the device + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + # TODO fill in your code here + if self.frontend_tuner_status[0].enabled: + if self.data_count == 5: + return NOOP + if not self._stream_started: + #push sri + self._H = bulkio.sri.create("my_data") + self.port_dataFloat_out.pushPacket([1.0,2.0,3.0,4.0,5.0], bulkio.timestamp.now(), False, "my_data") + self.data_count += 1 + else: + if self._H: + self.port_dataFloat_out.pushPacket([], bulkio.timestamp.now(), True, "my_data") + self._H = None + self.data_count = 0 
+ self._log.debug("process() example log message") + return NOOP + + ''' + ************************************************************* + Functions supporting tuning allocation + *************************************************************''' + def deviceEnable(self, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + Make sure to set the 'enabled' member of fts to indicate that tuner as enabled + ************************************************************''' + #print "deviceEnable(): Enable the given tuner *********" + fts.enabled = True + return + + def deviceDisable(self,fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + Make sure to reset the 'enabled' member of fts to indicate that tuner as disabled + ************************************************************''' + #print "deviceDisable(): Disable the given tuner *********" + fts.enabled = False + return + + def deviceSetTuning(self,request, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets the tolerance requirement. 
For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth = request.bandwidth + fts.center_frequency = request.center_frequency + fts.sample_rate = request.sample_rate + + return True if the tuning succeeded, and False if it failed + ************************************************************''' + #print "deviceSetTuning(): Evaluate whether or not a tuner is added *********" + fts.bandwidth = request.bandwidth + fts.center_frequency = request.center_frequency + fts.sample_rate = request.sample_rate + self.matchAllocationIdToStreamId(request.allocation_id, "my_data", self.port_dataFloat_out.name) + return True + + def deviceDeleteTuning(self, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + return True if the tune deletion succeeded, and False if it failed + ************************************************************''' + #print "deviceDeleteTuning(): Deallocate an allocated tuner *********" + return True + + ''' + ************************************************************* + Functions servicing the tuner control port + *************************************************************''' + def getTunerType(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].tuner_type + + def getTunerDeviceControl(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if self.getControlAllocationId(idx) == allocation_id: + return True + return False + + def getTunerGroupId(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].group_id + + def getTunerRfFlowId(self,allocation_id): + idx = 
self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].rf_flow_id + + + def setTunerCenterFrequency(self,allocation_id, freq): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if freq<0: raise FRONTEND.BadParameterException("Center frequency cannot be less than 0") + # set hardware to new value. Raise an exception if it's not possible + self.frontend_tuner_status[idx].center_frequency = freq + + def getTunerCenterFrequency(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].center_frequency + + def setTunerBandwidth(self,allocation_id, bw): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if bw<0: raise FRONTEND.BadParameterException("Bandwidth cannot be less than 0") + # set hardware to new value. 
Raise an exception if it's not possible + self.frontend_tuner_status[idx].bandwidth = bw + + def getTunerBandwidth(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].bandwidth + + def setTunerAgcEnable(self,allocation_id, enable): + raise FRONTEND.NotSupportedException("setTunerAgcEnable not supported") + + def getTunerAgcEnable(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerAgcEnable not supported") + + def setTunerGain(self,allocation_id, gain): + raise FRONTEND.NotSupportedException("setTunerGain not supported") + + def getTunerGain(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerGain not supported") + + def setTunerReferenceSource(self,allocation_id, source): + raise FRONTEND.NotSupportedException("setTunerReferenceSource not supported") + + def getTunerReferenceSource(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerReferenceSource not supported") + + def setTunerEnable(self,allocation_id, enable): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + # set hardware to new value. 
Raise an exception if it's not possible + self.frontend_tuner_status[idx].enabled = enable + + def getTunerEnable(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].enabled + + + def setTunerOutputSampleRate(self,allocation_id, sr): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if sr<0: raise FRONTEND.BadParameterException("Sample rate cannot be less than 0") + # set hardware to new value. Raise an exception if it's not possible + self.frontend_tuner_status[idx].sample_rate = sr + + def getTunerOutputSampleRate(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].sample_rate + + ''' + ************************************************************* + Functions servicing the RFInfo port(s) + - port_name is the port over which the call was received + *************************************************************''' + def get_rf_flow_id(self,port_name): + return "" + + def set_rf_flow_id(self,port_name, _id): + pass + + def get_rfinfo_pkt(self,port_name): + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + _rfinfopkt=FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + return _rfinfopkt + + def set_rfinfo_pkt(self,port_name, pkt): + pass + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + 
start_device(fei_src_i) + diff --git a/codegenTesting/sdr/dev/devices/fei_src/tests/.md5sums b/codegenTesting/sdr/dev/devices/fei_src/tests/.md5sums new file mode 100644 index 000000000..52dadc04f --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/tests/.md5sums @@ -0,0 +1 @@ +d9b9023c886e7b54bb6bbd8caac4a25b test_fei_src.py diff --git a/codegenTesting/sdr/dev/devices/fei_src/tests/test_fei_src.py b/codegenTesting/sdr/dev/devices/fei_src/tests/test_fei_src.py new file mode 100644 index 000000000..eba76ffb1 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/fei_src/tests/test_fei_src.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python + +import ossie.utils.testing +from ossie.utils import sb +import time, frontend + +class DeviceTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the device. + SPD_FILE = '../fei_src.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a device using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. 
Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the device, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl) + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def testDefinedConnectionId(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + alloc=frontend.createTunerAllocation() + snk_1 = sb.StreamSink() + snk_2 = sb.StreamSink() + self.comp.allocateCapacity(alloc) + self.comp.connect(snk_1, connectionId='some_connection') + tuner_status = self.comp.frontend_tuner_status[0] + alloc_id = alloc['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.assertEquals(tuner_status.allocation_id_csv, alloc_id) + self.comp.connect(snk_2, connectionId=alloc_id) + self.assertEquals(len(self.comp.connectionTable), 1) + self.assertEquals(tuner_status.allocation_id_csv, alloc_id) + sb.start() + time.sleep(0.1) + time.sleep(0.25) + self.comp.deallocateCapacity(alloc) + time.sleep(0.1) + data_1 = snk_1.read(timeout=1) + self.assertEquals(data_1, None) + data_2 = snk_2.read(timeout=1) + self.assertEquals(data_2.streamID, 'my_data') + self.assertEquals(len(data_2.data), 25) + self.assertEquals(len(self.comp.connectionTable), 0) + sb.stop() + + def testConnectBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + alloc=frontend.createTunerAllocation() + snk_1 = sb.StreamSink() + snk_2 = sb.StreamSink() + self.comp.allocateCapacity(alloc) + self.comp.connect(snk_1, connectionId='some_connection') + tuner_status = self.comp.frontend_tuner_status[0] + alloc_id = 
alloc['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.assertEquals(tuner_status.allocation_id_csv, alloc_id) + self.comp.connect(snk_2) + self.assertEquals(len(self.comp.connectionTable), 2) + alloc_id = alloc_id + ',' + self.comp.connectionTable[1].connection_id + self.assertEquals(tuner_status.allocation_id_csv, alloc_id) + sb.start() + time.sleep(0.1) + time.sleep(0.25) + self.comp.deallocateCapacity(alloc) + time.sleep(0.1) + data_1 = snk_1.read(timeout=1) + self.assertEquals(data_1, None) + data_2 = snk_2.read(timeout=1) + self.assertEquals(data_2.streamID, 'my_data') + self.assertEquals(len(data_2.data), 25) + self.assertEquals(len(self.comp.connectionTable), 0) + sb.stop() + + def testDisconnectBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + alloc=frontend.createTunerAllocation() + snk_1 = sb.StreamSink() + snk_2 = sb.StreamSink() + self.comp.allocateCapacity(alloc) + self.comp.connect(snk_1, connectionId='some_connection') + tuner_status = self.comp.frontend_tuner_status[0] + alloc_id = alloc['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'] + self.assertEquals(tuner_status.allocation_id_csv, alloc_id) + self.comp.connect(snk_2) + sb.start() + time.sleep(0.1) + time.sleep(0.25) + self.comp.disconnect(snk_2) + time.sleep(0.1) + data_1 = snk_1.read(timeout=1) + self.assertEquals(data_1, None) + data_2 = snk_2.read(timeout=1) + self.assertEquals(data_2.streamID, 'my_data') + self.assertEquals(len(data_2.data), 25) + self.assertEquals(tuner_status.allocation_id_csv, alloc_id) + sb.stop() + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/.inverted_fei.wavedev b/codegenTesting/sdr/dev/devices/inverted_fei/.inverted_fei.wavedev new file mode 100644 index 000000000..d1ea167e5 --- /dev/null 
+++ b/codegenTesting/sdr/dev/devices/inverted_fei/.inverted_fei.wavedev @@ -0,0 +1,6 @@ + + + + + + diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/.md5sums b/codegenTesting/sdr/dev/devices/inverted_fei/.md5sums new file mode 100644 index 000000000..da8c10eb3 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/inverted_fei/.md5sums @@ -0,0 +1,2 @@ +d47de16404adb5cecbf3345af05be33d build.sh +b0f6fbc5ffd14595001da7e65d637ca9 inverted_fei.spec diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.prf.xml b/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.prf.xml new file mode 100644 index 000000000..7e151f5ae --- /dev/null +++ b/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.prf.xml @@ -0,0 +1,112 @@ + + + + + This specifies the device kind + FRONTEND::TUNER + + + + + This specifies the specific device + + + + + Status of each tuner, including entries for both allocated and un-allocated tuners. Each entry represents a single tuner. + + + Comma separated list of current Allocation IDs. + + + Current bandwidth in Hz + Hz + + + Current center frequency in Hz. + Hz + + + Indicates if tuner is enabled, in reference to the output state of the tuner. + + + Unique ID that specifies a group of Device. + + + Specifies a certain RF flow to allocate against. + + + Current sample rate in samples per second. + sps + + + Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + + + + + + Frontend Interfaces v2 listener allocation structure + + + + + + Frontend Interfaces v2 main allocation structure + + Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + + + The allocation_id set by the caller. Used by the caller to reference the allocation uniquely + + + Requested center frequency + Hz + + + Requested bandwidth (+/- the tolerance) + Hz + + + Allowable Percent above requested bandwidth (ie - 100 would be up to twice) + percent + + + Requested sample rate (+/- the tolerance). 
This can be ignored for such devices as analog tuners + Hz + + + Allowable Percent above requested sample rate (ie - 100 would be up to twice) + percent + + + True: Has control over the device to make changes +False: Does not need control and can just attach to any currently tasked device that satisfies the parameters (essentually a listener) + + + Unique identifier that specifies the group a device must be in. Must match group_id on the device + + + Optional. Specifies the RF flow of a specific input source to allocate against. If left empty, it will match all FrontEnd devices. + + + + + + + + + + + + + 1.15e6 + + + + + 8.5e5 + + + + diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.scd.xml b/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.scd.xml new file mode 100644 index 000000000..42a732fe2 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.scd.xml @@ -0,0 +1,73 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.spd.xml b/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.spd.xml new file mode 100644 index 000000000..44592662c --- /dev/null +++ b/codegenTesting/sdr/dev/devices/inverted_fei/inverted_fei.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/inverted_fei.py + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/python/.md5sums b/codegenTesting/sdr/dev/devices/inverted_fei/python/.md5sums new file mode 100644 index 000000000..444924c58 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/inverted_fei/python/.md5sums @@ -0,0 +1,6 @@ +37fb5ee26a2f37202487708fa6df0e7f inverted_fei.py +8bfcd22353c3a57fee561ad86ee2a56b reconf +12ed7331befcb36daf8f285a7e37e16a inverted_fei_base.py +119dd94144b52c42dcc771d1f28bc722 configure.ac +6e0a2acb68f7c93d07051411ae7dc980 Makefile.am +560d3551066b7f8cf4f8fdb2f31b12eb Makefile.am.ide diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/python/inverted_fei.py b/codegenTesting/sdr/dev/devices/inverted_fei/python/inverted_fei.py new file mode 100755 index 000000000..67171711e --- /dev/null +++ b/codegenTesting/sdr/dev/devices/inverted_fei/python/inverted_fei.py @@ -0,0 +1,366 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: inverted_fei.spd.xml +from ossie.device import start_device +import logging + +from inverted_fei_base import * + +class inverted_fei_i(inverted_fei_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your device registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + For a tuner device, the structure frontend_tuner_status needs to match the number + of tuners that this device controls and what kind of device it is. 
+ The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER + + For example, if this device has 5 physical + tuners, 3 RX_DIGITIZER and 2 CHANNELIZER, then the code in the construct function + should look like this: + + self.addChannels(3, "RX_DIGITIZER"); + self.addChannels(2, "CHANNELIZER"); + + The incoming request for tuning contains a string describing the requested tuner + type. The string for the request must match the string in the tuner status. + """ + # TODO add customization here. + self.addChannels(1, "RX_DIGITIZER"); + _antennainfo = FRONTEND.AntennaInfo('','','','') + _freqrange = FRONTEND.FreqRange(0,0,[]) + _feedinfo = FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo = FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities = FRONTEND.RFCapabilities(_freqrange,_freqrange) + self.my_rfinfo_pkt = FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the device. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + To create a StreamSRI object based on tuner status structure index 'idx' and collector center frequency of 100: + sri = frontend.sri.create("my_stream_id", self.frontend_tuner_status[idx], self._id, collector_frequency=100) + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the device developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", inverted_fei_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = inverted_fei_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Application: + app = self.getApplication().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + Allocation: + + Allocation callbacks are available to customize a Device's response to an allocation request. 
+ Callback allocation/deallocation functions are registered using the setAllocationImpl function, + usually in the initialize() function + For example, allocation property "my_alloc" can be registered with allocation function + my_alloc_fn and deallocation function my_dealloc_fn as follows: + + self.setAllocationImpl("my_alloc", self.my_alloc_fn, self.my_dealloc_fn) + + def my_alloc_fn(self, value): + # perform logic + return True # successful allocation + + def my_dealloc_fn(self, value): + # perform logic + pass + + Example: + + # This example assumes that the device has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the device + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + ''' + ************************************************************* + Functions supporting tuning allocation + *************************************************************''' + def deviceEnable(self, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + Make 
sure to set the 'enabled' member of fts to indicate that tuner as enabled + ************************************************************''' + fts.enabled = True + return + + def deviceDisable(self,fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + Make sure to reset the 'enabled' member of fts to indicate that tuner as disabled + ************************************************************''' + fts.enabled = False + return + + def deviceSetTuning(self,request, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets the tolerance requirement. For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth = request.bandwidth + fts.center_frequency = request.center_frequency + fts.sample_rate = request.sample_rate + + return True if the tuning succeeded, and False if it failed + ************************************************************''' + max_dev_bw = 1e20 + max_dev_sr = 1e20 + try: + retval = frontend.validateRequestVsDevice(request, self.my_rfinfo_pkt, False, self.min_dev_if_cf, self.max_dev_if_cf, max_dev_bw, max_dev_sr) + except: + retval = False + return retval + + def deviceDeleteTuning(self, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + return True if the tune deletion succeeded, and False if it failed + ************************************************************''' + return True + + ''' + ************************************************************* + Functions servicing the tuner control port + 
*************************************************************''' + def getTunerType(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].tuner_type + + def getTunerDeviceControl(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if self.getControlAllocationId(idx) == allocation_id: + return True + return False + + def getTunerGroupId(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].group_id + + def getTunerRfFlowId(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].rf_flow_id + + + def setTunerCenterFrequency(self,allocation_id, freq): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if freq<0: raise FRONTEND.BadParameterException("Center frequency cannot be less than 0") + # set hardware to new value. 
Raise an exception if it's not possible + self.frontend_tuner_status[idx].center_frequency = freq + + def getTunerCenterFrequency(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].center_frequency + + def setTunerBandwidth(self,allocation_id, bw): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if bw<0: raise FRONTEND.BadParameterException("Bandwidth cannot be less than 0") + # set hardware to new value. Raise an exception if it's not possible + self.frontend_tuner_status[idx].bandwidth = bw + + def getTunerBandwidth(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].bandwidth + + def setTunerAgcEnable(self,allocation_id, enable): + raise FRONTEND.NotSupportedException("setTunerAgcEnable not supported") + + def getTunerAgcEnable(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerAgcEnable not supported") + + def setTunerGain(self,allocation_id, gain): + raise FRONTEND.NotSupportedException("setTunerGain not supported") + + def getTunerGain(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerGain not supported") + + def setTunerReferenceSource(self,allocation_id, source): + raise FRONTEND.NotSupportedException("setTunerReferenceSource not supported") + + def getTunerReferenceSource(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerReferenceSource not supported") + + def setTunerEnable(self,allocation_id, enable): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") 
+ if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + # set hardware to new value. Raise an exception if it's not possible + self.frontend_tuner_status[idx].enabled = enable + + def getTunerEnable(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].enabled + + + def setTunerOutputSampleRate(self,allocation_id, sr): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if sr<0: raise FRONTEND.BadParameterException("Sample rate cannot be less than 0") + # set hardware to new value. Raise an exception if it's not possible + self.frontend_tuner_status[idx].sample_rate = sr + + def getTunerOutputSampleRate(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].sample_rate + + ''' + ************************************************************* + Functions servicing the RFInfo port(s) + - port_name is the port over which the call was received + *************************************************************''' + def get_rf_flow_id(self,port_name): + return "" + + def set_rf_flow_id(self,port_name, _id): + pass + + def get_rfinfo_pkt(self,port_name): + return self.my_rfinfo_pkt + + def set_rfinfo_pkt(self,port_name, pkt): + self.my_rfinfo_pkt = pkt + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + start_device(inverted_fei_i) + diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/tests/.md5sums 
b/codegenTesting/sdr/dev/devices/inverted_fei/tests/.md5sums new file mode 100644 index 000000000..c96d16602 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/inverted_fei/tests/.md5sums @@ -0,0 +1 @@ +c0fa036b1096edb60eadf3c1a3133893 test_inverted_fei.py diff --git a/codegenTesting/sdr/dev/devices/inverted_fei/tests/test_inverted_fei.py b/codegenTesting/sdr/dev/devices/inverted_fei/tests/test_inverted_fei.py new file mode 100644 index 000000000..9a41b1954 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/inverted_fei/tests/test_inverted_fei.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python + +import ossie.utils.testing +from ossie.utils import sb +import frontend +from redhawk.frontendInterfaces import FRONTEND + +class DeviceTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the device. + SPD_FILE = '../inverted_fei.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a device using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. 
Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the device, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl) + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def testBasicBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + rf_center_freq = 1e7 + rf_bandwidth = 3e5 + if_center_freq = 1e6 + pkt_str=FRONTEND.RFInfoPkt('my_flow',rf_center_freq,rf_bandwidth,if_center_freq,False,_sensorinfo,[],_rfcapabilities,[]) + request_cf = 9.95e6 + request_bw = 2e5 + alloc = frontend.createTunerAllocation(allocation_id='foo', center_frequency=request_cf, bandwidth=request_bw, sample_rate=0.0) + port=self.comp.ports[1] + port.ref._set_rfinfo_pkt(pkt_str) + self.comp.max_dev_if_cf = 1e6 + self.assertTrue(self.comp.allocateCapacity(alloc)) + self.comp.deallocateCapacity(alloc) + pkt_inv=FRONTEND.RFInfoPkt('my_flow',rf_center_freq,rf_bandwidth,if_center_freq,True,_sensorinfo,[],_rfcapabilities,[]) + port.ref._set_rfinfo_pkt(pkt_inv) + self.assertFalse(self.comp.allocateCapacity(alloc)) + self.comp.max_dev_if_cf = 1.15e6 + self.assertTrue(self.comp.allocateCapacity(alloc)) + self.comp.deallocateCapacity(alloc) + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dev/devices/my_scanner/.my_scanner.wavedev b/codegenTesting/sdr/dev/devices/my_scanner/.my_scanner.wavedev new file mode 100644 index 
000000000..15baaa611 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/.my_scanner.wavedev @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/my_scanner/cpp/my_scanner.cpp b/codegenTesting/sdr/dev/devices/my_scanner/cpp/my_scanner.cpp new file mode 100644 index 000000000..6a8926a59 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/cpp/my_scanner.cpp @@ -0,0 +1,545 @@ +/************************************************************************** + + This is the device code. This file contains the child class where + custom functionality can be added to the device. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "my_scanner.h" + +PREPARE_LOGGING(my_scanner_i) + +my_scanner_i::my_scanner_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : + my_scanner_base(devMgr_ior, id, lbl, sftwrPrfl) +{ +} + +my_scanner_i::my_scanner_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : + my_scanner_base(devMgr_ior, id, lbl, sftwrPrfl, compDev) +{ +} + +my_scanner_i::my_scanner_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : + my_scanner_base(devMgr_ior, id, lbl, sftwrPrfl, capacities) +{ +} + +my_scanner_i::my_scanner_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : + my_scanner_base(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev) +{ +} + +my_scanner_i::~my_scanner_i() +{ +} + +void my_scanner_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. 
All properties are properly initialized before this function is called + + For a tuner device, the structure frontend_tuner_status needs to match the number + of tuners that this device controls and what kind of device it is. + The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER + + For example, if this device has 5 physical + tuners, 3 RX_DIGITIZER and 2 CHANNELIZER, then the code in the construct function + should look like this: + + this->addChannels(3, "RX_DIGITIZER"); + this->addChannels(2, "CHANNELIZER"); + + The incoming request for tuning contains a string describing the requested tuner + type. The string for the request must match the string in the tuner status. + ***********************************************************************************/ + this->addChannels(1, "RX_DIGITIZER"); +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + To create a StreamSRI object based on tuner status structure index 'idx' and collector center frequency of 100: + std::string stream_id = "my_stream_id"; + BULKIO::StreamSRI sri = this->create(stream_id, this->frontend_tuner_status[idx], 100); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). 
The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. 
+ + Example: + // This example assumes that the device has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the device base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. 
Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the device developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void my_scanner_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &my_scanner_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Device Manager: + CF::DeviceManager_ptr devmgr = this->getDeviceManager()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (my_scanner_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &my_scanner_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to my_scanner.cpp + my_scanner_i::my_scanner_i(const char *uuid, const char *label) : + my_scanner_base(uuid, label) + { + addPropertyListener(scaleValue, this, &my_scanner_i::scaleChanged); + addPropertyListener(status, this, &my_scanner_i::statusChanged); + } + + void my_scanner_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(my_scanner_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void my_scanner_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(my_scanner_i, "status changed"); + } + + //Add to my_scanner.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + Allocation: + + Allocation callbacks are available to customize the Device's response to + allocation requests. For example, if the Device contains the allocation + property "my_alloc" of type string, the allocation and deallocation + callbacks follow the pattern (with arbitrary function names + my_alloc_fn and my_dealloc_fn): + + bool my_scanner_i::my_alloc_fn(const std::string &value) + { + // perform logic + return true; // successful allocation + } + void my_scanner_i::my_dealloc_fn(const std::string &value) + { + // perform logic + } + + The allocation and deallocation functions are then registered with the Device + base class with the setAllocationImpl call. 
Note that the variable for the property is used rather + than its id: + + this->setAllocationImpl(my_alloc, this, &my_scanner_i::my_alloc_fn, &my_scanner_i::my_dealloc_fn); + + + +************************************************************************************************/ +int my_scanner_i::serviceFunction() +{ + LOG_DEBUG(my_scanner_i, "serviceFunction() example log message"); + + return NOOP; +} + +/************************************************************* +Functions supporting tuning allocation +*************************************************************/ +void my_scanner_i::deviceEnable(frontend_tuner_status_struct_struct &fts, size_t tuner_id){ + /************************************************************ + modify fts, which corresponds to this->frontend_tuner_status[tuner_id] + Make sure to set the 'enabled' member of fts to indicate that tuner as enabled + ************************************************************/ + fts.enabled = true; + return; +} +void my_scanner_i::deviceDisable(frontend_tuner_status_struct_struct &fts, size_t tuner_id){ + /************************************************************ + modify fts, which corresponds to this->frontend_tuner_status[tuner_id] + Make sure to reset the 'enabled' member of fts to indicate that tuner as disabled + ************************************************************/ + fts.enabled = false; + return; +} +bool my_scanner_i::deviceSetTuningScan(const frontend::frontend_tuner_allocation_struct &request, const frontend::frontend_scanner_allocation_struct &scan_request, frontend_tuner_status_struct_struct &fts, size_t tuner_id){ + /************************************************************ + + This function is called when the allocation request contains a scanner allocation + + modify fts, which corresponds to this->frontend_tuner_status[tuner_id] + At a minimum, bandwidth, center frequency, and sample_rate have to be set + If the device is tuned to exactly what the request was, the code 
should be: + fts.bandwidth = request.bandwidth; + fts.center_frequency = request.center_frequency; + fts.sample_rate = request.sample_rate; + + return true if the tuning succeeded, and false if it failed + ************************************************************/ + if ((request.bandwidth == 1000) and (scan_request.min_freq==10000)) + return true; + return false; +} +bool my_scanner_i::deviceSetTuning(const frontend::frontend_tuner_allocation_struct &request, frontend_tuner_status_struct_struct &fts, size_t tuner_id){ + /************************************************************ + + This function is called when the allocation request does not contain a scanner allocation + + modify fts, which corresponds to this->frontend_tuner_status[tuner_id] + At a minimum, bandwidth, center frequency, and sample_rate have to be set + If the device is tuned to exactly what the request was, the code should be: + fts.bandwidth = request.bandwidth; + fts.center_frequency = request.center_frequency; + fts.sample_rate = request.sample_rate; + + return true if the tuning succeeded, and false if it failed + ************************************************************/ + return false; +} +bool my_scanner_i::deviceDeleteTuning(frontend_tuner_status_struct_struct &fts, size_t tuner_id) { + /************************************************************ + modify fts, which corresponds to this->frontend_tuner_status[tuner_id] + return true if the tune deletion succeeded, and false if it failed + ************************************************************/ + return true; +} + +/************************************************************* +Functions servicing the tuner control port +*************************************************************/ +std::string my_scanner_i::getTunerType(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + return frontend_tuner_status[idx].tuner_type; +} 
+ +bool my_scanner_i::getTunerDeviceControl(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if (getControlAllocationId(idx) == allocation_id) + return true; + return false; +} + +std::string my_scanner_i::getTunerGroupId(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + return frontend_tuner_status[idx].group_id; +} + +std::string my_scanner_i::getTunerRfFlowId(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + return frontend_tuner_status[idx].rf_flow_id; +} + +void my_scanner_i::setTunerCenterFrequency(const std::string& allocation_id, double freq) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); + if (freq<0) throw FRONTEND::BadParameterException("Center frequency cannot be less than 0"); + // set hardware to new value. 
Raise an exception if it's not possible + this->frontend_tuner_status[idx].center_frequency = freq; +} + +double my_scanner_i::getTunerCenterFrequency(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + return frontend_tuner_status[idx].center_frequency; +} + +void my_scanner_i::setTunerBandwidth(const std::string& allocation_id, double bw) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); + if (bw<0) throw FRONTEND::BadParameterException("Bandwidth cannot be less than 0"); + // set hardware to new value. Raise an exception if it's not possible + this->frontend_tuner_status[idx].bandwidth = bw; +} + +double my_scanner_i::getTunerBandwidth(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + return frontend_tuner_status[idx].bandwidth; +} + +void my_scanner_i::setTunerAgcEnable(const std::string& allocation_id, bool enable) +{ + throw FRONTEND::NotSupportedException("setTunerAgcEnable not supported"); +} + +bool my_scanner_i::getTunerAgcEnable(const std::string& allocation_id) +{ + throw FRONTEND::NotSupportedException("getTunerAgcEnable not supported"); +} + +void my_scanner_i::setTunerGain(const std::string& allocation_id, float gain) +{ + throw FRONTEND::NotSupportedException("setTunerGain not supported"); +} + +float my_scanner_i::getTunerGain(const std::string& allocation_id) +{ + throw FRONTEND::NotSupportedException("getTunerGain not supported"); +} + +void my_scanner_i::setTunerReferenceSource(const std::string& allocation_id, long source) +{ + throw FRONTEND::NotSupportedException("setTunerReferenceSource not 
supported"); +} + +long my_scanner_i::getTunerReferenceSource(const std::string& allocation_id) +{ + throw FRONTEND::NotSupportedException("getTunerReferenceSource not supported"); +} + +void my_scanner_i::setTunerEnable(const std::string& allocation_id, bool enable) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); + // set hardware to new value. Raise an exception if it's not possible + this->frontend_tuner_status[idx].enabled = enable; +} + +bool my_scanner_i::getTunerEnable(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + return frontend_tuner_status[idx].enabled; +} + +void my_scanner_i::setTunerOutputSampleRate(const std::string& allocation_id, double sr) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); + if (sr<0) throw FRONTEND::BadParameterException("Sample rate cannot be less than 0"); + // set hardware to new value. 
Raise an exception if it's not possible + this->frontend_tuner_status[idx].sample_rate = sr; +} + +double my_scanner_i::getTunerOutputSampleRate(const std::string& allocation_id){ + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + return frontend_tuner_status[idx].sample_rate; +} +frontend::ScanStatus my_scanner_i::getScanStatus(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + frontend::ManualStrategy *initial_strategy = new frontend::ManualStrategy(0); + initial_strategy->control_value = 123.0; + frontend::ScanStrategy *tmp = initial_strategy->clone(); + delete initial_strategy; + frontend::ScanStatus retval(tmp); + return retval; +} + +void my_scanner_i::setScanStartTime(const std::string& allocation_id, const BULKIO::PrecisionUTCTime& start_time) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); +} + +void my_scanner_i::setScanStrategy(const std::string& allocation_id, const frontend::ScanStrategy* scan_strategy) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); + if (dynamic_cast<const frontend::ManualStrategy*>(scan_strategy) != NULL) { + this->strategy_request = "manual"; + } + if (dynamic_cast<const frontend::SpanStrategy*>(scan_strategy) != NULL) { + this->strategy_request = "span"; + } + if (dynamic_cast<const frontend::DiscreteStrategy*>(scan_strategy) != NULL) { + this->strategy_request = "discrete"; + } +} + +/************************************************************* +Functions servicing the
RFInfo port(s) +- port_name is the port over which the call was received +*************************************************************/ +std::string my_scanner_i::get_rf_flow_id(const std::string& port_name) +{ + return std::string("none"); +} + +void my_scanner_i::set_rf_flow_id(const std::string& port_name, const std::string& id) +{ +} + +frontend::RFInfoPkt my_scanner_i::get_rfinfo_pkt(const std::string& port_name) +{ + frontend::RFInfoPkt pkt; + return pkt; +} + +void my_scanner_i::set_rfinfo_pkt(const std::string& port_name, const frontend::RFInfoPkt &pkt) +{ +} + diff --git a/codegenTesting/sdr/dev/devices/my_scanner/cpp/my_scanner.h b/codegenTesting/sdr/dev/devices/my_scanner/cpp/my_scanner.h new file mode 100644 index 000000000..6ce836056 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/cpp/my_scanner.h @@ -0,0 +1,61 @@ +#ifndef MY_SCANNER_I_IMPL_H +#define MY_SCANNER_I_IMPL_H + +#include "my_scanner_base.h" + +class my_scanner_i : public my_scanner_base +{ + ENABLE_LOGGING + public: + my_scanner_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); + my_scanner_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); + my_scanner_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities); + my_scanner_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev); + ~my_scanner_i(); + + void constructor(); + + int serviceFunction(); + + protected: + std::string getTunerType(const std::string& allocation_id); + bool getTunerDeviceControl(const std::string& allocation_id); + std::string getTunerGroupId(const std::string& allocation_id); + std::string getTunerRfFlowId(const std::string& allocation_id); + double getTunerCenterFrequency(const std::string& allocation_id); + void setTunerCenterFrequency(const std::string& allocation_id, double freq); + double getTunerBandwidth(const std::string& allocation_id); + void setTunerBandwidth(const std::string& 
allocation_id, double bw); + bool getTunerAgcEnable(const std::string& allocation_id); + void setTunerAgcEnable(const std::string& allocation_id, bool enable); + float getTunerGain(const std::string& allocation_id); + void setTunerGain(const std::string& allocation_id, float gain); + long getTunerReferenceSource(const std::string& allocation_id); + void setTunerReferenceSource(const std::string& allocation_id, long source); + bool getTunerEnable(const std::string& allocation_id); + void setTunerEnable(const std::string& allocation_id, bool enable); + double getTunerOutputSampleRate(const std::string& allocation_id); + void setTunerOutputSampleRate(const std::string& allocation_id, double sr); + frontend::ScanStatus getScanStatus(const std::string& allocation_id); + void setScanStartTime(const std::string& allocation_id, const BULKIO::PrecisionUTCTime& start_time); + void setScanStrategy(const std::string& allocation_id, const frontend::ScanStrategy* scan_strategy); + std::string get_rf_flow_id(const std::string& port_name); + void set_rf_flow_id(const std::string& port_name, const std::string& id); + frontend::RFInfoPkt get_rfinfo_pkt(const std::string& port_name); + void set_rfinfo_pkt(const std::string& port_name, const frontend::RFInfoPkt& pkt); + + private: + //////////////////////////////////////// + // Required device specific functions // -- to be implemented by device developer + //////////////////////////////////////// + + // these are pure virtual, must be implemented here + void deviceEnable(frontend_tuner_status_struct_struct &fts, size_t tuner_id); + void deviceDisable(frontend_tuner_status_struct_struct &fts, size_t tuner_id); + bool deviceSetTuningScan(const frontend::frontend_tuner_allocation_struct &request, const frontend::frontend_scanner_allocation_struct &scan_request, frontend_tuner_status_struct_struct &fts, size_t tuner_id); + bool deviceSetTuning(const frontend::frontend_tuner_allocation_struct &request, frontend_tuner_status_struct_struct 
&fts, size_t tuner_id); + bool deviceDeleteTuning(frontend_tuner_status_struct_struct &fts, size_t tuner_id); + +}; + +#endif // MY_SCANNER_I_IMPL_H diff --git a/codegenTesting/sdr/dev/devices/my_scanner/java/src/my_scanner/java/my_scanner.java b/codegenTesting/sdr/dev/devices/my_scanner/java/src/my_scanner/java/my_scanner.java new file mode 100644 index 000000000..6ff1a736d --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/java/src/my_scanner/java/my_scanner.java @@ -0,0 +1,585 @@ +package my_scanner.java; + +import java.util.Properties; +import org.omg.CORBA.ORB; +import org.omg.PortableServer.POA; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; +import CF.DeviceManager; +import CF.DevicePackage.InvalidCapacity; +import CF.DevicePackage.InvalidCapacity; +import CF.InvalidObjectReference; + +/** + * This is the device code. This file contains the derived class where custom + * functionality can be added to the device. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general device housekeeping + * + * Source: my_scanner.spd.xml + */ +public class my_scanner extends my_scanner_base { + /** + * This is the device constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your device. + * + * A device may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * device class. + * + * Devices may contain allocation properties with "external" action, which + * are used in capacity allocation and deallocation. In order to support + * this capability, allocation properties require additional functionality. + * This is implemented by calling setAllocator() on the property instance + * with an object that implements the Allocator interface for that data type. 
+ * + * Example: + * // This example makes use of the following properties + * // - A struct property called tuner_alloc + * // The following methods are defined elsewhere in your class: + * // - private boolean allocate_tuner(tuner_alloc_struct capacity) + * // - private void deallocate_tuner(tuner_alloc_struct capacity) + * // The file must import "org.ossie.properties.Allocator" + * + * this.tuner_alloc.setAllocator(new Allocator() { + * public boolean allocate(tuner_alloc_struct capacity) { + * return allocate_tuner(capacity); + * } + * public void deallocate(tuner_alloc_struct capacity) { + * deallocate_tuner(capacity); + * } + * }); + * + * The recommended practice is for the allocate() and deallocate() methods + * to contain only glue code to dispatch the call to private methods on the + * device class. + * Accessing the Device Manager and Domain Manager: + * + * Both the Device Manager hosting this Device and the Domain Manager hosting + * the Device Manager are available to the Device. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Device Manager: + * CF.DeviceManager devmgr = this.getDeviceManager().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. 
+ */ + + public my_scanner() + { + super(); + } + + public void constructor() + { + /************************************************************************** + + For a tuner device, the structure frontend_tuner_status needs to match the number + of tuners that this device controls and what kind of device it is. + The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER + + For example, if this device has 5 physical + tuners, 3 RX_DIGITIZER and 2 CHANNELIZER, then the code in the construct function + should look like this: + + this.addChannels(3, "RX_DIGITIZER"); + this.addChannels(2, "CHANNELIZER"); + + The incoming request for tuning contains a string describing the requested tuner + type. The string for the request must match the string in the tuner status. + + **************************************************************************/ + this.addChannels(1, "RX_DIGITIZER"); + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the device's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? 
stream_id : ""; + * + * To create a StreamSRI object based on tuner status structure index 'idx': + * BULKIO.StreamSRI sri = this.create("my_stream_id", this.frontend_tuner_status.getValue().get(idx)); + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the device developer. + * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. 
Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the device has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the device + * base class file. + * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + logger.debug("serviceFunction() example log message"); + + return NOOP; + } + + /** + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + + + /************************************************************* + Functions supporting tuning allocation + *************************************************************/ + public void deviceEnable(frontend_tuner_status_struct_struct fts, int tuner_id) + { + /************************************************************ + modify fts, which corresponds to this.frontend_tuner_status.getValue().get(tuner_id) + Make sure to set the 'enabled' member of fts to indicate that tuner as enabled + ************************************************************/ + System.out.println("deviceEnable(): Enable the given tuner *********"); + fts.enabled.setValue(true); + return; + } + public void deviceDisable(frontend_tuner_status_struct_struct fts, int tuner_id) + { + /************************************************************ + modify fts, which corresponds to this.frontend_tuner_status.getValue().get(tuner_id) + Make sure to reset the 'enabled' member of fts to indicate that tuner as disabled + ************************************************************/ + System.out.println("deviceDisable(): Disable the given tuner *********"); + fts.enabled.setValue(false); + return; + } + public boolean deviceSetTuningScan(final frontend.FETypes.frontend_tuner_allocation_struct request, final frontend.FETypes.frontend_scanner_allocation_struct scan_request, frontend_tuner_status_struct_struct fts, int tuner_id) + { + /************************************************************ + + This function is called when the allocation request contains a scanner allocation + + modify fts, which corresponds to this.frontend_tuner_status.getValue().get(tuner_id) + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets 
the tolerance requirement. For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth.setValue(request.bandwidth.getValue()); + fts.center_frequency.setValue(request.center_frequency.getValue()); + fts.sample_rate.setValue(request.sample_rate.getValue()); + + return true if the tuning succeeded, and false if it failed + ************************************************************/ + if ((request.bandwidth.getValue() == 1000.0) && (scan_request.min_freq.getValue() == 10000.0)) { + return true; + } + return false; + } + public boolean deviceSetTuning(final frontend.FETypes.frontend_tuner_allocation_struct request, frontend_tuner_status_struct_struct fts, int tuner_id) + { + /************************************************************ + + This function is called when the allocation request does not contain a scanner allocation + + modify fts, which corresponds to this.frontend_tuner_status.getValue().get(tuner_id) + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets the tolerance requirement. 
For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth.setValue(request.bandwidth.getValue()); + fts.center_frequency.setValue(request.center_frequency.getValue()); + fts.sample_rate.setValue(request.sample_rate.getValue()); + + return true if the tuning succeeded, and false if it failed + ************************************************************/ + return false; + } + public boolean deviceDeleteTuning(frontend_tuner_status_struct_struct fts, int tuner_id) + { + /************************************************************ + modify fts, which corresponds to this.frontend_tuner_status.getValue().get(tuner_id) + return true if the tune deletion succeeded, and false if it failed + ************************************************************/ + System.out.println("deviceDeleteTuning(): Deallocate an allocated tuner *********"); + return true; + } + + /************************************************************* + Functions servicing the tuner control port + *************************************************************/ + public String getTunerType(final String allocation_id) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + return frontend_tuner_status.getValue().get(idx).tuner_type.getValue(); + } + + public boolean getTunerDeviceControl(final String allocation_id) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if (getControlAllocationId(idx) == allocation_id) + return true; + return false; + } + + public String getTunerGroupId(final String allocation_id) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + return frontend_tuner_status.getValue().get(idx).group_id.getValue(); + 
} + + public String getTunerRfFlowId(final String allocation_id) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + return frontend_tuner_status.getValue().get(idx).rf_flow_id.getValue(); + } + + public void setTunerCenterFrequency(final String allocation_id, double freq) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); + if (freq<0) throw new FRONTEND.BadParameterException("Center frequency cannot be less than 0"); + // set hardware to new value. Raise an exception if it's not possible + this.frontend_tuner_status.getValue().get(idx).center_frequency.setValue(freq); + } + + public double getTunerCenterFrequency(final String allocation_id) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + return frontend_tuner_status.getValue().get(idx).center_frequency.getValue(); + } + + public void setTunerBandwidth(final String allocation_id, double bw) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); + if (bw<0) throw new FRONTEND.BadParameterException("Bandwidth cannot be less than 0"); + // set hardware to new value. 
Raise an exception if it's not possible + this.frontend_tuner_status.getValue().get(idx).bandwidth.setValue(bw); + } + + public double getTunerBandwidth(final String allocation_id) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + return frontend_tuner_status.getValue().get(idx).bandwidth.getValue(); + } + + public void setTunerAgcEnable(final String allocation_id, boolean enable) throws FRONTEND.NotSupportedException + { + throw new FRONTEND.NotSupportedException("setTunerAgcEnable not supported"); + } + + public boolean getTunerAgcEnable(final String allocation_id) throws FRONTEND.NotSupportedException + { + throw new FRONTEND.NotSupportedException("getTunerAgcEnable not supported"); + } + + public void setTunerGain(final String allocation_id, float gain) throws FRONTEND.NotSupportedException + { + throw new FRONTEND.NotSupportedException("setTunerGain not supported"); + } + + public float getTunerGain(final String allocation_id) throws FRONTEND.NotSupportedException + { + throw new FRONTEND.NotSupportedException("getTunerGain not supported"); + } + + public void setTunerReferenceSource(final String allocation_id, int source) throws FRONTEND.NotSupportedException + { + throw new FRONTEND.NotSupportedException("setTunerReferenceSource not supported"); + } + + public int getTunerReferenceSource(final String allocation_id) throws FRONTEND.NotSupportedException + { + throw new FRONTEND.NotSupportedException("getTunerReferenceSource not supported"); + } + + public void setTunerEnable(final String allocation_id, boolean enable) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); + // set 
hardware to new value. Raise an exception if it's not possible + this.frontend_tuner_status.getValue().get(idx).enabled.setValue(enable); + } + + public boolean getTunerEnable(final String allocation_id) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + return frontend_tuner_status.getValue().get(idx).enabled.getValue(); + } + + public void setTunerOutputSampleRate(final String allocation_id, double sr) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); + if (sr<0) throw new FRONTEND.BadParameterException("Sample rate cannot be less than 0"); + // set hardware to new value. Raise an exception if it's not possible + this.frontend_tuner_status.getValue().get(idx).sample_rate.setValue(sr); + } + + public double getTunerOutputSampleRate(final String allocation_id) throws FRONTEND.FrontendException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + return frontend_tuner_status.getValue().get(idx).sample_rate.getValue(); + } + + public FRONTEND.ScanningTunerPackage.ScanStatus getScanStatus(String allocation_id) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + FRONTEND.ScanningTunerPackage.ScanStatus status = new FRONTEND.ScanningTunerPackage.ScanStatus(); + status.start_time = new BULKIO.PrecisionUTCTime(); + status.center_tune_frequencies = new double[0]; + status.strategy = new FRONTEND.ScanningTunerPackage.ScanStrategy(); + 
status.strategy.scan_mode = FRONTEND.ScanningTunerPackage.ScanMode.MANUAL_SCAN; + status.strategy.scan_definition = new FRONTEND.ScanningTunerPackage.ScanModeDefinition(); + status.strategy.scan_definition.center_frequency(100.0); + status.strategy.control_mode = FRONTEND.ScanningTunerPackage.OutputControlMode.TIME_BASED; + status.strategy.control_value = 123.0; + return status; + } + + public void setScanStartTime(String allocation_id, BULKIO.PrecisionUTCTime start_time) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if(!allocation_id.equals(getControlAllocationId(idx))) { + throw new FRONTEND.FrontendException("ID "+allocation_id+" does not have authorization to modify the tuner"); + } + } + + public void setScanStrategy(String allocation_id, FRONTEND.ScanningTunerPackage.ScanStrategy scan_strategy) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if(!allocation_id.equals(getControlAllocationId(idx))) { + throw new FRONTEND.FrontendException("ID "+allocation_id+" does not have authorization to modify the tuner"); + } + if (scan_strategy.scan_mode.equals(FRONTEND.ScanningTunerPackage.ScanMode.MANUAL_SCAN)) { + this.strategy_request.setValue("manual"); + } + if (scan_strategy.scan_mode.equals(FRONTEND.ScanningTunerPackage.ScanMode.DISCRETE_SCAN)) { + this.strategy_request.setValue("discrete"); + } + if (scan_strategy.scan_mode.equals(FRONTEND.ScanningTunerPackage.ScanMode.SPAN_SCAN)) { + this.strategy_request.setValue("span"); + } + } + + + /************************************************************* + Functions servicing the RFInfo port(s) + - port_name is the port over which the call was received + *************************************************************/ + public 
String get_rf_flow_id(final String port_name) + { + return new String("none"); + } + + public void set_rf_flow_id(final String port_name, final String id) + { + } + + public FRONTEND.RFInfoPkt get_rfinfo_pkt(final String port_name) + { + FRONTEND.RFInfoPkt pkt = null; + return pkt; + } + + public void set_rfinfo_pkt(final String port_name, final FRONTEND.RFInfoPkt pkt) + { + } +} diff --git a/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.prf.xml b/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.prf.xml new file mode 100644 index 000000000..200905512 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.prf.xml @@ -0,0 +1,132 @@ + + + + + This specifies the device kind + FRONTEND::TUNER + + + + + This specifies the specific device + + + + + Status of each tuner, including entries for both allocated and un-allocated tuners. Each entry represents a single tuner. + + + Comma separated list of current Allocation IDs. + + + Current bandwidth in Hz + Hz + + + Current center frequency in Hz. + Hz + + + Indicates if tuner is enabled, in reference to the output state of the tuner. + + + Unique ID that specifies a group of Device. + + + Specifies a certain RF flow to allocate against. + + + Current sample rate in samples per second. + sps + + + Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + + + Indicates if the tuner is scanning through a plan. + + + Indicates if tuner can support a scan plan. + + + + + + Frontend Interfaces v2 listener allocation structure + + + + + + Frontend Interfaces scanner allocation structure + + + + + + + + + + + + + + + + + + + Frontend Interfaces v2 main allocation structure + + Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + + + The allocation_id set by the caller. 
Used by the caller to reference the allocation uniquely + + + Requested center frequency + Hz + + + Requested bandwidth (+/- the tolerance) + Hz + + + Allowable Percent above requested bandwidth (ie - 100 would be up to twice) + percent + + + Requested sample rate (+/- the tolerance). This can be ignored for such devices as analog tuners + Hz + + + Allowable Percent above requested sample rate (ie - 100 would be up to twice) + percent + + + True: Has control over the device to make changes +False: Does not need control and can just attach to any currently tasked device that satisfies the parameters (essentually a listener) + + + Unique identifier that specifies the group a device must be in. Must match group_id on the device + + + Optional. Specifies the RF flow of a specific input source to allocate against. If left empty, it will match all FrontEnd devices. + + + + + + + + + + + + + initial + + + + diff --git a/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.scd.xml b/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.scd.xml new file mode 100644 index 000000000..f09cc4007 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.scd.xml @@ -0,0 +1,78 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.spd.xml b/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.spd.xml new file mode 100644 index 000000000..e03bce963 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/my_scanner.spd.xml @@ -0,0 +1,50 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/my_scanner + + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/my_scanner.py + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + diff --git a/codegenTesting/sdr/dev/devices/my_scanner/python/my_scanner.py b/codegenTesting/sdr/dev/devices/my_scanner/python/my_scanner.py new file mode 100755 index 000000000..a2f139177 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/python/my_scanner.py @@ -0,0 +1,419 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: my_scanner.spd.xml +from ossie.device import start_device +import logging + +from my_scanner_base import * + +class my_scanner_i(my_scanner_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your device registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + For a tuner device, the structure frontend_tuner_status needs to match the number + of tuners that this device controls and what kind of device it is. + The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER + + For example, if this device has 5 physical + tuners, 3 RX_DIGITIZER and 2 CHANNELIZER, then the code in the construct function + should look like this: + + self.addChannels(3, "RX_DIGITIZER"); + self.addChannels(2, "CHANNELIZER"); + + The incoming request for tuning contains a string describing the requested tuner + type. The string for the request must match the string in the tuner status. + """ + # TODO add customization here. + self.addChannels(1, "RX_DIGITIZER"); + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. 
This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the device. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + To create a StreamSRI object based on tuner status structure index 'idx' and collector center frequency of 100: + sri = frontend.sri.create("my_stream_id", self.frontend_tuner_status[idx], self._id, collector_frequency=100) + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the device developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", my_scanner_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = my_scanner_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Application: + app = self.getApplication().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + Allocation: + + Allocation callbacks are available to customize a Device's response to an allocation request. 
Callback allocation/deallocation functions are registered using the setAllocationImpl function,
            usually in the initialize() function
            For example, allocation property "my_alloc" can be registered with allocation function
            my_alloc_fn and deallocation function my_dealloc_fn as follows:

            self.setAllocationImpl("my_alloc", self.my_alloc_fn, self.my_dealloc_fn)

            def my_alloc_fn(self, value):
                # perform logic
                return True # successful allocation

            def my_dealloc_fn(self, value):
                # perform logic
                pass

            Example:

            # This example assumes that the device has two ports:
            #   - A provides (input) port of type bulkio.InShortPort called dataShort_in
            #   - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out
            # The mapping between the port and the class is found in the device
            # base class.
            # This example also makes use of the following Properties:
            #   - A float value called amplitude
            #   - A boolean called increaseAmplitude

            packet = self.port_dataShort_in.getPacket()

            if packet.dataBuffer is None:
                return NOOP

            outData = range(len(packet.dataBuffer))
            for i in range(len(packet.dataBuffer)):
                if self.increaseAmplitude:
                    outData[i] = float(packet.dataBuffer[i]) * self.amplitude
                else:
                    outData[i] = float(packet.dataBuffer[i])

            # NOTE: You must make at least one valid pushSRI call
            if packet.sriChanged:
                self.port_dataFloat_out.pushSRI(packet.SRI);

            self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID)
            return NORMAL

        """

        # TODO fill in your code here
        self._log.debug("process() example log message")
        return NOOP

    '''
    *************************************************************
    Functions supporting tuning allocation
    *************************************************************'''
    def deviceEnable(self, fts, tuner_id):
        '''
        ************************************************************
        modify fts, which corresponds to self.frontend_tuner_status[tuner_id]
        Make
sure to set the 'enabled' member of fts to indicate that tuner as enabled + ************************************************************''' + fts.enabled = True + return + + def deviceDisable(self,fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + Make sure to reset the 'enabled' member of fts to indicate that tuner as disabled + ************************************************************''' + fts.enabled = False + return + + def deviceSetTuningScan(self,request, scan_request, fts, tuner_id): + ''' + ************************************************************ + + This function is called when the allocation request contains a scanner allocation + + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets the tolerance requirement. For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth = request.bandwidth + fts.center_frequency = request.center_frequency + fts.sample_rate = request.sample_rate + + return True if the tuning succeeded, and False if it failed + ************************************************************''' + if ((request.bandwidth == 1000) and (scan_request.min_freq==10000)): + return True + return False + + def deviceSetTuning(self,request, fts, tuner_id): + ''' + ************************************************************ + + This function is called when the allocation request does not contain a scanner allocation + + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets the tolerance requirement. 
For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth = request.bandwidth + fts.center_frequency = request.center_frequency + fts.sample_rate = request.sample_rate + + return True if the tuning succeeded, and False if it failed + ************************************************************''' + return False + + def deviceDeleteTuning(self, fts, tuner_id): + ''' + ************************************************************ + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + return True if the tune deletion succeeded, and False if it failed + ************************************************************''' + return True + + ''' + ************************************************************* + Functions servicing the tuner control port + *************************************************************''' + def getTunerType(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].tuner_type + + def getTunerDeviceControl(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if self.getControlAllocationId(idx) == allocation_id: + return True + return False + + def getTunerGroupId(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].group_id + + def getTunerRfFlowId(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].rf_flow_id + + + def setTunerCenterFrequency(self,allocation_id, freq): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != 
self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if freq<0: raise FRONTEND.BadParameterException("Center frequency cannot be less than 0") + # set hardware to new value. Raise an exception if it's not possible + self.frontend_tuner_status[idx].center_frequency = freq + + def getTunerCenterFrequency(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].center_frequency + + def setTunerBandwidth(self,allocation_id, bw): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if bw<0: raise FRONTEND.BadParameterException("Bandwidth cannot be less than 0") + # set hardware to new value. 
Raise an exception if it's not possible + self.frontend_tuner_status[idx].bandwidth = bw + + def getTunerBandwidth(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].bandwidth + + def setTunerAgcEnable(self,allocation_id, enable): + raise FRONTEND.NotSupportedException("setTunerAgcEnable not supported") + + def getTunerAgcEnable(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerAgcEnable not supported") + + def setTunerGain(self,allocation_id, gain): + raise FRONTEND.NotSupportedException("setTunerGain not supported") + + def getTunerGain(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerGain not supported") + + def setTunerReferenceSource(self,allocation_id, source): + raise FRONTEND.NotSupportedException("setTunerReferenceSource not supported") + + def getTunerReferenceSource(self,allocation_id): + raise FRONTEND.NotSupportedException("getTunerReferenceSource not supported") + + def setTunerEnable(self,allocation_id, enable): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + # set hardware to new value. 
Raise an exception if it's not possible + self.frontend_tuner_status[idx].enabled = enable + + def getTunerEnable(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].enabled + + + def setTunerOutputSampleRate(self,allocation_id, sr): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + if sr<0: raise FRONTEND.BadParameterException("Sample rate cannot be less than 0") + # set hardware to new value. Raise an exception if it's not possible + self.frontend_tuner_status[idx].sample_rate = sr + + def getTunerOutputSampleRate(self,allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + return self.frontend_tuner_status[idx].sample_rate + + + def getScanStatus(self, allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + # set hardware to new value. 
Raise an exception if it's not possible + _scan_strategy=FRONTEND.ScanningTuner.ScanStrategy( + FRONTEND.ScanningTuner.MANUAL_SCAN, + FRONTEND.ScanningTuner.ScanModeDefinition(center_frequency=1.0), + FRONTEND.ScanningTuner.TIME_BASED, + 123.0) + _scan_status=FRONTEND.ScanningTuner.ScanStatus(_scan_strategy, + start_time=bulkio.timestamp.now(), + center_tune_frequencies=[], + started=False) + return _scan_status + + def setScanStartTime(self, allocation_id, start_time): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + + def setScanStrategy(self, allocation_id, scan_strategy): + if scan_strategy.scan_mode == FRONTEND.ScanningTuner.MANUAL_SCAN and hasattr(scan_strategy.scan_definition, 'center_frequency'): + self.strategy_request = 'manual' + if scan_strategy.scan_mode == FRONTEND.ScanningTuner.DISCRETE_SCAN and hasattr(scan_strategy.scan_definition, 'discrete_freq_list'): + self.strategy_request = 'discrete' + if scan_strategy.scan_mode == FRONTEND.ScanningTuner.SPAN_SCAN and hasattr(scan_strategy.scan_definition, 'freq_scan_list'): + self.strategy_request = 'span' + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + + ''' + ************************************************************* + Functions servicing the RFInfo port(s) + - port_name is the port over which the call was received + *************************************************************''' + def get_rf_flow_id(self,port_name): + return "" + + def set_rf_flow_id(self,port_name, _id): + pass + + def get_rfinfo_pkt(self,port_name): + 
_antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + _rfinfopkt=FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + return _rfinfopkt + + def set_rfinfo_pkt(self,port_name, pkt): + pass + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + start_device(my_scanner_i) + diff --git a/codegenTesting/sdr/dev/devices/my_scanner/tests/test_my_scanner.py b/codegenTesting/sdr/dev/devices/my_scanner/tests/test_my_scanner.py new file mode 100644 index 000000000..db7f4f9d1 --- /dev/null +++ b/codegenTesting/sdr/dev/devices/my_scanner/tests/test_my_scanner.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python + +import ossie.utils.testing +from ossie.utils import sb +import frontend +from redhawk.frontendInterfaces import FRONTEND + +class DeviceTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the device. + SPD_FILE = '../my_scanner.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a device using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. 
Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the device, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl) + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def testBasicBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + self.comp.start() + bad_tuner=frontend.createTunerAllocation(tuner_type='RX_DIGITIZER',allocation_id='1', bandwidth=0.0,returnDict=False) + good_tuner=frontend.createTunerAllocation(tuner_type='RX_DIGITIZER',allocation_id='1', bandwidth=1000.0,returnDict=False) + bad_scanner=frontend.createScannerAllocation(returnDict=False) + good_scanner=frontend.createScannerAllocation(min_freq=10000.0,returnDict=False) + self.assertFalse(self.comp.allocateCapacity([bad_tuner, bad_scanner])) + self.assertFalse(self.comp.allocateCapacity([good_tuner, bad_scanner])) + self.assertFalse(self.comp.allocateCapacity([bad_tuner, good_scanner])) + self.assertTrue(self.comp.allocateCapacity([good_tuner, good_scanner])) + self.comp.deallocateCapacity([good_tuner, good_scanner]) + self.assertTrue(self.comp.allocateCapacity([good_scanner, good_tuner])) + ref = None + for port in self.comp.ports: + if port.name == 'DigitalScanningTuner_in': + ref = port.ref + break + self.assertEquals(self.comp.strategy_request, 'initial') + scan_strategy=FRONTEND.ScanningTuner.ScanStrategy(FRONTEND.ScanningTuner.MANUAL_SCAN, FRONTEND.ScanningTuner.ScanModeDefinition(center_frequency=1.0), FRONTEND.ScanningTuner.TIME_BASED, 0.0) + ref.setScanStrategy('1', scan_strategy) + self.assertEquals(self.comp.strategy_request, 'manual') + scan_strategy=FRONTEND.ScanningTuner.ScanStrategy(FRONTEND.ScanningTuner.DISCRETE_SCAN, 
FRONTEND.ScanningTuner.ScanModeDefinition(discrete_freq_list=[]), FRONTEND.ScanningTuner.TIME_BASED, 0.0) + ref.setScanStrategy('1', scan_strategy) + self.assertEquals(self.comp.strategy_request, 'discrete') + scan_strategy=FRONTEND.ScanningTuner.ScanStrategy(FRONTEND.ScanningTuner.SPAN_SCAN, FRONTEND.ScanningTuner.ScanModeDefinition(freq_scan_list=[]), FRONTEND.ScanningTuner.TIME_BASED, 0.0) + ref.setScanStrategy('1', scan_strategy) + self.assertEquals(self.comp.strategy_request, 'span') + status = ref.getScanStatus('1') + self.assertEquals(status.strategy.control_value, 123) + self.comp.stop() + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dom/components/BasicShared/.BasicShared.wavedev b/codegenTesting/sdr/dom/components/BasicShared/.BasicShared.wavedev new file mode 100644 index 000000000..f7936e04b --- /dev/null +++ b/codegenTesting/sdr/dom/components/BasicShared/.BasicShared.wavedev @@ -0,0 +1,6 @@ + + + + + + diff --git a/codegenTesting/sdr/dom/components/BasicShared/BasicShared.prf.xml b/codegenTesting/sdr/dom/components/BasicShared/BasicShared.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/codegenTesting/sdr/dom/components/BasicShared/BasicShared.prf.xml @@ -0,0 +1,3 @@ + + + diff --git a/codegenTesting/sdr/dom/components/BasicShared/BasicShared.scd.xml b/codegenTesting/sdr/dom/components/BasicShared/BasicShared.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/codegenTesting/sdr/dom/components/BasicShared/BasicShared.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/BasicShared/BasicShared.spd.xml b/codegenTesting/sdr/dom/components/BasicShared/BasicShared.spd.xml new file mode 100644 index 000000000..9e172ff96 --- /dev/null +++ b/codegenTesting/sdr/dom/components/BasicShared/BasicShared.spd.xml 
@@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/BasicShared.so + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/EnumTest/.EnumTest.wavedev b/codegenTesting/sdr/dom/components/EnumTest/.EnumTest.wavedev new file mode 100644 index 000000000..7b18962ef --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/.EnumTest.wavedev @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/EnumTest/EnumTest.prf.xml b/codegenTesting/sdr/dom/components/EnumTest/EnumTest.prf.xml new file mode 100644 index 000000000..67120a68c --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/EnumTest.prf.xml @@ -0,0 +1,78 @@ + + + + + + 0.0 + + + + + + + + + start + + + + + + + + + + + + + + + + + + + + + + + + + + + 0 + + + + + + + + body + + + + + + + + + + \ No newline at end of file diff --git a/codegenTesting/sdr/dom/components/EnumTest/EnumTest.scd.xml b/codegenTesting/sdr/dom/components/EnumTest/EnumTest.scd.xml new file mode 100644 index 000000000..712f34aea --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/EnumTest.scd.xml @@ -0,0 +1,64 @@ + + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/codegenTesting/sdr/dom/components/EnumTest/EnumTest.spd.xml b/codegenTesting/sdr/dom/components/EnumTest/EnumTest.spd.xml new file mode 100644 index 000000000..78ecd77c1 --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/EnumTest.spd.xml @@ -0,0 +1,69 @@ + + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/EnumTest.so + + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + java/startJava.sh + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/EnumTest.py + + + + + + + \ No newline at end of file diff --git a/codegenTesting/sdr/dom/components/EnumTest/cpp/EnumTest.cpp b/codegenTesting/sdr/dom/components/EnumTest/cpp/EnumTest.cpp new file mode 100644 index 000000000..0f4001d4d --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/cpp/EnumTest.cpp @@ -0,0 +1,330 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK codegenTesting. + * + * REDHAWK codegenTesting is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK codegenTesting is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include + +#include "EnumTest.h" + +PREPARE_LOGGING(EnumTest_i) + +EnumTest_i::EnumTest_i(const char *uuid, const char *label) : + EnumTest_base(uuid, label) +{ + // Avoid placing constructor code here. 
Instead, use the "constructor" function. + +} + +EnumTest_i::~EnumTest_i() +{ +} + +void EnumTest_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. 
+ + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. + + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output 
stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void EnumTest_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &EnumTest_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. 
+ + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (EnumTest_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &EnumTest_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). 
The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to EnumTest.cpp + EnumTest_i::EnumTest_i(const char *uuid, const char *label) : + EnumTest_base(uuid, label) + { + addPropertyListener(scaleValue, this, &EnumTest_i::scaleChanged); + addPropertyListener(status, this, &EnumTest_i::statusChanged); + } + + void EnumTest_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(EnumTest_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void EnumTest_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(EnumTest_i, "status changed"); + } + + //Add to EnumTest.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int EnumTest_i::serviceFunction() +{ + LOG_DEBUG(EnumTest_i, "serviceFunction() example log message"); + + return NOOP; +} + +void EnumTest_i::runTest(CORBA::ULong testid, CF::Properties& testValues) throw (CF::UnknownProperties, CF::TestableObject::UnknownTest, CORBA::SystemException) +{ + redhawk::PropertyMap& testProps = redhawk::PropertyMap::cast(testValues); + switch (testid) { + case 0: + runEnumTest(testProps); + break; + default: + throw CF::TestableObject::UnknownTest(); + } +} + +void EnumTest_i::runEnumTest(redhawk::PropertyMap& testValues) +{ + redhawk::PropertyMap unknown; + + BOOST_FOREACH(redhawk::PropertyType& prop, testValues) { + const std::string prop_id = prop.getId(); + redhawk::PropertyMap value; + if (prop_id == "floatenum") { + value["DEFAULT"] = enums::floatenum::DEFAULT; + value["OTHER"] = enums::floatenum::OTHER; + } 
else if (prop_id == "stringenum") { + value["START"] = enums::stringenum::START; + value["STOPPED"] = enums::stringenum::STOPPED; + } else if (prop_id == "structprop") { + redhawk::PropertyMap number_enums; + number_enums["ZERO"] = enums::structprop::number::ZERO; + number_enums["ONE"] = enums::structprop::number::ONE; + number_enums["TWO"] = enums::structprop::number::TWO; + value["structprop::number"] = number_enums; + + redhawk::PropertyMap alpha_enums; + alpha_enums["ABC"] = enums::structprop::alpha::ABC; + alpha_enums["DEF"] = enums::structprop::alpha::DEF; + value["structprop::alpha"] = alpha_enums; + } else if (prop_id == "structseq") { + redhawk::PropertyMap number_enums; + number_enums["POSITIVE"] = enums::structseq_struct::number::POSITIVE; + number_enums["ZERO"] = enums::structseq_struct::number::ZERO; + number_enums["NEGATIVE"] = enums::structseq_struct::number::NEGATIVE; + value["structseq::number"] = number_enums; + + redhawk::PropertyMap text_enums; + text_enums["HEADER"] = enums::structseq_struct::text::HEADER; + text_enums["BODY"] = enums::structseq_struct::text::BODY; + text_enums["FOOTER"] = enums::structseq_struct::text::FOOTER; + value["structseq::text"] = text_enums; + } else { + LOG_ERROR(EnumTest_i, "Unknown property " << prop_id); + unknown.push_back(prop); + continue; + } + prop.setValue(value); + } + + if (!unknown.empty()) { + throw CF::UnknownProperties(unknown); + } +} diff --git a/codegenTesting/sdr/dom/components/EnumTest/cpp/EnumTest.h b/codegenTesting/sdr/dom/components/EnumTest/cpp/EnumTest.h new file mode 100644 index 000000000..bd4944968 --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/cpp/EnumTest.h @@ -0,0 +1,44 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK codegenTesting. 
+ * + * REDHAWK codegenTesting is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK codegenTesting is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef ENUMTEST_I_IMPL_H +#define ENUMTEST_I_IMPL_H + +#include "EnumTest_base.h" + +#include + +class EnumTest_i : public EnumTest_base +{ + ENABLE_LOGGING + public: + EnumTest_i(const char *uuid, const char *label); + ~EnumTest_i(); + + void constructor(); + + int serviceFunction(); + + void runTest(CORBA::ULong testid, CF::Properties& testValues) throw (CF::UnknownProperties, CF::TestableObject::UnknownTest, CORBA::SystemException); + + private: + void runEnumTest(redhawk::PropertyMap& testValues); +}; + +#endif // ENUMTEST_I_IMPL_H diff --git a/codegenTesting/sdr/dom/components/EnumTest/java/src/enumtest/java/EnumTest.java b/codegenTesting/sdr/dom/components/EnumTest/java/src/enumtest/java/EnumTest.java new file mode 100644 index 000000000..28996d320 --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/java/src/enumtest/java/EnumTest.java @@ -0,0 +1,367 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK codegenTesting. 
+ * + * REDHAWK codegenTesting is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK codegenTesting is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package enumtest.java; + +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +/** + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping + * + * Source: EnumTest.spd.xml + */ +public class EnumTest extends EnumTest_base { + /** + * This is the component constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. + * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app = this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. + */ + + public EnumTest() + { + super(); + } + + public void constructor() + { + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. 
Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. 
+ * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the component has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + logger.debug("serviceFunction() example log message"); + + return NOOP; + } + + @Override + public void runTest(int testid, CF.PropertiesHolder testValues) throws CF.UnknownProperties, CF.TestableObjectPackage.UnknownTest + { + switch (testid) { + case 0: + this.runEnumTest(testValues); + break; + default: + throw new CF.TestableObjectPackage.UnknownTest(); + } + } + + private CF.DataType[] toProperties(List props) + { + return props.toArray(new CF.DataType[props.size()]); + } + + private CF.DataType createDataType(String id, float value) + { + org.omg.CORBA.Any any = orb.create_any(); + any.insert_float(value); + return new CF.DataType(id, any); + } + + private CF.DataType createDataType(String id, int value) + { + org.omg.CORBA.Any any = orb.create_any(); + any.insert_long(value); + return new CF.DataType(id, any); + } + + private CF.DataType createDataType(String id, String value) + { + org.omg.CORBA.Any any = orb.create_any(); + any.insert_string(value); + return new CF.DataType(id, any); + } + + private CF.DataType createDataType(String id, CF.DataType[] value) + { + 
org.omg.CORBA.Any any = orb.create_any(); + CF.PropertiesHelper.insert(any, value); + return new CF.DataType(id, any); + } + + private void runEnumTest(CF.PropertiesHolder testValues) throws CF.UnknownProperties + { + List unknown = new ArrayList(); + + for (CF.DataType prop : testValues.value) { + List value = new ArrayList(); + if (prop.id.equals("floatenum")) { + value.add(createDataType("DEFAULT", enums.floatenum.DEFAULT)); + value.add(createDataType("OTHER", enums.floatenum.OTHER)); + } else if (prop.id.equals("stringenum")) { + value.add(createDataType("START", enums.stringenum.START)); + value.add(createDataType("STOPPED", enums.stringenum.STOPPED)); + } else if (prop.id.equals("structprop")) { + List number_enums = new ArrayList(); + number_enums.add(createDataType("ZERO", enums.structprop.number.ZERO)); + number_enums.add(createDataType("ONE", enums.structprop.number.ONE)); + number_enums.add(createDataType("TWO", enums.structprop.number.TWO)); + value.add(createDataType("structprop::number", toProperties(number_enums))); + + List alpha_enums = new ArrayList(); + alpha_enums.add(createDataType("ABC", enums.structprop.alpha.ABC)); + alpha_enums.add(createDataType("DEF", enums.structprop.alpha.DEF)); + value.add(createDataType("structprop::alpha", toProperties(alpha_enums))); + } else if (prop.id.equals("structseq")) { + List number_enums = new ArrayList(); + number_enums.add(createDataType("POSITIVE", enums.structseq_struct.number.POSITIVE)); + number_enums.add(createDataType("ZERO", enums.structseq_struct.number.ZERO)); + number_enums.add(createDataType("NEGATIVE", enums.structseq_struct.number.NEGATIVE)); + value.add(createDataType("structseq::number", toProperties(number_enums))); + + List text_enums = new ArrayList(); + text_enums.add(createDataType("HEADER", enums.structseq_struct.text.HEADER)); + text_enums.add(createDataType("BODY", enums.structseq_struct.text.BODY)); + text_enums.add(createDataType("FOOTER", enums.structseq_struct.text.FOOTER)); + 
value.add(createDataType("structseq::text", toProperties(text_enums))); + } else { + _logger.error("Unknown property " + prop.id); + unknown.add(prop); + continue; + } + prop.value = orb.create_any(); + CF.PropertiesHelper.insert(prop.value, toProperties(value)); + } + + if (unknown.size() > 0) { + throw new CF.UnknownProperties(toProperties(unknown)); + } + } + + /** + * Set additional options for ORB startup. For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/codegenTesting/sdr/dom/components/EnumTest/python/EnumTest.py b/codegenTesting/sdr/dom/components/EnumTest/python/EnumTest.py new file mode 100755 index 000000000..acec870bb --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/python/EnumTest.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK codegenTesting. +# +# REDHAWK codegenTesting is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK codegenTesting is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# +# +# +# AUTO-GENERATED +# +# Source: EnumTest.spd.xml +from ossie.resource import start_component +import logging + +from EnumTest_base import EnumTest_base, enums, NOOP + +from ossie import properties +from ossie.cf import CF + +class EnumTest_i(EnumTest_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. 
+ + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", EnumTest_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = EnumTest_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. 
If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + def runTest(self, testid, testValues): + if testid == 0: + return self.runEnumTest(testValues) + else: + raise CF.TestableObject.UnknownTest() + + def runEnumTest(self, testValues): + unknown = [] + + for prop in testValues: + value = {} + if prop.id == "floatenum": + value['DEFAULT'] = enums.floatenum.DEFAULT + value['OTHER'] = enums.floatenum.OTHER 
+ elif prop.id == "stringenum": + value['START'] = enums.stringenum.START + value['STOPPED'] = enums.stringenum.STOPPED + elif prop.id == "structprop": + number_enums = {} + number_enums['ZERO'] = enums.structprop.number.ZERO + number_enums['ONE'] = enums.structprop.number.ONE + number_enums['TWO'] = enums.structprop.number.TWO + value['structprop::number'] = number_enums + + alpha_enums = {} + alpha_enums['ABC'] = enums.structprop.alpha.ABC + alpha_enums['DEF'] = enums.structprop.alpha.DEF + value['structprop::alpha'] = alpha_enums + elif prop.id == "structseq": + number_enums = {} + number_enums['POSITIVE'] = enums.structseq_struct.number.POSITIVE + number_enums['ZERO'] = enums.structseq_struct.number.ZERO + number_enums['NEGATIVE'] = enums.structseq_struct.number.NEGATIVE + value['structseq::number'] = number_enums + + text_enums = {} + text_enums['HEADER'] = enums.structseq_struct.text.HEADER + text_enums['BODY'] = enums.structseq_struct.text.BODY + text_enums['FOOTER'] = enums.structseq_struct.text.FOOTER + value['structseq::text'] = text_enums + else: + unknown.append(prop) + prop.value = properties.props_to_any(properties.props_from_dict(value)) + + if unknown: + raise CF.UnknownProperties(unknown) + + return testValues + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(EnumTest_i) diff --git a/codegenTesting/sdr/dom/components/EnumTest/tests/test_EnumTest.py b/codegenTesting/sdr/dom/components/EnumTest/tests/test_EnumTest.py new file mode 100644 index 000000000..1b4a090de --- /dev/null +++ b/codegenTesting/sdr/dom/components/EnumTest/tests/test_EnumTest.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK codegenTesting. 
+# +# REDHAWK codegenTesting is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK codegenTesting is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import ossie.utils.testing +from ossie.utils import sb +from ossie import properties + +class ComponentTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the component. + SPD_FILE = '../EnumTest.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a component using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. 
Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the component, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl) + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def _propsToDict(self, props): + return dict((dt['id'], dt['value']) for dt in props) + + def _runTests(self, props): + props = properties.props_from_dict(dict((p, None) for p in props)) + result = self.comp.runTest(0, props) + return properties.props_to_dict(result) + + def testSimpleEnums(self): + result = self._runTests(['floatenum', 'stringenum']) + self.assertEqual(result['floatenum'], self.comp.floatenum._enums) + self.assertEqual(result['stringenum'], self.comp.stringenum._enums) + + def testStructEnums(self): + result = self._runTests(['structprop']) + enums = result['structprop'] + + number_enums = self._propsToDict(enums['structprop::number']) + self.assertEqual(number_enums, self.comp.structprop.number._enums) + + alpha_enums = self._propsToDict(enums['structprop::alpha']) + self.assertEqual(alpha_enums, self.comp.structprop.alpha._enums) + + def testStructSequenceEnums(self): + result = self._runTests(['structseq']) + enums = result['structseq'] + + number_enums = self._propsToDict(enums['structseq::number']) + self.assertEqual(number_enums, self.comp.structseq.structDef.number._enums) + + text_enums = self._propsToDict(enums['structseq::text']) + self.assertEqual(text_enums, self.comp.structseq.structDef.text._enums) + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dom/components/bulkio_ports/tests/test_bulkio_ports.py b/codegenTesting/sdr/dom/components/bulkio_ports/tests/test_bulkio_ports.py index 2773afee9..81903128b 100755 --- 
a/codegenTesting/sdr/dom/components/bulkio_ports/tests/test_bulkio_ports.py +++ b/codegenTesting/sdr/dom/components/bulkio_ports/tests/test_bulkio_ports.py @@ -274,60 +274,6 @@ def testDataPush(self): self.comp_obj.stop() - #Additional test to push data into each of the input ports, and get data - #from each of the output ports. The data is then examined for accuracy - def testEmptyDataPush(self): - self.comp_obj.start() - - ##################################### - # Send empty data on each input port - self.dataCharInput.pushPacket('', bulkio_helpers.createCPUTimestamp(), False, "s1") - self.dataDoubleInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, "s2") - self.dataFloatInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, "s3") - self.dataLongInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, "s4") - self.dataOctetInput.pushPacket('', bulkio_helpers.createCPUTimestamp(), False, "s5") - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, "s6") - self.dataUlongInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, "s7") - self.dataUshortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, "s8") - self.dataXMLInput.pushPacket('', False, "s9") - self.dataFileInput.pushPacket('', bulkio_helpers.createCPUTimestamp(), False, 's10') - self.dataLongLongInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, "s11") - self.dataUlongLongInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, "s12") - - ########################################## - # Receive data from each output port - # *** THESE GET PACKETS ARE BLOCKING! 
*** - _charData, T, EOS, streamID, sri, sriChanged, flushed = self.helperCharInput.getPacket(2) - _doubleData, T, EOS, streamID, sri, sriChanged, flushed = self.helperDoubleInput.getPacket(2) - _floatData, T, EOS, streamID, sri, sriChanged, flushed = self.helperFloatInput.getPacket(2) - _longData, T, EOS, streamID, sri, sriChanged, flushed = self.helperLongInput.getPacket(2) - _octetData, T, EOS, streamID, sri, sriChanged, flushed = self.helperOctetInput.getPacket(2) - _shortData, T, EOS, streamID, sri, sriChanged, flushed = self.helperShortInput.getPacket(2) - _uLongData, T, EOS, streamID, sri, sriChanged, flushed = self.helperUlongInput.getPacket(2) - _uShortData, T, EOS, streamID, sri, sriChanged, flushed = self.helperUshortInput.getPacket(2) - _xmlData, T, EOS, streamID, sri, sriChanged, flushed = self.helperXMLInput.getPacket(2) - _fileData, T, EOS, streamID, sri, sriChanged, flushed = self.helperFileInput.getPacket(2) - _longLongData, T, EOS, streamID, sri, sriChanged, flushed = self.helperLongLongInput.getPacket(2) - _uLongLongData, T, EOS, streamID, sri, sriChanged, flushed = self.helperUlongLongInput.getPacket(2) - - sentData = ['', [], [], [], '', [], [], [], '', '', [], []] - types = ['char', 'double', 'float', 'long', 'octet', 'short', 'uLong', 'uLongLong', 'longLong', 'uShort', 'xml', 'file'] - - sentData = {'char':'', 'double':[], 'float':[], 'long':[], 'octet':'', 'short':[], 'uLong':[], 'uShort':[], 'xml':'', 'file':'', - 'longLong':[], 'uLongLong':[]} - recData = {'char':_charData, 'double':_doubleData, 'float':_floatData, 'long':_longData, 'octet':_octetData, 'short':_shortData,\ - 'uLong':_uLongData, 'uShort':_uShortData, 'xml':_xmlData, 'file':_fileData, 'longLong':_longLongData, 'uLongLong':_uLongLongData} - - for x in types: - self.assertNotEquals(None, recData[x], msg="No empty set was recieved for dataType (" + str(x) + ")") - - ############################################# - # Check that data received matches data sent - for x in types: - 
self.assertEqual(recData[x], sentData[x]) - - self.comp_obj.stop() - def testMaxQueueDepth(self): shortData = bulkio_helpers.genRandomDataSet(16, True, 100) oldFlushTime = None diff --git a/codegenTesting/sdr/dom/components/custom_port_check/.custom_port_check.wavedev b/codegenTesting/sdr/dom/components/custom_port_check/.custom_port_check.wavedev new file mode 100644 index 000000000..2d517f494 --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/.custom_port_check.wavedev @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/custom_port_check/cpp/custom_port_check.cpp b/codegenTesting/sdr/dom/components/custom_port_check/cpp/custom_port_check.cpp new file mode 100644 index 000000000..e2c0c755f --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/cpp/custom_port_check.cpp @@ -0,0 +1,288 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "custom_port_check.h" + +PREPARE_LOGGING(custom_port_check_i) + +custom_port_check_i::custom_port_check_i(const char *uuid, const char *label) : + custom_port_check_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + +} + +custom_port_check_i::~custom_port_check_i() +{ +} + +void custom_port_check_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. 
All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. 
It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. + + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with 
complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void custom_port_check_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &custom_port_check_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (custom_port_check_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &custom_port_check_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to custom_port_check.cpp + custom_port_check_i::custom_port_check_i(const char *uuid, const char *label) : + custom_port_check_base(uuid, label) + { + addPropertyListener(scaleValue, this, &custom_port_check_i::scaleChanged); + addPropertyListener(status, this, &custom_port_check_i::statusChanged); + } + + void custom_port_check_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(custom_port_check_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void custom_port_check_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(custom_port_check_i, "status changed"); + } + + //Add to custom_port_check.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int custom_port_check_i::serviceFunction() +{ + LOG_DEBUG(custom_port_check_i, "serviceFunction() example log message"); + + CF::Properties_var empty_prop = new CF::Properties(); + this->inout_state = "ok"; + try { + this->testableobject_out->runTest(1, empty_prop); + } catch (redhawk::PortCallError &e) { + this->inout_state = e.what(); + } + + CF::StringSequence empty_string_seq; + this->retval_state = "ok"; + try { + this->propertyemitter_out->registerPropertyListener(this->_this(), empty_string_seq, 0.1); + } catch (redhawk::PortCallError &e) { + this->retval_state = e.what(); + } + + CF::OctetSequence out_data; + this->in_state = "ok"; + try { + this->file_out->write(out_data); + } catch (redhawk::PortCallError &e) { + this->in_state = e.what(); + } + + CF::OctetSequence_var _data = new CF::OctetSequence(); + CF::OctetSequence_out data(_data); + this->out_state = "ok"; + try { + 
this->file_out->read(data, 10); + } catch (redhawk::PortCallError &e) { + this->out_state = e.what(); + } + this->bad_connection = "ok"; + try { + this->file_out->read(data, 10, "invalid_connectionid"); + } catch (redhawk::PortCallError &e) { + this->bad_connection = e.what(); + } + + return FINISH; +} + diff --git a/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.prf.xml b/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.prf.xml new file mode 100644 index 000000000..65210a753 --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.prf.xml @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.scd.xml b/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.scd.xml new file mode 100644 index 000000000..836829a27 --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.scd.xml @@ -0,0 +1,50 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.spd.xml b/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.spd.xml new file mode 100644 index 000000000..48ec37bfc --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/custom_port_check.spd.xml @@ -0,0 +1,50 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/custom_port_check.so + + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/custom_port_check.py + + + + + + + diff --git a/codegenTesting/sdr/dom/components/custom_port_check/java/src/custom_port_check/java/custom_port_check.java b/codegenTesting/sdr/dom/components/custom_port_check/java/src/custom_port_check/java/custom_port_check.java new file mode 100644 index 000000000..db3981105 --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/java/src/custom_port_check/java/custom_port_check.java @@ -0,0 +1,310 @@ +package custom_port_check.java; + +import java.util.Properties; +import org.ossie.redhawk.PortCallError; + +import CF.InvalidObjectReference; +import CF.UnknownProperties; +import CF.FilePackage.IOException; +import CF.TestableObjectPackage.UnknownTest; +import CF.DataType; +import java.util.List; +import java.util.ArrayList; + +/** + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping + * + * Source: custom_port_check.spd.xml + */ +public class custom_port_check extends custom_port_check_base { + /** + * This is the component constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. + * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app = this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. 
+ */ + + public custom_port_check() + { + super(); + } + + public void constructor() + { + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. 
You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. + * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the component has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + DataType[] empty_prop_value = new DataType[0]; + CF.PropertiesHolder empty_prop = new CF.PropertiesHolder(); + empty_prop.value = empty_prop_value; + this.inout_state.setValue("ok"); + try { + this.port_testableobject_out.runTest(1, empty_prop); + } catch (PortCallError e) { + this.inout_state.setValue(e.getMessage()); + } catch (UnknownProperties | UnknownTest e) { + this.inout_state.setValue("unexpected exception"); + } + + String[] empty_string_seq = new String[0]; + this.retval_state.setValue("ok"); + try { + this.port_propertyemitter_out.registerPropertyListener(this.portServants.get("testableobject_out")._this_object(), empty_string_seq, (float)0.1); + } catch (PortCallError e) { + this.retval_state.setValue(e.getMessage()); + } catch (UnknownProperties | InvalidObjectReference e) { + this.retval_state.setValue("unexpected exception"); + } + + byte[] out_data = new byte[0]; + this.in_state.setValue("ok"); + try { + this.port_file_out.write(out_data); + } catch (PortCallError e) { + this.in_state.setValue(e.getMessage()); + } catch (IOException e) { + 
this.in_state.setValue("unexpected exception"); + } + + CF.OctetSequenceHolder data = new CF.OctetSequenceHolder(); + this.out_state.setValue("ok"); + try { + this.port_file_out.read(data, 10); + } catch (PortCallError e) { + this.out_state.setValue(e.getMessage()); + } catch (IOException e) { + this.out_state.setValue("unexpected exception"); + } + + this.bad_connection.setValue("ok"); + try { + this.port_file_out.read(data, 10, "invalid_connectionid"); + } catch (PortCallError e) { + this.bad_connection.setValue(e.getMessage()); + } catch (IOException e) { + this.bad_connection.setValue("unexpected exception"); + } + + return FINISH; + } + + /** + * Set additional options for ORB startup. For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/codegenTesting/sdr/dom/components/custom_port_check/python/custom_port_check.py b/codegenTesting/sdr/dom/components/custom_port_check/python/custom_port_check.py new file mode 100755 index 000000000..dd6c28360 --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/python/custom_port_check.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: custom_port_check.spd.xml +from ossie.resource import start_component +import logging + +from custom_port_check_base import * + +class custom_port_check_i(custom_port_check_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. 
+ + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", custom_port_check_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = custom_port_check_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + empty_prop = [] + self.inout_state = "ok" + try: + self.port_testableobject_out.runTest(1, empty_prop) + except PortCallError, e: + self.inout_state = str(e) + + empty_string_seq = [] + self.retval_state = "ok" + try: + self.port_propertyemitter_out.registerPropertyListener(self._this(), empty_string_seq, 0.1) + except PortCallError, e: + self.retval_state = str(e) + + out_data = '' + self.in_state = "ok" + try: + self.port_file_out.write(out_data) + except PortCallError, e: + self.in_state = str(e) + + self.out_state = "ok"; + try: + self.port_file_out.read(10) + except PortCallError, e: + self.out_state = str(e) + self.bad_connection = "ok"; + try: + self.port_file_out.read(10, "invalid_connectionid"); + except PortCallError, e: + self.bad_connection = str(e) + 
+ return FINISH + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(custom_port_check_i) + diff --git a/codegenTesting/sdr/dom/components/custom_port_check/tests/.md5sums b/codegenTesting/sdr/dom/components/custom_port_check/tests/.md5sums new file mode 100644 index 000000000..d9e3a7baf --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/tests/.md5sums @@ -0,0 +1 @@ +9225ce4fdbc48ea428e2e497d36d2a4d test_custom_port_check.py diff --git a/codegenTesting/sdr/dom/components/custom_port_check/tests/test_custom_port_check.py b/codegenTesting/sdr/dom/components/custom_port_check/tests/test_custom_port_check.py new file mode 100644 index 000000000..f12b2f337 --- /dev/null +++ b/codegenTesting/sdr/dom/components/custom_port_check/tests/test_custom_port_check.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python + +import time +import ossie.utils.testing +from ossie.utils import sb +from ossie.cf import CF, CF__POA + +class FilePort(CF__POA.File): + def __init__(self): + pass + def read(self, length): + return '' + def write(self, data): + pass + +class PropertyEmitterPort(CF__POA.PropertyEmitter): + def __init__(self): + pass + def registerPropertyListener(self, obj, string_seq, interval): + return 'hello' + +class TestableObjectPort(CF__POA.TestableObject): + def __init__(self): + pass + def runTest(self, test_number, prop_seq): + return prop_seq + +class ComponentTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the component. 
+ SPD_FILE = '../custom_port_check.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a component using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the component, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl) + self._filePort = FilePort() + self._propertyEmitterPort = PropertyEmitterPort() + self._testableObjectPort = TestableObjectPort() + self._filePort_2 = FilePort() + self._propertyEmitterPort_2 = PropertyEmitterPort() + self._testableObjectPort_2 = TestableObjectPort() + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def testBasicBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + self.comp.start() + time.sleep(0.5) + self.comp.stop() + self.assertEquals(self.comp.inout_state, "No connections available.") + self.assertEquals(self.comp.retval_state, "No connections available.") + self.assertEquals(self.comp.out_state, "No connections available.") + self.assertEquals(self.comp.in_state, "ok") + self.assertEquals(self.comp.bad_connection, "No connections available.") + testableobject_port = 
self.comp.getPort('testableobject_out') + propertyemitter_port = self.comp.getPort('propertyemitter_out') + file_port = self.comp.getPort('file_out') + testableobject_port.connectPort(self._testableObjectPort._this(), 'abc') + propertyemitter_port.connectPort(self._propertyEmitterPort._this(), 'abc') + file_port.connectPort(self._filePort._this(), 'abc') + time.sleep(0.5) + self.comp.start() + time.sleep(0.5) + self.comp.stop() + self.assertEquals(self.comp.inout_state, "ok") + self.assertEquals(self.comp.retval_state, "ok") + self.assertEquals(self.comp.out_state, "ok") + self.assertEquals(self.comp.in_state, "ok") + self.assertEquals(self.comp.bad_connection, "The requested connection id (invalid_connectionid) does not exist.Connections available: abc") + testableobject_port.connectPort(self._testableObjectPort_2._this(), 'def') + propertyemitter_port.connectPort(self._propertyEmitterPort_2._this(), 'def') + file_port.connectPort(self._filePort_2._this(), 'def') + time.sleep(0.5) + self.comp.start() + time.sleep(0.5) + self.comp.stop() + self.assertEquals(self.comp.inout_state, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.retval_state, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.out_state, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.in_state, "ok") + self.assertEquals(self.comp.bad_connection, "The requested connection id (invalid_connectionid) does not exist.Connections available: abc, def") + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dom/components/get_ports/tests/test_get_ports.py 
b/codegenTesting/sdr/dom/components/get_ports/tests/test_get_ports.py index 0007ae883..d62f85529 100644 --- a/codegenTesting/sdr/dom/components/get_ports/tests/test_get_ports.py +++ b/codegenTesting/sdr/dom/components/get_ports/tests/test_get_ports.py @@ -21,6 +21,7 @@ import ossie.utils.testing from ossie.utils import sb +from ossie.cf import CF class ComponentTests(ossie.utils.testing.RHTestCase): # Path to the SPD file, relative to this file. This must be set in order to @@ -67,20 +68,20 @@ def getPortDefinition(self, name, scd): if port.get_usesname() == name: port_def = { 'repid': port.get_repid(), 'description': port.get_description(), - 'direction': 'Uses' } + 'direction': CF.PortSet.DIRECTION_USES } break for port in scd.get_componentfeatures().get_ports().get_provides(): if port.get_providesname() == name: if port_def is not None: # Port was already found in uses ports, so it must be bi-directional - port_def['direction'] = 'Bidir' + port_def['direction'] = CF.PortSet.DIRECTION_BIDIR if not port_def['description']: port_def['description'] = port.get_description() else: port_def = { 'repid': port.get_repid(), 'description': port.get_description(), - 'direction': 'Provides' } + 'direction': CF.PortSet.DIRECTION_PROVIDES } break return port_def diff --git a/codegenTesting/sdr/dom/components/hexer/.hexer.wavedev b/codegenTesting/sdr/dom/components/hexer/.hexer.wavedev new file mode 100644 index 000000000..31cc6c1c5 --- /dev/null +++ b/codegenTesting/sdr/dom/components/hexer/.hexer.wavedev @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/hexer/hexer.prf.xml b/codegenTesting/sdr/dom/components/hexer/hexer.prf.xml new file mode 100644 index 000000000..76b2dd764 --- /dev/null +++ b/codegenTesting/sdr/dom/components/hexer/hexer.prf.xml @@ -0,0 +1,44 @@ + + + + + 0xFF + + + + + 128 + + + + + 12345 + + + + + 0xFFFF + + + + + 0x1234 + + + + + 4660 + + + + + 0xDEADBEEF + + + + + 123456 + + + + diff --git 
a/codegenTesting/sdr/dom/components/hexer/hexer.scd.xml b/codegenTesting/sdr/dom/components/hexer/hexer.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/codegenTesting/sdr/dom/components/hexer/hexer.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/hexer/hexer.spd.xml b/codegenTesting/sdr/dom/components/hexer/hexer.spd.xml new file mode 100644 index 000000000..283848ff5 --- /dev/null +++ b/codegenTesting/sdr/dom/components/hexer/hexer.spd.xml @@ -0,0 +1,50 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/hexer.py + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/hexer + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/hexer/tests/.md5sums b/codegenTesting/sdr/dom/components/hexer/tests/.md5sums new file mode 100644 index 000000000..444e56fbc --- /dev/null +++ b/codegenTesting/sdr/dom/components/hexer/tests/.md5sums @@ -0,0 +1 @@ +49db54e8297d5da742998812e914f650 test_hexer.py diff --git a/codegenTesting/sdr/dom/components/hexer/tests/test_hexer.py b/codegenTesting/sdr/dom/components/hexer/tests/test_hexer.py new file mode 100644 index 000000000..fc4c22a60 --- /dev/null +++ b/codegenTesting/sdr/dom/components/hexer/tests/test_hexer.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +import ossie.utils.testing +from ossie.utils import sb + +class ComponentTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the component. 
+    SPD_FILE = '../hexer.spd.xml'
+
+    # setUp is run before every function preceded by "test" is executed
+    # tearDown is run after every function preceded by "test" is executed
+
+    # self.comp is a component using the sandbox API
+    # to create a data source, the package sb contains data sources like DataSource or FileSource
+    # to create a data sink, there are sinks like DataSink and FileSink
+    # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax:
+    #  src = sb.FileSource('myfile.dat')
+    #  snk = sb.DataSink()
+    #  src.connect(self.comp)
+    #  self.comp.connect(snk)
+    #  sb.start()
+    #
+    # components/sources/sinks need to be started. Individual components or elements can be started
+    #  src.start()
+    #  self.comp.start()
+    #
+    # every component/elements in the sandbox can be started
+    #  sb.start()
+
+    def setUp(self):
+        # Launch the component, using the selected implementation
+        self.comp = sb.launch(self.spd_file, impl=self.impl)
+
+    def tearDown(self):
+        # Clean up all sandbox artifacts created during test
+        sb.release()
+
+    def testBasicBehavior(self):
+        #######################################################################
+        # Make sure start and stop can be called without throwing exceptions
+        self.comp.start()
+        self.comp.stop()
+
+    def checkProperties(self):
+        #######################################################################
+        # Verify that the hex-formatted property defaults from the PRF were
+        # parsed into the expected numeric values
+        self.assertEquals( self.comp.hex_octet, 255 )
+        self.assertEquals( self.comp.hex_short, 65535 )
+        self.assertEquals( self.comp.hex_long, 4660 )
+        self.assertEquals( self.comp.hex_ulonglong, 3735928559 )
+
+if __name__ == "__main__":
+    ossie.utils.testing.main() # By default tests all implementations
diff --git a/codegenTesting/sdr/dom/components/octaveTestSink/tests/test_octaveTestSink.py b/codegenTesting/sdr/dom/components/octaveTestSink/tests/test_octaveTestSink.py
deleted file mode 100644
index 
3d45df3cf..000000000 --- a/codegenTesting/sdr/dom/components/octaveTestSink/tests/test_octaveTestSink.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK codegenTesting. -# -# REDHAWK codegenTesting is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK codegenTesting is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -import unittest -import ossie.utils.testing -import os -from omniORB import any - -from ossie.utils import sb - -class ResourceTests(ossie.utils.testing.ScaComponentTestCase): - """Test for all resource implementations in octaveTestSink""" - - def testScaBasicBehavior(self): - ####################################################################### - # Launch the resource with the default execparams - execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False) - execparams = dict([(x.id, any.from_any(x.value)) for x in execparams]) - self.launch(execparams) - - ####################################################################### - # Verify the basic state of the resource - self.assertNotEqual(self.comp, None) - self.assertEqual(self.comp.ref._non_existent(), False) - - self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True) - - ####################################################################### - # Validate that query returns all expected parameters - # Query of '[]' should return the following set of properties - expectedProps = [] - expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True)) - expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True)) - props = self.comp.query([]) - props = dict((x.id, any.from_any(x.value)) for x in props) - # Query may return more than expected, but not less - for expectedProp in expectedProps: - self.assertEquals(props.has_key(expectedProp.id), True) - - ####################################################################### - # Verify that all expected ports are available - for port in self.scd.get_componentfeatures().get_ports().get_uses(): - port_obj = self.comp.getPort(str(port.get_usesname())) - self.assertNotEqual(port_obj, None) - self.assertEqual(port_obj._non_existent(), False) - self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True) - - for 
port in self.scd.get_componentfeatures().get_ports().get_provides(): - port_obj = self.comp.getPort(str(port.get_providesname())) - self.assertNotEqual(port_obj, None) - self.assertEqual(port_obj._non_existent(), False) - self.assertEqual(port_obj._is_a(port.get_repid()), True) - - ####################################################################### - # Make sure start and stop can be called without throwing exceptions - self.comp.start() - self.comp.stop() - - ####################################################################### - # Simulate regular resource shutdown - self.comp.releaseObject() - -if __name__ == "__main__": - ossie.utils.testing.main("../octaveTestSink.spd.xml") # By default tests all implementations diff --git a/codegenTesting/sdr/dom/components/octaveTestSource/tests/test_octaveTestSource.py b/codegenTesting/sdr/dom/components/octaveTestSource/tests/test_octaveTestSource.py deleted file mode 100644 index 7ccd0ce5d..000000000 --- a/codegenTesting/sdr/dom/components/octaveTestSource/tests/test_octaveTestSource.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK codegenTesting. -# -# REDHAWK codegenTesting is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK codegenTesting is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -import unittest -import ossie.utils.testing -import os -from omniORB import any - -from ossie.utils import sb - -class ResourceTests(ossie.utils.testing.ScaComponentTestCase): - """Test for all resource implementations in octaveTestSource""" - - def testScaBasicBehavior(self): - ####################################################################### - # Launch the resource with the default execparams - execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False) - execparams = dict([(x.id, any.from_any(x.value)) for x in execparams]) - self.launch(execparams) - - ####################################################################### - # Verify the basic state of the resource - self.assertNotEqual(self.comp, None) - self.assertEqual(self.comp.ref._non_existent(), False) - - self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True) - - ####################################################################### - # Validate that query returns all expected parameters - # Query of '[]' should return the following set of properties - expectedProps = [] - expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True)) - expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True)) - props = self.comp.query([]) - props = dict((x.id, any.from_any(x.value)) for x in props) - # Query may return more than expected, but not less - for expectedProp in expectedProps: - self.assertEquals(props.has_key(expectedProp.id), True) - - ####################################################################### - # Verify that all expected ports are available - for port in self.scd.get_componentfeatures().get_ports().get_uses(): - port_obj = self.comp.getPort(str(port.get_usesname())) - self.assertNotEqual(port_obj, None) - self.assertEqual(port_obj._non_existent(), False) - self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True) - - for 
port in self.scd.get_componentfeatures().get_ports().get_provides(): - port_obj = self.comp.getPort(str(port.get_providesname())) - self.assertNotEqual(port_obj, None) - self.assertEqual(port_obj._non_existent(), False) - self.assertEqual(port_obj._is_a(port.get_repid()), True) - - ####################################################################### - # Make sure start and stop can be called without throwing exceptions - self.comp.start() - self.comp.stop() - - ####################################################################### - # Simulate regular resource shutdown - self.comp.releaseObject() - -if __name__ == "__main__": - ossie.utils.testing.main("../octaveTestSource.spd.xml") # By default tests all implementations diff --git a/codegenTesting/sdr/dom/components/props/props.prf.xml b/codegenTesting/sdr/dom/components/props/props.prf.xml index 5a918f1a6..73112a85d 100644 --- a/codegenTesting/sdr/dom/components/props/props.prf.xml +++ b/codegenTesting/sdr/dom/components/props/props.prf.xml @@ -293,4 +293,31 @@ with this program. If not, see http://www.gnu.org/licenses/. + + 2017:2:1::12:01:00.123 + + + + + false + + + + + + + + + + + + + + + 2010:2:1::12:01:00.123 + 2011:2:1::12:01:00.123 + + + + diff --git a/codegenTesting/sdr/dom/components/rf_ctrl/.rf_ctrl.wavedev b/codegenTesting/sdr/dom/components/rf_ctrl/.rf_ctrl.wavedev new file mode 100644 index 000000000..8e4ba3f9d --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/.rf_ctrl.wavedev @@ -0,0 +1,14 @@ + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/rf_ctrl/cpp/rf_ctrl.cpp b/codegenTesting/sdr/dom/components/rf_ctrl/cpp/rf_ctrl.cpp new file mode 100644 index 000000000..fe8a468be --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/cpp/rf_ctrl.cpp @@ -0,0 +1,462 @@ +/************************************************************************** + + This is the component code. 
This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "rf_ctrl.h" + +PREPARE_LOGGING(rf_ctrl_i) + +rf_ctrl_i::rf_ctrl_i(const char *uuid, const char *label) : + rf_ctrl_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + +} + +rf_ctrl_i::~rf_ctrl_i() +{ +} + +void rf_ctrl_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. 
+ + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. 
+ + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. 
Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void rf_ctrl_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &rf_ctrl_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (rf_ctrl_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &rf_ctrl_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to rf_ctrl.cpp + rf_ctrl_i::rf_ctrl_i(const char *uuid, const char *label) : + rf_ctrl_base(uuid, label) + { + addPropertyListener(scaleValue, this, &rf_ctrl_i::scaleChanged); + addPropertyListener(status, this, &rf_ctrl_i::statusChanged); + } + + void rf_ctrl_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(rf_ctrl_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void rf_ctrl_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(rf_ctrl_i, "status changed"); + } + + //Add to rf_ctrl.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int rf_ctrl_i::serviceFunction() +{ + LOG_DEBUG(rf_ctrl_i, "serviceFunction() example log message"); + + this->get_rfinfo = "ok"; + try { + this->rfinfo_out->rf_flow_id(); + } catch (redhawk::PortCallError &e) { + this->get_rfinfo = e.what(); + } + this->set_rfinfo = "ok"; + try { + std::string rf_flow_id("hello"); + this->rfinfo_out->rf_flow_id(rf_flow_id); + } catch (redhawk::PortCallError &e) { + this->set_rfinfo = e.what(); + } + this->get_current_rf = "ok"; + try { + this->rfsource_out->current_rf_input(); + } catch (redhawk::PortCallError &e) { + this->get_current_rf = e.what(); + } + this->set_current_rf = "ok"; + try { + frontend::RFInfoPkt foo; + this->rfsource_out->current_rf_input(foo); + } catch (redhawk::PortCallError &e) { + this->set_current_rf = e.what(); + } + this->get_available_rf = "ok"; + try { + this->rfsource_out->available_rf_inputs(); + } catch (redhawk::PortCallError &e) { + this->get_available_rf = e.what(); + } + this->set_available_rf = "ok"; + try { + std::vector foo; + 
this->rfsource_out->available_rf_inputs(foo); + } catch (redhawk::PortCallError &e) { + this->set_available_rf = e.what(); + } + this->bad_connection = "ok"; + try { + this->rfsource_out->_get_available_rf_inputs("invalid_connectionid"); + } catch (redhawk::PortCallError &e) { + this->bad_connection = e.what(); + } + + std::string tmp; + this->get_tunertype = "ok"; + try { + this->digitaltuner_out->getTunerType(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunertype = e.what(); + } + this->get_tunerdevicecontrol = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerDeviceControl(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunerdevicecontrol = e.what(); + } + this->get_tunergroupid = "ok"; + try { + this->digitaltuner_out->getTunerGroupId(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunergroupid = e.what(); + } + this->get_tunerrfflowid = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerRfFlowId(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunerrfflowid = e.what(); + } + this->get_tunerstatus = "ok"; + try { + this->digitaltuner_out->getTunerStatus(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunerstatus = e.what(); + } + this->get_tunercenterfrequency = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerCenterFrequency(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunercenterfrequency = e.what(); + } + this->set_tunercenterfrequency = "ok"; + try { + this->digitaltuner_out->setTunerCenterFrequency(tmp, 1.0); + } catch (redhawk::PortCallError &e) { + this->set_tunercenterfrequency = e.what(); + } + this->get_tunerbandwidth = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerBandwidth(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunerbandwidth = e.what(); + } + this->set_tunerbandwidth = "ok"; + try { + this->digitaltuner_out->setTunerBandwidth(tmp, 1.0); + } catch (redhawk::PortCallError &e) { + 
this->set_tunerbandwidth = e.what(); + } + this->get_tuneragcenable = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerAgcEnable(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tuneragcenable = e.what(); + } + this->set_tuneragcenable = "ok"; + try { + this->digitaltuner_out->setTunerAgcEnable(tmp, false); + } catch (redhawk::PortCallError &e) { + this->set_tuneragcenable = e.what(); + } + this->get_tunergain = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerGain(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunergain = e.what(); + } + this->set_tunergain = "ok"; + try { + this->digitaltuner_out->setTunerGain(tmp, 1.0); + } catch (redhawk::PortCallError &e) { + this->set_tunergain = e.what(); + } + this->get_tunerreferencesource = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerReferenceSource(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunerreferencesource = e.what(); + } + this->set_tunerreferencesource = "ok"; + try { + this->digitaltuner_out->setTunerReferenceSource(tmp, 2); + } catch (redhawk::PortCallError &e) { + this->set_tunerreferencesource = e.what(); + } + this->get_tunerenable = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerEnable(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tunerenable = e.what(); + } + this->set_tunerenable = "ok"; + try { + this->digitaltuner_out->setTunerEnable(tmp, false); + } catch (redhawk::PortCallError &e) { + this->set_tunerenable = e.what(); + } + this->get_tuneroutputsamplerate = "ok"; + try { + std::vector foo; + this->digitaltuner_out->getTunerOutputSampleRate(tmp); + } catch (redhawk::PortCallError &e) { + this->get_tuneroutputsamplerate = e.what(); + } + this->set_tuneroutputsamplerate = "ok"; + try { + this->digitaltuner_out->setTunerOutputSampleRate(tmp, 1.0); + } catch (redhawk::PortCallError &e) { + this->set_tuneroutputsamplerate = e.what(); + } + + this->get_gpsinfo = "ok"; + try { + 
this->gps_out->gps_info(); + } catch (redhawk::PortCallError &e) { + this->get_gpsinfo = e.what(); + } + this->set_gpsinfo = "ok"; + try { + frontend::GPSInfo _gps; + this->gps_out->gps_info(_gps); + } catch (redhawk::PortCallError &e) { + this->set_gpsinfo = e.what(); + } + this->get_gps_timepos = "ok"; + try { + std::vector foo; + this->gps_out->gps_time_pos(); + } catch (redhawk::PortCallError &e) { + this->get_gps_timepos = e.what(); + } + this->set_gps_timepos = "ok"; + try { + frontend::GpsTimePos _gps; + this->gps_out->gps_time_pos(_gps); + } catch (redhawk::PortCallError &e) { + this->set_gps_timepos = e.what(); + } + + this->get_nav_packet = "ok"; + try { + this->navdata_out->nav_packet(); + } catch (redhawk::PortCallError &e) { + this->get_nav_packet = e.what(); + } + this->set_nav_packet = "ok"; + try { + frontend::NavigationPacket _nav; + this->navdata_out->nav_packet(_nav); + } catch (redhawk::PortCallError &e) { + this->set_nav_packet = e.what(); + } + + return NOOP; +} + diff --git a/codegenTesting/sdr/dom/components/rf_ctrl/cpp/rf_ctrl.h b/codegenTesting/sdr/dom/components/rf_ctrl/cpp/rf_ctrl.h new file mode 100644 index 000000000..7768d3763 --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/cpp/rf_ctrl.h @@ -0,0 +1,18 @@ +#ifndef RF_CTRL_I_IMPL_H +#define RF_CTRL_I_IMPL_H + +#include "rf_ctrl_base.h" + +class rf_ctrl_i : public rf_ctrl_base +{ + ENABLE_LOGGING + public: + rf_ctrl_i(const char *uuid, const char *label); + ~rf_ctrl_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // RF_CTRL_I_IMPL_H diff --git a/codegenTesting/sdr/dom/components/rf_ctrl/java/src/rf_ctrl/java/rf_ctrl.java b/codegenTesting/sdr/dom/components/rf_ctrl/java/src/rf_ctrl/java/rf_ctrl.java new file mode 100644 index 000000000..a0eb3e936 --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/java/src/rf_ctrl/java/rf_ctrl.java @@ -0,0 +1,560 @@ +package rf_ctrl.java; + +import java.util.Properties; +import org.ossie.redhawk.PortCallError; 
+ +import FRONTEND.RFInfoPkt; +import FRONTEND.GPSInfo; +import FRONTEND.GpsTimePos; +import FRONTEND.NavigationPacket; +import FRONTEND.FrontendException; +import FRONTEND.BadParameterException; +import FRONTEND.CartesianPositionInfo; +import FRONTEND.NotSupportedException; +import FRONTEND.SensorInfo; +import FRONTEND.VelocityInfo; +import FRONTEND.PathDelay; +import FRONTEND.AccelerationInfo; +import FRONTEND.AntennaInfo; +import FRONTEND.AttitudeInfo; +import FRONTEND.FeedInfo; +import FRONTEND.RFCapabilities; +import FRONTEND.FreqRange; +import FRONTEND.PositionInfo; + +/** + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping + * + * Source: rf_ctrl.spd.xml + */ +public class rf_ctrl extends rf_ctrl_base { + /** + * This is the component constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. + * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. + * + * To access the Domain Manager: + * CF.DomainManager dommgr.setValue(this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app.setValue(this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message.setValue(new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. 
+ */ + + public rf_ctrl() + { + super(); + } + + public void constructor() + { + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id.setValue("testStream"); + * BULKIO.StreamSRI sri.setValue(new BULKIO.StreamSRI(); + * sri.mode.setValue(0; + * sri.xdelta.setValue(0.0; + * sri.ydelta.setValue(1.0; + * sri.subsize.setValue(0; + * sri.xunits.setValue(1; // TIME_S + * sri.streamID.setValue((stream_id != null) ? stream_id : ""); + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp.setValue(bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. 
You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. + * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val.setValue(this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the component has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data.setValue(this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData.setValue(new float[data.getData().length]; + * for (int i.setValue(0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i].setValue((float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i].setValue((float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + this.get_rfinfo.setValue("ok"); + try { + this.port_rfinfo_out.rf_flow_id(); + } catch (PortCallError e) { + this.get_rfinfo.setValue(e.getMessage()); + } + this.set_rfinfo.setValue("ok"); + try { + String rf_flow_id = new String("hello"); + this.port_rfinfo_out.rf_flow_id(rf_flow_id); + } catch (PortCallError e) { + this.set_rfinfo.setValue(e.getMessage()); + } + this.get_current_rf.setValue("ok"); + try { + this.port_rfsource_out.current_rf_input(); + } catch (PortCallError e) { + this.get_current_rf.setValue(e.getMessage()); + } + this.set_current_rf.setValue("ok"); + try { + RFInfoPkt foo = new RFInfoPkt(); + foo.rf_flow_id = new String(""); + foo.sensor = new SensorInfo(); + foo.sensor.collector = new String(""); + foo.sensor.antenna = new AntennaInfo(); + foo.sensor.antenna.description = new String(""); + foo.sensor.antenna.name = new String(""); + foo.sensor.antenna.size = new String(""); + foo.sensor.antenna.type = new String(""); + foo.sensor.feed = new FeedInfo(); + foo.sensor.feed.name = new String(""); + 
foo.sensor.feed.polarization = new String(""); + foo.sensor.feed.freq_range = new FreqRange(); + foo.sensor.feed.freq_range.values = new double[0]; + foo.sensor.mission = new String(""); + foo.sensor.rx = new String(""); + foo.ext_path_delays = new PathDelay[0]; + foo.capabilities = new RFCapabilities(); + foo.capabilities.freq_range = new FreqRange(); + foo.capabilities.freq_range.values = new double[0]; + foo.capabilities.bw_range = new FreqRange(); + foo.capabilities.bw_range.values = new double[0]; + foo.additional_info = new CF.DataType[0]; + this.port_rfsource_out.current_rf_input(foo); + } catch (PortCallError e) { + this.set_current_rf.setValue(e.getMessage()); + } + this.get_available_rf.setValue("ok"); + try { + this.port_rfsource_out.available_rf_inputs(); + } catch (PortCallError e) { + this.get_available_rf.setValue(e.getMessage()); + } + this.set_available_rf.setValue("ok"); + try { + RFInfoPkt[] foo = new RFInfoPkt[0]; + this.port_rfsource_out.available_rf_inputs(foo); + } catch (PortCallError e) { + this.set_available_rf.setValue(e.getMessage()); + } + this.bad_connection.setValue("ok"); + try { + this.port_rfsource_out._get_available_rf_inputs("invalid_connectionid"); + } catch (PortCallError e) { + this.bad_connection.setValue(e.getMessage()); + } + + String tmp = new String(""); + this.get_tunertype.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerType(tmp); + } catch (PortCallError e) { + this.get_tunertype.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tunerdevicecontrol.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerDeviceControl(tmp); + } catch (PortCallError e) { + this.get_tunerdevicecontrol.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + 
this.get_tunergroupid.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerGroupId(tmp); + } catch (PortCallError e) { + this.get_tunergroupid.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tunerrfflowid.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerRfFlowId(tmp); + } catch (PortCallError e) { + this.get_tunerrfflowid.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tunerstatus.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerStatus(tmp); + } catch (PortCallError e) { + this.get_tunerstatus.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tunercenterfrequency.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerCenterFrequency(tmp); + } catch (PortCallError e) { + this.get_tunercenterfrequency.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.set_tunercenterfrequency.setValue("ok"); + try { + this.port_digitaltuner_out.setTunerCenterFrequency(tmp, 1.0); + } catch (PortCallError e) { + this.set_tunercenterfrequency.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tunerbandwidth.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerBandwidth(tmp); + } catch (PortCallError e) { + this.get_tunerbandwidth.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + 
this.set_tunerbandwidth.setValue("ok"); + try { + this.port_digitaltuner_out.setTunerBandwidth(tmp, 1.0); + } catch (PortCallError e) { + this.set_tunerbandwidth.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tuneragcenable.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerAgcEnable(tmp); + } catch (PortCallError e) { + this.get_tuneragcenable.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.set_tuneragcenable.setValue("ok"); + try { + this.port_digitaltuner_out.setTunerAgcEnable(tmp, false); + } catch (PortCallError e) { + this.set_tuneragcenable.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tunergain.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerGain(tmp); + } catch (PortCallError e) { + this.get_tunergain.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.set_tunergain.setValue("ok"); + try { + this.port_digitaltuner_out.setTunerGain(tmp, (float)1.0); + } catch (PortCallError e) { + this.set_tunergain.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tunerreferencesource.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerReferenceSource(tmp); + } catch (PortCallError e) { + this.get_tunerreferencesource.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.set_tunerreferencesource.setValue("ok"); + 
try { + this.port_digitaltuner_out.setTunerReferenceSource(tmp, 2); + } catch (PortCallError e) { + this.set_tunerreferencesource.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tunerenable.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerEnable(tmp); + } catch (PortCallError e) { + this.get_tunerenable.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.set_tunerenable.setValue("ok"); + try { + this.port_digitaltuner_out.setTunerEnable(tmp, false); + } catch (PortCallError e) { + this.set_tunerenable.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.get_tuneroutputsamplerate.setValue("ok"); + try { + this.port_digitaltuner_out.getTunerOutputSampleRate(tmp); + } catch (PortCallError e) { + this.get_tuneroutputsamplerate.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + this.set_tuneroutputsamplerate.setValue("ok"); + try { + this.port_digitaltuner_out.setTunerOutputSampleRate(tmp, 1.0); + } catch (PortCallError e) { + this.set_tuneroutputsamplerate.setValue(e.getMessage()); + } catch (FrontendException | BadParameterException | NotSupportedException e) { + this.get_rfinfo.setValue("unexpected exception"); + } + + this.get_gpsinfo.setValue("ok"); + try { + this.port_gps_out.gps_info(); + } catch (PortCallError e) { + this.get_gpsinfo.setValue(e.getMessage()); + } + this.set_gpsinfo.setValue("ok"); + try { + GPSInfo _gps = new GPSInfo(); + _gps.additional_info = new CF.DataType[0]; + _gps.mode = new String(""); + _gps.rf_flow_id = new String(""); + _gps.source_id = new String(""); 
+ _gps.status_message = new String(""); + _gps.timestamp = new BULKIO.PrecisionUTCTime(); + _gps.timestamp = bulkio.time.utils.now(); + this.port_gps_out.gps_info(_gps); + } catch (PortCallError e) { + this.set_gpsinfo.setValue(e.getMessage()); + } + this.get_gps_timepos.setValue("ok"); + try { + this.port_gps_out.gps_time_pos(); + } catch (PortCallError e) { + this.get_gps_timepos.setValue(e.getMessage()); + } + this.set_gps_timepos.setValue("ok"); + try { + GpsTimePos _gps = new GpsTimePos(); + _gps.position = new PositionInfo(); + _gps.position.datum = new String(""); + _gps.timestamp = new BULKIO.PrecisionUTCTime(); + _gps.timestamp = bulkio.time.utils.now(); + this.port_gps_out.gps_time_pos(_gps); + } catch (PortCallError e) { + this.set_gps_timepos.setValue(e.getMessage()); + } + + this.get_nav_packet.setValue("ok"); + try { + this.port_navdata_out.nav_packet(); + } catch (PortCallError e) { + this.get_nav_packet.setValue(e.getMessage()); + } + this.set_nav_packet.setValue("ok"); + try { + NavigationPacket _nav = new NavigationPacket(); + _nav.acceleration = new AccelerationInfo(); + _nav.acceleration.coordinate_system = new String(""); + _nav.acceleration.datum = new String(""); + _nav.additional_info = new CF.DataType[0]; + _nav.attitude = new AttitudeInfo(); + _nav.cposition = new CartesianPositionInfo(); + _nav.cposition.datum = new String(""); + _nav.position = new PositionInfo(); + _nav.position.datum = new String(""); + _nav.rf_flow_id = new String(""); + _nav.source_id = new String(""); + _nav.timestamp = new BULKIO.PrecisionUTCTime(); + _nav.timestamp = bulkio.time.utils.now(); + _nav.velocity = new VelocityInfo(); + _nav.velocity.coordinate_system = new String(""); + _nav.velocity.datum = new String(""); + this.port_navdata_out.nav_packet(_nav); + } catch (PortCallError e) { + this.set_nav_packet.setValue(e.getMessage()); + } + + return FINISH; + } + + /** + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/codegenTesting/sdr/dom/components/rf_ctrl/python/rf_ctrl.py b/codegenTesting/sdr/dom/components/rf_ctrl/python/rf_ctrl.py new file mode 100755 index 000000000..1737e928b --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/python/rf_ctrl.py @@ -0,0 +1,371 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: rf_ctrl.spd.xml +from ossie.resource import start_component +import logging +from ossie.resource import PortCallError +from bulkio.bulkioInterfaces import BULKIO + +from rf_ctrl_base import * + +class rf_ctrl_i(rf_ctrl_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", rf_ctrl_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = rf_ctrl_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef() + To access the Device Manager: + devmgr = self.getDeviceManager().getRef() + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. 
+ # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI) + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self.get_rfinfo = "ok" + try: + self.port_rfinfo_out.rf_flow_id() + except PortCallError, e: + self.get_rfinfo = str(e) + + self.set_rfinfo = "ok" + try: + rf_flow_id = "hello" + self.port_rfinfo_out._set_rf_flow_id(rf_flow_id) + except PortCallError, e: + self.set_rfinfo = str(e) + + self.get_current_rf = "ok" + try: + self.port_rfsource_out.current_rf_input() + except PortCallError, e: + self.get_current_rf = str(e) + + self.set_current_rf = "ok" + try: + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + foo = FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + self.port_rfsource_out._set_current_rf_input(foo) + except PortCallError, e: + self.set_current_rf = str(e) + + self.get_available_rf = "ok" + try: + self.port_rfsource_out.available_rf_inputs() + except PortCallError, e: + self.get_available_rf = str(e) + + self.set_available_rf = "ok" + try: + foo = [] + self.port_rfsource_out._set_available_rf_inputs(foo) + except PortCallError, e: + self.set_available_rf = str(e) + + self.bad_connection = 
"ok" + try: + self.port_rfsource_out._get_available_rf_inputs("invalid_connectionid") + except PortCallError, e: + self.bad_connection = str(e) + + + tmp = '' + self.get_tunertype = "ok" + try: + self.port_digitaltuner_out.getTunerType(tmp) + except PortCallError, e: + self.get_tunertype = str(e) + + self.get_tunerdevicecontrol = "ok" + try: + self.port_digitaltuner_out.getTunerDeviceControl(tmp) + except PortCallError, e: + self.get_tunerdevicecontrol = str(e) + + self.get_tunergroupid = "ok" + try: + self.port_digitaltuner_out.getTunerGroupId(tmp) + except PortCallError, e: + self.get_tunergroupid = str(e) + + self.get_tunerrfflowid = "ok" + try: + self.port_digitaltuner_out.getTunerRfFlowId(tmp) + except PortCallError, e: + self.get_tunerrfflowid = str(e) + + self.get_tunerstatus = "ok" + try: + self.port_digitaltuner_out.getTunerStatus(tmp) + except PortCallError, e: + self.get_tunerstatus = str(e) + + self.get_tunercenterfrequency = "ok" + try: + self.port_digitaltuner_out.getTunerCenterFrequency(tmp) + except PortCallError, e: + self.get_tunercenterfrequency = str(e) + + self.set_tunercenterfrequency = "ok" + try: + self.port_digitaltuner_out.setTunerCenterFrequency(tmp, 1.0) + except PortCallError, e: + self.set_tunercenterfrequency = str(e) + + self.get_tunerbandwidth = "ok" + try: + self.port_digitaltuner_out.getTunerBandwidth(tmp) + except PortCallError, e: + self.get_tunerbandwidth = str(e) + + self.set_tunerbandwidth = "ok" + try: + self.port_digitaltuner_out.setTunerBandwidth(tmp, 1.0) + except PortCallError, e: + self.set_tunerbandwidth = str(e) + + self.get_tuneragcenable = "ok" + try: + self.port_digitaltuner_out.getTunerAgcEnable(tmp) + except PortCallError, e: + self.get_tuneragcenable = str(e) + + self.set_tuneragcenable = "ok" + try: + self.port_digitaltuner_out.setTunerAgcEnable(tmp, False) + except PortCallError, e: + self.set_tuneragcenable = str(e) + + self.get_tunergain = "ok" + try: + self.port_digitaltuner_out.getTunerGain(tmp) + except 
PortCallError, e: + self.get_tunergain = str(e) + + self.set_tunergain = "ok" + try: + self.port_digitaltuner_out.setTunerGain(tmp, 1.0) + except PortCallError, e: + self.set_tunergain = str(e) + + self.get_tunerreferencesource = "ok" + try: + self.port_digitaltuner_out.getTunerReferenceSource(tmp) + except PortCallError, e: + self.get_tunerreferencesource = str(e) + + self.set_tunerreferencesource = "ok" + try: + self.port_digitaltuner_out.setTunerReferenceSource(tmp, 2) + except PortCallError, e: + self.set_tunerreferencesource = str(e) + + self.get_tunerenable = "ok" + try: + self.port_digitaltuner_out.getTunerEnable(tmp) + except PortCallError, e: + self.get_tunerenable = str(e) + + self.set_tunerenable = "ok" + try: + self.port_digitaltuner_out.setTunerEnable(tmp, False) + except PortCallError, e: + self.set_tunerenable = str(e) + + self.get_tuneroutputsamplerate = "ok" + try: + self.port_digitaltuner_out.getTunerOutputSampleRate(tmp) + except PortCallError, e: + self.get_tuneroutputsamplerate = str(e) + + self.set_tuneroutputsamplerate = "ok" + try: + self.port_digitaltuner_out.setTunerOutputSampleRate(tmp, 1.0) + except PortCallError, e: + self.set_tuneroutputsamplerate = str(e) + + + self.get_gpsinfo = "ok" + try: + self.port_gps_out.gps_info() + except PortCallError, e: + self.get_gpsinfo = str(e) + + self.set_gpsinfo = "ok" + try: + _gps = FRONTEND.GPSInfo('','','',1L,1L,1L,1.0,1.0,1.0,1.0,1,1.0,'',BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0),[]) + self.port_gps_out._set_gps_info(_gps) + except PortCallError, e: + self.set_gpsinfo = str(e) + + self.get_gps_timepos = "ok" + try: + self.port_gps_out.gps_time_pos() + except PortCallError, e: + self.get_gps_timepos = str(e) + + self.set_gps_timepos = "ok" + try: + _positioninfo = FRONTEND.PositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _gps = FRONTEND.GpsTimePos(_positioninfo,BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0)) + self.port_gps_out._set_gps_time_pos(_gps) + except PortCallError, e: + self.set_gps_timepos = 
str(e) + + + self.get_nav_packet = "ok" + try: + self.port_navdata_out.nav_packet() + except PortCallError, e: + self.get_nav_packet = str(e) + + self.set_nav_packet = "ok" + try: + _time = BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0) + _positioninfo = FRONTEND.PositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _cartesianpos=FRONTEND.CartesianPositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _velocityinfo=FRONTEND.VelocityInfo(False,'DATUM_WGS84','',0.0,0.0,0.0) + _accelerationinfo=FRONTEND.AccelerationInfo(False,'DATUM_WGS84','',0.0,0.0,0.0) + _attitudeinfo=FRONTEND.AttitudeInfo(False,0.0,0.0,0.0) + _nav = FRONTEND.NavigationPacket('','',_positioninfo,_cartesianpos,_velocityinfo,_accelerationinfo,_attitudeinfo,_time,[]) + self.port_navdata_out._set_nav_packet(_nav) + except PortCallError, e: + self.set_nav_packet = str(e) + + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(rf_ctrl_i) + diff --git a/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.prf.xml b/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.prf.xml new file mode 100644 index 000000000..6726bcb9f --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.prf.xml @@ -0,0 +1,132 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.scd.xml b/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.scd.xml new file mode 100644 index 000000000..92e9dc163 --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.scd.xml @@ -0,0 +1,62 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.spd.xml b/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.spd.xml new file mode 100644 index 000000000..aa1ccb31c --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/rf_ctrl.spd.xml @@ -0,0 +1,50 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/rf_ctrl.so + + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/rf_ctrl.py + + + + + + + diff --git a/codegenTesting/sdr/dom/components/rf_ctrl/tests/test_rf_ctrl.py b/codegenTesting/sdr/dom/components/rf_ctrl/tests/test_rf_ctrl.py new file mode 100644 index 000000000..50c720005 --- /dev/null +++ b/codegenTesting/sdr/dom/components/rf_ctrl/tests/test_rf_ctrl.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python + +from redhawk.frontendInterfaces import FRONTEND, FRONTEND__POA +from bulkio.bulkioInterfaces import BULKIO +import ossie.utils.testing +from ossie.utils import sb +import time + +class RFInfoPort(FRONTEND__POA.RFInfo): + def __init__(self): + pass + def _get_rf_flow_id(self): + return 'hello' + def _set_rf_flow_id(self, rfinfo_pkt): + pass + def _get_rfinfo_pkt(self): + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + _rfinfopkt=FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + return _rfinfopkt + def _set_rfinfo_pkt(self, rf_inputs): + pass + +class RFSourcePort(FRONTEND__POA.RFSource): + def __init__(self): + pass + def _get_available_rf_inputs(self): + return [] + def _set_available_rf_inputs(self, rf_inputs): + 
pass + def _get_current_rf_input(self): + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + _rfinfopkt=FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + return _rfinfopkt + def _set_current_rf_input(self, rf_inputs): + pass + +class DigitalTunerPort(FRONTEND__POA.DigitalTuner): + def __init__(self): + pass + def getTunerType(self, _id): + return '' + def getTunerDeviceControl(self, _id): + return False + def getTunerGroupId(self, _id): + return '' + def getTunerRfFlowId(self, _id): + return '' + def getTunerStatus(self, _id): + return [] + def setTunerCenterFrequency(self, _id, freq): + pass + def getTunerCenterFrequency(self, _id): + return 0.0 + def setTunerBandwidth(self, _id, bw): + pass + def getTunerBandwidth(self, _id): + return 0.0 + def setTunerAgcEnable(self, _id, enable): + pass + def getTunerAgcEnable(self, _id): + return False + def setTunerGain(self, _id, gain): + pass + def getTunerGain(self, _id): + return 0.0 + def setTunerReferenceSource(self, _id, source): + pass + def getTunerReferenceSource(self, _id): + return 0 + def setTunerEnable(self, _id, enable): + pass + def getTunerEnable(self, _id): + return False + def setTunerOutputSampleRate(self, _id, sr): + pass + def getTunerOutputSampleRate(self, _id): + return 0 + +class GPSPort(FRONTEND__POA.GPS): + def __init__(self): + pass + def _get_gps_info(self): + _gpsinfo = FRONTEND.GPSInfo('','','',1L,1L,1L,1.0,1.0,1.0,1.0,1,1.0,'',BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0),[]) + return _gpsinfo + def _set_gps_info(self, gi): + pass + def _get_gps_time_pos(self): + _positioninfo = FRONTEND.PositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _gpstimepos = FRONTEND.GpsTimePos(_positioninfo,BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0)) + return _gpstimepos + def 
_set_gps_time_pos(self, gtp): + pass + +class NavDataPort(FRONTEND__POA.NavData): + def __init__(self): + pass + def _get_nav_packet(self): + _time = BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0) + _positioninfo = FRONTEND.PositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _cartesianpos=FRONTEND.CartesianPositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _velocityinfo=FRONTEND.VelocityInfo(False,'DATUM_WGS84','',0.0,0.0,0.0) + _accelerationinfo=FRONTEND.AccelerationInfo(False,'DATUM_WGS84','',0.0,0.0,0.0) + _attitudeinfo=FRONTEND.AttitudeInfo(False,0.0,0.0,0.0) + _navpacket=FRONTEND.NavigationPacket('','',_positioninfo,_cartesianpos,_velocityinfo,_accelerationinfo,_attitudeinfo,_time,[]) + return _navpacket + def _set_nav_packet(self, rf_inputs): + pass + +class ComponentTests(ossie.utils.testing.RHTestCase): + # Path to the SPD file, relative to this file. This must be set in order to + # launch the component. + SPD_FILE = '../rf_ctrl.spd.xml' + + # setUp is run before every function preceded by "test" is executed + # tearDown is run after every function preceded by "test" is executed + + # self.comp is a component using the sandbox API + # to create a data source, the package sb contains data sources like DataSource or FileSource + # to create a data sink, there are sinks like DataSink and FileSink + # to connect the component to get data from a file, process it, and write the output to a file, use the following syntax: + # src = sb.FileSource('myfile.dat') + # snk = sb.DataSink() + # src.connect(self.comp) + # self.comp.connect(snk) + # sb.start() + # + # components/sources/sinks need to be started. 
Individual components or elements can be started + # src.start() + # self.comp.start() + # + # every component/elements in the sandbox can be started + # sb.start() + + def setUp(self): + # Launch the component, using the selected implementation + self.comp = sb.launch(self.spd_file, impl=self.impl) + self._rfinfoPort = RFInfoPort() + self._rfsourcePort = RFSourcePort() + self._digitaltunerPort = DigitalTunerPort() + self._gpsPort = GPSPort() + self._navdataPort = NavDataPort() + self._rfinfoPort_2 = RFInfoPort() + self._rfsourcePort_2 = RFSourcePort() + self._digitaltunerPort_2 = DigitalTunerPort() + self._gpsPort_2 = GPSPort() + self._navdataPort_2 = NavDataPort() + + def tearDown(self): + # Clean up all sandbox artifacts created during test + sb.release() + + def testBasicBehavior(self): + ####################################################################### + # Make sure start and stop can be called without throwing exceptions + self.comp.start() + time.sleep(0.5) + self.comp.stop() + self.assertEquals(self.comp.get_rfinfo, "No connections available.") + self.assertEquals(self.comp.set_rfinfo, "ok") + self.assertEquals(self.comp.get_available_rf, "No connections available.") + self.assertEquals(self.comp.set_available_rf, "ok") + self.assertEquals(self.comp.get_current_rf, "No connections available.") + self.assertEquals(self.comp.set_current_rf, "ok") + + self.assertEquals(self.comp.get_tunertype, "No connections available.") + self.assertEquals(self.comp.get_tunerdevicecontrol, "No connections available.") + self.assertEquals(self.comp.get_tunergroupid, "No connections available.") + self.assertEquals(self.comp.get_tunerrfflowid, "No connections available.") + self.assertEquals(self.comp.get_tunerstatus, "No connections available.") + self.assertEquals(self.comp.get_tunercenterfrequency, "No connections available.") + self.assertEquals(self.comp.set_tunercenterfrequency, "ok") + self.assertEquals(self.comp.get_tunerbandwidth, "No connections available.") + 
self.assertEquals(self.comp.set_tunerbandwidth, "ok") + self.assertEquals(self.comp.get_tuneragcenable, "No connections available.") + self.assertEquals(self.comp.set_tuneragcenable, "ok") + self.assertEquals(self.comp.get_tunergain, "No connections available.") + self.assertEquals(self.comp.set_tunergain, "ok") + self.assertEquals(self.comp.get_tunerreferencesource, "No connections available.") + self.assertEquals(self.comp.set_tunerreferencesource, "ok") + self.assertEquals(self.comp.get_tunerenable, "No connections available.") + self.assertEquals(self.comp.set_tunerenable, "ok") + self.assertEquals(self.comp.get_tuneroutputsamplerate, "No connections available.") + self.assertEquals(self.comp.set_tuneroutputsamplerate, "ok") + + self.assertEquals(self.comp.get_gpsinfo, "No connections available.") + self.assertEquals(self.comp.set_gpsinfo, "ok") + self.assertEquals(self.comp.get_gps_timepos, "No connections available.") + self.assertEquals(self.comp.set_gps_timepos, "ok") + self.assertEquals(self.comp.get_nav_packet, "No connections available.") + self.assertEquals(self.comp.set_nav_packet, "ok") + + self.assertEquals(self.comp.bad_connection, "No connections available.") + rfinfo_port = self.comp.getPort('rfinfo_out') + rfsource_port = self.comp.getPort('rfsource_out') + digitaltuner_port = self.comp.getPort('digitaltuner_out') + gps_port = self.comp.getPort('gps_out') + navdata_port = self.comp.getPort('navdata_out') + rfinfo_port.connectPort(self._rfinfoPort._this(), 'abc') + rfsource_port.connectPort(self._rfsourcePort._this(), 'abc') + digitaltuner_port.connectPort(self._digitaltunerPort._this(), 'abc') + gps_port.connectPort(self._gpsPort._this(), 'abc') + navdata_port.connectPort(self._navdataPort._this(), 'abc') + time.sleep(0.5) + self.comp.start() + time.sleep(0.5) + self.comp.stop() + self.assertEquals(self.comp.get_rfinfo, "ok") + self.assertEquals(self.comp.set_rfinfo, "ok") + self.assertEquals(self.comp.get_available_rf, "ok") + 
self.assertEquals(self.comp.set_available_rf, "ok") + self.assertEquals(self.comp.get_current_rf, "ok") + self.assertEquals(self.comp.set_current_rf, "ok") + + self.assertEquals(self.comp.get_tunertype, "ok") + self.assertEquals(self.comp.get_tunerdevicecontrol, "ok") + self.assertEquals(self.comp.get_tunergroupid, "ok") + self.assertEquals(self.comp.get_tunerrfflowid, "ok") + self.assertEquals(self.comp.get_tunerstatus, "ok") + self.assertEquals(self.comp.get_tunercenterfrequency, "ok") + self.assertEquals(self.comp.set_tunercenterfrequency, "ok") + self.assertEquals(self.comp.get_tunerbandwidth, "ok") + self.assertEquals(self.comp.set_tunerbandwidth, "ok") + self.assertEquals(self.comp.get_tuneragcenable, "ok") + self.assertEquals(self.comp.set_tuneragcenable, "ok") + self.assertEquals(self.comp.get_tunergain, "ok") + self.assertEquals(self.comp.set_tunergain, "ok") + self.assertEquals(self.comp.get_tunerreferencesource, "ok") + self.assertEquals(self.comp.set_tunerreferencesource, "ok") + self.assertEquals(self.comp.get_tunerenable, "ok") + self.assertEquals(self.comp.set_tunerenable, "ok") + self.assertEquals(self.comp.get_tuneroutputsamplerate, "ok") + self.assertEquals(self.comp.set_tuneroutputsamplerate, "ok") + + self.assertEquals(self.comp.get_gpsinfo, "ok") + self.assertEquals(self.comp.set_gpsinfo, "ok") + self.assertEquals(self.comp.get_gps_timepos, "ok") + self.assertEquals(self.comp.set_gps_timepos, "ok") + self.assertEquals(self.comp.get_nav_packet, "ok") + self.assertEquals(self.comp.set_nav_packet, "ok") + + self.assertEquals(self.comp.bad_connection, "The requested connection id (invalid_connectionid) does not exist.Connections available: abc") + rfinfo_port.connectPort(self._rfinfoPort_2._this(), 'def') + rfsource_port.connectPort(self._rfsourcePort_2._this(), 'def') + digitaltuner_port.connectPort(self._digitaltunerPort_2._this(), 'def') + gps_port.connectPort(self._gpsPort_2._this(), 'def') + 
navdata_port.connectPort(self._navdataPort_2._this(), 'def') + time.sleep(0.5) + self.comp.start() + time.sleep(0.5) + self.comp.stop() + self.assertEquals(self.comp.get_rfinfo, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_rfinfo, "ok") + self.assertEquals(self.comp.get_available_rf, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_available_rf, "ok") + self.assertEquals(self.comp.get_current_rf, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_current_rf, "ok") + + self.assertEquals(self.comp.get_tunertype, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.get_tunerdevicecontrol, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.get_tunergroupid, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.get_tunerrfflowid, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.get_tunerstatus, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.get_tunercenterfrequency, "Returned parameters require either a single connection or a populated __connection_id__ 
to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_tunercenterfrequency, "ok") + self.assertEquals(self.comp.get_tunerbandwidth, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_tunerbandwidth, "ok") + self.assertEquals(self.comp.get_tuneragcenable, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_tuneragcenable, "ok") + self.assertEquals(self.comp.get_tunergain, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_tunergain, "ok") + self.assertEquals(self.comp.get_tunerreferencesource, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_tunerreferencesource, "ok") + self.assertEquals(self.comp.get_tunerenable, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_tunerenable, "ok") + self.assertEquals(self.comp.get_tuneroutputsamplerate, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_tuneroutputsamplerate, "ok") + + self.assertEquals(self.comp.get_gpsinfo, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_gpsinfo, "ok") + self.assertEquals(self.comp.get_gps_timepos, "Returned parameters require either a single connection 
or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_gps_timepos, "ok") + self.assertEquals(self.comp.get_nav_packet, "Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.Connections available: abc, def") + self.assertEquals(self.comp.set_nav_packet, "ok") + + self.assertEquals(self.comp.bad_connection, "The requested connection id (invalid_connectionid) does not exist.Connections available: abc, def") + +if __name__ == "__main__": + ossie.utils.testing.main() # By default tests all implementations diff --git a/codegenTesting/sdr/dom/components/sri/tests/test_sri.py b/codegenTesting/sdr/dom/components/sri/tests/test_sri.py index 6895d4717..05f25168f 100644 --- a/codegenTesting/sdr/dom/components/sri/tests/test_sri.py +++ b/codegenTesting/sdr/dom/components/sri/tests/test_sri.py @@ -334,7 +334,7 @@ def testEOSBlockReset(self): #fill the queue for x in range(numPackets-1): - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's1') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's1') #send the last one and the rest of the packets in a separate thread t2 = threading.Thread(None, self.pushOnePlusPacketsThread, 't2', ('s1','s2',numPackets), {}) @@ -362,9 +362,9 @@ def testEOSBlockReset(self): #send 2 packets, the second one with an EOS which should lift the block #since s1 was calling for the block and s2 wasn't self.comp.start() - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's1') - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), True, 's1') - + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's1') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), True, 's1') + #wait to see that the EOS has passed through the component #which should reset the block to false while 
not self.EOSReceived: @@ -373,10 +373,10 @@ def testEOSBlockReset(self): self.comp.stop() #push enough packets in to cause a flush - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') self.comp.start() t1.join() @@ -400,17 +400,17 @@ def testChangeBlockWithNonEmptyQueue(self): self.dataShortInput.pushSRI(s2_sri) #fill the queue half way - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') #now set the port the be blocking s1_sri = BULKIO.StreamSRI(1, 0.0, 0.001, 1, 300, 0.0, 0.001, 1, 1, 's1', True, []) self.dataShortInput.pushSRI(s1_sri) #should still be able to push 2 more before the queue is full - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, 's2') + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') + 
self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, 's2') #send the last packet in a separate thread #this call should block, not flush @@ -473,12 +473,12 @@ def getPacketThread(self, numPacketsSent): self.packetsReceived = numPacketsReceived def pushOnePacketThread(self, streamID): - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, streamID) + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, streamID) def pushOnePlusPacketsThread(self, firstStreamID, restStreamID, numPacketsToSend): - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, firstStreamID) + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, firstStreamID) for x in range(numPacketsToSend): - self.dataShortInput.pushPacket([], bulkio_helpers.createCPUTimestamp(), False, restStreamID) + self.dataShortInput.pushPacket([1], bulkio_helpers.createCPUTimestamp(), False, restStreamID) # TODO Add additional tests here diff --git a/docs/shared-address/component-examples.md b/docs/shared-address/component-examples.md new file mode 100644 index 000000000..64bd85d41 --- /dev/null +++ b/docs/shared-address/component-examples.md @@ -0,0 +1,168 @@ +# Component Examples + +Transforming Component +---------------------- + +A component that takes some input stream(s), and produces output data. E.g., TuneFilterDecimate, AmFmPmBasebandDemod, etc. + +### Service Function + +The following example is from the modified FM demodulator I'm using to do resource utilization measurement in shared address space components, versus a comparable 2.0 application. This is the basic flow I'm expecting for new components that are doing some sort of data transformation; other than the use of the shared buffer data types, this is almost a valid 2.0 flow as well–much of it is intrinsic to the BulkIO stream API. The one thing missing in 2.0 is the getStream() method on +output ports. 
You have to track the output streams yourself in 2.0, which was an oversight. + +``` +int FmDemod_i::serviceFunction() +{ + // InFloatPort::getCurrentStream() returns the first input stream that has data available to be read, + // blocking if none are currently ready. + bulkio::InFloatStream input = dataFloat_in->getCurrentStream(); + if (!input) { + // No streams are available (typically due to a stop). + return NOOP; + } + + // If an output stream has already been created, OutFloatPort::getStream() finds it by streamID. + bulkio::OutFloatStream output = dataFloat_out->getStream(input.s + treamID()); + if (!output) { + // Otherwise, create a new stream. + output = dataFloat_out->createStream(input.streamID()); + // configureStream() is a user-defined method to apply any transformations from the input SRI to the + // output SRI + configureStream(output, input.sri()); + } + + // Blocking read from the input stream. The default behavior returns exactly one packet worth of data, + // but a sample count and consume length (for overlap) can be provided as well. + bulkio::FloatDataBlock block = input.read(); + if (!block) { + // There are two reasons why InFloatStream::read() might return a "null" block: + if (input.eos()) { + // 1. End-of-stream; close the output stream. + LOG_DEBUG(FmDemod_i, "Stream " << input.streamID() << " got an EOS"); + output.close(); + return NORMAL; + } else { + // 2. The component was stopped. + return NOOP; + } + } + + // Handle an input queue flush. Depending on your algorithm, this may require resetting state, etc. + if (block.inputQueueFlushed()) { + LOG_WARN(FmDemod_i, "Input queue flushed"); + } + + // Handle SRI changes. The configureStream() method is also used on output stream creation; the SRI change flag + // is not set for a new stream. + // Optionally, FloatDataBlock::sriChangeFlags() returns a set of bit flags to check which parts of the SRI changed, + // if only certain fields are relevant. 
+ if (block.sriChanged()) { + LOG_INFO(FmDemod_i, "SRI changed"); + configureStream(output, block.sri()); + } + + // Update any internal variables or properties. This a user-defined method, and the behavior is up to the + // implementer. In this case, the algorithm depends on the sample rate of the input SRI. + updateParameters(block.sri()); + + // Run the algorithm here (FM demodulation, in this case), taking into account whether the input is complex or + // real. In this example, the output is always real regardless of the input, but this can differ on a per-component + // basis. The actual work is done in a private templatized member function, doFmDemod(). + redhawk::buffer buffer; + if (block.complex()) { + buffer = doFmDemod(block.cxbuffer()); + } else { + buffer = doFmDemod(block.buffer()); + } + + // Write the processed data to the output stream. After calling OutFloatStream::write(), the buffer is now shared + // with an unknown number of consumers. No more modifications should be made. + // Regarding the BULKIO::PrecisionUTCTime, a block can contain multiple timestamps, if you explicitly request + // a read size; here, we are just taking the data in the block sizes it comes in, so we can just use the starting + // timestamp for the entire block. Algorithms that affect the time basis should compute a new block time. + output.write(buffer, block.getStartTime()); + return NORMAL; +} +``` + +### Algorithm + +Here's what `doFmDemod()` looks like. You wouldn't necessarily have to +do it this way, it's just an example of using functional programming to +do your algorithm. By making the method templatized, the same code can +be used (inline) for both real and complex input. 
+ +``` +template +redhawk::shared_buffer FmDemod_i::doFmDemod(const redhawk::shared_buffer& input) +{ + redhawk::buffer output(input.size()); + dsp::process(input.begin(), input.end(), output.begin(), demod); + return output; +} +``` + +It's not taking different streams into account, but that's relatively +easy to fix–you could keep a map of stream IDs to demodulator objects. + +Quick implementation notes: + +- `dsp::process()` is basically `std::transform()`, but it allows the + `demod` object to be stateful (i.e., it remembers the last phase) +- `demod` is a small functor that is applied to each input, storing + the result to the output + +Generative Component +-------------------- + +A component that creates BulkIO output data based on parameters or +files, instead of BulkIO input. E.g., SigGen, FileReader. + +### Service Function + +Generative components can be simpler, because there is no input to be +concerned with. The following example is for a component that generates +a waveform (like SigGen, but more trivial). + +``` +int Waveform_i::serviceFunction() +{ + // Update any internal variables or properties. This a user-defined method, and the behavior is up to the + // implementer. In this case, the waveform parameters could be copied from properties to shadow variables + // while holding the properties lock (propertySetAccess). + updateParameters(); + + // For this example, the output stream is a member variable on the Waveform_i class. If it's not already + // created, do it here. Another option would be to create it inthe constructor() method. + if (!_outputStream) { + _outputStream = dataFloat_out->createStream(_streamID); + _sriChanged = true; + } + + // If the SRI needs to be updated, based on the properties (or some other reason), one option is to set an + // internal flag; another possibility might be to update the SRI in updateParameters()--if the stream is only + // used from within serviceFunction(), there are no threading concerns. 
+ // configureStream() is a user-defined method to apply any changes to the output stream SRI. + if (_sriChanged) { + configureStream(_outputStream); + } + + // Create a buffer of the desired size. The redhawk::buffer class is the mutable version, and it gracefullly + // "degrades" to the immutable redhawk::shared_buffer class. + redhawk::buffer data(1024); + + // The implementation of the generator function is left as an exercise to the reader. + doWaveform(data.begin(), data.end()); + + // This example keeps a running BULKIO::PrecisionUTCTime for the first sample of each block. There are overloaded + // arithmetic operators to make it easier to modify. + _currentTimestamp += _outputStream.xdelta() * data.size(); + + // After calling OutFloatStream::write(), the buffer is now shared with an unknown number of consumers. No more + // modifications should be made. + _outputStream.write(data, _currentTimestamp); + + return NORMAL; +} +``` diff --git a/docs/shared-address/component-model.md b/docs/shared-address/component-model.md new file mode 100644 index 000000000..fb2c5792d --- /dev/null +++ b/docs/shared-address/component-model.md @@ -0,0 +1,61 @@ +# Component Model Overview  + +REDHAWK 2.0 C++ Components +-------------------------- + +Historically in REDHAWK, a C++ component is an executable which contains +both the component-specific implementation class (e.g., "TuneFilterDecimate\_i") and a small `main()` driver that creates and executes the component. + +![component_exe](/docs/shared-address/images/component_exe.png) + +Most of the actual work of `main()` is done inside of the core libraries, in the `Resource_impl::start_component()` function, so the generated `main.cpp` contains very little code. + +Shared Address Space C++ Components +----------------------------------- + +In order to support creating arbitrary components in the same address space, the creation of the component is separated from the management of the process. 
+ +![component_so](/docs/shared-address/images/component_so.png) + + +The component code provides a factory function, `make_component()`, that implements creation of an instance of the specific component (this is a subset of what `main()` used to do). The rest of the component implementation remains the same. + +A new meta-component handles the normal `main()` responsibilites, and provides an interface for the system to create a component inside its process space. This is called the ComponentHost. + +REDHAWK 2.0 Process Model +------------------------- + +The classic REDHAWK model is a 1:1 mapping of processes to components. + +![current](/docs/shared-address/images/current.png) + +The component's executable is either run by the ApplicationFactory via the GPP, or directly within the Sandbox/Chalkboard. Either does a fork-and-exec; the component executable creates a single instance of the component, which then registers back with the ApplicationFactory or Sandbox. + +Shared Address Space Process Model +---------------------------------- + +The way to remove IPC costs is, naturally, to do away with the P (at least in part). This means a 1:N process-to-component mapping. + +![shared_address](/docs/shared-address/images/shared_address.png) + +The Sandbox/Chalkboard or ApplicationFactory executes the component host, then launches the components into the host. The component host is capable of dynamically loading and unloading components and their dependencies; this allows the set of components running in the component host to change arbitrarily at run-time. The Sandbox benefits from this behavior now, since it doesn't know the future. Applications, on the other hand, do not support dynamic deployment *yet*. + +Mixing Shared Libraries and Executables +--------------------------------------- + +The shared address space model does not affect existing components. 
Executable-based C++ components will still compile and run as-is; however, without updating and re-generating a component, it will not be able to take advantage of shared address space benefits. + +![mixed](/docs/shared-address/images/mixed.png) + +The ability to generate executable components is not going away. Although I would expect it to be rare, there may be particular reasons why a given component cannot be run in a shared address space–perhaps due to the requirements of an external library, for example. + +Process Per Component +--------------------- + +While the intent is to run all shared library components from an application in a single component host in order to take advantage of the benefits of memory sharing, the design does not require it (1 is a perfectly reasonable N in 1:N). + +![process_per_component](/docs/shared-address/images/process_per_component.png) + +Each component can be launched in its own component host, giving the exact same behavior as REDHAWK 2.0 and prior. Or, a few troublesome components could be isolated from the rest by running them in their own component hosts, while the rest of the components share a host. + +A further enhancement could allow an application designer to choose arbitrary groupings of components to be run in a set of component hosts, though I have my doubts as to how valuable this is when balanced against the complexity. 
diff --git a/docs/shared-address/images/component_exe.png b/docs/shared-address/images/component_exe.png new file mode 100644 index 000000000..9dedcbbf8 Binary files /dev/null and b/docs/shared-address/images/component_exe.png differ diff --git a/docs/shared-address/images/component_so.png b/docs/shared-address/images/component_so.png new file mode 100644 index 000000000..6ce2398e9 Binary files /dev/null and b/docs/shared-address/images/component_so.png differ diff --git a/docs/shared-address/images/current.png b/docs/shared-address/images/current.png new file mode 100644 index 000000000..2fb79edfc Binary files /dev/null and b/docs/shared-address/images/current.png differ diff --git a/docs/shared-address/images/mixed.png b/docs/shared-address/images/mixed.png new file mode 100644 index 000000000..1da608a77 Binary files /dev/null and b/docs/shared-address/images/mixed.png differ diff --git a/docs/shared-address/images/process_per_component.png b/docs/shared-address/images/process_per_component.png new file mode 100644 index 000000000..594642986 Binary files /dev/null and b/docs/shared-address/images/process_per_component.png differ diff --git a/docs/shared-address/images/shared_address.png b/docs/shared-address/images/shared_address.png new file mode 100644 index 000000000..f6bfe0ef1 Binary files /dev/null and b/docs/shared-address/images/shared_address.png differ diff --git a/docs/shared-address/shared-address-components-howto.md b/docs/shared-address/shared-address-components-howto.md new file mode 100644 index 000000000..43027222d --- /dev/null +++ b/docs/shared-address/shared-address-components-howto.md @@ -0,0 +1,193 @@ +# Shared Address Space Components HOWTO +Instructions for getting up and running with shared address space +components. + +Creating a Shared Library Component +=================================== + +In the REDHAWK IDE, create a component project as you normally would, +selecting C++ as the language. 
C++ components default to a shared +library implementation. + +Converting a Component to a Shared Library +------------------------------------------ + +1. Open up the component SPD in the editor and select the + **Implementations** tab. +2. In the right hand side of the pane, expand the **Code** section. +3. Change **Type** to be "SharedLibrary" +4. Add ".so" to the end of **Entry Point** and **File** +5. Re-generate the component + +Other than changing the code entry, everything else is the same. When +you generate the component, it will produce different +`Makefile.am, configure.ac` and `main.cpp` files. Make sure there are +overwritten. + +Running a Shared Library Component +================================== + +Shared library support is built in to both the Python Sandbox and IDE +Chalkboard. Launch the component the same way as always (by path or by +name); the Sandbox/Chalkboard recognizes that the component is a shared +library and, if necessary, launches the ComponentHost soft package. The +component is then deployed into the ComponentHost. All shared library +components in the Sandbox/Chalkboard run in the same ComponentHost. + +Within the REDHAWK IDE, to terminate the ComponentHost instance, click +the "Release Waveform" button from the toolbar. + +BulkIO Memory Sharing +===================== + +This is where the bulk (no pun intended) of the difference comes in. To +avoid copies, you have to use the new shared buffer classes and API. + +There are two STL-like template classes for working with shared data: + +- `redhawk::shared_buffer` - read-only buffer +- `redhawk::buffer` - adds write operations to + `redhawk::shared_buffer` + +Buffer classes have reference semantics, which means that assignment +shares the underlying buffer instead of making a deep copy. Multiple +buffer classes can point to the same underlying memory; the memory is +freed only once there are no more references. 
For this reason, buffers +are memory leak-proof (unless you create them as pointers view the `new` +operator). + +The intent is that new data starts out as a `redhawk::buffer`, which +then gracefully degrades to a `redhawk::shared_buffer` once it's +passed to the port. This allows the originator to modify the data as +needed, but prevents any downstream receivers from modifying it, which +is necessary to avoid copies. Both classes are part of the core +libraries (not BulkIO), in ``, and have complete +Doxygen documentation. + +Unlike `std::vector`, the size is fixed at creation time. + +BulkIO Output Ports +------------------- + +Using the output stream API is strongly recommended, but all of the +numeric port classes have an overload of `pushPacket()` that takes a +shared buffer for the data argument. + +To create a new writeable buffer, use the single-argument construtor +with the desired size (in number of elements) to allocate space: +``` +redhawk::buffer buffer(1024); + for (size_t ii = 0; ii < buffer.size(); ++ii) { + buffer[ii] = (float) ii; + } +stream.write(buffer, bulkio::time::utils::now()); +``` + +The buffer is shared with all local connections (which now have a +`redhawk::shared_buffer` that points to the same data), and transferred +over CORBA for remote connections (same IPC cost as before). Once a +`redhawk::buffer` has been shared with other contexts, in this case via +the `write()` method, **you must not modify it**. + +If your algorithm requires history (besides overlap, which is supported +from the input stream), it's cheap to keep a read-only "copy" of your +output buffer: + +``` +redhawk::shared_buffer history = buffer; +``` + +BulkIO Input Ports +------------------ + +To use this feature, you must use BulkIO input streams. 
The DataBlock +classes have been updated to use a `redhawk::shared_buffer` internally, +and provide access to the internal buffer in both scalar and complex +forms: + +``` +bulkio::InFloatStream stream = dataFloat_in->getCurrentStream(); + if (!stream) { + return NOOP; + } + + bulkio::FloatDataBlock block = stream.read(); + if (!block) { + return NOOP; + } + + if (block.complex()) { + redhawk::shared_buffer > buffer = block.cxbuffer(); + std::complex sum = std::accumulate(buffer.begin(), buffer.end(), std::complex(0.0, 0.0)); + } else { + redhawk::shared_buffer buffer = block.buffer(); + float sum = std::accumulate(buffer.begin(), buffer.end(), 0.0); + } + + return NORMAL; +``` + +You can pass along the input buffer to an output stream, and the same +sharing rules apply as above (though you don't have write access). + +Advanced Usage +-------------- + +Beyond being a better array, buffers include some additional features +for low-level and signal processing use. + +#### Externally Acquired Memory + +You can wrap existing memory in a `redhawk::shared_buffer` or +`redhawk::buffer` (depending on whether you plan to modify it via the +buffer's API): +``` +float* data = new float[1024]; +redhawk::shared_buffer buffer(data, 1024); +``` + +The buffer takes ownership of the memory, calling `delete[]` when the +last reference goes away. + +#### Custom Deleter + +You can customize the delete behavior by passing your own deleter as the +third argument: + +``` +float* data = (float*) malloc(sizeof(float) * 1024)); +redhawk::shared_buffer buffer(data, 1024, &std::free); +``` + +The deleter can be any callable object or function pointer that takes a +single argument, a pointer to the memory being deleted. The `std::free` +example is somewhat contrived, but it's easy to imagine a scenario like +SourceNic, where you may want to push portions of a larger slab of +memory to downstream components, and then be notified when the chunk +becomes available for DMA. 
+ +#### Slicing + +Buffer classes provide a `slice()` method, which allows you to get a +subset of the data without making a copy. The returned buffer still +shares the underlying memory, but only provides access within the bounds +provided. + +``` +// NB: The arguments are the start and end indices. +redhawk::shared_buffer part = buffer.slice(8, 24); +// part.size() == 16 +``` + +#### Recast + +The internal data is strongly typed, but you can reinterpret a buffer as +another type with the static method `recast()`, similar to C++'s +`reinterpret_cast`. The size of the elements are taken into account in +the returned buffer, with remainder always truncated. + +``` +redhawk::buffer > source(20); // 80 bytes +redhawk::shared_buffer shorty = redhawk::shared_buffer::recast(source); | +// shorty.size() == 40 +``` diff --git a/docs/shared-memory/shared-memory-ipc.md b/docs/shared-memory/shared-memory-ipc.md new file mode 100644 index 000000000..0d9c5e8fa --- /dev/null +++ b/docs/shared-memory/shared-memory-ipc.md @@ -0,0 +1,36 @@ +# Shared Memory IPC +Instructions for taking advantage of zero-copy shared memory transfers in C++ +components and devices. + +Using Shared Memory +=================== + +BulkIO connections between C++ components or devices in different processes +on the same host will automatically use shared memory to transfer data. +In order to make optimal use of shared memory, components should follow the +advice in the Shared Address Space Components HOWTO guide. In particular, the +section BulkIO Memory Sharing describes the preferred classes and API methods +for working with data buffers. + +With no further modifications to user code, the `redhawk::buffer` template +class acquires its memory from a process-shared heap, enabling shared memory- +aware connections to transfer only a small amount of metadata to pass buffers +between processes. 
+ +Cleaning Up After Crashed Components +==================================== +Each REDHAWK process that allocates shared memory creates its own per-process +heap using POSIX real-time shared memory. The file is automatically unlinked +when it is no longer in use; however, if components crash or are terminated +with a kill signal, the clean up will not occur. + +Orphaned heaps are visible via the `/dev/shm` file system, with a filename of +`heap-`. If the process is no longer alive, these files can be removed +with the `rm` command. + +Interprocess communication is done via FIFOs created on the local `/tmp` file +system. To avoid polluting the file system these files are removed once the +communication channel is established, or the connection is canceled due to +error. In the unlikely event that a FIFO is not removed by REDHAWK, it may be +manually cleaned up by removing the file, whose name is of the form: +`/tmp/fifo--`. diff --git a/frontendInterfaces/Makefile.am b/frontendInterfaces/Makefile.am index 40e031151..67e3debac 100644 --- a/frontendInterfaces/Makefile.am +++ b/frontendInterfaces/Makefile.am @@ -92,9 +92,7 @@ clean-local: clean-python clean-java clean-cpp # Always build the current directory first (this is hack-ish, but the # # alternative is to combine the Makefile.am's) -SUBDIRS = . - -SUBDIRS += libsrc +SUBDIRS = . 
libsrc ############################################################################### # C++ (via automake and libtool) diff --git a/frontendInterfaces/build.sh b/frontendInterfaces/build.sh index 4dc2d6e28..2695a5897 100755 --- a/frontendInterfaces/build.sh +++ b/frontendInterfaces/build.sh @@ -25,9 +25,9 @@ elif [ "$1" = "rpm" ]; then # A very simplistic RPM build scenario mydir=`dirname $0` tmpdir=`mktemp -d` - cp -r ${mydir} ${tmpdir}/frontendInterfaces-2.3.9 - tar czf ${tmpdir}/frontendInterfaces-2.3.9.tar.gz --exclude=".svn" -C ${tmpdir} frontendInterfaces-2.3.9 - rpmbuild -ta ${tmpdir}/frontendInterfaces-2.3.9.tar.gz + cp -r ${mydir} ${tmpdir}/frontendInterfaces-2.4.4 + tar czf ${tmpdir}/frontendInterfaces-2.4.4.tar.gz --exclude=".svn" -C ${tmpdir} frontendInterfaces-2.4.4 + rpmbuild -ta ${tmpdir}/frontendInterfaces-2.4.4.tar.gz rm -rf $tmpdir else # Checks if build is newer than makefile (based on modification time) diff --git a/frontendInterfaces/configure.ac b/frontendInterfaces/configure.ac index f6bebcbfb..a0fa1d3f6 100644 --- a/frontendInterfaces/configure.ac +++ b/frontendInterfaces/configure.ac @@ -18,7 +18,7 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# -AC_INIT(frontendInterfaces, 2.3.9) +AC_INIT(frontendInterfaces, 2.4.4) AM_INIT_AUTOMAKE(nostdinc) AC_PROG_CC @@ -41,16 +41,16 @@ if test "$IDL" = no; then fi AC_LANG_PUSH([C++]) PKG_CHECK_MODULES([OMNIORB], [omniORB4 >= 4.1.0]) -PKG_CHECK_MODULES([OSSIE], [ossie >= 2.0.0]) +PKG_CHECK_MODULES([OSSIE], [ossie >= 2.2.1]) RH_PKG_IDLDIR([OSSIE], [ossie]) # If you depend on other IDL modules, such as CF or BULKIO add them here -PKG_CHECK_MODULES([BULKIO], [bulkioInterfaces >= 2.0]) +PKG_CHECK_MODULES([BULKIO], [bulkioInterfaces >= 2.2]) AC_CHECK_PYMODULE(bulkio.bulkioInterfaces, [], [AC_MSG_ERROR([the python bulkio.bulkioInterfaces module is required])]) RH_PKG_IDLDIR([BULKIO], [bulkioInterfaces]) AC_SUBST([FRONTEND_SO_VERSION],[0:0:0]) -AC_SUBST([FRONTEND_API_VERSION],[2.3]) +AC_SUBST([FRONTEND_API_VERSION],[2.4]) AX_BOOST_BASE([1.41]) AX_BOOST_THREAD @@ -68,7 +68,7 @@ HAVE_JAVASUPPORT=no if test "x$enable_java" != "xno"; then # Ensure JAVA_HOME is set RH_JAVA_HOME - RH_PROG_JAVAC([1.6]) + RH_PROG_JAVAC([1.8]) RH_PROG_JAR RH_PROG_IDLJ @@ -87,11 +87,19 @@ if test "x$enable_java" != "xno"; then fi AM_CONDITIONAL(HAVE_JAVASUPPORT, test $HAVE_JAVASUPPORT = yes) -AC_CONFIG_FILES([Makefile frontendInterfaces.pc]) -if test "$enable_base_classes" != "no"; then - if test "$HAVE_JAVASUPPORT = yes"; then - AC_CONFIG_FILES([libsrc/java/META-INF/MANIFEST.MF]) - fi - AC_CONFIG_FILES([libsrc/Makefile libsrc/frontend.pc]) +AM_PATH_CPPUNIT(1.12.1) +AS_IF([test "x$HAVE_JAVASUPPORT" == "xyes"], [ + dnl Use RPM location hard-coded for now + AC_SUBST([JUNIT_CLASSPATH], "/usr/share/java/junit4.jar") +]) + +AC_CONFIG_FILES([Makefile \ + frontendInterfaces.pc \ + libsrc/Makefile \ + libsrc/testing/tests/cpp/Makefile \ + libsrc/frontend.pc]) +if test "$HAVE_JAVASUPPORT = yes"; then + AC_CONFIG_FILES([libsrc/java/META-INF/MANIFEST.MF \ + libsrc/testing/tests/java/Makefile]) fi AC_OUTPUT diff --git a/frontendInterfaces/frontendInterfaces.spec b/frontendInterfaces/frontendInterfaces.spec index 
b5774484b..63feea776 100644 --- a/frontendInterfaces/frontendInterfaces.spec +++ b/frontendInterfaces/frontendInterfaces.spec @@ -34,19 +34,18 @@ Prefix: %{_prefix} Summary: The frontend library for REDHAWK Name: frontendInterfaces -Version: 2.3.9 -Release: 1%{?dist} +Version: 2.4.4 +Release: 2%{?dist} License: LGPLv3+ Group: REDHAWK/Interfaces Source: %{name}-%{version}.tar.gz Vendor: REDHAWK -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot - -Requires: redhawk >= 2.0.0 -Requires: bulkioInterfaces >= 2.0.0 -BuildRequires: redhawk-devel >= 2.0.0 -BuildRequires: bulkioInterfaces >= 2.0.0 +Requires: redhawk >= 2.2.1 +Requires: bulkioInterfaces >= 2.2.1 +BuildRequires: redhawk-devel >= 2.2.1 +BuildRequires: bulkioInterfaces >= 2.2.1 +BuildRequires: cppunit-devel %description Libraries and interface definitions for frontend. diff --git a/frontendInterfaces/idl/redhawk/FRONTEND/Frontend.idl b/frontendInterfaces/idl/redhawk/FRONTEND/Frontend.idl index 8ba452363..d7004aef2 100644 --- a/frontendInterfaces/idl/redhawk/FRONTEND/Frontend.idl +++ b/frontendInterfaces/idl/redhawk/FRONTEND/Frontend.idl @@ -109,8 +109,8 @@ module FRONTEND { struct CartesianPositionInfo { boolean valid; string datum; /** Default: DATUM_WGS84 */ - double x; /** Default Unit: Degrees */ - double y; /** Default Unit: Degrees */ + double x; /** Default Unit: Meters */ + double y; /** Default Unit: Meters */ double z; /** Default Unit: Meters */ }; diff --git a/frontendInterfaces/idl/redhawk/FRONTEND/GPS.idl b/frontendInterfaces/idl/redhawk/FRONTEND/GPS.idl index 969735f44..1b3c1aef4 100644 --- a/frontendInterfaces/idl/redhawk/FRONTEND/GPS.idl +++ b/frontendInterfaces/idl/redhawk/FRONTEND/GPS.idl @@ -43,37 +43,70 @@ module FRONTEND { /** Device Kind */ const string FE_GPS_DEVICE_KIND = "FRONTEND::GPS"; - const string GPS_MODE_LOCKED = "Locked"; - const string GPS_MODE_UNLOCKED = "Unlocked"; - const string GPS_MODE_TRACKING = "Tracking"; - + const string GPS_MODE_LOCKED = "Locked"; /** 
GPS Receiver has locked on to the signal */ + const string GPS_MODE_UNLOCKED = "Unlocked"; /** GPS Receiver has not locked on to the signal */ + const string GPS_MODE_TRACKING = "Tracking"; /** GPS Receiver has found but not locked on to the signal (optional) */ /************************/ /** STRUCTURES */ /************************/ struct GPSInfo { - string source_id; /** NAV source identifier */ - string rf_flow_id; /** identifier of rf_flow (ie - antenna) feeding into this GPS hardware */ - string mode; - - long fom; /** Figure of merit (for position) */ - long tfom; /** Figure of merit (for time) */ - long datumID; /** Should be 47, WGS 1984 */ - - double time_offset; /** Characterization of the Reference Source - should be mostly 0 */ - double freq_offset; /** Frequency offset of Reference Source */ - double time_variance; /** Timing Stability/Variance of Reference Source */ - double freq_variance; /** Frequency Stability/Variance of Reference Source */ - - short satellite_count; - float snr; - string status_message; - BULKIO::PrecisionUTCTime timestamp; - - /** Allows for keyword expansion*/ - CF::Properties additional_info; + string source_id; /** Device identifier for the device that generated the GPS location report. + This device's id if accessing the hardware directly */ + string rf_flow_id; /** Identifier of rf_flow (ie - antenna) feeding into this GPS hardware */ + string mode; /** "Locked" for signal locked and "Unlocked" for no signal lock. + Use "Tracking" if the signal has been found but has not locked (if available) */ + + long fom; /** Position figure-of-merit (see fom table below) */ + long tfom; /** Time figure-of-merit (see tfom table below) */ + long datumID; /** Should be 47, WGS 1984 */ + + double time_offset; /** Receiver oscillator's most recent time offset (seconds). Usually 0 */ + double freq_offset; /** Receiver's center frequency offset (Hz) */ + double time_variance; /** Receiver oscillator's time offset variance (seconds**2). 
Usually 0 */ + double freq_variance; /** Receiver's center frequency offset variance (Hz**2) */ + + short satellite_count; /** Number of satellites visible to the receiver */ + float snr; /** GPS receiver's reported signal to noise ratio. + The definition of this value is not standardized varies by manufacturer */ + string status_message; /** Device-specific status message */ + BULKIO::PrecisionUTCTime timestamp; /** Timestamp for the GPS information */ + + CF::Properties additional_info; /** Allows for keyword expansion*/ }; - + + /* The figure of merit (fom) provides the expected position error (EPE) + + |---------|-----------------| + | Value | Error (meters) | + |---------|-----------------| + | 1 | < 25 | + | 2 | < 50 | + | 3 | < 75 | + | 4 | < 100 | + | 5 | < 200 | + | 6 | < 500 | + | 7 | < 1000 | + | 8 | < 5000 | + | 9 | >= 5000 | + |---------|-----------------| + + The time figure of merit (fom) provides the expected time error (ETE) + + |---------|----------------------| + | Value | Error (nanoseconds) | + |---------|----------------------| + | 1 | < 1 | + | 2 | < 10 | + | 3 | < 100 | + | 4 | < 1e3 | + | 5 | < 1e4 | + | 6 | < 1e5 | + | 7 | < 1e6 | + | 8 | < 1e7 | + | 9 | >= 1e7 | + |---------|---------------------*/ + /** Provides the correlation between a timestamp and position */ struct GpsTimePos { PositionInfo position; diff --git a/frontendInterfaces/idl/redhawk/FRONTEND/TunerControl.idl b/frontendInterfaces/idl/redhawk/FRONTEND/TunerControl.idl index 0e06e2724..fc9e0cd44 100644 --- a/frontendInterfaces/idl/redhawk/FRONTEND/TunerControl.idl +++ b/frontendInterfaces/idl/redhawk/FRONTEND/TunerControl.idl @@ -22,16 +22,17 @@ #define _FRONTEND_TUNERCONTROL_IDL_ #include "redhawk/FRONTEND/Frontend.idl" +#include "ossie/BULKIO/bulkioDataTypes.idl" module FRONTEND { /** Mandated Structures and Ports: ------------------------------ - Frontend mandates three property structures outside of normal REDHAWK properties of "device_kind" and "device_model" : + 
Frontend mandates four property structures outside of normal REDHAWK properties of "device_kind" and "device_model" : (1) FRONTEND::tuner_allocation - allocation structure to acquire capability on a tuner based off tuner settings. Name || ID || Type || Description - - tuner_type || FRONTEND::tuner_allocation::tuner_type || string || Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGITIZER_CHANNELIZER + - tuner_type || FRONTEND::tuner_allocation::tuner_type || string || Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGITIZER_CHANNELIZER, RX_SCANNER_DIGITIZER - allocation_id || FRONTEND::tuner_allocation::allocation_id || string || The allocation_id set by the caller. Used by the caller to reference the device uniquely - center_frequency || FRONTEND::tuner_allocation::center_frequency || double || Requested center frequency in Hz - bandwidth || FRONTEND::tuner_allocation::bandwidth || double || Requested Bandwidth in Hz @@ -42,14 +43,23 @@ module FRONTEND { to any currently tasked device that satisfies the parameters (essentually a listener) - group_id || FRONTEND::tuner_allocation::group_id || string || Unique identifier that specifies a group of device. Must match group_id on the device - rf_flow_id || FRONTEND::tuner_allocation::rf_flow_id || string || Optional. Specifies a certain RF flow to allocate against. If left empty, it will match all frontend devices. - (2) FRONTEND::listener_allocation - additional allocation structure to acquire "listener" capability on a tuner based off a previous allocation. "Listeners" have the ability to receive the data + (2) FRONTEND::scanner_allocation + Allocation structure to acquire capability on a scanning tuner (must be used in conjunction with FRONTEND::tuner_allocation). + Note that the allocation does not contain enough information to setup the scan strategy. 
Once the device is allocated, the strategy must be set through the control API + Name || ID || Type || Description + - min_freq || FRONTEND::scanner_allocation::min_freq || double || Requested lower edge of the scanning band + - max_freq || FRONTEND::scanner_allocation::max_freq || double || Requested upper edge of the scanning band + - mode || FRONTEND::scanner_allocation::mode || enum string || SPAN_SCAN or DISCRETE_SCAN + - control_mode || FRONTEND::scanner_allocation::control_mode || enum string|| TIME_BASED or SAMPLE_BASED + - control_limit || FRONTEND::scanner_allocation::control_limit || double || Either the fastest hop rate (TIME_BASED) or shortest set of samples (SAMPLE_BASED) that the scanner is expected to support. In samples, the number that will be needed before the next retune, equivalent to control_limit >= sample_rate/(max_settle_time+min_dwell_time) is met before the next retune + (3) FRONTEND::listener_allocation - additional allocation structure to acquire "listener" capability on a tuner based off a previous allocation. "Listeners" have the ability to receive the data but can not modify the settings of the tuner Name || ID || Type || Description - existing_allocation_id || FRONTEND::listener_allocation::existing_allocation_id || string || Allocation ID for an existing allocation. Could be either control or listener - listener_allocation_id || FRONTEND::listener_allocation::listener_allocation_id || string || New Listener ID - (3) FRONTEND::tuner_status - a struct sequence containing the status of all tuners. There are optional and required fields for this structure. The required fields are listed below: + (4) FRONTEND::tuner_status - a struct sequence containing the status of all tuners. There are optional and required fields for this structure. 
The required fields are listed below: Name || ID || Type || Description - - tuner_type || FRONTEND::tuner_status::tuner_type || string || Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGTIZIER_CHANNELIZER + - tuner_type || FRONTEND::tuner_status::tuner_type || string || Example Tuner Types: TX, RX, CHANNELIZER, DDC, RX_DIGITIZER, RX_DIGITIZER_CHANNELIZER, RX_SCANNER_DIGITIZER - allocation_id_csv || FRONTEND::tuner_status::allocation_id_csv || string || Comma seperated list of currrent allocation ids, both control and listeners. - center_frequency || FRONTEND::tuner_status::center_frequency || double || Current center frequency in Hz - bandwidth || FRONTEND::tuner_status::bandwidth || double || Current Bandwidth in Hz @@ -57,6 +67,9 @@ module FRONTEND { - group_id || FRONTEND::tuner_status::group_id || string || Unique identifier that specifies a group of device. - rf_flow_id || FRONTEND::tuner_status::rf_flow_id || string || Specifies a certain RF flow to allocate against. - enabled || FRONTEND::tuner_status::enabled || boolean || True is tuner is enabled. Can be allocated but disabled + If the tuner_type is of type RX_SCANNER_DIGITIZER, the following optional fields are required as part of FRONTEND::tuner_status: + - scan_mode_enabled || FRONTEND::tuner_status::scan_mode_enabled || boolean || True is scan mode is enabled. False is Manual Tune is enabled + - supports_scan || FRONTEND::tuner_status::supports_scan || boolean || True if scan is supported Usual port additions include a input (provides) port for the tuner control as well as an output (uses) BULKIO data port that follows the naming convention [interface]_[in/out]. Examples include dataShort_out, dataSDDS_out, dataOctet_in, and DigitalTuner_in. @@ -72,7 +85,8 @@ module FRONTEND { - CHANNELIZER: Accepts digitized wideband and provides DDC's (allocation against a channelizer ensures that the input port is not shared) - DDC: Digital Down Converter. 
Channel that is extracted from a wider bandwidth (ie - Channelizer). Similar to a RX_DIGITIZER but often much cheaper. - RX_DIGITIZER_CHANNELIZER: RX_DIGITIZER and CHANNELIZER combo. The reason they are combined is because they are a single device that cannot operate - independetly (ie - RX_DIGITIZER can not output full-rate or the CHANNELIZER can not accept external input) + independetly (ie - RX_DIGITIZER can not output full-rate or the CHANNELIZER can not accept external input) + - RX_SCANNER_DIGITIZER: Frequency scanning digitizer @@ -109,11 +123,11 @@ module FRONTEND { const string TUNER_TYPE_TX = "TX"; const string TUNER_TYPE_RX = "RX"; const string TUNER_TYPE_RX_DIGITIZER = "RX_DIGITIZER"; + const string TUNER_TYPE_RX_SCANNER_DIGITIZER = "RX_SCANNER_DIGITIZER"; const string TUNER_TYPE_CHANNELIZER = "CHANNELIZER"; const string TUNER_TYPE_DDC = "DDC"; const string TUNER_TYPE_RX_DIGITIZER_CHANNELIZER = "RX_DIGITIZER_CHANNELIZER"; - /*************************/ /*** INTERFACE */ /*************************/ @@ -152,27 +166,27 @@ module FRONTEND { raises (FrontendException, BadParameterException, NotSupportedException); /** Set/Get tuner bandwidth */ - void setTunerBandwidth(in string id,in double bw) + void setTunerBandwidth(in string id, in double bw) raises (FrontendException, BadParameterException, NotSupportedException); double getTunerBandwidth(in string id) raises (FrontendException, BadParameterException, NotSupportedException); /** MGC/AGC */ - void setTunerAgcEnable(in string id,in boolean enable) + void setTunerAgcEnable(in string id, in boolean enable) raises (FrontendException, BadParameterException, NotSupportedException); boolean getTunerAgcEnable(in string id) raises (FrontendException, BadParameterException, NotSupportedException); - /** MGC Gain (where negative gain is attentuation)*/ - void setTunerGain(in string id,in float gain) + /** MGC Gain (where negative gain is attenuation)*/ + void setTunerGain(in string id, in float gain) raises 
(FrontendException, BadParameterException, NotSupportedException); float getTunerGain(in string id) raises (FrontendException, BadParameterException, NotSupportedException); - /** Tuner Reference Source: 0 = internal, 1 = external*/ - void setTunerReferenceSource(in string id,in long source) + /** Tuner Reference Source: 0 = internal, 1 = external*/ + void setTunerReferenceSource(in string id, in long source) raises (FrontendException, BadParameterException, NotSupportedException); - long getTunerReferenceSource(in string id) + long getTunerReferenceSource(in string id) raises (FrontendException, BadParameterException, NotSupportedException); /** Enable/Disable Tuner - Expected to keep current tuner settings on a disable and an EOS to be sent */ @@ -180,7 +194,7 @@ module FRONTEND { raises (FrontendException, BadParameterException, NotSupportedException); boolean getTunerEnable(in string id) raises (FrontendException, BadParameterException, NotSupportedException); - }; + }; interface DigitalTuner : AnalogTuner @@ -190,8 +204,115 @@ module FRONTEND { raises (FRONTEND::FrontendException, FRONTEND::BadParameterException, FRONTEND::NotSupportedException); double getTunerOutputSampleRate(in string id) raises (FRONTEND::FrontendException, FRONTEND::BadParameterException, FRONTEND::NotSupportedException); + }; + + /** + TUNER SCAN MODE + The tuner SCAN mode is being added to reduce aggregate tune delays when there is a need for a series of fast + retunes. The objective is to let the tuner asset perform the retuning in an automated fashion. + + The scan_mode property in the tuner_allocation structure is used for device allocation. If scan_mode is set to + "Scan" and the device has the capability, it will be allocated. The scan_rate property is also available in the + tuner_allocation structure. It is used as an allocation property if the user specifies a desired rate for the scan + operation. It is an optional allocation parameter. 
+ + Tuner SCAN Mode allows the tuner to internally perform a scan where the tuner itself retunes to cover the desired + spectrum. There are two types of automated scans: Span scan and discrete frequency scan, and a single non-automated scan: Manual (the way devices normally operate). + The scan_mode_type value determines the scan type. If the only scanning that the device allows is manual, then the device does not support scanning. + A Span scan is created using a series of start/stop frequencies. A Discrete Frequency Scan is created from a series of discrete + frequencies. Each of these inputs are used to create a series of center tune frequencies. Based on the selected bandwidth, + the tuner scan generates as a series of center frequency retunes to cover the spectrum between the start/stop frequencies or + discrete frequencies. + + The FRONTEND::tuner_status::scan_mode_enabled reflects whether the tuner is currently scanning through a plan that was generated to cover the spectrum. + It will then tune, dwell for a specific number of samples based on the dwell setting and then move to the next center frequency. + No output samples should be generated until the tuner settling time has been internally accounted for. + + start_time provides control over the tuner state and allows for synchronous sampling between multiple tuners. + + Scanning is enabled when the following conditions are met: + 1. FRONTEND::tuner_status::enabled is true + 2. scan_strategy.scan_mode != MANUAL_SCAN (note that the whole data structure must be passed on the call) + 3. 
start_time <= (BULKIO::PrecisionUTCTime) now + alternatively, start_time = 0 + + Scanning can be disabled by either: + - FRONTEND::tuner_status::enabled is false (use setTunerEnable with the argument false) + and/or + - setting scan_strategy.scan_mode = MANUAL_SCAN + and/or + - start_time > (BULKIO::PrecisionUTCTime) now + note: when the device's clock reaches start_time, the scan will start + + Also note that if the start_time is not set before setting the strategy, the scan plan will be executed as soon as the strategy is set. + + The ScanStatus structure contains the settings for the scan. + */ + interface ScanningTuner + { + /************************/ + /* STRUCTURES */ + /************************/ + enum ScanMode { + MANUAL_SCAN, + SPAN_SCAN, + DISCRETE_SCAN + }; + enum OutputControlMode { + TIME_BASED, + SAMPLE_BASED + }; + /** Basic Scan Structure */ + /** Note: the bandwidth is set by the tuner base interface. The scanning interface manages the center frequency and duration of dwell **/ + struct ScanSpanRange { + double begin_frequency; /* beginning center frequency for a Scan span (Hz) */ + double end_frequency; /* limit center frequency for a Scan span (Hz) */ + double step; /* change in center frequency (Hz) */ + }; + typedef sequence ScanSpanRanges; + typedef sequence Frequencies; + + union ScanModeDefinition switch(ScanMode) { + case MANUAL_SCAN: + double center_frequency; + case SPAN_SCAN: + ScanSpanRanges freq_scan_list; + case DISCRETE_SCAN: + Frequencies discrete_freq_list; + }; + struct ScanStrategy { + ScanMode scan_mode; /* determines the scan mode type: Manual: MANUAL_SCAN, Span Scan: SPAN_SCAN, Discrete Frequency Scan: DISCRETE_SCAN */ + ScanModeDefinition scan_definition; /* manual, span, or discrete frequency */ + OutputControlMode control_mode; /* time-based or sample-based */ + double control_value; /* time (in seconds) for time-based, or samples (truncated) for sample-based */ + }; + + struct ScanStatus { + ScanStrategy strategy; /* 
describes the scanning strategy (i.e.: time-based or sample-based) */ + BULKIO::PrecisionUTCTime start_time; /* Scheduled (or actual) start */ + Frequencies center_tune_frequencies; /* list of frequencies derived from the scanning plan (computed by the scanner device) */ + boolean started; /* True, scan plan in process */ + }; + + ScanStatus getScanStatus(in string id) + raises (FRONTEND::FrontendException, FRONTEND::BadParameterException, FRONTEND::NotSupportedException); + + /** Set Tuner Scan Start Time. Set to time zero or any time in the past with a valid tcstatus flag to start immediately. Set to invalid tcstatus to disable */ + void setScanStartTime(in string id,in BULKIO::PrecisionUTCTime start_time) + raises (FRONTEND::FrontendException, FRONTEND::BadParameterException, FRONTEND::NotSupportedException); + + void setScanStrategy(in string id,in ScanStrategy scan_strategy) + raises (FRONTEND::FrontendException, FRONTEND::BadParameterException, FRONTEND::NotSupportedException); }; + interface AnalogScanningTuner : ScanningTuner, AnalogTuner + { + }; + + interface DigitalScanningTuner : ScanningTuner, DigitalTuner + { + }; + }; #endif diff --git a/frontendInterfaces/libsrc/Makefile.am b/frontendInterfaces/libsrc/Makefile.am index 37f8d3ec0..c920f3dfe 100644 --- a/frontendInterfaces/libsrc/Makefile.am +++ b/frontendInterfaces/libsrc/Makefile.am @@ -33,9 +33,10 @@ ACLOCAL_AMFLAGS = ${ACLOCAL_FLAGS} lib_LTLIBRARIES = libfrontend-@FRONTEND_API_VERSION@.la libfrontend_@FRONTEND_API_VERSION@_la_LDFLAGS = -version-info $(FRONTEND_SO_VERSION) -libfrontend_@FRONTEND_API_VERSION@_la_LIBADD = $(BULKIO_LIBS) $(OSSIE_LIBS) +libfrontend_@FRONTEND_API_VERSION@_la_LIBADD = $(BULKIO_LIBS) $(OSSIE_LIBS) $(top_builddir)/libfrontendInterfaces.la libfrontend_@FRONTEND_API_VERSION@_la_SOURCES = \ + cpp/fe_port_impl.cpp \ cpp/fe_rfsource_port_impl.cpp \ cpp/fe_tuner_device.cpp @@ -69,6 +70,8 @@ library_include_HEADERS = \ libfrontend_@FRONTEND_API_VERSION@_la_CXXFLAGS = -Wall 
-I./cpp -DLOGGING $(FRONTEND_INF_INCLUDES) $(BOOST_CPPFLAGS) $(OMNIORB_CFLAGS) $(OSSIE_CFLAGS) +SUBDIRS = . testing/tests/cpp + ############################################################################### # Python @@ -97,11 +100,15 @@ JAVA_BINDIR := $(JAVA_DIR)/bin JAVA_SRCS := \ InAnalogTunerPort.java \ +InAnalogScanningTunerPort.java \ OutAnalogTunerPort.java \ AnalogTunerDelegate.java \ +AnalogScanningTunerDelegate.java \ InDigitalTunerPort.java \ +InDigitalScanningTunerPort.java \ OutDigitalTunerPort.java \ DigitalTunerDelegate.java \ +DigitalScanningTunerDelegate.java \ InFrontendTunerPort.java \ OutFrontendTunerPort.java \ FrontendTunerDelegate.java \ @@ -118,6 +125,7 @@ InRFSourcePort.java \ OutRFSourcePort.java \ RFSourceDelegate.java \ FETypes.java \ +FrontendScanningTunerDevice.java \ FrontendTunerDevice.java java_JARFILES = frontend.jar diff --git a/frontendInterfaces/libsrc/cpp/fe_gps_port_impl.h b/frontendInterfaces/libsrc/cpp/fe_gps_port_impl.h index 473452024..6d4a5d583 100644 --- a/frontendInterfaces/libsrc/cpp/fe_gps_port_impl.h +++ b/frontendInterfaces/libsrc/cpp/fe_gps_port_impl.h @@ -95,24 +95,75 @@ namespace frontend { OutGPSPortT(std::string port_name) : OutFrontendPort(port_name) {}; ~OutGPSPortT(){}; - + + std::vector getConnectionIds() + { + std::vector retval; + for (unsigned int i = 0; i < this->outConnections.size(); i++) { + retval.push_back(this->outConnections[i].second); + } + return retval; + }; + void __evaluateRequestBasedOnConnections(const std::string &__connection_id__, bool returnValue, bool inOut, bool out) { + if (__connection_id__.empty() and (this->outConnections.size() > 1)) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", + getConnectionIds()); + } + } + if (this->outConnections.empty()) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("No connections 
available.", std::vector()); + } else { + if (not __connection_id__.empty()) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + if ((not __connection_id__.empty()) and (not this->outConnections.empty())) { + bool foundConnection = false; + typename std::vector < std::pair < PortType_var, std::string > >::iterator i; + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (i->second == __connection_id__) { + foundConnection = true; + break; + } + } + if (not foundConnection) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } frontend::GPSInfo gps_info() { + return _get_gps_info(""); + }; + frontend::GPSInfo _get_gps_info(const std::string __connection_id__) { frontend::GPSInfo retval; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; const FRONTEND::GPSInfo_var tmp = ((*i).first)->gps_info(); retval = frontend::returnGPSInfo(tmp); } } return retval; }; - void gps_info(const frontend::GPSInfo &gps) { + void gps_info(const frontend::GPSInfo &gps, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if 
(this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; const FRONTEND::GPSInfo_var tmp = frontend::returnGPSInfo(gps); ((*i).first)->gps_info(tmp); } @@ -120,22 +171,31 @@ namespace frontend { return; }; frontend::GpsTimePos gps_time_pos() { + return _get_gps_time_pos(""); + }; + frontend::GpsTimePos _get_gps_time_pos(const std::string __connection_id__) { frontend::GpsTimePos retval; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; const FRONTEND::GpsTimePos_var tmp = ((*i).first)->gps_time_pos(); retval = frontend::returnGpsTimePos(tmp); } } return retval; }; - void gps_time_pos(frontend::GpsTimePos gps_time_pos) { + void gps_time_pos(frontend::GpsTimePos gps_time_pos, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; const FRONTEND::GpsTimePos_var tmp = frontend::returnGpsTimePos(gps_time_pos); ((*i).first)->gps_time_pos(tmp); } diff --git a/frontendInterfaces/libsrc/cpp/fe_navdata_port_impl.h b/frontendInterfaces/libsrc/cpp/fe_navdata_port_impl.h index ee6b85830..5e1102652 100644 --- 
a/frontendInterfaces/libsrc/cpp/fe_navdata_port_impl.h +++ b/frontendInterfaces/libsrc/cpp/fe_navdata_port_impl.h @@ -78,24 +78,75 @@ namespace frontend { OutNavDataPortT(std::string port_name) : OutFrontendPort(port_name) {}; ~OutNavDataPortT(){}; - + + std::vector getConnectionIds() + { + std::vector retval; + for (unsigned int i = 0; i < this->outConnections.size(); i++) { + retval.push_back(this->outConnections[i].second); + } + return retval; + }; + void __evaluateRequestBasedOnConnections(const std::string &__connection_id__, bool returnValue, bool inOut, bool out) { + if (__connection_id__.empty() and (this->outConnections.size() > 1)) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", + getConnectionIds()); + } + } + if (this->outConnections.empty()) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("No connections available.", std::vector()); + } else { + if (not __connection_id__.empty()) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + if ((not __connection_id__.empty()) and (not this->outConnections.empty())) { + bool foundConnection = false; + typename std::vector < std::pair < PortType_var, std::string > >::iterator i; + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (i->second == __connection_id__) { + foundConnection = true; + break; + } + } + if (not foundConnection) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + }; frontend::NavigationPacket nav_packet() { + return _get_nav_packet(""); + }; + frontend::NavigationPacket _get_nav_packet(const std::string __connection_id__) { 
frontend::NavigationPacket retval; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; const FRONTEND::NavigationPacket_var tmp = ((*i).first)->nav_packet(); retval = frontend::returnNavigationPacket(tmp); } } return retval; }; - void nav_packet(const frontend::NavigationPacket &nav) { + void nav_packet(const frontend::NavigationPacket &nav, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; const FRONTEND::NavigationPacket_var tmp = frontend::returnNavigationPacket(nav); ((*i).first)->nav_packet(tmp); } diff --git a/frontendInterfaces/libsrc/cpp/fe_port_impl.cpp b/frontendInterfaces/libsrc/cpp/fe_port_impl.cpp new file mode 100644 index 000000000..1ffe9cccb --- /dev/null +++ b/frontendInterfaces/libsrc/cpp/fe_port_impl.cpp @@ -0,0 +1,524 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK frontendInterfaces. 
+ * + * REDHAWK frontendInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK frontendInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#include "fe_port_impl.h" + + +namespace frontend { + + + FRONTEND::RFInfoPkt getRFInfoPkt(const RFInfoPkt &val) { + FRONTEND::RFInfoPkt tmpVal; + tmpVal.rf_flow_id = CORBA::string_dup(val.rf_flow_id.c_str()); + tmpVal.rf_center_freq = val.rf_center_freq; + tmpVal.rf_bandwidth = val.rf_bandwidth; + tmpVal.if_center_freq = val.if_center_freq; + tmpVal.spectrum_inverted = val.spectrum_inverted; + tmpVal.sensor.collector = CORBA::string_dup(val.sensor.collector.c_str()); + tmpVal.sensor.mission = CORBA::string_dup(val.sensor.mission.c_str()); + tmpVal.sensor.rx = CORBA::string_dup(val.sensor.rx.c_str()); + tmpVal.sensor.antenna.description = CORBA::string_dup(val.sensor.antenna.description.c_str()); + tmpVal.sensor.antenna.name = CORBA::string_dup(val.sensor.antenna.name.c_str()); + tmpVal.sensor.antenna.size = CORBA::string_dup(val.sensor.antenna.size.c_str()); + tmpVal.sensor.antenna.type = CORBA::string_dup(val.sensor.antenna.type.c_str()); + tmpVal.sensor.feed.name = CORBA::string_dup(val.sensor.feed.name.c_str()); + tmpVal.sensor.feed.polarization = CORBA::string_dup(val.sensor.feed.polarization.c_str()); + tmpVal.sensor.feed.freq_range.max_val = val.sensor.feed.freq_range.max_val; + tmpVal.sensor.feed.freq_range.min_val = val.sensor.feed.freq_range.min_val; + 
tmpVal.sensor.feed.freq_range.values.length(val.sensor.feed.freq_range.values.size()); + for (unsigned int i=0; irf_flow_id = CORBA::string_dup(val.rf_flow_id.c_str()); + tmpVal->rf_center_freq = val.rf_center_freq; + tmpVal->rf_bandwidth = val.rf_bandwidth; + tmpVal->if_center_freq = val.if_center_freq; + tmpVal->spectrum_inverted = val.spectrum_inverted; + tmpVal->sensor.collector = CORBA::string_dup(val.sensor.collector.c_str()); + tmpVal->sensor.mission = CORBA::string_dup(val.sensor.mission.c_str()); + tmpVal->sensor.rx = CORBA::string_dup(val.sensor.rx.c_str()); + tmpVal->sensor.antenna.description = CORBA::string_dup(val.sensor.antenna.description.c_str()); + tmpVal->sensor.antenna.name = CORBA::string_dup(val.sensor.antenna.name.c_str()); + tmpVal->sensor.antenna.size = CORBA::string_dup(val.sensor.antenna.size.c_str()); + tmpVal->sensor.antenna.type = CORBA::string_dup(val.sensor.antenna.type.c_str()); + tmpVal->sensor.feed.name = CORBA::string_dup(val.sensor.feed.name.c_str()); + tmpVal->sensor.feed.polarization = CORBA::string_dup(val.sensor.feed.polarization.c_str()); + tmpVal->sensor.feed.freq_range.max_val = val.sensor.feed.freq_range.max_val; + tmpVal->sensor.feed.freq_range.min_val = val.sensor.feed.freq_range.min_val; + tmpVal->sensor.feed.freq_range.values.length(val.sensor.feed.freq_range.values.size()); + for (unsigned int i=0; isensor.feed.freq_range.values[i] = val.sensor.feed.freq_range.values[i]; + } + tmpVal->ext_path_delays.length(val.ext_path_delays.size()); + for (unsigned int i=0; iext_path_delays[i].freq = val.ext_path_delays[i].freq; + tmpVal->ext_path_delays[i].delay_ns = val.ext_path_delays[i].delay_ns; + } + tmpVal->capabilities.freq_range.min_val = val.capabilities.freq_range.min_val; + tmpVal->capabilities.freq_range.max_val = val.capabilities.freq_range.max_val; + tmpVal->capabilities.bw_range.min_val = val.capabilities.bw_range.min_val; + tmpVal->capabilities.bw_range.max_val = val.capabilities.bw_range.max_val; + 
tmpVal->additional_info = val.additional_info; + return tmpVal; + }; + RFInfoPkt returnRFInfoPkt(const FRONTEND::RFInfoPkt &tmpVal) { + RFInfoPkt val; + val.rf_flow_id = ossie::corba::returnString(tmpVal.rf_flow_id); + val.rf_center_freq = tmpVal.rf_center_freq; + val.rf_bandwidth = tmpVal.rf_bandwidth; + val.if_center_freq = tmpVal.if_center_freq; + val.spectrum_inverted = tmpVal.spectrum_inverted; + val.sensor.collector = ossie::corba::returnString(tmpVal.sensor.collector); + val.sensor.mission = ossie::corba::returnString(tmpVal.sensor.mission); + val.sensor.rx = ossie::corba::returnString(tmpVal.sensor.rx); + val.sensor.antenna.description = ossie::corba::returnString(tmpVal.sensor.antenna.description); + val.sensor.antenna.name = ossie::corba::returnString(tmpVal.sensor.antenna.name); + val.sensor.antenna.size = ossie::corba::returnString(tmpVal.sensor.antenna.size); + val.sensor.antenna.type = ossie::corba::returnString(tmpVal.sensor.antenna.type); + val.sensor.feed.name = ossie::corba::returnString(tmpVal.sensor.feed.name); + val.sensor.feed.polarization = ossie::corba::returnString(tmpVal.sensor.feed.polarization); + val.sensor.feed.freq_range.max_val = tmpVal.sensor.feed.freq_range.max_val; + val.sensor.feed.freq_range.min_val = tmpVal.sensor.feed.freq_range.min_val; + val.sensor.feed.freq_range.values.resize(tmpVal.sensor.feed.freq_range.values.length()); + for (unsigned int i=0; isource_id = CORBA::string_dup(val.source_id.c_str()); + tmpVal->rf_flow_id = CORBA::string_dup(val.rf_flow_id.c_str()); + tmpVal->mode = CORBA::string_dup(val.mode.c_str()); + tmpVal->fom = val.fom; + tmpVal->tfom = val.tfom; + tmpVal->datumID = val.datumID; + tmpVal->time_offset = val.time_offset; + tmpVal->freq_offset = val.freq_offset; + tmpVal->time_variance = val.time_variance; + tmpVal->freq_variance = val.freq_variance; + tmpVal->satellite_count = val.satellite_count; + tmpVal->snr = val.snr; + tmpVal->status_message = CORBA::string_dup(val.status_message.c_str()); + 
tmpVal->timestamp = val.timestamp; + tmpVal->additional_info = val.additional_info; + return tmpVal; + }; + frontend::GPSInfo returnGPSInfo(const FRONTEND::GPSInfo &tmpVal) { + frontend::GPSInfo val; + val.source_id = ossie::corba::returnString(tmpVal.source_id); + val.rf_flow_id = ossie::corba::returnString(tmpVal.rf_flow_id); + val.mode = ossie::corba::returnString(tmpVal.mode); + val.fom = tmpVal.fom; + val.tfom = tmpVal.tfom; + val.datumID = tmpVal.datumID; + val.time_offset = tmpVal.time_offset; + val.freq_offset = tmpVal.freq_offset; + val.time_variance = tmpVal.time_variance; + val.freq_variance = tmpVal.freq_variance; + val.satellite_count = tmpVal.satellite_count; + val.snr = tmpVal.snr; + val.status_message = ossie::corba::returnString(tmpVal.status_message); + val.timestamp = tmpVal.timestamp; + val.additional_info = tmpVal.additional_info; + return val; + }; + + FRONTEND::GpsTimePos* returnGpsTimePos(const frontend::GpsTimePos &val) { + FRONTEND::GpsTimePos* tmpVal = new FRONTEND::GpsTimePos(); + tmpVal->position.valid = val.position.valid; + tmpVal->position.datum = CORBA::string_dup(val.position.datum.c_str()); + tmpVal->position.lat = val.position.lat; + tmpVal->position.lon = val.position.lon; + tmpVal->position.alt = val.position.alt; + tmpVal->timestamp = val.timestamp; + return tmpVal; + }; + frontend::GpsTimePos returnGpsTimePos(const FRONTEND::GpsTimePos &tmpVal) { + frontend::GpsTimePos val; + val.position.valid = tmpVal.position.valid; + val.position.datum = ossie::corba::returnString(tmpVal.position.datum); + val.position.lat = tmpVal.position.lat; + val.position.lon = tmpVal.position.lon; + val.position.alt = tmpVal.position.alt; + val.timestamp = tmpVal.timestamp; + return val; + }; + + FRONTEND::NavigationPacket* returnNavigationPacket(const frontend::NavigationPacket &val) { + FRONTEND::NavigationPacket* tmpVal = new FRONTEND::NavigationPacket(); + tmpVal->source_id = CORBA::string_dup(val.source_id.c_str()); + tmpVal->rf_flow_id = 
CORBA::string_dup(val.rf_flow_id.c_str()); + tmpVal->position.valid = val.position.valid; + tmpVal->position.datum = CORBA::string_dup(val.position.datum.c_str()); + tmpVal->position.lat = val.position.lat; + tmpVal->position.lon = val.position.lon; + tmpVal->position.alt = val.position.alt; + tmpVal->cposition.valid = val.cposition.valid; + tmpVal->cposition.datum = CORBA::string_dup(val.cposition.datum.c_str()); + tmpVal->cposition.x = val.cposition.x; + tmpVal->cposition.y = val.cposition.y; + tmpVal->cposition.z = val.cposition.z; + tmpVal->velocity.valid = val.velocity.valid; + tmpVal->velocity.datum = CORBA::string_dup(val.velocity.datum.c_str()); + tmpVal->velocity.coordinate_system = CORBA::string_dup(val.velocity.coordinate_system.c_str()); + tmpVal->velocity.x = val.velocity.x; + tmpVal->velocity.y = val.velocity.y; + tmpVal->velocity.z = val.velocity.z; + tmpVal->acceleration.valid = val.acceleration.valid; + tmpVal->acceleration.datum = CORBA::string_dup(val.acceleration.datum.c_str()); + tmpVal->acceleration.coordinate_system = CORBA::string_dup(val.acceleration.coordinate_system.c_str()); + tmpVal->acceleration.x = val.acceleration.x; + tmpVal->acceleration.y = val.acceleration.y; + tmpVal->acceleration.z = val.acceleration.z; + tmpVal->attitude.valid = val.attitude.valid; + tmpVal->attitude.pitch = val.attitude.pitch; + tmpVal->attitude.yaw = val.attitude.yaw; + tmpVal->attitude.roll = val.attitude.roll; + tmpVal->timestamp = val.timestamp; + tmpVal->additional_info = val.additional_info; + return tmpVal; + }; + frontend::NavigationPacket returnNavigationPacket(const FRONTEND::NavigationPacket &tmpVal) { + frontend::NavigationPacket val; + val.source_id = ossie::corba::returnString(tmpVal.source_id); + val.rf_flow_id = ossie::corba::returnString(tmpVal.rf_flow_id); + val.position.valid = tmpVal.position.valid; + val.position.datum = ossie::corba::returnString(tmpVal.position.datum); + val.position.lat = tmpVal.position.lat; + val.position.lon = 
tmpVal.position.lon; + val.position.alt = tmpVal.position.alt; + val.cposition.valid = tmpVal.cposition.valid; + val.cposition.datum = ossie::corba::returnString(tmpVal.cposition.datum); + val.cposition.x = tmpVal.cposition.x; + val.cposition.y = tmpVal.cposition.y; + val.cposition.z = tmpVal.cposition.z; + val.velocity.valid = tmpVal.velocity.valid; + val.velocity.datum = ossie::corba::returnString(tmpVal.velocity.datum); + val.velocity.coordinate_system = ossie::corba::returnString(tmpVal.velocity.coordinate_system); + val.velocity.x = tmpVal.velocity.x; + val.velocity.y = tmpVal.velocity.y; + val.velocity.z = tmpVal.velocity.z; + val.acceleration.valid = tmpVal.acceleration.valid; + val.acceleration.datum = ossie::corba::returnString(tmpVal.acceleration.datum); + val.acceleration.coordinate_system = ossie::corba::returnString(tmpVal.acceleration.coordinate_system); + val.acceleration.x = tmpVal.acceleration.x; + val.acceleration.y = tmpVal.acceleration.y; + val.acceleration.z = tmpVal.acceleration.z; + val.attitude.valid = tmpVal.attitude.valid; + val.attitude.pitch = tmpVal.attitude.pitch; + val.attitude.yaw = tmpVal.attitude.yaw; + val.attitude.roll = tmpVal.attitude.roll; + val.timestamp = tmpVal.timestamp; + val.additional_info = tmpVal.additional_info; + return val; + }; + + void copyRFInfoPktSequence(const RFInfoPktSequence &src, FRONTEND::RFInfoPktSequence &dest) { + RFInfoPktSequence::const_iterator i=src.begin(); + for( ; i != src.end(); i++ ) { + ossie::corba::push_back( dest, getRFInfoPkt(*i) ); + } + } + + void copyRFInfoPktSequence(const FRONTEND::RFInfoPktSequence &src, RFInfoPktSequence &dest ) { + for( unsigned int i=0 ; i != src.length(); i++ ) { + dest.push_back( returnRFInfoPkt( src[i] )); + } + } + + FRONTEND::ScanningTuner::ScanStatus* returnScanStatus(const frontend::ScanStatus &val) { + FRONTEND::ScanningTuner::ScanStatus* tmpVal = new FRONTEND::ScanningTuner::ScanStatus(); + ScanStrategy* _scan_strategy = val.strategy.get(); + if 
(dynamic_cast(_scan_strategy) != NULL) { + tmpVal->strategy.scan_mode = FRONTEND::ScanningTuner::MANUAL_SCAN; + ManualStrategy* _strat = dynamic_cast(_scan_strategy); + tmpVal->strategy.scan_definition.center_frequency(_strat->center_frequency); + switch(_strat->control_mode) { + case frontend::TIME_BASED: + tmpVal->strategy.control_mode = FRONTEND::ScanningTuner::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + tmpVal->strategy.control_mode = FRONTEND::ScanningTuner::SAMPLE_BASED; + break; + } + tmpVal->strategy.control_value = _strat->control_value; + } else if (dynamic_cast(_scan_strategy) != NULL) { + tmpVal->strategy.scan_mode = FRONTEND::ScanningTuner::SPAN_SCAN; + SpanStrategy* _strat = dynamic_cast(_scan_strategy); + FRONTEND::ScanningTuner::ScanSpanRanges _tmp; + _tmp.length(_strat->freq_scan_list.size()); + for (unsigned int i=0; i<_strat->freq_scan_list.size(); i++) { + _tmp[i].begin_frequency = _strat->freq_scan_list[i].begin_frequency; + _tmp[i].end_frequency = _strat->freq_scan_list[i].end_frequency; + _tmp[i].step = _strat->freq_scan_list[i].step; + } + tmpVal->strategy.scan_definition.freq_scan_list(_tmp); + switch(_strat->control_mode) { + case frontend::TIME_BASED: + tmpVal->strategy.control_mode = FRONTEND::ScanningTuner::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + tmpVal->strategy.control_mode = FRONTEND::ScanningTuner::SAMPLE_BASED; + break; + } + tmpVal->strategy.control_value = _strat->control_value; + } else if (dynamic_cast(_scan_strategy) != NULL) { + tmpVal->strategy.scan_mode = FRONTEND::ScanningTuner::DISCRETE_SCAN; + DiscreteStrategy* _strat = dynamic_cast(_scan_strategy); + FRONTEND::ScanningTuner::Frequencies _tmp; + _tmp.length(_strat->discrete_freq_list.size()); + for (unsigned int i=0; i<_strat->discrete_freq_list.size(); i++) { + _tmp[i] = _strat->discrete_freq_list[i]; + } + tmpVal->strategy.scan_definition.discrete_freq_list(_tmp); + switch(_strat->control_mode) { + case frontend::TIME_BASED: + 
tmpVal->strategy.control_mode = FRONTEND::ScanningTuner::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + tmpVal->strategy.control_mode = FRONTEND::ScanningTuner::SAMPLE_BASED; + break; + } + tmpVal->strategy.control_value = _strat->control_value; + } + tmpVal->start_time = val.start_time; + tmpVal->center_tune_frequencies.length(val.center_tune_frequencies.size()); + for (unsigned int i=0; icenter_tune_frequencies[i] = val.center_tune_frequencies[i]; + } + tmpVal->started = val.started; + return tmpVal; + }; + frontend::ScanStatus returnScanStatus(const FRONTEND::ScanningTuner::ScanStatus &tmpVal) { + switch (tmpVal.strategy.scan_mode) { + case FRONTEND::ScanningTuner::MANUAL_SCAN: + break; + case FRONTEND::ScanningTuner::SPAN_SCAN: + { + ScanSpanRanges freqs; + FRONTEND::ScanningTuner::ScanSpanRanges _tmp(tmpVal.strategy.scan_definition.freq_scan_list()); + freqs.resize(_tmp.length()); + for (unsigned int i=0; i<_tmp.length(); i++) { + freqs[i].begin_frequency = _tmp[i].begin_frequency; + freqs[i].end_frequency = _tmp[i].end_frequency; + freqs[i].step = _tmp[i].step; + } + SpanStrategy* _strat = new SpanStrategy(freqs); + switch(tmpVal.strategy.control_mode) { + case frontend::TIME_BASED: + _strat->control_mode = frontend::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + _strat->control_mode = frontend::SAMPLE_BASED; + break; + } + _strat->control_value = tmpVal.strategy.control_value; + ScanStatus retval(_strat); + retval.start_time = tmpVal.start_time; + retval.started = tmpVal.started; + for (unsigned int i=0; icontrol_mode = frontend::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + _strat->control_mode = frontend::SAMPLE_BASED; + break; + } + _strat->control_value = tmpVal.strategy.control_value; + ScanStatus retval(_strat); + retval.start_time = tmpVal.start_time; + retval.started = tmpVal.started; + for (unsigned int i=0; icontrol_mode = frontend::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + _strat->control_mode = 
frontend::SAMPLE_BASED; + break; + } + _strat->control_value = tmpVal.strategy.control_value; + ScanStatus retval(_strat); + retval.start_time = tmpVal.start_time; + retval.started = tmpVal.started; + for (unsigned int i=0; i(&val) != NULL) { + tmpVal->scan_mode = FRONTEND::ScanningTuner::MANUAL_SCAN; + const ManualStrategy* _strat = dynamic_cast(&val); + tmpVal->scan_definition.center_frequency(_strat->center_frequency); + switch(_strat->control_mode) { + case frontend::TIME_BASED: + tmpVal->control_mode = FRONTEND::ScanningTuner::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + tmpVal->control_mode = FRONTEND::ScanningTuner::SAMPLE_BASED; + break; + } + tmpVal->control_value = _strat->control_value; + } else if (dynamic_cast(&val) != NULL) { + tmpVal->scan_mode = FRONTEND::ScanningTuner::SPAN_SCAN; + const SpanStrategy* _strat = dynamic_cast(&val); + FRONTEND::ScanningTuner::ScanSpanRanges _tmp; + _tmp.length(_strat->freq_scan_list.size()); + for (unsigned int i=0; i<_strat->freq_scan_list.size(); i++) { + _tmp[i].begin_frequency = _strat->freq_scan_list[i].begin_frequency; + _tmp[i].end_frequency = _strat->freq_scan_list[i].end_frequency; + _tmp[i].step = _strat->freq_scan_list[i].step; + } + tmpVal->scan_definition.freq_scan_list(_tmp); + switch(_strat->control_mode) { + case frontend::TIME_BASED: + tmpVal->control_mode = FRONTEND::ScanningTuner::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + tmpVal->control_mode = FRONTEND::ScanningTuner::SAMPLE_BASED; + break; + } + tmpVal->control_value = _strat->control_value; + } else if (dynamic_cast(&val) != NULL) { + tmpVal->scan_mode = FRONTEND::ScanningTuner::DISCRETE_SCAN; + const DiscreteStrategy* _strat = dynamic_cast(&val); + FRONTEND::ScanningTuner::Frequencies _tmp; + _tmp.length(_strat->discrete_freq_list.size()); + for (unsigned int i=0; i<_strat->discrete_freq_list.size(); i++) { + _tmp[i] = _strat->discrete_freq_list[i]; + } + tmpVal->scan_definition.discrete_freq_list(_tmp); + 
switch(_strat->control_mode) { + case frontend::TIME_BASED: + tmpVal->control_mode = FRONTEND::ScanningTuner::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + tmpVal->control_mode = FRONTEND::ScanningTuner::SAMPLE_BASED; + break; + } + tmpVal->control_value = _strat->control_value; + } + return tmpVal; + }; + frontend::ScanStrategy* returnScanStrategy(const FRONTEND::ScanningTuner::ScanStrategy &tmpVal) { + switch (tmpVal.scan_mode) { + case FRONTEND::ScanningTuner::MANUAL_SCAN: + break; + case FRONTEND::ScanningTuner::SPAN_SCAN: + { + ScanSpanRanges freqs; + FRONTEND::ScanningTuner::ScanSpanRanges _tmp(tmpVal.scan_definition.freq_scan_list()); + freqs.resize(_tmp.length()); + for (unsigned int i=0; i<_tmp.length(); i++) { + freqs[i].begin_frequency = _tmp[i].begin_frequency; + freqs[i].end_frequency = _tmp[i].end_frequency; + freqs[i].step = _tmp[i].step; + } + SpanStrategy* retval = new SpanStrategy(freqs); + switch(tmpVal.control_mode) { + case frontend::TIME_BASED: + retval->control_mode = frontend::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + retval->control_mode = frontend::SAMPLE_BASED; + break; + } + retval->control_value = tmpVal.control_value; + return retval; + } + case FRONTEND::ScanningTuner::DISCRETE_SCAN: + { + Frequencies freqs; + FRONTEND::ScanningTuner::Frequencies _tmp(tmpVal.scan_definition.discrete_freq_list()); + for (unsigned int i=0; i<_tmp.length(); i++) { + freqs.push_back(_tmp[i]); + } + DiscreteStrategy* retval = new DiscreteStrategy(freqs); + switch(tmpVal.control_mode) { + case frontend::TIME_BASED: + retval->control_mode = frontend::TIME_BASED; + break; + case frontend::SAMPLE_BASED: + retval->control_mode = frontend::SAMPLE_BASED; + break; + } + retval->control_value = tmpVal.control_value; + return retval; + } + } + ManualStrategy* retval = new ManualStrategy(tmpVal.scan_definition.center_frequency()); + switch(tmpVal.control_mode) { + case frontend::TIME_BASED: + retval->control_mode = frontend::TIME_BASED; + break; + 
case frontend::SAMPLE_BASED: + retval->control_mode = frontend::SAMPLE_BASED; + break; + } + retval->control_value = tmpVal.control_value; + return retval; + }; +} // end of frontend namespace + diff --git a/frontendInterfaces/libsrc/cpp/fe_port_impl.h b/frontendInterfaces/libsrc/cpp/fe_port_impl.h index 82aa110eb..3197998d4 100644 --- a/frontendInterfaces/libsrc/cpp/fe_port_impl.h +++ b/frontendInterfaces/libsrc/cpp/fe_port_impl.h @@ -35,6 +35,7 @@ #include #include #include +#include #include #include "fe_types.h" @@ -55,202 +56,23 @@ namespace frontend { }; // END FROM bulkio_p.h - inline FRONTEND::RFInfoPkt* returnRFInfoPkt(const RFInfoPkt &val) { - FRONTEND::RFInfoPkt* tmpVal = new FRONTEND::RFInfoPkt(); - tmpVal->rf_flow_id = CORBA::string_dup(val.rf_flow_id.c_str()); - tmpVal->rf_center_freq = val.rf_center_freq; - tmpVal->rf_bandwidth = val.rf_bandwidth; - tmpVal->if_center_freq = val.if_center_freq; - tmpVal->spectrum_inverted = val.spectrum_inverted; - tmpVal->sensor.collector = CORBA::string_dup(val.sensor.collector.c_str()); - tmpVal->sensor.mission = CORBA::string_dup(val.sensor.mission.c_str()); - tmpVal->sensor.rx = CORBA::string_dup(val.sensor.rx.c_str()); - tmpVal->sensor.antenna.description = CORBA::string_dup(val.sensor.antenna.description.c_str()); - tmpVal->sensor.antenna.name = CORBA::string_dup(val.sensor.antenna.name.c_str()); - tmpVal->sensor.antenna.size = CORBA::string_dup(val.sensor.antenna.size.c_str()); - tmpVal->sensor.antenna.type = CORBA::string_dup(val.sensor.antenna.type.c_str()); - tmpVal->sensor.feed.name = CORBA::string_dup(val.sensor.feed.name.c_str()); - tmpVal->sensor.feed.polarization = CORBA::string_dup(val.sensor.feed.polarization.c_str()); - tmpVal->sensor.feed.freq_range.max_val = val.sensor.feed.freq_range.max_val; - tmpVal->sensor.feed.freq_range.min_val = val.sensor.feed.freq_range.min_val; - tmpVal->sensor.feed.freq_range.values.length(val.sensor.feed.freq_range.values.size()); - for (unsigned int i=0; 
isensor.feed.freq_range.values[i] = val.sensor.feed.freq_range.values[i]; - } - tmpVal->ext_path_delays.length(val.ext_path_delays.size()); - for (unsigned int i=0; iext_path_delays[i].freq = val.ext_path_delays[i].freq; - tmpVal->ext_path_delays[i].delay_ns = val.ext_path_delays[i].delay_ns; - } - tmpVal->capabilities.freq_range.min_val = val.capabilities.freq_range.min_val; - tmpVal->capabilities.freq_range.max_val = val.capabilities.freq_range.max_val; - tmpVal->capabilities.bw_range.min_val = val.capabilities.bw_range.min_val; - tmpVal->capabilities.bw_range.max_val = val.capabilities.bw_range.max_val; - tmpVal->additional_info = val.additional_info; - return tmpVal; - }; - inline RFInfoPkt returnRFInfoPkt(const FRONTEND::RFInfoPkt &tmpVal) { - RFInfoPkt val; - val.rf_flow_id = ossie::corba::returnString(tmpVal.rf_flow_id); - val.rf_center_freq = tmpVal.rf_center_freq; - val.rf_bandwidth = tmpVal.rf_bandwidth; - val.if_center_freq = tmpVal.if_center_freq; - val.spectrum_inverted = tmpVal.spectrum_inverted; - val.sensor.collector = ossie::corba::returnString(tmpVal.sensor.collector); - val.sensor.mission = ossie::corba::returnString(tmpVal.sensor.mission); - val.sensor.rx = ossie::corba::returnString(tmpVal.sensor.rx); - val.sensor.antenna.description = ossie::corba::returnString(tmpVal.sensor.antenna.description); - val.sensor.antenna.name = ossie::corba::returnString(tmpVal.sensor.antenna.name); - val.sensor.antenna.size = ossie::corba::returnString(tmpVal.sensor.antenna.size); - val.sensor.antenna.type = ossie::corba::returnString(tmpVal.sensor.antenna.type); - val.sensor.feed.name = ossie::corba::returnString(tmpVal.sensor.feed.name); - val.sensor.feed.polarization = ossie::corba::returnString(tmpVal.sensor.feed.polarization); - val.sensor.feed.freq_range.max_val = tmpVal.sensor.feed.freq_range.max_val; - val.sensor.feed.freq_range.min_val = tmpVal.sensor.feed.freq_range.min_val; - 
val.sensor.feed.freq_range.values.resize(tmpVal.sensor.feed.freq_range.values.length()); - for (unsigned int i=0; isource_id = CORBA::string_dup(val.source_id.c_str()); - tmpVal->rf_flow_id = CORBA::string_dup(val.rf_flow_id.c_str()); - tmpVal->mode = CORBA::string_dup(val.mode.c_str()); - tmpVal->fom = val.fom; - tmpVal->tfom = val.tfom; - tmpVal->datumID = val.datumID; - tmpVal->time_offset = val.time_offset; - tmpVal->freq_offset = val.freq_offset; - tmpVal->time_variance = val.time_variance; - tmpVal->freq_variance = val.freq_variance; - tmpVal->satellite_count = val.satellite_count; - tmpVal->snr = val.snr; - tmpVal->status_message = CORBA::string_dup(val.status_message.c_str()); - tmpVal->timestamp = val.timestamp; - tmpVal->additional_info = val.additional_info; - return tmpVal; - }; - inline frontend::GPSInfo returnGPSInfo(const FRONTEND::GPSInfo &tmpVal) { - frontend::GPSInfo val; - val.source_id = ossie::corba::returnString(tmpVal.source_id); - val.rf_flow_id = ossie::corba::returnString(tmpVal.rf_flow_id); - val.mode = ossie::corba::returnString(tmpVal.mode); - val.fom = tmpVal.fom; - val.tfom = tmpVal.tfom; - val.datumID = tmpVal.datumID; - val.time_offset = tmpVal.time_offset; - val.freq_offset = tmpVal.freq_offset; - val.time_variance = tmpVal.time_variance; - val.freq_variance = tmpVal.freq_variance; - val.satellite_count = tmpVal.satellite_count; - val.snr = tmpVal.snr; - val.status_message = ossie::corba::returnString(tmpVal.status_message); - val.timestamp = tmpVal.timestamp; - val.additional_info = tmpVal.additional_info; - return val; - }; + FRONTEND::RFInfoPkt getRFInfoPkt(const RFInfoPkt &val); + FRONTEND::RFInfoPkt* returnRFInfoPkt(const RFInfoPkt &val); + RFInfoPkt returnRFInfoPkt(const FRONTEND::RFInfoPkt &tmpVal); + FRONTEND::GPSInfo* returnGPSInfo(const frontend::GPSInfo &val); + frontend::GPSInfo returnGPSInfo(const FRONTEND::GPSInfo &tmpVal); + FRONTEND::GpsTimePos* returnGpsTimePos(const frontend::GpsTimePos &val); + 
frontend::GpsTimePos returnGpsTimePos(const FRONTEND::GpsTimePos &tmpVal); + FRONTEND::NavigationPacket* returnNavigationPacket(const frontend::NavigationPacket &val); + frontend::NavigationPacket returnNavigationPacket(const FRONTEND::NavigationPacket &tmpVal); + void copyRFInfoPktSequence(const RFInfoPktSequence &src, FRONTEND::RFInfoPktSequence &dest); + void copyRFInfoPktSequence(const FRONTEND::RFInfoPktSequence &src, RFInfoPktSequence &dest ); - inline FRONTEND::GpsTimePos* returnGpsTimePos(const frontend::GpsTimePos &val) { - FRONTEND::GpsTimePos* tmpVal = new FRONTEND::GpsTimePos(); - tmpVal->position.valid = val.position.valid; - tmpVal->position.datum = CORBA::string_dup(val.position.datum.c_str()); - tmpVal->position.lat = val.position.lat; - tmpVal->position.lon = val.position.lon; - tmpVal->position.alt = val.position.alt; - tmpVal->timestamp = val.timestamp; - return tmpVal; - }; - inline frontend::GpsTimePos returnGpsTimePos(const FRONTEND::GpsTimePos &tmpVal) { - frontend::GpsTimePos val; - val.position.valid = tmpVal.position.valid; - val.position.datum = ossie::corba::returnString(tmpVal.position.datum); - val.position.lat = tmpVal.position.lat; - val.position.lon = tmpVal.position.lon; - val.position.alt = tmpVal.position.alt; - val.timestamp = tmpVal.timestamp; - return val; - }; - inline FRONTEND::NavigationPacket* returnNavigationPacket(const frontend::NavigationPacket &val) { - FRONTEND::NavigationPacket* tmpVal = new FRONTEND::NavigationPacket(); - tmpVal->source_id = CORBA::string_dup(val.source_id.c_str()); - tmpVal->rf_flow_id = CORBA::string_dup(val.rf_flow_id.c_str()); - tmpVal->position.valid = val.position.valid; - tmpVal->position.datum = CORBA::string_dup(val.position.datum.c_str()); - tmpVal->position.lat = val.position.lat; - tmpVal->position.lon = val.position.lon; - tmpVal->position.alt = val.position.alt; - tmpVal->cposition.valid = val.cposition.valid; - tmpVal->cposition.datum = CORBA::string_dup(val.cposition.datum.c_str()); 
- tmpVal->cposition.x = val.cposition.x; - tmpVal->cposition.y = val.cposition.y; - tmpVal->cposition.z = val.cposition.z; - tmpVal->velocity.valid = val.velocity.valid; - tmpVal->velocity.datum = CORBA::string_dup(val.velocity.datum.c_str()); - tmpVal->velocity.coordinate_system = CORBA::string_dup(val.velocity.coordinate_system.c_str()); - tmpVal->velocity.x = val.velocity.x; - tmpVal->velocity.y = val.velocity.y; - tmpVal->velocity.z = val.velocity.z; - tmpVal->acceleration.valid = val.acceleration.valid; - tmpVal->acceleration.datum = CORBA::string_dup(val.acceleration.datum.c_str()); - tmpVal->acceleration.coordinate_system = CORBA::string_dup(val.acceleration.coordinate_system.c_str()); - tmpVal->acceleration.x = val.acceleration.x; - tmpVal->acceleration.y = val.acceleration.y; - tmpVal->acceleration.z = val.acceleration.z; - tmpVal->attitude.valid = val.attitude.valid; - tmpVal->attitude.pitch = val.attitude.pitch; - tmpVal->attitude.yaw = val.attitude.yaw; - tmpVal->attitude.roll = val.attitude.roll; - tmpVal->timestamp = val.timestamp; - tmpVal->additional_info = val.additional_info; - return tmpVal; - }; - inline frontend::NavigationPacket returnNavigationPacket(const FRONTEND::NavigationPacket &tmpVal) { - frontend::NavigationPacket val; - val.source_id = ossie::corba::returnString(tmpVal.source_id); - val.rf_flow_id = ossie::corba::returnString(tmpVal.rf_flow_id); - val.position.valid = tmpVal.position.valid; - val.position.datum = ossie::corba::returnString(tmpVal.position.datum); - val.position.lat = tmpVal.position.lat; - val.position.lon = tmpVal.position.lon; - val.position.alt = tmpVal.position.alt; - val.cposition.valid = tmpVal.cposition.valid; - val.cposition.datum = ossie::corba::returnString(tmpVal.cposition.datum); - val.cposition.x = tmpVal.cposition.x; - val.cposition.y = tmpVal.cposition.y; - val.cposition.z = tmpVal.cposition.z; - val.velocity.valid = tmpVal.velocity.valid; - val.velocity.datum = 
ossie::corba::returnString(tmpVal.velocity.datum); - val.velocity.coordinate_system = ossie::corba::returnString(tmpVal.velocity.coordinate_system); - val.velocity.x = tmpVal.velocity.x; - val.velocity.y = tmpVal.velocity.y; - val.velocity.z = tmpVal.velocity.z; - val.acceleration.valid = tmpVal.acceleration.valid; - val.acceleration.datum = ossie::corba::returnString(tmpVal.acceleration.datum); - val.acceleration.coordinate_system = ossie::corba::returnString(tmpVal.acceleration.coordinate_system); - val.acceleration.x = tmpVal.acceleration.x; - val.acceleration.y = tmpVal.acceleration.y; - val.acceleration.z = tmpVal.acceleration.z; - val.attitude.valid = tmpVal.attitude.valid; - val.attitude.pitch = tmpVal.attitude.pitch; - val.attitude.yaw = tmpVal.attitude.yaw; - val.attitude.roll = tmpVal.attitude.roll; - val.timestamp = tmpVal.timestamp; - val.additional_info = tmpVal.additional_info; - return val; - }; + FRONTEND::ScanningTuner::ScanStatus* returnScanStatus(const frontend::ScanStatus &val); + frontend::ScanStatus returnScanStatus(const FRONTEND::ScanningTuner::ScanStatus &tmpVal); + FRONTEND::ScanningTuner::ScanStrategy* returnScanStrategy(const frontend::ScanStrategy &val); + frontend::ScanStrategy* returnScanStrategy(const FRONTEND::ScanningTuner::ScanStrategy &tmpVal); + // ---------------------------------------------------------------------------------------- // OutFrontendPort declaration // ---------------------------------------------------------------------------------------- @@ -267,6 +89,50 @@ namespace frontend { ~OutFrontendPort(){ } + std::vector getConnectionIds() + { + std::vector retval; + for (unsigned int i = 0; i < outConnections.size(); i++) { + retval.push_back(outConnections[i].second); + } + return retval; + }; + + void __evaluateRequestBasedOnConnections(const std::string &__connection_id__, bool returnValue, bool inOut, bool out) { + if (__connection_id__.empty() and (this->outConnections.size() > 1)) { + if (out or inOut or 
returnValue) { + throw redhawk::PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", + getConnectionIds()); + } + } + if (this->outConnections.empty()) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("No connections available.", std::vector()); + } else { + if (not __connection_id__.empty()) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + if ((not __connection_id__.empty()) and (not this->outConnections.empty())) { + bool foundConnection = false; + typename std::vector < std::pair < PortType_var, std::string > >::iterator i; + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (i->second == __connection_id__) { + foundConnection = true; + break; + } + } + if (not foundConnection) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + ExtendedCF::UsesConnectionSequence * connections() { boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in diff --git a/frontendInterfaces/libsrc/cpp/fe_rfinfo_port_impl.h b/frontendInterfaces/libsrc/cpp/fe_rfinfo_port_impl.h index b93a003c1..6a2981463 100644 --- a/frontendInterfaces/libsrc/cpp/fe_rfinfo_port_impl.h +++ b/frontendInterfaces/libsrc/cpp/fe_rfinfo_port_impl.h @@ -93,46 +93,106 @@ namespace frontend { OutRFInfoPortT(std::string port_name) : OutFrontendPort(port_name) {}; ~OutRFInfoPortT(){}; - + + std::vector getConnectionIds() + { + std::vector retval; + for (unsigned int i = 0; i < this->outConnections.size(); i++) { + retval.push_back(this->outConnections[i].second); + } + return retval; + }; + void __evaluateRequestBasedOnConnections(const std::string 
&__connection_id__, bool returnValue, bool inOut, bool out) { + if (__connection_id__.empty() and (this->outConnections.size() > 1)) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", + getConnectionIds()); + } + } + if (this->outConnections.empty()) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("No connections available.", std::vector()); + } else { + if (not __connection_id__.empty()) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + if ((not __connection_id__.empty()) and (not this->outConnections.empty())) { + bool foundConnection = false; + typename std::vector < std::pair < PortType_var, std::string > >::iterator i; + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (i->second == __connection_id__) { + foundConnection = true; + break; + } + } + if (not foundConnection) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } std::string rf_flow_id() { + return _get_rf_flow_id(""); + }; + std::string _get_rf_flow_id(const std::string __connection_id__) { CORBA::String_var retval; - typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + typename std::vector < std::pair < PortType_var, std::string > >::iterator i; if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + 
continue; retval = ((*i).first)->rf_flow_id(); } } std::string str_retval = ossie::corba::returnString(retval); return str_retval; }; - void rf_flow_id(std::string &data) { + void rf_flow_id(std::string &data, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; ((*i).first)->rf_flow_id(data.c_str()); } } return; }; frontend::RFInfoPkt rfinfo_pkt() { + return _get_rfinfo_pkt(""); + }; + frontend::RFInfoPkt _get_rfinfo_pkt(const std::string __connection_id__ = "") { frontend::RFInfoPkt retval; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; const FRONTEND::RFInfoPkt_var tmp = ((*i).first)->rfinfo_pkt(); retval = frontend::returnRFInfoPkt(tmp); } } return retval; }; - void rfinfo_pkt(frontend::RFInfoPkt data) { + void rfinfo_pkt(frontend::RFInfoPkt data, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = 
this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; const FRONTEND::RFInfoPkt_var tmp = frontend::returnRFInfoPkt(data); ((*i).first)->rfinfo_pkt(tmp); } diff --git a/frontendInterfaces/libsrc/cpp/fe_rfsource_port_impl.cpp b/frontendInterfaces/libsrc/cpp/fe_rfsource_port_impl.cpp index 80f91b903..7f29930bc 100644 --- a/frontendInterfaces/libsrc/cpp/fe_rfsource_port_impl.cpp +++ b/frontendInterfaces/libsrc/cpp/fe_rfsource_port_impl.cpp @@ -34,62 +34,90 @@ namespace frontend { { } - FRONTEND::RFInfoPktSequence* OutRFSourcePort::available_rf_inputs() + RFInfoPktSequence OutRFSourcePort::available_rf_inputs() { - FRONTEND::RFInfoPktSequence_var retval = new FRONTEND::RFInfoPktSequence(); + return _get_available_rf_inputs(""); + } + RFInfoPktSequence OutRFSourcePort::_get_available_rf_inputs(const std::string __connection_id__) + { + FRONTEND::RFInfoPktSequence_var _retval; + RFInfoPktSequence retval; std::vector < std::pair < FRONTEND::RFSource_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in - - if (active) { - for (i = outConnections.begin(); i != outConnections.end(); ++i) { - retval = ((*i).first)->available_rf_inputs(); + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + if (this->active) { + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; + _retval = ((*i).first)->available_rf_inputs(); } + frontend::copyRFInfoPktSequence(_retval.in(), retval); } - return retval._retn(); + return retval; } - void OutRFSourcePort::available_rf_inputs(const FRONTEND::RFInfoPktSequence& data) + void OutRFSourcePort::available_rf_inputs(const RFInfoPktSequence& data, const std::string __connection_id__) { std::vector < std::pair < 
FRONTEND::RFSource_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in - - if (active) { - for (i = outConnections.begin(); i != outConnections.end(); ++i) { - ((*i).first)->available_rf_inputs(data); + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + + if (this->active) { + FRONTEND::RFInfoPktSequence_var _data = new FRONTEND::RFInfoPktSequence(); + frontend::copyRFInfoPktSequence(data, _data); + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; + ((*i).first)->available_rf_inputs(_data); } } return; } - FRONTEND::RFInfoPkt* OutRFSourcePort::current_rf_input() + RFInfoPkt *OutRFSourcePort::current_rf_input() { - FRONTEND::RFInfoPkt_var retval = new FRONTEND::RFInfoPkt(); + return _get_current_rf_input(""); + } + RFInfoPkt *OutRFSourcePort::_get_current_rf_input(const std::string __connection_id__) + { + FRONTEND::RFInfoPkt_var _retval; + RFInfoPkt *retval = 0; std::vector < std::pair < FRONTEND::RFSource_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); - if (active) { - for (i = outConnections.begin(); i != outConnections.end(); ++i) { - retval = ((*i).first)->current_rf_input(); + if (this->active) { + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; + _retval = ((*i).first)->current_rf_input(); } + RFInfoPkt __retval; + __retval = frontend::returnRFInfoPkt(_retval); + retval= new RFInfoPkt(__retval); } - return retval._retn(); + return retval; } - void OutRFSourcePort::current_rf_input(const FRONTEND::RFInfoPkt& data) + void 
OutRFSourcePort::current_rf_input(const RFInfoPkt& data, const std::string __connection_id__) { std::vector < std::pair < FRONTEND::RFSource_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in - - if (active) { - for (i = outConnections.begin(); i != outConnections.end(); ++i) { - ((*i).first)->current_rf_input(data); + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + + if (this->active) { + FRONTEND::RFInfoPkt_var _data = frontend::returnRFInfoPkt(data); + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; + ((*i).first)->current_rf_input(_data); } } diff --git a/frontendInterfaces/libsrc/cpp/fe_rfsource_port_impl.h b/frontendInterfaces/libsrc/cpp/fe_rfsource_port_impl.h index bdf8189ed..b07ed0edd 100644 --- a/frontendInterfaces/libsrc/cpp/fe_rfsource_port_impl.h +++ b/frontendInterfaces/libsrc/cpp/fe_rfsource_port_impl.h @@ -102,10 +102,12 @@ namespace frontend { OutRFSourcePort(std::string port_name); ~OutRFSourcePort(); - FRONTEND::RFInfoPktSequence* available_rf_inputs(); - void available_rf_inputs(const FRONTEND::RFInfoPktSequence& data); - FRONTEND::RFInfoPkt* current_rf_input(); - void current_rf_input(const FRONTEND::RFInfoPkt& data);; + frontend::RFInfoPktSequence available_rf_inputs(); + frontend::RFInfoPktSequence _get_available_rf_inputs(const std::string __connection_id__); + void available_rf_inputs(const RFInfoPktSequence& data, const std::string __connection_id__ = ""); + frontend::RFInfoPkt *current_rf_input(); + frontend::RFInfoPkt *_get_current_rf_input(const std::string __connection_id__); + void current_rf_input(const RFInfoPkt& data, const std::string __connection_id__ = ""); }; } // end of frontend namespace diff --git a/frontendInterfaces/libsrc/cpp/fe_tuner_device.cpp 
b/frontendInterfaces/libsrc/cpp/fe_tuner_device.cpp index a93b3be4e..42016eaed 100644 --- a/frontendInterfaces/libsrc/cpp/fe_tuner_device.cpp +++ b/frontendInterfaces/libsrc/cpp/fe_tuner_device.cpp @@ -24,7 +24,6 @@ namespace frontend { template < typename TunerStatusStructType > PREPARE_ALT_LOGGING(FrontendTunerDevice, FrontendTunerDevice ); - /* validateRequestVsSRI is a helper function to check that the input data stream can support * the allocation request. The output mode (true if complex output) is used when determining * the necessary sample rate required to satisfy the request. The entire frequency band of the @@ -180,8 +179,13 @@ namespace frontend { // check device constraints // see if IF center frequency is set in rfinfo packet double request_if_center_freq = request.center_frequency; - if(request.tuner_type != "TX" && floatingPointCompare(rfinfo.if_center_freq,0) > 0 && floatingPointCompare(rfinfo.rf_center_freq,rfinfo.if_center_freq) > 0) - request_if_center_freq = request.center_frequency - (rfinfo.rf_center_freq-rfinfo.if_center_freq); + if(request.tuner_type != "TX" && floatingPointCompare(rfinfo.if_center_freq,0) > 0 && floatingPointCompare(rfinfo.rf_center_freq,rfinfo.if_center_freq) > 0) { + if (rfinfo.spectrum_inverted) { + request_if_center_freq = rfinfo.if_center_freq - (request.center_frequency - rfinfo.rf_center_freq); + } else { + request_if_center_freq = rfinfo.if_center_freq + (request.center_frequency - rfinfo.rf_center_freq); + } + } // check vs. 
device center freq capability (ensure 0 <= request <= max device capability) if ( !validateRequest(min_device_center_freq,max_device_center_freq,request_if_center_freq) ) { @@ -263,6 +267,92 @@ namespace frontend { tuner_allocation_ids.clear(); } + template < typename TunerStatusStructType > + FrontendScanningTunerDevice::FrontendScanningTunerDevice(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : + FrontendTunerDevice(devMgr_ior, id, lbl, sftwrPrfl) + { + construct(); + } + + template < typename TunerStatusStructType > + FrontendScanningTunerDevice::FrontendScanningTunerDevice(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : + FrontendTunerDevice(devMgr_ior, id, lbl, sftwrPrfl, compDev) + { + construct(); + } + + template < typename TunerStatusStructType > + FrontendScanningTunerDevice::FrontendScanningTunerDevice(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : + FrontendTunerDevice(devMgr_ior, id, lbl, sftwrPrfl, capacities) + { + construct(); + } + + template < typename TunerStatusStructType > + FrontendScanningTunerDevice::FrontendScanningTunerDevice(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : + FrontendTunerDevice(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev) + { + construct(); + } + + template < typename TunerStatusStructType > + void FrontendScanningTunerDevice::construct() + { + Resource_impl::_started = false; + loadProperties(); + } + + template < typename TunerStatusStructType > + bool FrontendScanningTunerDevice::callDeviceSetTuning(size_t tuner_id) { + if (this->_has_scanner) { + return deviceSetTuningScan(FrontendTunerDevice::frontend_tuner_allocation, frontend_scanner_allocation, FrontendTunerDevice::frontend_tuner_status[tuner_id], tuner_id); + } + return deviceSetTuning(FrontendTunerDevice::frontend_tuner_allocation, FrontendTunerDevice::frontend_tuner_status[tuner_id], tuner_id); + } + + template < typename 
TunerStatusStructType > + void FrontendScanningTunerDevice::loadProperties() + { + FrontendTunerDevice::loadProperties(); + this->addProperty(frontend_scanner_allocation, + frontend::frontend_scanner_allocation_struct(), + "FRONTEND::scanner_allocation", + "frontend_scanner_allocation", + "writeonly", + "", + "external", + "allocation"); + } + + template < typename TunerStatusStructType > + void FrontendScanningTunerDevice::checkValidIds(const CF::Properties & capacities) { + this->_has_scanner = false; + for (unsigned int ii = 0; ii < capacities.length(); ++ii) { + const std::string id = (const char*) capacities[ii].id; + if (id != "FRONTEND::tuner_allocation" && id != "FRONTEND::listener_allocation" && id != "FRONTEND::scanner_allocation"){ + throw CF::Device::InvalidCapacity("UNKNOWN ALLOCATION PROPERTY1", capacities); + } + if (id == "FRONTEND::scanner_allocation") { + this->_has_scanner = true; + } + PropertyInterface* property = this->getPropertyFromId(id); + if(!property){ + throw CF::Device::InvalidCapacity("UNKNOWN PROPERTY", capacities); + } + try{ + property->setValue(capacities[ii].value); + } + catch(const std::logic_error &e){ + throw CF::Device::InvalidCapacity("COULD NOT PARSE CAPACITY", capacities); + }; + } + } + + template < typename TunerStatusStructType > + FrontendScanningTunerDevice::~FrontendScanningTunerDevice() + { + } + /******************************************************************************************* Framework-level functions These functions are generally called by the framework to perform housekeeping. 
@@ -293,7 +383,7 @@ namespace frontend { frontend::frontend_tuner_allocation_struct(), "FRONTEND::tuner_allocation", "frontend_tuner_allocation", - "readwrite", + "writeonly", "", "external", "allocation"); @@ -302,7 +392,7 @@ namespace frontend { frontend::frontend_listener_allocation_struct(), "FRONTEND::listener_allocation", "frontend_listener_allocation", - "readwrite", + "writeonly", "", "external", "allocation"); @@ -319,7 +409,6 @@ namespace frontend { template < typename TunerStatusStructType > std::string FrontendTunerDevice::createAllocationIdCsv(size_t tuner_id){ - //LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); std::string alloc_id_csv = ""; // ensure control allocation_id is first in list if (!tuner_allocation_ids[tuner_id].control_allocation_id.empty()) @@ -346,7 +435,6 @@ namespace frontend { /*****************************************************************/ template < typename TunerStatusStructType > CF::Device::UsageType FrontendTunerDevice::updateUsageState() { - //LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); size_t tunerAllocated = 0; for (size_t tuner_id = 0; tuner_id < tuner_allocation_ids.size(); tuner_id++) { if (!tuner_allocation_ids[tuner_id].control_allocation_id.empty()) @@ -362,42 +450,58 @@ namespace frontend { return CF::Device::ACTIVE; } + template < typename TunerStatusStructType > + bool FrontendTunerDevice::callDeviceSetTuning(size_t tuner_id) { + return deviceSetTuning(frontend_tuner_allocation, frontend_tuner_status[tuner_id], tuner_id); + } + + template < typename TunerStatusStructType > + void FrontendTunerDevice::checkValidIds(const CF::Properties & capacities) { + for (unsigned int ii = 0; ii < capacities.length(); ++ii) { + const std::string id = (const char*) capacities[ii].id; + if (id == "FRONTEND::scanner_allocation"){ + throw CF::Device::InvalidCapacity("FRONTEND::scanner_allocation found in allocation; this is not a scanning device", capacities); + } + } + for (unsigned int ii = 0; ii < 
capacities.length(); ++ii) { + const std::string id = (const char*) capacities[ii].id; + if (id != "FRONTEND::tuner_allocation" && id != "FRONTEND::listener_allocation"){ + throw CF::Device::InvalidCapacity("Invalid allocation property", capacities); + } + PropertyInterface* property = this->getPropertyFromId(id); + if(!property){ + throw CF::Device::InvalidCapacity("UNKNOWN PROPERTY", capacities); + } + try{ + property->setValue(capacities[ii].value); + } + catch(const std::logic_error &e){ + throw CF::Device::InvalidCapacity("COULD NOT PARSE CAPACITY", capacities); + }; + } + } + template < typename TunerStatusStructType > CORBA::Boolean FrontendTunerDevice::allocateCapacity(const CF::Properties & capacities) throw (CORBA::SystemException, CF::Device::InvalidCapacity, CF::Device::InvalidState) { if (this->tuner_allocation_ids.size() != this->frontend_tuner_status.size()) { this->tuner_allocation_ids.resize(this->frontend_tuner_status.size()); } - LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); + RH_TRACE(_deviceLog,__PRETTY_FUNCTION__); + checkValidIds(capacities); CORBA::ULong ii; try{ for (ii = 0; ii < capacities.length(); ++ii) { const std::string id = (const char*) capacities[ii].id; - if (id != "FRONTEND::tuner_allocation" && id != "FRONTEND::listener_allocation"){ - LOG_DEBUG(FrontendTunerDevice, "UNKNOWN ALLOCATION PROPERTY1"); - throw CF::Device::InvalidCapacity("UNKNOWN ALLOCATION PROPERTY1", capacities); - } - PropertyInterface* property = getPropertyFromId(id); - if(!property){ - LOG_DEBUG(FrontendTunerDevice, "UNKNOWN PROPERTY"); - throw CF::Device::InvalidCapacity("UNKNOWN PROPERTY", capacities); - } - try{ - property->setValue(capacities[ii].value); - } - catch(const std::logic_error &e){ - LOG_DEBUG(FrontendTunerDevice, "COULD NOT PARSE CAPACITY: " << e.what()); - throw CF::Device::InvalidCapacity("COULD NOT PARSE CAPACITY", capacities); - }; if (id == "FRONTEND::tuner_allocation"){ // Check allocation_id if 
(frontend_tuner_allocation.allocation_id.empty()) { - LOG_INFO(FrontendTunerDevice,"allocateCapacity: MISSING ALLOCATION_ID"); + RH_INFO(_deviceLog,"allocateCapacity: MISSING ALLOCATION_ID"); throw CF::Device::InvalidCapacity("MISSING ALLOCATION_ID", capacities); } // Check if allocation ID has already been used if(getTunerMapping(frontend_tuner_allocation.allocation_id) >= 0){ - LOG_INFO(FrontendTunerDevice,"allocateCapacity: ALLOCATION_ID ALREADY IN USE: [" << frontend_tuner_allocation.allocation_id << "]"); + RH_INFO(_deviceLog,"allocateCapacity: ALLOCATION_ID ALREADY IN USE: [" << frontend_tuner_allocation.allocation_id << "]"); throw AllocationAlreadyExists("ALLOCATION_ID ALREADY IN USE", capacities); } @@ -407,13 +511,13 @@ namespace frontend { // Next, try to allocate a new tuner for (size_t tuner_id = 0; tuner_id < tuner_allocation_ids.size(); tuner_id++) { if(frontend_tuner_status[tuner_id].tuner_type != frontend_tuner_allocation.tuner_type) { - LOG_DEBUG(FrontendTunerDevice, + RH_DEBUG(_deviceLog, "allocateCapacity: Requested tuner type '"<, + RH_DEBUG(_deviceLog, "allocateCapacity: Requested group_id '"<, + RH_DEBUG(_deviceLog, "allocateCapacity: Requested rf_flow_id '"<, + RH_DEBUG(_deviceLog, "allocateCapacity: Tuner["<, eout.str()); + RH_DEBUG(_deviceLog, eout.str()); throw CF::Device::InvalidCapacity(eout.str().c_str(), capacities); } // listener if(tuner_allocation_ids[tuner_id].control_allocation_id.empty() || !listenerRequestValidation(frontend_tuner_allocation, tuner_id)){ // either not allocated or can't support listener request - LOG_DEBUG(FrontendTunerDevice, + RH_DEBUG(_deviceLog, "allocateCapacity: Tuner["<, std::fixed << " allocateCapacity - SR requested: " << frontend_tuner_allocation.sample_rate + RH_DEBUG(_deviceLog, std::fixed << " allocateCapacity - SR requested: " << frontend_tuner_allocation.sample_rate << " SR got: " << frontend_tuner_status[tuner_id].sample_rate); if( 
(floatingPointCompare(frontend_tuner_allocation.sample_rate,0)!=0) && (floatingPointCompare(frontend_tuner_status[tuner_id].sample_rate,frontend_tuner_allocation.sample_rate)<0 || floatingPointCompare(frontend_tuner_status[tuner_id].sample_rate,frontend_tuner_allocation.sample_rate+frontend_tuner_allocation.sample_rate * frontend_tuner_allocation.sample_rate_tolerance/100.0)>0 )){ std::ostringstream eout; eout<, eout.str()); + RH_INFO(_deviceLog, eout.str()); throw std::logic_error(eout.str().c_str()); } - LOG_DEBUG(FrontendTunerDevice, std::fixed << " allocateCapacity - BW requested: " << frontend_tuner_allocation.bandwidth + RH_DEBUG(_deviceLog, std::fixed << " allocateCapacity - BW requested: " << frontend_tuner_allocation.bandwidth << " BW got: " << frontend_tuner_status[tuner_id].bandwidth); // Only check when bandwidth was not set to don't care if( (floatingPointCompare(frontend_tuner_allocation.bandwidth,0)!=0) && @@ -493,7 +597,7 @@ namespace frontend { floatingPointCompare(frontend_tuner_status[tuner_id].bandwidth,frontend_tuner_allocation.bandwidth+frontend_tuner_allocation.bandwidth * frontend_tuner_allocation.bandwidth_tolerance/100.0)>0 )){ std::ostringstream eout; eout<, eout.str()); + RH_INFO(_deviceLog, eout.str()); throw std::logic_error(eout.str().c_str()); } @@ -504,7 +608,7 @@ namespace frontend { } catch(...){ std::ostringstream eout; eout<<"allocateCapacity: Failed to enable tuner after allocation"; - LOG_INFO(FrontendTunerDevice, eout.str()); + RH_INFO(_deviceLog, eout.str()); throw std::logic_error(eout.str().c_str()); } } @@ -514,17 +618,17 @@ namespace frontend { // if we made it here, we failed to find an available tuner std::ostringstream eout; eout<<"allocateCapacity: NO AVAILABLE TUNER. 
Make sure that the device has an initialized frontend_tuner_status"; - LOG_INFO(FrontendTunerDevice, eout.str()); + RH_INFO(_deviceLog, eout.str()); throw std::logic_error(eout.str().c_str()); } else if (id == "FRONTEND::listener_allocation") { // Check validity of allocation_id's if (frontend_listener_allocation.existing_allocation_id.empty()){ - LOG_INFO(FrontendTunerDevice,"allocateCapacity: MISSING EXISTING ALLOCATION ID"); + RH_INFO(_deviceLog,"allocateCapacity: MISSING EXISTING ALLOCATION ID"); throw CF::Device::InvalidCapacity("MISSING EXISTING ALLOCATION ID", capacities); } if (frontend_listener_allocation.listener_allocation_id.empty()){ - LOG_INFO(FrontendTunerDevice,"allocateCapacity: MISSING LISTENER ALLOCATION ID"); + RH_INFO(_deviceLog,"allocateCapacity: MISSING LISTENER ALLOCATION ID"); throw CF::Device::InvalidCapacity("MISSING LISTENER ALLOCATION ID", capacities); } @@ -532,13 +636,13 @@ namespace frontend { // Check if listener allocation ID has already been used if(getTunerMapping(frontend_listener_allocation.listener_allocation_id) >= 0){ - LOG_INFO(FrontendTunerDevice,"allocateCapacity: LISTENER ALLOCATION ID ALREADY IN USE: [" << frontend_listener_allocation.listener_allocation_id << "]"); + RH_INFO(_deviceLog,"allocateCapacity: LISTENER ALLOCATION ID ALREADY IN USE: [" << frontend_listener_allocation.listener_allocation_id << "]"); throw AllocationAlreadyExists("LISTENER ALLOCATION ID ALREADY IN USE", capacities); } // Do not allocate if existing allocation ID does not exist long tuner_id = getTunerMapping(frontend_listener_allocation.existing_allocation_id); if (tuner_id < 0){ - LOG_INFO(FrontendTunerDevice,"allocateCapacity: UNKNOWN CONTROL ALLOCATION ID: ["<< frontend_listener_allocation.existing_allocation_id <<"]"); + RH_INFO(_deviceLog,"allocateCapacity: UNKNOWN CONTROL ALLOCATION ID: ["<< frontend_listener_allocation.existing_allocation_id <<"]"); throw FRONTEND::BadParameterException("UNKNOWN CONTROL ALLOCATION ID"); } @@ -546,7 
+650,7 @@ namespace frontend { if(frontend_tuner_status[tuner_id].tuner_type == "CHANNELIZER" || frontend_tuner_status[tuner_id].tuner_type == "TX"){ std::ostringstream eout; eout<<"allocateCapacity: listener allocations are not permitted for " << std::string(frontend_tuner_status[tuner_id].tuner_type) << " tuner type"; - LOG_DEBUG(FrontendTunerDevice, eout.str()); + RH_DEBUG(_deviceLog, eout.str()); throw CF::Device::InvalidCapacity(eout.str().c_str(), capacities); } @@ -555,9 +659,10 @@ namespace frontend { frontend_tuner_status[tuner_id].allocation_id_csv = createAllocationIdCsv(tuner_id); this->assignListener(frontend_listener_allocation.listener_allocation_id,frontend_listener_allocation.existing_allocation_id); return true; - } - else { - LOG_INFO(FrontendTunerDevice,"allocateCapacity: UNKNOWN ALLOCATION PROPERTY2"); + } else if (id == "FRONTEND::scanner_allocation") { + continue; + } else { + RH_INFO(_deviceLog,"allocateCapacity: UNKNOWN ALLOCATION PROPERTY2"); throw CF::Device::InvalidCapacity("UNKNOWN ALLOCATION PROPERTY2", capacities); } } @@ -595,69 +700,96 @@ namespace frontend { return true; } + /* This sets the number of entries in the frontend_tuner_status struct sequence property + * as well as the tuner_allocation_ids vector. Call this function during initialization + */ + template < typename TunerStatusStructType > + void FrontendTunerDevice::setNumChannels(size_t num) + { + this->setNumChannels(num, "RX_DIGITIZER"); + } + + /* This sets the number of entries in the frontend_tuner_status struct sequence property + * as well as the tuner_allocation_ids vector. 
Call this function during initialization + */ + template < typename TunerStatusStructType > + void FrontendTunerDevice::setNumChannels(size_t num, std::string tuner_type) + { + frontend_tuner_status.clear(); + tuner_allocation_ids.clear(); + addChannels(num, tuner_type); + } + + template < typename TunerStatusStructType > + void FrontendTunerDevice::addChannels(size_t num, std::string tuner_type) { + tuner_allocation_ids.resize(tuner_allocation_ids.size()+num); + for (unsigned int i=0; i void FrontendTunerDevice::deallocateCapacity(const CF::Properties & capacities) throw (CORBA::SystemException, CF::Device::InvalidCapacity, CF::Device::InvalidState) { - LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); + RH_TRACE(_deviceLog,__PRETTY_FUNCTION__); for (CORBA::ULong ii = 0; ii < capacities.length(); ++ii) { try{ const std::string id = (const char*) capacities[ii].id; + if (id == "FRONTEND::scanner_allocation") + continue; if (id != "FRONTEND::tuner_allocation" && id != "FRONTEND::listener_allocation"){ - LOG_INFO(FrontendTunerDevice,"deallocateCapacity: UNKNOWN ALLOCATION PROPERTY"); + RH_INFO(_deviceLog,"deallocateCapacity: UNKNOWN ALLOCATION PROPERTY"); throw CF::Device::InvalidCapacity("UNKNOWN ALLOCATION PROPERTY", capacities); } PropertyInterface* property = getPropertyFromId(id); if(!property){ - LOG_INFO(FrontendTunerDevice,"deallocateCapacity: UNKNOWN PROPERTY"); + RH_INFO(_deviceLog,"deallocateCapacity: UNKNOWN PROPERTY"); throw CF::Device::InvalidCapacity("UNKNOWN PROPERTY", capacities); } try{ property->setValue(capacities[ii].value); } catch(const std::logic_error &e){ - LOG_DEBUG(FrontendTunerDevice, "COULD NOT PARSE CAPACITY: " << e.what()); + RH_DEBUG(_deviceLog, "COULD NOT PARSE CAPACITY: " << e.what()); throw CF::Device::InvalidCapacity("COULD NOT PARSE CAPACITY", capacities); }; if (id == "FRONTEND::tuner_allocation"){ - //LOG_DEBUG(FrontendTunerDevice,std::string(__PRETTY_FUNCTION__)+" tuner_allocation"); // Try to remove control of the device 
long tuner_id = getTunerMapping(frontend_tuner_allocation.allocation_id); if (tuner_id < 0){ - LOG_DEBUG(FrontendTunerDevice, "ALLOCATION_ID NOT FOUND: [" << frontend_tuner_allocation.allocation_id <<"]"); + RH_DEBUG(_deviceLog, "ALLOCATION_ID NOT FOUND: [" << frontend_tuner_allocation.allocation_id <<"]"); throw CF::Device::InvalidCapacity("ALLOCATION_ID NOT FOUND", capacities); } - //LOG_DEBUG(FrontendTunerDevice,std::string(__PRETTY_FUNCTION__)+" tuner_id = " << tuner_id); if(tuner_allocation_ids[tuner_id].control_allocation_id == frontend_tuner_allocation.allocation_id){ - //LOG_DEBUG(FrontendTunerDevice,std::string(__PRETTY_FUNCTION__)+" deallocating control for tuner_id = " << tuner_id); enableTuner(tuner_id, false); removeTunerMapping(tuner_id); frontend_tuner_status[tuner_id].allocation_id_csv = createAllocationIdCsv(tuner_id); } else { - //LOG_DEBUG(FrontendTunerDevice,std::string(__PRETTY_FUNCTION__)+" deallocating listener for tuner_id = " << tuner_id); // send EOS to listener connection only removeTunerMapping(tuner_id,frontend_tuner_allocation.allocation_id); frontend_tuner_status[tuner_id].allocation_id_csv = createAllocationIdCsv(tuner_id); } } else if (id == "FRONTEND::listener_allocation") { - //LOG_DEBUG(FrontendTunerDevice,std::string(__PRETTY_FUNCTION__)+" listener_allocation"); long tuner_id = getTunerMapping(frontend_listener_allocation.listener_allocation_id); if (tuner_id < 0){ - LOG_DEBUG(FrontendTunerDevice, "ALLOCATION_ID NOT FOUND: [" << frontend_listener_allocation.listener_allocation_id <<"]"); + RH_DEBUG(_deviceLog, "ALLOCATION_ID NOT FOUND: [" << frontend_listener_allocation.listener_allocation_id <<"]"); throw CF::Device::InvalidCapacity("ALLOCATION_ID NOT FOUND", capacities); } if (this->tuner_allocation_ids[tuner_id].control_allocation_id == frontend_listener_allocation.listener_allocation_id) { - LOG_DEBUG(FrontendTunerDevice, "Controlling allocation id cannot be used as a listener id: [" << 
frontend_listener_allocation.listener_allocation_id <<"]"); + RH_DEBUG(_deviceLog, "Controlling allocation id cannot be used as a listener id: [" << frontend_listener_allocation.listener_allocation_id <<"]"); throw CF::Device::InvalidCapacity("Controlling allocation id cannot be used as a listener id", capacities); } - //LOG_DEBUG(FrontendTunerDevice,std::string(__PRETTY_FUNCTION__)+" tuner_id = " << tuner_id); // send EOS to listener connection only removeTunerMapping(tuner_id,frontend_listener_allocation.listener_allocation_id); frontend_tuner_status[tuner_id].allocation_id_csv = createAllocationIdCsv(tuner_id); } else { - LOG_TRACE(FrontendTunerDevice,"WARNING: UNKNOWN ALLOCATION PROPERTY \""+ std::string(property->name) + "\". IGNORING!"); + RH_TRACE(_deviceLog,"WARNING: UNKNOWN ALLOCATION PROPERTY \""+ std::string(property->name) + "\". IGNORING!"); } } catch(CF::Device::InvalidCapacity &e){ @@ -666,7 +798,7 @@ namespace frontend { } catch(...){ _usageState = updateUsageState(); - LOG_DEBUG(FrontendTunerDevice,"ERROR WHEN DEALLOCATING. SKIPPING..."); + RH_DEBUG(_deviceLog,"ERROR WHEN DEALLOCATING. 
SKIPPING..."); } } _usageState = updateUsageState(); @@ -678,7 +810,6 @@ namespace frontend { template < typename TunerStatusStructType > bool FrontendTunerDevice::enableTuner(size_t tuner_id, bool enable) { - //LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); bool prev_enabled = frontend_tuner_status[tuner_id].enabled; @@ -698,7 +829,7 @@ namespace frontend { template < typename TunerStatusStructType > bool FrontendTunerDevice::listenerRequestValidation(frontend_tuner_allocation_struct &request, size_t tuner_id){ - LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); + RH_TRACE(_deviceLog,__PRETTY_FUNCTION__); // ensure requested values are non-negative if(floatingPointCompare(request.center_frequency,0)<0 || floatingPointCompare(request.bandwidth,0)<0 || floatingPointCompare(request.sample_rate,0)<0 || floatingPointCompare(request.bandwidth_tolerance,0)<0 || floatingPointCompare(request.sample_rate_tolerance,0)<0) @@ -707,14 +838,14 @@ namespace frontend { // ensure lower end of requested band fits //if((request.center_frequency - (request.bandwidth*0.5)) < (frontend_tuner_status[tuner_id].center_frequency - (frontend_tuner_status[tuner_id].bandwidth*0.5))){ if( floatingPointCompare((request.center_frequency-(request.bandwidth*0.5)),(frontend_tuner_status[tuner_id].center_frequency-(frontend_tuner_status[tuner_id].bandwidth*0.5))) < 0 ){ - LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__ << " FAILED LOWER END TEST"); + RH_TRACE(_deviceLog,__PRETTY_FUNCTION__ << " FAILED LOWER END TEST"); return false; } // ensure upper end of requested band fits //if((request.center_frequency + (request.bandwidth*0.5)) > (frontend_tuner_status[tuner_id].center_frequency + (frontend_tuner_status[tuner_id].bandwidth*0.5))){ if( floatingPointCompare((request.center_frequency + (request.bandwidth*0.5)),(frontend_tuner_status[tuner_id].center_frequency + (frontend_tuner_status[tuner_id].bandwidth*0.5))) > 0 ){ - LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__ << " FAILED UPPER 
END TEST"); + RH_TRACE(_deviceLog,__PRETTY_FUNCTION__ << " FAILED UPPER END TEST"); return false; } @@ -745,7 +876,6 @@ namespace frontend { template < typename TunerStatusStructType > long FrontendTunerDevice::getTunerMapping(std::string allocation_id) { - //LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); long NO_VALID_TUNER = -1; string_number_mapping::iterator iter = allocation_id_to_tuner_id.find(allocation_id); @@ -784,7 +914,7 @@ namespace frontend { template < typename TunerStatusStructType > bool FrontendTunerDevice::removeTunerMapping(size_t tuner_id, std::string allocation_id) { - LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); + RH_TRACE(_deviceLog,__PRETTY_FUNCTION__); removeListener(allocation_id); sendEOS(allocation_id); std::vector::iterator it = tuner_allocation_ids[tuner_id].listener_allocation_ids.begin(); @@ -803,7 +933,7 @@ namespace frontend { template < typename TunerStatusStructType > bool FrontendTunerDevice::removeTunerMapping(size_t tuner_id) { - LOG_TRACE(FrontendTunerDevice,__PRETTY_FUNCTION__); + RH_TRACE(_deviceLog,__PRETTY_FUNCTION__); deviceDeleteTuning(frontend_tuner_status[tuner_id], tuner_id); removeAllocationIdRouting(tuner_id); diff --git a/frontendInterfaces/libsrc/cpp/fe_tuner_device.h b/frontendInterfaces/libsrc/cpp/fe_tuner_device.h index b52823096..87b982bb6 100644 --- a/frontendInterfaces/libsrc/cpp/fe_tuner_device.h +++ b/frontendInterfaces/libsrc/cpp/fe_tuner_device.h @@ -87,8 +87,9 @@ namespace frontend { * False is returned if min > max for either available for requested values */ inline bool validateRequest(double available_min, double available_max, double requested_min, double requested_max){ - if(floatingPointCompare(requested_min,available_min) < 0) return false; - if(floatingPointCompare(requested_max,available_max) > 0) return false; + double center_request = (requested_max+requested_min)/2.0; + if(floatingPointCompare(center_request,available_min) < 0) return false; + 
if(floatingPointCompare(center_request,available_max) > 0) return false; if(floatingPointCompare(available_min,available_max) > 0) return false; if(floatingPointCompare(requested_min,requested_max) > 0) return false; return true; @@ -184,6 +185,9 @@ namespace frontend { frontend::frontend_listener_allocation_struct frontend_listener_allocation; std::vector frontend_tuner_status; + virtual bool callDeviceSetTuning(size_t tuner_id); + virtual void checkValidIds(const CF::Properties & capacities); + // tuner_allocation_ids is exclusively paired with property frontend_tuner_status. // tuner_allocation_ids tracks allocation ids while frontend_tuner_status provides tuner information. std::vector tuner_allocation_ids; @@ -213,7 +217,9 @@ namespace frontend { virtual void assignListener(const std::string& listen_alloc_id, const std::string& alloc_id); virtual void removeListener(const std::string& listen_alloc_id); virtual void removeAllocationIdRouting(const size_t tuner_id) = 0; - virtual void setNumChannels(size_t num) = 0; + virtual void setNumChannels(size_t num); + virtual void setNumChannels(size_t num, std::string tuner_type); + virtual void addChannels(size_t num, std::string tuner_type); // Configure tuner - gets called during allocation virtual bool enableTuner(size_t tuner_id, bool enable); @@ -297,6 +303,36 @@ namespace frontend { virtual void construct(); }; + /* + * Frontend scanning tuner class definition + */ + template < typename TunerStatusStructType > + class FrontendScanningTunerDevice : public FrontendTunerDevice< TunerStatusStructType > + { + ENABLE_LOGGING + + public: + FrontendScanningTunerDevice(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); + FrontendScanningTunerDevice(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); + FrontendScanningTunerDevice(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities); + FrontendScanningTunerDevice(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, 
CF::Properties capacities, char *compDev); + ~FrontendScanningTunerDevice(); + + protected: + frontend::frontend_scanner_allocation_struct frontend_scanner_allocation; + + virtual bool callDeviceSetTuning(size_t tuner_id); + virtual void checkValidIds(const CF::Properties & capacities); + virtual bool deviceSetTuningScan(const frontend_tuner_allocation_struct &request, const frontend_scanner_allocation_struct &scan_request, TunerStatusStructType &fts, size_t tuner_id) = 0; + virtual bool deviceSetTuning(const frontend_tuner_allocation_struct &request, TunerStatusStructType &fts, size_t tuner_id) = 0; + virtual void loadProperties(); + + private: + // this will be overridden by the generated base class once all ports are known + virtual void construct(); + bool _has_scanner; + }; + }; // end frontend namespace #endif diff --git a/frontendInterfaces/libsrc/cpp/fe_tuner_port_impl.h b/frontendInterfaces/libsrc/cpp/fe_tuner_port_impl.h index 9b202cedc..9b2ce6fa9 100644 --- a/frontendInterfaces/libsrc/cpp/fe_tuner_port_impl.h +++ b/frontendInterfaces/libsrc/cpp/fe_tuner_port_impl.h @@ -94,6 +94,32 @@ namespace frontend { } }; + class analog_scanning_tuner_delegation : public virtual analog_tuner_delegation { + public: + virtual frontend::ScanStatus getScanStatus(const std::string& id) { + throw FRONTEND::NotSupportedException("getScanStatus not supported"); + } + virtual void setScanStartTime(const std::string& id, const BULKIO::PrecisionUTCTime& start_time) { + throw FRONTEND::NotSupportedException("setScanStartTime not supported"); + } + virtual void setScanStrategy(const std::string& id, const frontend::ScanStrategy* scan_strategy) { + throw FRONTEND::NotSupportedException("setScanStrategy not supported"); + } + }; + + class digital_scanning_tuner_delegation : public virtual digital_tuner_delegation { + public: + virtual frontend::ScanStatus getScanStatus(const std::string& id) { + throw FRONTEND::NotSupportedException("getScanStatus not supported"); + } + virtual 
void setScanStartTime(const std::string& id, const BULKIO::PrecisionUTCTime& start_time) { + throw FRONTEND::NotSupportedException("setScanStartTime not supported"); + } + virtual void setScanStrategy(const std::string& id, const frontend::ScanStrategy* scan_strategy) { + throw FRONTEND::NotSupportedException("setScanStrategy not supported"); + } + }; + class InFrontendTunerPort : public virtual POA_FRONTEND::FrontendTuner, public Port_Provides_base_impl { public: @@ -238,8 +264,71 @@ namespace frontend { private: digital_tuner_delegation *parent; }; - - + + class InAnalogScanningTunerPort : public virtual POA_FRONTEND::AnalogScanningTuner, public InAnalogTunerPort + { + public: + typedef InAnalogTunerPort super; + InAnalogScanningTunerPort(std::string port_name, analog_scanning_tuner_delegation *_parent):super(port_name, _parent) + { + parent = _parent; + }; + ~InAnalogScanningTunerPort() {}; + FRONTEND::ScanningTuner::ScanStatus* getScanStatus(const char* id) { + boost::mutex::scoped_lock lock(this->portAccess); + std::string _id(id); + return (returnScanStatus(this->parent->getScanStatus(_id))); + }; + void setScanStartTime(const char* id, const BULKIO::PrecisionUTCTime& start_time) { + boost::mutex::scoped_lock lock(this->portAccess); + std::string _id(id); + this->parent->setScanStartTime(_id, start_time); + }; + void setScanStrategy(const char* id, const FRONTEND::ScanningTuner::ScanStrategy& scan_strategy) { + boost::mutex::scoped_lock lock(this->portAccess); + std::string _id(id); + std::auto_ptr _strat(returnScanStrategy(scan_strategy)); + this->parent->setScanStrategy(_id, _strat.get()); + }; + std::string getRepid() const { + return "IDL:FRONTEND/AnalogScanningTuner:1.0"; + }; + private: + analog_scanning_tuner_delegation *parent; + }; + + class InDigitalScanningTunerPort : public virtual POA_FRONTEND::DigitalScanningTuner, public InDigitalTunerPort + { + public: + typedef InDigitalTunerPort super; + InDigitalScanningTunerPort(std::string port_name, 
digital_scanning_tuner_delegation *_parent):super(port_name, _parent) + { + parent = _parent; + }; + ~InDigitalScanningTunerPort() {}; + FRONTEND::ScanningTuner::ScanStatus* getScanStatus(const char* id) { + boost::mutex::scoped_lock lock(this->portAccess); + std::string _id(id); + return (returnScanStatus(this->parent->getScanStatus(_id))); + }; + void setScanStartTime(const char* id, const BULKIO::PrecisionUTCTime& start_time) { + boost::mutex::scoped_lock lock(this->portAccess); + std::string _id(id); + this->parent->setScanStartTime(_id, start_time); + }; + void setScanStrategy(const char* id, const FRONTEND::ScanningTuner::ScanStrategy& scan_strategy) { + boost::mutex::scoped_lock lock(this->portAccess); + std::string _id(id); + std::auto_ptr _strat(returnScanStrategy(scan_strategy)); + this->parent->setScanStrategy(_id, _strat.get()); + }; + std::string getRepid() const { + return "IDL:FRONTEND/DigitalScanningTuner:1.0"; + }; + private: + digital_scanning_tuner_delegation *parent; + }; + template class OutFrontendTunerPortT : public OutFrontendPort { @@ -247,60 +336,118 @@ namespace frontend { OutFrontendTunerPortT(std::string port_name) : OutFrontendPort(port_name) {}; ~OutFrontendTunerPortT(){}; - - std::string getTunerType(std::string &id) { + + std::vector getConnectionIds() { + std::vector retval; + for (unsigned int i = 0; i < this->outConnections.size(); i++) { + retval.push_back(this->outConnections[i].second); + } + return retval; + } + + void __evaluateRequestBasedOnConnections(const std::string &__connection_id__, bool returnValue, bool inOut, bool out) { + if (__connection_id__.empty() and (this->outConnections.size() > 1)) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", + getConnectionIds()); + } + } + if (this->outConnections.empty()) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("No 
connections available.", std::vector()); + } else { + if (not __connection_id__.empty()) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + if ((not __connection_id__.empty()) and (not this->outConnections.empty())) { + bool foundConnection = false; + typename std::vector < std::pair < PortType_var, std::string > >::iterator i; + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (i->second == __connection_id__) { + foundConnection = true; + break; + } + } + if (not foundConnection) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + + std::string getTunerType(const std::string &id, const std::string __connection_id__ = "") { CORBA::String_var retval = ""; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerType(id.c_str()); } } std::string str_retval = ossie::corba::returnString(retval); return str_retval; }; - bool getTunerDeviceControl(std::string &id) { + bool getTunerDeviceControl(const std::string &id, const std::string __connection_id__ = "") { CORBA::Boolean retval = false; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, 
true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerDeviceControl(id.c_str()); } } return retval; }; - std::string getTunerGroupId(std::string &id) { + std::string getTunerGroupId(const std::string &id, const std::string __connection_id__ = "") { CORBA::String_var retval = ""; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerGroupId(id.c_str()); } } std::string str_retval = ossie::corba::returnString(retval); return str_retval; }; - std::string getTunerRfFlowId(std::string &id) { + std::string getTunerRfFlowId(const std::string &id, const std::string __connection_id__ = "") { CORBA::String_var retval = ""; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerRfFlowId(id.c_str()); } } std::string str_retval = ossie::corba::returnString(retval); return str_retval; }; - CF::Properties* getTunerStatus(std::string &id) { + CF::Properties* getTunerStatus(const std::string &id, const std::string __connection_id__ = "") { 
CF::Properties_var retval = new CF::Properties(); typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); // don't want to process while command information is coming in + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerStatus(id.c_str()); } } @@ -315,128 +462,164 @@ namespace frontend { OutAnalogTunerPortT(std::string port_name) : OutFrontendTunerPortT(port_name) {}; ~OutAnalogTunerPortT(){}; - - void setTunerCenterFrequency(std::string &id, double freq) { + + void setTunerCenterFrequency(const std::string &id, double freq, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; ((*i).first)->setTunerCenterFrequency(id.c_str(), freq); } } return; }; - double getTunerCenterFrequency(std::string &id) { + double getTunerCenterFrequency(const std::string &id, const std::string __connection_id__ = "") { CORBA::Double retval = 0; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = 
((*i).first)->getTunerCenterFrequency(id.c_str()); } } return retval; }; - void setTunerBandwidth(std::string &id, double bw) { + void setTunerBandwidth(const std::string &id, double bw, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; ((*i).first)->setTunerBandwidth(id.c_str(), bw); } } return; }; - double getTunerBandwidth(std::string &id) { + double getTunerBandwidth(const std::string &id, const std::string __connection_id__ = "") { CORBA::Double retval = 0; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerBandwidth(id.c_str()); } } return retval; }; - void setTunerAgcEnable(std::string &id, bool enable) { + void setTunerAgcEnable(const std::string &id, bool enable, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; 
((*i).first)->setTunerAgcEnable(id.c_str(), enable); } } return; }; - bool getTunerAgcEnable(std::string &id) { + bool getTunerAgcEnable(const std::string &id, const std::string __connection_id__ = "") { CORBA::Boolean retval = false; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerAgcEnable(id.c_str()); } } return retval; }; - void setTunerGain(std::string &id, float gain) { + void setTunerGain(const std::string &id, float gain, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; ((*i).first)->setTunerGain(id.c_str(), gain); } } return; }; - float getTunerGain(std::string &id) { + float getTunerGain(const std::string &id, const std::string __connection_id__ = "") { CORBA::Float retval = 0; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerGain(id.c_str()); } } return 
retval; }; - void setTunerReferenceSource(std::string &id, int source) { + void setTunerReferenceSource(const std::string &id, int source, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; ((*i).first)->setTunerReferenceSource(id.c_str(), source); } } return; }; - int getTunerReferenceSource(std::string &id) { + int getTunerReferenceSource(const std::string &id, const std::string __connection_id__ = "") { CORBA::Long retval = 0; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerReferenceSource(id.c_str()); } } return retval; }; - void setTunerEnable(std::string &id, bool enable) { + void setTunerEnable(const std::string &id, bool enable, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; ((*i).first)->setTunerEnable(id.c_str(), enable); } } return; }; - bool 
getTunerEnable(std::string &id) { + bool getTunerEnable(const std::string &id, const std::string __connection_id__ = "") { CORBA::Boolean retval = false; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerEnable(id.c_str()); } } @@ -451,23 +634,29 @@ namespace frontend { OutDigitalTunerPortT(std::string port_name) : OutAnalogTunerPortT(port_name) {}; ~OutDigitalTunerPortT(){}; - - void setTunerOutputSampleRate(std::string &id, double sr) { + + void setTunerOutputSampleRate(const std::string &id, double sr, const std::string __connection_id__ = "") { typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, false, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != i->second) + continue; ((*i).first)->setTunerOutputSampleRate(id.c_str(), sr); } } return; }; - double getTunerOutputSampleRate(std::string &id) { + double getTunerOutputSampleRate(const std::string &id, const std::string __connection_id__ = "") { CORBA::Double retval = 0; typename std::vector < std::pair < PortType_var, std::string > >::iterator i; boost::mutex::scoped_lock lock(this->updatingPortsLock); + OutFrontendTunerPortT::__evaluateRequestBasedOnConnections(__connection_id__, true, false, false); if (this->active) { for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() 
and __connection_id__ != i->second) + continue; retval = ((*i).first)->getTunerOutputSampleRate(id.c_str()); } } diff --git a/frontendInterfaces/libsrc/cpp/fe_tuner_struct_props.h b/frontendInterfaces/libsrc/cpp/fe_tuner_struct_props.h index a4f44fa8a..c00b74bcd 100644 --- a/frontendInterfaces/libsrc/cpp/fe_tuner_struct_props.h +++ b/frontendInterfaces/libsrc/cpp/fe_tuner_struct_props.h @@ -22,6 +22,7 @@ #include #include +#include #include "fe_types.h" inline bool operator>>= (const CORBA::Any& a, frontend::frontend_tuner_allocation_struct& s) { @@ -333,4 +334,80 @@ template<> inline short StructSequenceProperty>= (const CORBA::Any& a, frontend::frontend_scanner_allocation_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("FRONTEND::scanner_allocation::min_freq")) { + if (!(props["FRONTEND::scanner_allocation::min_freq"] >>= s.min_freq)) return false; + } + if (props.contains("FRONTEND::scanner_allocation::max_freq")) { + if (!(props["FRONTEND::scanner_allocation::max_freq"] >>= s.max_freq)) return false; + } + if (props.contains("FRONTEND::scanner_allocation::mode")) { + if (!(props["FRONTEND::scanner_allocation::mode"] >>= s.mode)) return false; + } + if (props.contains("FRONTEND::scanner_allocation::control_mode")) { + if (!(props["FRONTEND::scanner_allocation::control_mode"] >>= s.control_mode)) return false; + } + if (props.contains("FRONTEND::scanner_allocation::control_limit")) { + if (!(props["FRONTEND::scanner_allocation::control_limit"] >>= s.control_limit)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const frontend::frontend_scanner_allocation_struct& s) { + redhawk::PropertyMap props; + + props["FRONTEND::scanner_allocation::min_freq"] = s.min_freq; + + props["FRONTEND::scanner_allocation::max_freq"] = s.max_freq; + + props["FRONTEND::scanner_allocation::mode"] = s.mode; + + 
props["FRONTEND::scanner_allocation::control_mode"] = s.control_mode; + + props["FRONTEND::scanner_allocation::control_limit"] = s.control_limit; + a <<= props; +} + +inline bool operator== (const frontend::frontend_scanner_allocation_struct& s1, const frontend::frontend_scanner_allocation_struct& s2) { + if (s1.min_freq!=s2.min_freq) + return false; + if (s1.max_freq!=s2.max_freq) + return false; + if (s1.mode!=s2.mode) + return false; + if (s1.control_mode!=s2.control_mode) + return false; + if (s1.control_limit!=s2.control_limit) + return false; + return true; +} + +inline bool operator!= (const frontend::frontend_scanner_allocation_struct& s1, const frontend::frontend_scanner_allocation_struct& s2) { + return !(s1==s2); +} + +template<> inline short StructProperty::compare (const CORBA::Any& a) { + if (super::isNil_) { + CORBA::TypeCode_var aType = a.type(); + if (aType->kind() == (CORBA::tk_null)) { + return 0; + } + return 1; + } + + frontend::frontend_scanner_allocation_struct tmp; + if (fromAny(a, tmp)) { + if (tmp != this->value_) { + return 1; + } + + return 0; + } else { + return 1; + } +} + #endif diff --git a/frontendInterfaces/libsrc/cpp/fe_types.h b/frontendInterfaces/libsrc/cpp/fe_types.h index e39d61004..a1c64de9f 100644 --- a/frontendInterfaces/libsrc/cpp/fe_types.h +++ b/frontendInterfaces/libsrc/cpp/fe_types.h @@ -210,6 +210,93 @@ namespace frontend { std::string rf_flow_id; bool enabled; }; + + struct frontend_scanner_allocation_struct { + frontend_scanner_allocation_struct () + { + } + + static std::string getId() { + return std::string("FRONTEND::scanner_allocation"); + } + + static const char* getFormat() { + return "ddssd"; + } + + double min_freq; + double max_freq; + std::string mode; + std::string control_mode; + double control_limit; + }; + + typedef enum ScanMode { MANUAL_SCAN, SPAN_SCAN, DISCRETE_SCAN } ScanMode; + typedef enum OutputControlMode { TIME_BASED, SAMPLE_BASED } OutputControlMode; + + struct ScanSpanRange { + double 
begin_frequency; + double end_frequency; + double step; + }; + + typedef std::vector ScanSpanRanges; + typedef std::vector Frequencies; + + class ScanStrategy { + public: + ScanMode scan_mode; + OutputControlMode control_mode; + double control_value; + virtual ~ScanStrategy() {}; + virtual ScanStrategy* clone() const = 0; + protected: + ScanStrategy(ScanMode _scan_mode) : scan_mode(_scan_mode), control_mode(TIME_BASED), control_value(0) { }; + ScanStrategy(const ScanStrategy& source) { + scan_mode = source.scan_mode; + control_mode = source.control_mode; + control_value = source.control_value; + }; + }; + + class ManualStrategy : public ScanStrategy { + public: + double center_frequency; + ManualStrategy* clone() const {return new ManualStrategy(*this);}; + ManualStrategy(const ManualStrategy& source) : ScanStrategy(source) { + center_frequency = source.center_frequency; + }; + ManualStrategy(const double _center_frequency) : ScanStrategy(MANUAL_SCAN), center_frequency(_center_frequency) { }; + }; + + class SpanStrategy : public ScanStrategy { + public: + ScanSpanRanges freq_scan_list; + SpanStrategy* clone() const {return new SpanStrategy(*this);}; + SpanStrategy(const SpanStrategy& source) : ScanStrategy(source) { + freq_scan_list = source.freq_scan_list; + }; + SpanStrategy(const ScanSpanRanges& _freq_scan_list) : ScanStrategy(SPAN_SCAN), freq_scan_list(_freq_scan_list) { }; + }; + + class DiscreteStrategy : public ScanStrategy { + public: + Frequencies discrete_freq_list; + DiscreteStrategy* clone() const {return new DiscreteStrategy(*this);}; + DiscreteStrategy(const DiscreteStrategy& source) : ScanStrategy(source) { + discrete_freq_list = source.discrete_freq_list; + }; + DiscreteStrategy(const Frequencies& _discrete_freq_list) : ScanStrategy(DISCRETE_SCAN), discrete_freq_list(_discrete_freq_list) { }; + }; + + class ScanStatus { + public: + std::auto_ptr strategy; + BULKIO::PrecisionUTCTime start_time; + Frequencies center_tune_frequencies; + bool started; + 
ScanStatus(ScanStrategy *strat) : strategy(strat) {}; + }; } #endif diff --git a/frontendInterfaces/libsrc/java/src/frontend/AnalogScanningTunerDelegate.java b/frontendInterfaces/libsrc/java/src/frontend/AnalogScanningTunerDelegate.java new file mode 100644 index 000000000..379ec9a54 --- /dev/null +++ b/frontendInterfaces/libsrc/java/src/frontend/AnalogScanningTunerDelegate.java @@ -0,0 +1,37 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK frontendInterfaces. + * + * REDHAWK frontendInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK frontendInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +package frontend; + +import frontend.AnalogTunerDelegate; +import FRONTEND.FrontendException; +import FRONTEND.BadParameterException; +import FRONTEND.NotSupportedException; +import FRONTEND.ScanningTunerPackage.ScanStatus; +import FRONTEND.ScanningTunerPackage.ScanStrategy; +import BULKIO.PrecisionUTCTime; + +public interface AnalogScanningTunerDelegate extends AnalogTunerDelegate { + + public FRONTEND.ScanningTunerPackage.ScanStatus getScanStatus(String id) throws FrontendException, BadParameterException, NotSupportedException; + + public void setScanStartTime(String id, BULKIO.PrecisionUTCTime start_time) throws FrontendException, BadParameterException, NotSupportedException; + + public void setScanStrategy(String id, FRONTEND.ScanningTunerPackage.ScanStrategy scan_strategy) throws FrontendException, BadParameterException, NotSupportedException; +} diff --git a/frontendInterfaces/libsrc/java/src/frontend/DigitalScanningTunerDelegate.java b/frontendInterfaces/libsrc/java/src/frontend/DigitalScanningTunerDelegate.java new file mode 100644 index 000000000..81ef27fe3 --- /dev/null +++ b/frontendInterfaces/libsrc/java/src/frontend/DigitalScanningTunerDelegate.java @@ -0,0 +1,37 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK frontendInterfaces. + * + * REDHAWK frontendInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK frontendInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package frontend; + +import frontend.DigitalTunerDelegate; +import FRONTEND.FrontendException; +import FRONTEND.BadParameterException; +import FRONTEND.NotSupportedException; +import FRONTEND.ScanningTunerPackage.ScanStatus; +import FRONTEND.ScanningTunerPackage.ScanStrategy; +import BULKIO.PrecisionUTCTime; + +public interface DigitalScanningTunerDelegate extends DigitalTunerDelegate { + + public FRONTEND.ScanningTunerPackage.ScanStatus getScanStatus(String id) throws FrontendException, BadParameterException, NotSupportedException; + + public void setScanStartTime(String id, BULKIO.PrecisionUTCTime start_time) throws FrontendException, BadParameterException, NotSupportedException; + + public void setScanStrategy(String id, FRONTEND.ScanningTunerPackage.ScanStrategy scan_strategy) throws FrontendException, BadParameterException, NotSupportedException; +} diff --git a/frontendInterfaces/libsrc/java/src/frontend/FETypes.java b/frontendInterfaces/libsrc/java/src/frontend/FETypes.java index dd4ba46f8..4c8db4b81 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/FETypes.java +++ b/frontendInterfaces/libsrc/java/src/frontend/FETypes.java @@ -34,7 +34,7 @@ public class FETypes { - public enum timeTypes { + public static enum timeTypes { J1950(1), J1970(2), JCY(3); @@ -44,10 +44,10 @@ private timeTypes(int value){ } } - public class FreqRange { - protected double min_val; - protected double max_val; - protected List values; + public static class FreqRange { + public double min_val; + public double max_val; + public List values; public FreqRange(){ min_val = 0.0; max_val = 0.0; @@ -55,11 +55,11 @@ public FreqRange(){ } } - public class AntennaInfo { - protected String name; - protected String type; - protected String size; - protected String description; + public static class AntennaInfo { + public String name; + 
public String type; + public String size; + public String description; public AntennaInfo(){ name = null; type = null; @@ -68,10 +68,10 @@ public AntennaInfo(){ } } - public class FeedInfo { - protected String name; - protected String polarization; - protected FreqRange freq_range; + public static class FeedInfo { + public String name; + public String polarization; + public FreqRange freq_range; public FeedInfo(){ name = null; polarization = null; @@ -79,12 +79,12 @@ public FeedInfo(){ } } - public class SensorInfo { - protected String mission; - protected String collector; - protected String rx; - protected AntennaInfo antenna; - protected FeedInfo feed; + public static class SensorInfo { + public String mission; + public String collector; + public String rx; + public AntennaInfo antenna; + public FeedInfo feed; public SensorInfo(){ mission = null; collector = null; @@ -94,34 +94,34 @@ public SensorInfo(){ } } - public class PathDelay { - protected double freq; - protected double delay_ns; + public static class PathDelay { + public double freq; + public double delay_ns; public PathDelay(){ freq = 0.0; delay_ns = 0.0; } } - public class RFCapabilities { - protected FreqRange freq_range; - protected FreqRange bw_range; + public static class RFCapabilities { + public FreqRange freq_range; + public FreqRange bw_range; public RFCapabilities(){ freq_range = new FreqRange(); bw_range = new FreqRange(); } } - public class RFInfoPkt { - protected String rf_flow_id; - protected double rf_center_freq; - protected double rf_bandwidth; - protected double if_center_freq; - protected boolean spectrum_inverted; - protected SensorInfo sensor; - protected List ext_path_delays; - protected RFCapabilities capabilities; - protected CF.PropertiesHolder additional_info; + public static class RFInfoPkt { + public String rf_flow_id; + public double rf_center_freq; + public double rf_bandwidth; + public double if_center_freq; + public boolean spectrum_inverted; + public SensorInfo sensor; 
+ public List ext_path_delays; + public RFCapabilities capabilities; + public CF.PropertiesHolder additional_info; public RFInfoPkt(){ rf_flow_id = null; rf_center_freq = 0.0; @@ -135,12 +135,12 @@ public RFInfoPkt(){ } } - public class PositionInfo { - protected boolean valid; - protected String datum; - protected double lat; - protected double lon; - protected double alt; + public static class PositionInfo { + public boolean valid; + public String datum; + public double lat; + public double lon; + public double alt; public PositionInfo(){ valid = false; datum = null; @@ -150,22 +150,22 @@ public PositionInfo(){ } } - public class GPSInfo { - protected String source_id; - protected String rf_flow_id; - protected String mode; - protected long fom; - protected long tfom; - protected long datumID; - protected double time_offset; - protected double freq_offset; - protected double time_variance; - protected double freq_variance; - protected short satellite_count; - protected float snr; - protected String status_message; - protected BULKIO.PrecisionUTCTime timestamp; - protected CF.PropertiesHolder additional_info; + public static class GPSInfo { + public String source_id; + public String rf_flow_id; + public String mode; + public long fom; + public long tfom; + public long datumID; + public double time_offset; + public double freq_offset; + public double time_variance; + public double freq_variance; + public short satellite_count; + public float snr; + public String status_message; + public BULKIO.PrecisionUTCTime timestamp; + public CF.PropertiesHolder additional_info; public GPSInfo(){ source_id = null; rf_flow_id = null; @@ -185,21 +185,21 @@ public GPSInfo(){ } } - public class GpsTimePos { - protected PositionInfo position; - protected BULKIO.PrecisionUTCTime timestamp; + public static class GpsTimePos { + public PositionInfo position; + public BULKIO.PrecisionUTCTime timestamp; public GpsTimePos(){ position = new PositionInfo(); timestamp = new 
BULKIO.PrecisionUTCTime(); } } - public class CartesianPositionInfo { - protected boolean valid; - protected String datum; - protected double x; - protected double y; - protected double z; + public static class CartesianPositionInfo { + public boolean valid; + public String datum; + public double x; + public double y; + public double z; public CartesianPositionInfo(){ valid = false; datum = null; @@ -209,11 +209,11 @@ public CartesianPositionInfo(){ } } - public class AttitudeInfo { - protected boolean valid; - protected double pitch; - protected double yaw; - protected double roll; + public static class AttitudeInfo { + public boolean valid; + public double pitch; + public double yaw; + public double roll; public AttitudeInfo(){ valid = false; pitch = 0.0; @@ -222,13 +222,13 @@ public AttitudeInfo(){ } } - public class VelocityInfo { - protected boolean valid; - protected String datum; - protected String coordinate_system; - protected double x; - protected double y; - protected double z; + public static class VelocityInfo { + public boolean valid; + public String datum; + public String coordinate_system; + public double x; + public double y; + public double z; public VelocityInfo(){ valid = false; datum = null; @@ -239,13 +239,13 @@ public VelocityInfo(){ } } - public class AccelerationInfo { - protected boolean valid; - protected String datum; - protected String coordinate_system; - protected double x; - protected double y; - protected double z; + public static class AccelerationInfo { + public boolean valid; + public String datum; + public String coordinate_system; + public double x; + public double y; + public double z; public AccelerationInfo(){ valid = false; datum = null; @@ -256,16 +256,16 @@ public AccelerationInfo(){ } } - public class NavigationPacket { - protected String source_id; - protected String rf_flow_id; - protected PositionInfo position; - protected CartesianPositionInfo cposition; - protected VelocityInfo velocity; - protected AccelerationInfo 
acceleration; - protected AttitudeInfo attitude; - protected BULKIO.PrecisionUTCTime timestamp; - protected CF.PropertiesHolder additional_info; + public static class NavigationPacket { + public String source_id; + public String rf_flow_id; + public PositionInfo position; + public CartesianPositionInfo cposition; + public VelocityInfo velocity; + public AccelerationInfo acceleration; + public AttitudeInfo attitude; + public BULKIO.PrecisionUTCTime timestamp; + public CF.PropertiesHolder additional_info; public NavigationPacket(){ source_id = null; rf_flow_id = null; @@ -398,6 +398,16 @@ public String getId() { Mode.READWRITE, //mode new Kind[] {Kind.ALLOCATION} //kind ); + + public final StructProperty frontend_scanner_allocation = + new StructProperty( + "FRONTEND::scanner_allocation", //id + "frontend_scanner_allocation", //name + frontend_scanner_allocation_struct.class, //type + new frontend_scanner_allocation_struct(), //default value + Mode.READWRITE, //mode + new Kind[] {Kind.ALLOCATION} //kind + ); public static class frontend_listener_allocation_struct extends StructDef { public final StringProperty existing_allocation_id = @@ -527,4 +537,113 @@ public String getId() { return "frontend_tuner_status_struct"; } } + + public static class frontend_scanner_allocation_struct extends StructDef { + public final DoubleProperty min_freq = + new DoubleProperty( + "FRONTEND::scanner_allocation::min_freq", //id + "min_freq", //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + public final DoubleProperty max_freq = + new DoubleProperty( + "FRONTEND::scanner_allocation::max_freq", //id + "max_freq", //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + public final StringProperty mode = + new StringProperty( + "FRONTEND::scanner_allocation::mode", //id + "mode", //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, 
//action + new Kind[] {Kind.CONFIGURE} + ); + public final StringProperty control_mode = + new StringProperty( + "FRONTEND::scanner_allocation::control_mode", //id + "control_mode", //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + public final DoubleProperty control_limit = + new DoubleProperty( + "FRONTEND::scanner_allocation::control_limit", //id + "control_limit", //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + + /** + * @generated + */ + public frontend_scanner_allocation_struct(Double min_freq, Double max_freq, String mode, String control_mode, Double control_limit) { + this(); + this.min_freq.setValue(min_freq); + this.max_freq.setValue(max_freq); + this.mode.setValue(mode); + this.control_mode.setValue(control_mode); + this.control_limit.setValue(control_limit); + } + + /** + * @generated + */ + public void set_min_freq(Double min_freq) { + this.min_freq.setValue(min_freq); + } + public Double get_min_freq() { + return this.min_freq.getValue(); + } + public void set_max_freq(Double max_freq) { + this.max_freq.setValue(max_freq); + } + public Double get_max_freq() { + return this.max_freq.getValue(); + } + public void set_mode(String mode) { + this.mode.setValue(mode); + } + public String get_mode() { + return this.mode.getValue(); + } + public void set_control_mode(String control_mode) { + this.control_mode.setValue(control_mode); + } + public String get_control_mode() { + return this.control_mode.getValue(); + } + public void set_control_limit(Double control_limit) { + this.control_limit.setValue(control_limit); + } + public Double get_control_limit() { + return this.control_limit.getValue(); + } + + /** + * @generated + */ + public frontend_scanner_allocation_struct() { + addElement(this.min_freq); + addElement(this.max_freq); + addElement(this.mode); + addElement(this.control_mode); + 
addElement(this.control_limit); + } + + public String getId() { + return "FRONTEND::scanner_allocation"; + } + }; } diff --git a/frontendInterfaces/libsrc/java/src/frontend/FrontendScanningTunerDevice.java b/frontendInterfaces/libsrc/java/src/frontend/FrontendScanningTunerDevice.java new file mode 100644 index 000000000..9c09c92ba --- /dev/null +++ b/frontendInterfaces/libsrc/java/src/frontend/FrontendScanningTunerDevice.java @@ -0,0 +1,120 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK frontendInterfaces. + * + * REDHAWK frontendInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK frontendInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +package frontend; + +import BULKIO.StreamSRI; +import BULKIO.UNITS_TIME; +import BULKIO.UNITS_NONE; +import CF.AggregateDevice; +import CF.DataType; +import CF.DeviceManager; +import CF.DevicePackage.UsageType.*; +import CF.DevicePackage.InvalidCapacity; +import CF.DevicePackage.InvalidCapacityHelper; +import CF.DevicePackage.InvalidState; +import CF.DevicePackage.UsageType; +import CF.PortSetPackage.PortInfoType; +import CF.InvalidObjectReference; +import ExtendedCF.UsesConnection; +import FRONTEND.RFInfoPkt; +import FRONTEND.BadParameterException; +import java.lang.Math.*; +import java.lang.reflect.*; +import java.text.*; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.concurrent.*; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; +import java.util.UUID.*; +import org.omg.CORBA.Any; +import org.omg.CORBA.ORB; +import org.omg.PortableServer.POA; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; +import org.ossie.component.ThreadedDevice; +import org.ossie.properties.Action; +import org.ossie.properties.Allocator; +import org.ossie.properties.AnyUtils; +import org.ossie.properties.Kind; +import org.ossie.properties.Mode; +import org.ossie.properties.StringProperty; +import org.ossie.properties.StructProperty; +import org.ossie.properties.StructSequenceProperty; + +public abstract class FrontendScanningTunerDevice extends FrontendTunerDevice { + + + public FrontendScanningTunerDevice() { + super(); + } + + public FrontendScanningTunerDevice(Class _genericType) { + super(_genericType); + } + + public boolean callDeviceSetTuning(final frontend.FETypes.frontend_tuner_allocation_struct frontend_tuner_allocation, TunerStatusStructType fts, int tuner_id) { + if (this._has_scanner) { + return deviceSetTuningScan(frontend_tuner_allocation, frontend_scanner_allocation.getValue(), fts, tuner_id); + } 
+ return deviceSetTuning(frontend_tuner_allocation, fts, tuner_id); + } + + public void checkValidIds(DataType[] capacities) throws InvalidCapacity, InvalidState { + this._has_scanner = false; + for (DataType cap : capacities) { + if (!cap.id.equals("FRONTEND::tuner_allocation") && !cap.id.equals("FRONTEND::listener_allocation") && (!cap.id.equals("FRONTEND::scanner_allocation"))) { + throw new CF.DevicePackage.InvalidCapacity("Invalid allocation property", capacities); + } + if (cap.id.equals("FRONTEND::scanner_allocation")) { + this._has_scanner = true; + } + } + } + + protected abstract boolean deviceSetTuningScan(final frontend.FETypes.frontend_tuner_allocation_struct request, final frontend.FETypes.frontend_scanner_allocation_struct scan_request, TunerStatusStructType fts, int tuner_id); + + protected StructProperty frontend_scanner_allocation; + + private boolean _has_scanner = false; + + // this is implemented in the generated base class once all properties are known + public void loadProperties(){ + frontend_scanner_allocation = + new StructProperty( + "FRONTEND::scanner_allocation", //id + "frontend_scanner_allocation", //name + frontend.FETypes.frontend_scanner_allocation_struct.class, //type + new frontend.FETypes.frontend_scanner_allocation_struct(), //default value + Mode.READWRITE, //mode + new Kind[] {Kind.ALLOCATION} //kind + ); + + addProperty(device_kind); + addProperty(device_model); + addProperty(frontend_tuner_allocation); + addProperty(frontend_listener_allocation); + addProperty(frontend_scanner_allocation); + addProperty(frontend_tuner_status); + } +} diff --git a/frontendInterfaces/libsrc/java/src/frontend/FrontendTunerDevice.java b/frontendInterfaces/libsrc/java/src/frontend/FrontendTunerDevice.java index d4fa16419..c263f5a17 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/FrontendTunerDevice.java +++ b/frontendInterfaces/libsrc/java/src/frontend/FrontendTunerDevice.java @@ -56,6 +56,7 @@ import org.ossie.properties.Action; import 
org.ossie.properties.Allocator; import org.ossie.properties.AnyUtils; +import org.ossie.properties.IProperty; import org.ossie.properties.Kind; import org.ossie.properties.Mode; import org.ossie.properties.StringProperty; @@ -72,7 +73,7 @@ public abstract class FrontendTunerDevice 0 && floatingPointCompare(rfinfo.rf_center_freq,rfinfo.if_center_freq) > 0) - request_if_center_freq = request.center_frequency.getValue() - (rfinfo.rf_center_freq-rfinfo.if_center_freq); + if(!request.tuner_type.getValue().equals("TX") && floatingPointCompare(rfinfo.if_center_freq,0) > 0 && floatingPointCompare(rfinfo.rf_center_freq,rfinfo.if_center_freq) > 0) { + if (rfinfo.spectrum_inverted) { + request_if_center_freq = rfinfo.if_center_freq - (request.center_frequency.getValue() - rfinfo.rf_center_freq); + } else { + request_if_center_freq = rfinfo.if_center_freq + (request.center_frequency.getValue() - rfinfo.rf_center_freq); + } + } // check vs. device center freq capability (ensure 0 <= request <= max device capability) if ( !validateRequest(min_device_center_freq,max_device_center_freq,request_if_center_freq) ) { @@ -269,15 +275,27 @@ public boolean validateRequestVsDevice(final frontend.FETypes.frontend_tuner_all return true; } + private Class frontend_tuner_status_class_type; + public FrontendTunerDevice() { super(); construct(); } - + + public FrontendTunerDevice(Class _genericType) { + super(); + this.frontend_tuner_status_class_type = _genericType; + construct(); + } + + public void setFrontendTunerStatusClassType(Class _genericType) { + this.frontend_tuner_status_class_type = _genericType; + } + private void construct() { loadProperties(); allocation_id_to_tuner_id = new HashMap(); - frontend_tuner_allocation.setAllocator(new Allocator() { + /*frontend_tuner_allocation.setAllocator(new Allocator() { public boolean allocate(frontend.FETypes.frontend_tuner_allocation_struct capacity){ boolean status = false; try{ @@ -308,7 +326,7 @@ public boolean 
allocate(frontend.FETypes.frontend_listener_allocation_struct cap public void deallocate(frontend.FETypes.frontend_listener_allocation_struct capacity) throws CF.DevicePackage.InvalidCapacity { deallocateListener(capacity); } - }); + });*/ } // this is implemented in the generated base class once all properties are known @@ -345,6 +363,64 @@ protected List getListenerAllocationIds(int tuner_id){ return tuner_allocation_ids.get(tuner_id).listener_allocation_ids; } + /* This sets the number of entries in the frontend_tuner_status struct sequence property + * as well as the tuner_allocation_ids vector. Call this function during initialization + */ + public void setNumChannels(int num) + { + this.setNumChannels(num, "RX_DIGITIZER"); + } + + /* This sets the number of entries in the frontend_tuner_status struct sequence property + * as well as the tuner_allocation_ids vector. Call this function during initialization + */ + public void setNumChannels(int num, String tuner_type) + { + if (frontend_tuner_status_class_type == null) { + _deviceLog.error("To use setNumChannels from the base classes, this device must be re-generated"); + return; + } + frontend_tuner_status.setValue(new ArrayList()); + tuner_allocation_ids = new ArrayList(); + this.addChannels(num, tuner_type); + } + + /* This sets the number of entries in the frontend_tuner_status struct sequence property + * as well as the tuner_allocation_ids vector. 
Call this function during initialization + */ + public void addChannels(int num, String tuner_type) + { + if (frontend_tuner_status_class_type == null) { + _deviceLog.error("To use addChannels from the base classes, this device must be re-generated"); + return; + } + if (frontend_tuner_status == null) { + frontend_tuner_status.setValue(new ArrayList()); + } + if (tuner_allocation_ids == null) { + tuner_allocation_ids = new ArrayList(); + } + for (int idx=0;idx invalidProps = new ArrayList(); + for (DataType cap : capacities) { + final IProperty property = this.propSet.get(cap.id); + if (cap.id.equals("FRONTEND::tuner_allocation")) { + frontend_tuner_allocation.configure(cap.value); + deallocateTuner(frontend_tuner_allocation.getValue()); + } + if (cap.id.equals("FRONTEND::listener_allocation")) { + try { + frontend_listener_allocation.configure(cap.value); + deallocateListener(frontend_listener_allocation.getValue()); + } catch (CF.DevicePackage.InvalidCapacity e) { + invalidProps.add(cap); + } + } + } + + updateUsageState(); + + if ( invalidProps.size() > 0 ) { + throw new InvalidCapacity("Invalid capacity deallocation", invalidProps.toArray(new DataType[0])); + } + } + public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct frontend_tuner_allocation) throws CF.DevicePackage.InvalidCapacity, Exception { try{ // Check allocation_id if (frontend_tuner_allocation.allocation_id != null && frontend_tuner_allocation.allocation_id.getValue().isEmpty()) { - logger.info("allocateTuner: MISSING ALLOCATION_ID"); + _deviceLog.info("allocateTuner: MISSING ALLOCATION_ID"); throw new CF.DevicePackage.InvalidCapacity("MISSING ALLOCATION ID", new CF.DataType[]{new DataType("frontend_tuner_allocation", frontend_tuner_allocation.toAny())}); } // Check if allocation ID has already been used if(this.getTunerMapping(frontend_tuner_allocation.allocation_id.getValue()) >= 0){ - logger.info("allocateTuner: ALLOCATION_ID ALREADY IN USE: [" + 
frontend_tuner_allocation.allocation_id.getValue() + "]"); + _deviceLog.info("allocateTuner: ALLOCATION_ID ALREADY IN USE: [" + frontend_tuner_allocation.allocation_id.getValue() + "]"); throw new InvalidCapacity("ALLOCATION_ID ALREADY IN USE", new CF.DataType[]{new DataType("frontend_tuner_allocation", frontend_tuner_allocation.toAny())}); } // Check if available tuner @@ -387,14 +576,14 @@ public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f // Next, try to allocate a new tuner for (int tuner_id = 0; tuner_id < this.tuner_allocation_ids.size(); tuner_id++) { if(!frontend_tuner_status.getValue().get(tuner_id).tuner_type.getValue().equals(frontend_tuner_allocation.tuner_type.getValue())) { - logger.debug("allocateTuner: Requested tuner type '"+frontend_tuner_allocation.tuner_type.getValue() +"' does not match tuner[" + tuner_id + "].tuner_type (" + frontend_tuner_status.getValue().get(tuner_id).tuner_type.getValue()+")"); + _deviceLog.debug("allocateTuner: Requested tuner type '"+frontend_tuner_allocation.tuner_type.getValue() +"' does not match tuner[" + tuner_id + "].tuner_type (" + frontend_tuner_status.getValue().get(tuner_id).tuner_type.getValue()+")"); continue; } if(frontend_tuner_allocation.group_id != null && !frontend_tuner_allocation.group_id.getValue().isEmpty() && !frontend_tuner_allocation.group_id.getValue().equals(frontend_tuner_status.getValue().get(tuner_id).group_id.getValue()) ){ - logger.debug("allocateTuner: Requested group_id '" + frontend_tuner_allocation.group_id.getValue() + "' does not match tuner[" + tuner_id + "].group_id (" + this.frontend_tuner_status.getValue().get(tuner_id).group_id.getValue() +")"); + _deviceLog.debug("allocateTuner: Requested group_id '" + frontend_tuner_allocation.group_id.getValue() + "' does not match tuner[" + tuner_id + "].group_id (" + this.frontend_tuner_status.getValue().get(tuner_id).group_id.getValue() +")"); continue; } @@ -403,7 +592,7 @@ public boolean 
allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f !frontend_tuner_allocation.rf_flow_id.getValue().isEmpty() && !frontend_tuner_allocation.rf_flow_id.getValue().equals(frontend_tuner_status.getValue().get(tuner_id).rf_flow_id.getValue()) && !frontend_tuner_allocation.tuner_type.equals("CHANNELIZER")){ - logger.debug("allocateTuner: Requested rf_flow_id '" + frontend_tuner_allocation.rf_flow_id.getValue() +"' does not match tuner[" + tuner_id + "].rf_flow_id (" + this.frontend_tuner_status.getValue().get(tuner_id).rf_flow_id.getValue() + ")"); + _deviceLog.debug("allocateTuner: Requested rf_flow_id '" + frontend_tuner_allocation.rf_flow_id.getValue() +"' does not match tuner[" + tuner_id + "].rf_flow_id (" + this.frontend_tuner_status.getValue().get(tuner_id).rf_flow_id.getValue() + ")"); continue; } @@ -417,7 +606,7 @@ public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f frontend_tuner_status.getValue().get(tuner_id).sample_rate.setValue(frontend_tuner_allocation.sample_rate.getValue()); if(tuner_allocation_ids.get(tuner_id).control_allocation_id != null && (!tuner_allocation_ids.get(tuner_id).control_allocation_id.isEmpty() || - !deviceSetTuning(frontend_tuner_allocation, frontend_tuner_status.getValue().get(tuner_id), tuner_id))){ + !callDeviceSetTuning(frontend_tuner_allocation, frontend_tuner_status.getValue().get(tuner_id), tuner_id))){ // either not available or didn't succeed setting tuning, try next tuner if (frontend_tuner_status.getValue().get(tuner_id).bandwidth.getValue().equals(frontend_tuner_allocation.bandwidth.getValue())) frontend_tuner_status.getValue().get(tuner_id).bandwidth.setValue(orig_bw); @@ -425,7 +614,7 @@ public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f frontend_tuner_status.getValue().get(tuner_id).center_frequency.setValue(orig_cf); if 
(frontend_tuner_status.getValue().get(tuner_id).sample_rate.getValue().equals(frontend_tuner_allocation.sample_rate.getValue())) frontend_tuner_status.getValue().get(tuner_id).sample_rate.setValue(orig_sr); - logger.debug("allocateTuner: Tuner["+tuner_id+"] is either not available or didn't succeed while setting tuning "); + _deviceLog.debug("allocateTuner: Tuner["+tuner_id+"] is either not available or didn't succeed while setting tuning "); continue; } tuner_allocation_ids.get(tuner_id).control_allocation_id = frontend_tuner_allocation.allocation_id.getValue(); @@ -436,13 +625,13 @@ public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f if(frontend_tuner_allocation.tuner_type.getValue().equals("CHANNELIZER") || frontend_tuner_allocation.tuner_type.getValue().equals("TX")){ String eout; eout = frontend_tuner_allocation.tuner_type.getValue() + " allocation with device_control=false is invalid."; - logger.debug(eout); + _deviceLog.debug(eout); throw new CF.DevicePackage.InvalidCapacity(eout, new CF.DataType[]{new DataType("frontend_tuner_allocation", frontend_tuner_allocation.toAny())}); } // listener if(tuner_allocation_ids.get(tuner_id).control_allocation_id.isEmpty() || !listenerRequestValidation(frontend_tuner_allocation, tuner_id)){ // either not allocated or can't support listener request - logger.debug("allocateTuner: Tuner["+tuner_id+"] is either not available or can not support listener request "); + _deviceLog.debug("allocateTuner: Tuner["+tuner_id+"] is either not available or can not support listener request "); continue; } tuner_allocation_ids.get(tuner_id).listener_allocation_ids.add(frontend_tuner_allocation.allocation_id.getValue()); @@ -454,21 +643,21 @@ public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f // check tolerances // only check when sample_rate was not set to don't care) - logger.debug(" allocateTuner - SR requested: " + frontend_tuner_allocation.sample_rate.getValue() + " SR got: " + 
frontend_tuner_status.getValue().get(tuner_id).sample_rate.getValue()); + _deviceLog.debug(" allocateTuner - SR requested: " + frontend_tuner_allocation.sample_rate.getValue() + " SR got: " + frontend_tuner_status.getValue().get(tuner_id).sample_rate.getValue()); if( (floatingPointCompare(frontend_tuner_allocation.sample_rate.getValue(),0)!=0) && (floatingPointCompare(frontend_tuner_status.getValue().get(tuner_id).sample_rate.getValue(),frontend_tuner_allocation.sample_rate.getValue())<0 || floatingPointCompare(frontend_tuner_status.getValue().get(tuner_id).sample_rate.getValue(),frontend_tuner_allocation.sample_rate.getValue()+frontend_tuner_allocation.sample_rate.getValue() * frontend_tuner_allocation.sample_rate_tolerance.getValue()/100.0)>0 )){ String eout = "allocateTuner(" + tuner_id + "): returned sr " + frontend_tuner_status.getValue().get(tuner_id).sample_rate.getValue()+" does not meet tolerance criteria of " + frontend_tuner_allocation.sample_rate_tolerance.getValue()+" percent"; - logger.info(eout); + _deviceLog.info(eout); throw new RuntimeException(eout); } - logger.debug(" allocateTuner - BW requested: " + frontend_tuner_allocation.bandwidth.getValue() + " BW got: " + frontend_tuner_status.getValue().get(tuner_id).bandwidth.getValue()); + _deviceLog.debug(" allocateTuner - BW requested: " + frontend_tuner_allocation.bandwidth.getValue() + " BW got: " + frontend_tuner_status.getValue().get(tuner_id).bandwidth.getValue()); // Only check when bandwidth was not set to don't care if( (floatingPointCompare(frontend_tuner_allocation.bandwidth.getValue(),0)!=0) && (floatingPointCompare(frontend_tuner_status.getValue().get(tuner_id).bandwidth.getValue(),frontend_tuner_allocation.bandwidth.getValue())<0 || floatingPointCompare(frontend_tuner_status.getValue().get(tuner_id).bandwidth.getValue(),frontend_tuner_allocation.bandwidth.getValue()+frontend_tuner_allocation.bandwidth.getValue() * frontend_tuner_allocation.bandwidth_tolerance.getValue()/100.0)>0 )){ 
String eout = "allocateTuner(" + tuner_id + "): returned bw " + frontend_tuner_status.getValue().get(tuner_id).bandwidth.getValue() + " does not meet tolerance criteria of " + frontend_tuner_allocation.bandwidth_tolerance.getValue() + " percent"; - logger.info(eout); + _deviceLog.info(eout); throw new RuntimeException(eout); } @@ -478,7 +667,7 @@ public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f enableTuner(tuner_id,true); } catch(Exception e){ String eout = "allocateTuner: Failed to enable tuner after allocation"; - logger.info(eout); + _deviceLog.info(eout); throw new RuntimeException(eout); } } @@ -487,7 +676,7 @@ public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f } // if we made it here, we failed to find an available tuner String eout = "allocateTuner: NO AVAILABLE TUNER. Make sure that the device has an initialized frontend_tuner_status"; - logger.info(eout); + _deviceLog.info(eout); throw new RuntimeException(eout); //} } catch(RuntimeException e) { @@ -509,16 +698,16 @@ public boolean allocateTuner(frontend.FETypes.frontend_tuner_allocation_struct f public void deallocateTuner(frontend.FETypes.frontend_tuner_allocation_struct frontend_tuner_deallocation){ try{ - //logger.debug("deallocateTuner()"); + //_deviceLog.debug("deallocateTuner()"); // Try to remove control of the device int tuner_id = this.getTunerMapping(frontend_tuner_deallocation.allocation_id.getValue()); if (tuner_id < 0){ - logger.debug("ALLOCATION_ID NOT FOUND: [" + frontend_tuner_deallocation.allocation_id.getValue() +"]"); + _deviceLog.debug("ALLOCATION_ID NOT FOUND: [" + frontend_tuner_deallocation.allocation_id.getValue() +"]"); throw new CF.DevicePackage.InvalidCapacity("ALLOCATION_ID NOT FOUND", new CF.DataType[]{new DataType("frontend_tuner_deallocation", frontend_tuner_deallocation.toAny())}); } - //logger.debug("deallocateTuner() tuner_id = " + tuner_id); + //_deviceLog.debug("deallocateTuner() tuner_id = " + tuner_id); 
if(tuner_allocation_ids.get(tuner_id).control_allocation_id.equals(frontend_tuner_deallocation.allocation_id.getValue())){ - //logger.debug("deallocateTuner() deallocating control for tuner_id = " + tuner_id); + //_deviceLog.debug("deallocateTuner() deallocating control for tuner_id = " + tuner_id); enableTuner(tuner_id, false); frontend_tuner_status.getValue().get(tuner_id).allocation_id_csv.setValue(""); removeTunerMapping(tuner_id); @@ -535,37 +724,38 @@ public void deallocateTuner(frontend.FETypes.frontend_tuner_allocation_struct fr } public boolean allocateListener(frontend.FETypes.frontend_listener_allocation_struct frontend_listener_allocation) throws CF.DevicePackage.InvalidCapacity, Exception { + //public boolean allocateListener() throws CF.DevicePackage.InvalidCapacity, Exception { try{ // Check validity of allocation_id's if (frontend_listener_allocation.existing_allocation_id == null || frontend_listener_allocation.existing_allocation_id.getValue().isEmpty()){ - logger.info("allocateListener: MISSING EXISTING ALLOCATION ID"); + _deviceLog.info("allocateListener: MISSING EXISTING ALLOCATION ID"); throw new CF.DevicePackage.InvalidCapacity("MISSING EXISTING ALLOCATION ID", new CF.DataType[]{new DataType("frontend_listener_allocation", frontend_listener_allocation.toAny())}); } if (frontend_listener_allocation.listener_allocation_id == null || frontend_listener_allocation.listener_allocation_id.getValue().isEmpty()){ - logger.info("allocateListener: MISSING LISTENER ALLOCATION ID"); + _deviceLog.info("allocateListener: MISSING LISTENER ALLOCATION ID"); throw new CF.DevicePackage.InvalidCapacity("MISSING LISTENER ALLOCATION ID", new CF.DataType[]{new DataType("frontend_listener_allocation", frontend_listener_allocation.toAny())}); } //synchronized(allocation_id_mapping_lock){ // Check if listener allocation ID has already been used if(getTunerMapping(frontend_listener_allocation.listener_allocation_id.getValue()) >= 0){ - logger.error("allocateListener: 
LISTENER ALLOCATION_ID ALREADY IN USE"); + _deviceLog.error("allocateListener: LISTENER ALLOCATION_ID ALREADY IN USE"); throw new InvalidCapacity("LISTENER ALLOCATION_ID ALREADY IN USE", new CF.DataType[]{new DataType("frontend_listener_allocation", frontend_listener_allocation.toAny())}); } // Do not allocate if existing allocation ID does not exist int tuner_id = getTunerMapping(frontend_listener_allocation.existing_allocation_id.getValue()); if (tuner_id < 0){ - logger.info("allocateListener: UNKNOWN CONTROL ALLOCATION ID: ["+ frontend_listener_allocation.existing_allocation_id.getValue() +"]"); + _deviceLog.info("allocateListener: UNKNOWN CONTROL ALLOCATION ID: ["+ frontend_listener_allocation.existing_allocation_id.getValue() +"]"); throw new FRONTEND.BadParameterException("UNKNOWN CONTROL ALLOCATION ID"); } // listener allocations are not permitted for channelizers or TX if(frontend_tuner_status.getValue().get(tuner_id).tuner_type.getValue().equals("CHANNELIZER") || frontend_tuner_status.getValue().get(tuner_id).tuner_type.getValue().equals("TX")){ String eout = "allocateListener: listener allocations are not permitted for " + frontend_tuner_status.getValue().get(tuner_id).tuner_type.getValue() + " tuner type"; - logger.debug(eout); + _deviceLog.debug(eout); throw new CF.DevicePackage.InvalidCapacity(eout, new CF.DataType[]{new DataType("frontend_listener_allocation", frontend_listener_allocation.toAny())}); } @@ -593,11 +783,11 @@ public void deallocateListener(frontend.FETypes.frontend_listener_allocation_str try{ int tuner_id = getTunerMapping(frontend_listener_allocation.listener_allocation_id.getValue()); if (tuner_id < 0){ - logger.debug("ALLOCATION_ID NOT FOUND: [" + frontend_listener_allocation.listener_allocation_id.getValue() + "]"); + _deviceLog.debug("ALLOCATION_ID NOT FOUND: [" + frontend_listener_allocation.listener_allocation_id.getValue() + "]"); throw new CF.DevicePackage.InvalidCapacity("ALLOCATION_ID NOT FOUND", new CF.DataType[]{new 
DataType("frontend_listener_allocation", frontend_listener_allocation.toAny())}); } if (tuner_allocation_ids.get(tuner_id).control_allocation_id.equals(frontend_listener_allocation.listener_allocation_id.getValue())) { - logger.debug("Controlling allocation id cannot be used as a listener id: [" + frontend_listener_allocation.listener_allocation_id.getValue() + "]"); + _deviceLog.debug("Controlling allocation id cannot be used as a listener id: [" + frontend_listener_allocation.listener_allocation_id.getValue() + "]"); throw new CF.DevicePackage.InvalidCapacity("Controlling allocation id cannot be used as a listener id", new CF.DataType[]{new DataType("frontend_listener_allocation", frontend_listener_allocation.toAny())}); } // send EOS to listener connection only @@ -606,7 +796,7 @@ public void deallocateListener(frontend.FETypes.frontend_listener_allocation_str } catch (CF.DevicePackage.InvalidCapacity invalidcap){ throw invalidcap; } catch (Exception e){ - logger.debug("deallocateListener: ERROR WHEN DEALLOCATING. SKIPPING..."); + _deviceLog.debug("deallocateListener: ERROR WHEN DEALLOCATING. 
SKIPPING..."); } } @@ -650,20 +840,20 @@ protected boolean enableTuner(int tuner_id, boolean enable){ } protected boolean listenerRequestValidation(frontend.FETypes.frontend_tuner_allocation_struct request, int tuner_id){ - logger.trace("listenerRequestValidation() tuner_id " + tuner_id); + _deviceLog.trace("listenerRequestValidation() tuner_id " + tuner_id); // ensure requested values are non-negative if(floatingPointCompare(request.center_frequency.getValue(),0)<0 || floatingPointCompare(request.bandwidth.getValue(),0)<0 || floatingPointCompare(request.sample_rate.getValue(),0)<0 || floatingPointCompare(request.bandwidth_tolerance.getValue(),0)<0 || floatingPointCompare(request.sample_rate_tolerance.getValue(),0)<0) return false; // ensure lower end of requested band fits if( floatingPointCompare((request.center_frequency.getValue()-(request.bandwidth.getValue()*0.5)),(frontend_tuner_status.getValue().get(tuner_id).center_frequency.getValue()-(frontend_tuner_status.getValue().get(tuner_id).bandwidth.getValue()*0.5))) < 0 ){ - logger.trace("listenerRequestValidation() FAILED LOWER END TEST"); + _deviceLog.trace("listenerRequestValidation() FAILED LOWER END TEST"); return false; } // ensure upper end of requested band fits if( floatingPointCompare((request.center_frequency.getValue() + (request.bandwidth.getValue()*0.5)),(frontend_tuner_status.getValue().get(tuner_id).center_frequency.getValue() + (frontend_tuner_status.getValue().get(tuner_id).bandwidth.getValue()*0.5))) > 0 ){ - logger.trace("listenerRequestValidation() FAILED UPPER END TEST"); + _deviceLog.trace("listenerRequestValidation() FAILED UPPER END TEST"); return false; } @@ -689,7 +879,7 @@ protected boolean listenerRequestValidation(frontend.FETypes.frontend_tuner_allo //////////////////////////// protected int getTunerMapping(String allocation_id){ - //logger.trace("getTunerMapping() allocation_id " + allocation_id); + //_deviceLog.trace("getTunerMapping() allocation_id " + allocation_id); int 
NO_VALID_TUNER = -1; if (allocation_id_to_tuner_id.containsKey(allocation_id)){ @@ -729,7 +919,7 @@ protected void sendEOS(String allocation_id) { } protected boolean removeTunerMapping(int tuner_id, String allocation_id){ - logger.trace("removeTunerMapping() tuner_id " + tuner_id + ", allocation_id " + allocation_id); + _deviceLog.trace("removeTunerMapping() tuner_id " + tuner_id + ", allocation_id " + allocation_id); removeListener(allocation_id); this.sendEOS(allocation_id); Iterator iter = tuner_allocation_ids.get(tuner_id).listener_allocation_ids.iterator(); @@ -749,7 +939,7 @@ protected boolean removeTunerMapping(int tuner_id, String allocation_id){ } protected boolean removeTunerMapping(int tuner_id){ - logger.trace("removeTunerMapping() tuner_id " + tuner_id); + _deviceLog.trace("removeTunerMapping() tuner_id " + tuner_id); deviceDeleteTuning(frontend_tuner_status.getValue().get(tuner_id),tuner_id); removeAllocationIdRouting(tuner_id); @@ -790,11 +980,11 @@ protected void removeListener(final String listen_alloc_id){ * "places" is used to specify precision. The default is 1, which * uses a single decimal place of precision. 
*/ - public double floatingPointCompare(double lhs, double rhs){ + static public double floatingPointCompare(double lhs, double rhs){ return floatingPointCompare(lhs, rhs, 1); } - public double floatingPointCompare(double lhs, double rhs, int places){ + static public double floatingPointCompare(double lhs, double rhs, int places){ return java.lang.Math.rint((lhs-rhs)*java.lang.Math.pow(10.0,(double)places)); } @@ -802,7 +992,7 @@ public double floatingPointCompare(double lhs, double rhs, int places){ * true if the value requested_val falls within the range [available_min:available_max] * False is returned if min > max */ - public boolean validateRequest(double available_min, double available_max, double requested_val){ + static public boolean validateRequest(double available_min, double available_max, double requested_val){ if(floatingPointCompare(requested_val,available_min) < 0) return false; if(floatingPointCompare(requested_val,available_max) > 0) return false; if(floatingPointCompare(available_min,available_max) > 0) return false; @@ -813,9 +1003,10 @@ public boolean validateRequest(double available_min, double available_max, doubl * [requested_min:requested_max] falls within the range [available_min:available_max] * False is returned if min > max for either available for requested values */ - public boolean validateRequest(double available_min, double available_max, double requested_min, double requested_max){ - if(floatingPointCompare(requested_min,available_min) < 0) return false; - if(floatingPointCompare(requested_max,available_max) > 0) return false; + static public boolean validateRequest(double available_min, double available_max, double requested_min, double requested_max){ + double center_request = (requested_max+requested_min)/2.0; + if(floatingPointCompare(center_request,available_min) < 0) return false; + if(floatingPointCompare(center_request,available_max) > 0) return false; if(floatingPointCompare(available_min,available_max) > 0) return false; 
if(floatingPointCompare(requested_min,requested_max) > 0) return false; return true; @@ -869,7 +1060,7 @@ public void reset(){ "frontend_tuner_allocation", //name frontend.FETypes.frontend_tuner_allocation_struct.class, //type new frontend.FETypes.frontend_tuner_allocation_struct(), //default value - Mode.READWRITE, //mode + Mode.WRITEONLY, //mode new Kind[] {Kind.ALLOCATION} //kind ); @@ -879,12 +1070,10 @@ public void reset(){ "frontend_listener_allocation", //name frontend.FETypes.frontend_listener_allocation_struct.class, //type new frontend.FETypes.frontend_listener_allocation_struct(), //default value - Mode.READWRITE, //mode + Mode.WRITEONLY, //mode new Kind[] {Kind.ALLOCATION} //kind ); - private Class frontend_tuner_status_class_type; - protected StructSequenceProperty frontend_tuner_status = new StructSequenceProperty ( "FRONTEND::tuner_status", //id diff --git a/frontendInterfaces/libsrc/java/src/frontend/InAnalogScanningTunerPort.java b/frontendInterfaces/libsrc/java/src/frontend/InAnalogScanningTunerPort.java new file mode 100644 index 000000000..e728c5dea --- /dev/null +++ b/frontendInterfaces/libsrc/java/src/frontend/InAnalogScanningTunerPort.java @@ -0,0 +1,389 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK frontendInterfaces. + * + * REDHAWK frontendInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK frontendInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package frontend; + +import FRONTEND.FrontendException; +import FRONTEND.BadParameterException; +import FRONTEND.NotSupportedException; +import FRONTEND.AnalogScanningTunerHelper; +import frontend.AnalogScanningTunerDelegate; +import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; +import FRONTEND.ScanningTunerPackage.ScanStatus; +import FRONTEND.ScanningTunerPackage.ScanStrategy; +import BULKIO.PrecisionUTCTime; + +public class InAnalogScanningTunerPort extends FRONTEND.AnalogScanningTunerPOA implements PortBase { + + protected String name; + + protected Object portAccess = null; + + protected AnalogScanningTunerDelegate delegate = null; + + public InAnalogScanningTunerPort(String portName) { + this(portName, null); + } + + public InAnalogScanningTunerPort(String portName, + AnalogScanningTunerDelegate d){ + this.name = portName; + this.delegate = d; + this.portAccess = new Object(); + } + + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + + public String getTunerType(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerType(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerType(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public boolean getTunerDeviceControl(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerDeviceControl(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerDeviceControl(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new 
RuntimeException(e); + } + } + } + + public String getTunerGroupId(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerGroupId(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerGroupId(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public String getTunerRfFlowId(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerRfFlowId(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerRfFlowId(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public CF.DataType[] getTunerStatus(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerStatus(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerStatus(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerCenterFrequency(String id, double freq) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerCenterFrequency(id, freq); + } else { + throw new RuntimeException("InAnalogScanningTunerPort setTunerCenterFrequency(String id, double freq) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public double getTunerCenterFrequency(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerCenterFrequency(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerCenterFrequency(String id) 
callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerBandwidth(String id, double bw) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerBandwidth(id, bw); + } else { + throw new RuntimeException("InAnalogScanningTunerPort setTunerBandwidth(String id, double bw) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public double getTunerBandwidth(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerBandwidth(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerBandwidth(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerAgcEnable(String id, boolean enable) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerAgcEnable(id, enable); + } else { + throw new RuntimeException("InAnalogScanningTunerPort setTunerAgcEnable(String id, boolean enable) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public boolean getTunerAgcEnable(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerAgcEnable(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerAgcEnable(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerGain(String id, float gain) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + 
delegate.setTunerGain(id, gain); + } else { + throw new RuntimeException("InAnalogScanningTunerPort setTunerGain(String id, float gain) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public float getTunerGain(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerGain(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerGain(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerReferenceSource(String id, int source) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerReferenceSource(id, source); + } else { + throw new RuntimeException("InAnalogScanningTunerPort setTunerReferenceSource(String id, int source) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public int getTunerReferenceSource(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerReferenceSource(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerReferenceSource(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerEnable(String id, boolean enable) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerEnable(id, enable); + } else { + throw new RuntimeException("InAnalogScanningTunerPort setTunerEnable(String id, boolean enable) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new 
RuntimeException(e); + } + } + } + + public boolean getTunerEnable(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerEnable(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getTunerEnable(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public FRONTEND.ScanningTunerPackage.ScanStatus getScanStatus(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getScanStatus(id); + } else { + throw new RuntimeException("InAnalogScanningTunerPort getScanStatus(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setScanStartTime(String id, BULKIO.PrecisionUTCTime start_time) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setScanStartTime(id, start_time); + } else { + throw new RuntimeException("InAnalogScanningTunerPort setScanStartTime(String id, BULKIO.PrecisionUTCTime start_time) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setScanStrategy(String id, FRONTEND.ScanningTunerPackage.ScanStrategy scan_strategy) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setScanStrategy(id, scan_strategy); + } else { + throw new RuntimeException("InAnalogScanningTunerPort setScanStrategy(String id, FRONTEND.ScanningTunerPackage.ScanStrategy scan_strategy) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setDelegate( AnalogScanningTunerDelegate d ) { + delegate = d; + } + + public String 
getRepid() { + return AnalogScanningTunerHelper.id(); + } + + public String getDirection() { + return "Provides"; + } +} diff --git a/frontendInterfaces/libsrc/java/src/frontend/InAnalogTunerPort.java b/frontendInterfaces/libsrc/java/src/frontend/InAnalogTunerPort.java index 2347b0915..5f1ec9681 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/InAnalogTunerPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/InAnalogTunerPort.java @@ -25,6 +25,7 @@ import FRONTEND.AnalogTunerHelper; import frontend.AnalogTunerDelegate; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; public class InAnalogTunerPort extends FRONTEND.AnalogTunerPOA implements PortBase { @@ -45,6 +46,12 @@ public InAnalogTunerPort(String portName, this.portAccess = new Object(); } + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + public String getTunerType(String id) { synchronized(this.portAccess){ try{ diff --git a/frontendInterfaces/libsrc/java/src/frontend/InDigitalScanningTunerPort.java b/frontendInterfaces/libsrc/java/src/frontend/InDigitalScanningTunerPort.java new file mode 100644 index 000000000..3ba9b624c --- /dev/null +++ b/frontendInterfaces/libsrc/java/src/frontend/InDigitalScanningTunerPort.java @@ -0,0 +1,421 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK frontendInterfaces. + * + * REDHAWK frontendInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK frontendInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package frontend; + +import FRONTEND.FrontendException; +import FRONTEND.BadParameterException; +import FRONTEND.NotSupportedException; +import FRONTEND.DigitalScanningTunerHelper; +import frontend.DigitalScanningTunerDelegate; +import FRONTEND.ScanningTunerPackage.ScanStatus; +import FRONTEND.ScanningTunerPackage.ScanStrategy; +import BULKIO.PrecisionUTCTime; +import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; + +public class InDigitalScanningTunerPort extends FRONTEND.DigitalScanningTunerPOA implements PortBase { + + protected String name; + + protected Object portAccess = null; + + protected DigitalScanningTunerDelegate delegate = null; + + public InDigitalScanningTunerPort( String portName) { + this(portName, null); + } + + public InDigitalScanningTunerPort( String portName, + DigitalScanningTunerDelegate d) { + this.name = portName; + this.delegate = d; + this.portAccess = new Object(); + } + + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + + public String getTunerType(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerType(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerType(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public boolean getTunerDeviceControl(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerDeviceControl(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerDeviceControl(String id) callback delegate not defined"); + } + } 
catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public String getTunerGroupId(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerGroupId(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerGroupId(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public String getTunerRfFlowId(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerRfFlowId(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerRfFlowId(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public CF.DataType[] getTunerStatus(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerStatus(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerStatus(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerCenterFrequency(String id, double freq) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerCenterFrequency(id, freq); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setTunerCenterFrequency(String id, double freq) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public double getTunerCenterFrequency(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerCenterFrequency(id); + } else { 
+ throw new RuntimeException("InDigitalScanningTunerPort getTunerCenterFrequency(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerBandwidth(String id, double bw) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerBandwidth(id, bw); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setTunerBandwidth(String id, double bw) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public double getTunerBandwidth(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerBandwidth(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerBandwidth(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerAgcEnable(String id, boolean enable) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerAgcEnable(id, enable); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setTunerAgcEnable(String id, boolean enable) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public boolean getTunerAgcEnable(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerAgcEnable(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerAgcEnable(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void 
setTunerGain(String id, float gain) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerGain(id, gain); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setTunerGain(String id, float gain) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public float getTunerGain(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerGain(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerGain(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerReferenceSource(String id, int source) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerReferenceSource(id, source); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setTunerReferenceSource(String id, int source) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public int getTunerReferenceSource(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerReferenceSource(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerReferenceSource(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerEnable(String id, boolean enable) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerEnable(id, enable); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setTunerEnable(String id, boolean enable) callback delegate not 
defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public boolean getTunerEnable(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerEnable(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerEnable(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setTunerOutputSampleRate(String id, double sr) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setTunerOutputSampleRate(id, sr); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setTunerOutputSampleRate(String id, double sr) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public double getTunerOutputSampleRate(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getTunerOutputSampleRate(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getTunerOutputSampleRate(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public FRONTEND.ScanningTunerPackage.ScanStatus getScanStatus(String id) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + return delegate.getScanStatus(id); + } else { + throw new RuntimeException("InDigitalScanningTunerPort getScanStatus(String id) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setScanStartTime(String id, BULKIO.PrecisionUTCTime start_time) { + synchronized(this.portAccess){ + 
try{ + if ( delegate != null ){ + delegate.setScanStartTime(id, start_time); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setScanStartTime(String id, BULKIO.PrecisionUTCTime start_time) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setScanStrategy(String id, FRONTEND.ScanningTunerPackage.ScanStrategy scan_strategy) { + synchronized(this.portAccess){ + try{ + if ( delegate != null ){ + delegate.setScanStrategy(id, scan_strategy); + } else { + throw new RuntimeException("InDigitalScanningTunerPort setScanStrategy(String id, FRONTEND.ScanningTunerPackage.ScanStrategy scan_strategy) callback delegate not defined"); + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); + } + } + } + + public void setDelegate( DigitalScanningTunerDelegate d ) { + delegate = d; + } + + public String getRepid() { + return DigitalScanningTunerHelper.id(); + } + + public String getDirection() { + return "Provides"; + } +} diff --git a/frontendInterfaces/libsrc/java/src/frontend/InDigitalTunerPort.java b/frontendInterfaces/libsrc/java/src/frontend/InDigitalTunerPort.java index a915121dc..618851241 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/InDigitalTunerPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/InDigitalTunerPort.java @@ -25,6 +25,7 @@ import FRONTEND.DigitalTunerHelper; import frontend.DigitalTunerDelegate; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; public class InDigitalTunerPort extends FRONTEND.DigitalTunerPOA implements PortBase { @@ -45,6 +46,12 @@ public InDigitalTunerPort( String portName, this.portAccess = new Object(); } + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + public String getTunerType(String id) { synchronized(this.portAccess){ 
try{ diff --git a/frontendInterfaces/libsrc/java/src/frontend/InFrontendTunerPort.java b/frontendInterfaces/libsrc/java/src/frontend/InFrontendTunerPort.java index 21c9f1380..39d691247 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/InFrontendTunerPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/InFrontendTunerPort.java @@ -25,6 +25,7 @@ import FRONTEND.FrontendTunerHelper; import frontend.FrontendTunerDelegate; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; public class InFrontendTunerPort extends FRONTEND.FrontendTunerPOA implements PortBase { @@ -45,6 +46,11 @@ public InFrontendTunerPort( String portName, this.portAccess = new Object(); } + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } public String getTunerType(String id) { synchronized(this.portAccess){ diff --git a/frontendInterfaces/libsrc/java/src/frontend/InGPSPort.java b/frontendInterfaces/libsrc/java/src/frontend/InGPSPort.java index 1b0d12157..925c0da63 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/InGPSPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/InGPSPort.java @@ -24,6 +24,7 @@ import FRONTEND.GPSHelper; import frontend.GPSDelegate; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; public class InGPSPort extends FRONTEND.GPSPOA implements PortBase { @@ -44,6 +45,12 @@ public InGPSPort( String portName, this.portAccess = new Object(); } + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + public GPSInfo gps_info() { synchronized(this.portAccess){ try{ diff --git a/frontendInterfaces/libsrc/java/src/frontend/InNavDataPort.java b/frontendInterfaces/libsrc/java/src/frontend/InNavDataPort.java index 0b49aafb3..d5a72d710 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/InNavDataPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/InNavDataPort.java @@ -23,6 +23,7 @@ import 
FRONTEND.NavDataHelper; import frontend.NavDataDelegate; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; // ---------------------------------------------------------------------------------------- // InNavDataPort definition @@ -46,6 +47,12 @@ public InNavDataPort( String portName, this.portAccess = new Object(); } + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + public NavigationPacket nav_packet() { synchronized(this.portAccess){ try{ diff --git a/frontendInterfaces/libsrc/java/src/frontend/InRFInfoPort.java b/frontendInterfaces/libsrc/java/src/frontend/InRFInfoPort.java index 215867c37..6932a4d53 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/InRFInfoPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/InRFInfoPort.java @@ -23,6 +23,7 @@ import FRONTEND.RFInfoPkt; import FRONTEND.RFInfoHelper; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; public class InRFInfoPort extends FRONTEND.RFInfoPOA implements PortBase { @@ -43,6 +44,12 @@ public InRFInfoPort( String portName, this.portAccess = new Object(); } + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + public String rf_flow_id() { synchronized(this.portAccess){ try{ diff --git a/frontendInterfaces/libsrc/java/src/frontend/InRFSourcePort.java b/frontendInterfaces/libsrc/java/src/frontend/InRFSourcePort.java index 860134f08..24c287ca6 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/InRFSourcePort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/InRFSourcePort.java @@ -23,6 +23,7 @@ import FRONTEND.RFSourceHelper; import frontend.RFSourceDelegate; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; public class InRFSourcePort extends FRONTEND.RFSourcePOA implements PortBase { @@ -43,6 +44,12 @@ public InRFSourcePort( String portName, this.portAccess = new Object(); } + public RHLogger 
_portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + public RFInfoPkt[] available_rf_inputs() { synchronized(this.portAccess){ try{ diff --git a/frontendInterfaces/libsrc/java/src/frontend/OutAnalogTunerPort.java b/frontendInterfaces/libsrc/java/src/frontend/OutAnalogTunerPort.java index 843e5e08e..6103f5d5e 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/OutAnalogTunerPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/OutAnalogTunerPort.java @@ -28,8 +28,9 @@ import FRONTEND.BadParameterException; import FRONTEND.NotSupportedException; import org.ossie.component.PortBase; +import org.ossie.redhawk.PortCallError; -public class OutAnalogTunerPort extends QueryableUsesPort implements AnalogTunerOperations, PortBase { +public class OutAnalogTunerPort extends QueryableUsesPort implements PortBase { /** * Map of connection Ids to port objects @@ -70,313 +71,555 @@ public void disconnectPort(final String connectionId) { } } - public String getTunerType(String id) { + public String getTunerType(String id) throws PortCallError + { + return this.getTunerType(id, ""); + } + public String getTunerType(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerType(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerType(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerType(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { 
+ throw new RuntimeException(e); } } } - return retval; } - public boolean getTunerDeviceControl(String id) { + public boolean getTunerDeviceControl(String id) throws PortCallError + { + return this.getTunerDeviceControl(id, ""); + } + public boolean getTunerDeviceControl(String id, String __connection_id__) throws PortCallError + { boolean retval = false; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerDeviceControl(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerDeviceControl(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerDeviceControl(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } - return retval; } - public String getTunerGroupId(String id) { + public String getTunerGroupId(String id) throws PortCallError + { + return this.getTunerGroupId(id, ""); + } + public String getTunerGroupId(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerGroupId(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = 
this.outPorts.get(__connection_id__).getTunerGroupId(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerGroupId(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public String getTunerRfFlowId(String id) { + public String getTunerRfFlowId(String id) throws PortCallError + { + return this.getTunerRfFlowId(id, ""); + } + public String getTunerRfFlowId(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerRfFlowId(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerRfFlowId(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerRfFlowId(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public CF.DataType[] getTunerStatus(String id) { + public CF.DataType[] getTunerStatus(String id) throws PortCallError + { + return this.getTunerStatus(id, ""); + } + public CF.DataType[] getTunerStatus(String id, String __connection_id__) throws PortCallError + { CF.DataType[] retval = null; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - 
retval = p.getTunerStatus(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerStatus(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerStatus(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerCenterFrequency(String id, double freq) { - synchronized(this.updatingPortsLock) { + public void setTunerCenterFrequency(String id, double data) throws PortCallError + { + this.setTunerCenterFrequency(id, data, ""); + } + + public void setTunerCenterFrequency(String id, double data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - p.setTunerCenterFrequency(id, freq); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerCenterFrequency(id, data); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + p.setTunerCenterFrequency(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public double getTunerCenterFrequency(String id) { + public double getTunerCenterFrequency(String id) throws PortCallError + { + return this.getTunerCenterFrequency(id, ""); + } + public double getTunerCenterFrequency(String id, String __connection_id__) throws PortCallError + { double retval = 0.0; - synchronized(this.updatingPortsLock) { + 
synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerCenterFrequency(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerCenterFrequency(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerCenterFrequency(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerBandwidth(String id, double bw) { - synchronized(this.updatingPortsLock) { + public void setTunerBandwidth(String id, double data) throws PortCallError + { + this.setTunerBandwidth(id, data, ""); + } + + public void setTunerBandwidth(String id, double data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - p.setTunerBandwidth(id, bw); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerBandwidth(id, data); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + p.setTunerBandwidth(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public double getTunerBandwidth(String id) { + public double getTunerBandwidth(String id) throws PortCallError + { 
+ return this.getTunerBandwidth(id, ""); + } + public double getTunerBandwidth(String id, String __connection_id__) throws PortCallError + { double retval = 0.0; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerBandwidth(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerBandwidth(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerBandwidth(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerAgcEnable(String id, boolean enable) { - synchronized(this.updatingPortsLock) { + public void setTunerAgcEnable(String id, boolean data) throws PortCallError + { + this.setTunerAgcEnable(id, data, ""); + } + + public void setTunerAgcEnable(String id, boolean data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - p.setTunerAgcEnable(id, enable); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerAgcEnable(id, data); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + p.setTunerAgcEnable(id, data); + } } + } catch(org.omg.CORBA.SystemException 
e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public boolean getTunerAgcEnable(String id) { + public boolean getTunerAgcEnable(String id) throws PortCallError + { + return this.getTunerAgcEnable(id, ""); + } + public boolean getTunerAgcEnable(String id, String __connection_id__) throws PortCallError + { boolean retval = false; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerAgcEnable(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerAgcEnable(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerAgcEnable(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerGain(String id, float gain) { - synchronized(this.updatingPortsLock) { + public void setTunerGain(String id, float data) throws PortCallError + { + this.setTunerGain(id, data, ""); + } + + public void setTunerGain(String id, float data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - p.setTunerGain(id, gain); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + 
this.outPorts.get(__connection_id__).setTunerGain(id, data); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + p.setTunerGain(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public float getTunerGain(String id) { - float retval = 0.0F; + public float getTunerGain(String id) throws PortCallError + { + return this.getTunerGain(id, ""); + } + public float getTunerGain(String id, String __connection_id__) throws PortCallError + { + float retval = (float)0.0; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerGain(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerGain(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerGain(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerReferenceSource(String id, int source) { - synchronized(this.updatingPortsLock) { + public void setTunerReferenceSource(String id, int data) throws PortCallError + { + this.setTunerReferenceSource(id, data, ""); + } + + public void setTunerReferenceSource(String id, int data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - 
p.setTunerReferenceSource(id, source); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerReferenceSource(id, data); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + p.setTunerReferenceSource(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public int getTunerReferenceSource(String id) { + public int getTunerReferenceSource(String id) throws PortCallError + { + return this.getTunerReferenceSource(id, ""); + } + public int getTunerReferenceSource(String id, String __connection_id__) throws PortCallError + { int retval = 0; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerReferenceSource(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerReferenceSource(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerReferenceSource(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerEnable(String id, boolean enable) { - synchronized(this.updatingPortsLock) { + public void setTunerEnable(String id, boolean data) throws PortCallError + { + this.setTunerEnable(id, data, ""); + } + + public void setTunerEnable(String id, boolean data, String __connection_id__) throws PortCallError + { + 
synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - p.setTunerEnable(id, enable); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerEnable(id, data); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + p.setTunerEnable(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public boolean getTunerEnable(String id) { + public boolean getTunerEnable(String id) throws PortCallError + { + return this.getTunerEnable(id, ""); + } + public boolean getTunerEnable(String id, String __connection_id__) throws PortCallError + { boolean retval = false; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (AnalogTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerEnable(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerEnable(id); + } else { + for (AnalogTunerOperations p : this.outConnections.values()) { + retval = p.getTunerEnable(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - + public String getRepid() { return AnalogTunerHelper.id(); } diff --git a/frontendInterfaces/libsrc/java/src/frontend/OutDigitalTunerPort.java 
b/frontendInterfaces/libsrc/java/src/frontend/OutDigitalTunerPort.java index 26e5d7012..1287b202a 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/OutDigitalTunerPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/OutDigitalTunerPort.java @@ -28,8 +28,9 @@ import FRONTEND.BadParameterException; import FRONTEND.NotSupportedException; import org.ossie.component.PortBase; +import org.ossie.redhawk.PortCallError; -public class OutDigitalTunerPort extends QueryableUsesPort implements DigitalTunerOperations, PortBase { +public class OutDigitalTunerPort extends QueryableUsesPort implements PortBase { /** * Map of connection Ids to port objects @@ -68,339 +69,613 @@ public void disconnectPort(final String connectionId) { } } - public String getTunerType(String id) { + public String getTunerType(String id) throws PortCallError + { + return this.getTunerType(id, ""); + } + public String getTunerType(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerType(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerType(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerType(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public boolean getTunerDeviceControl(String id) { + public boolean getTunerDeviceControl(String id) throws PortCallError + { + return this.getTunerDeviceControl(id, ""); + } + public 
boolean getTunerDeviceControl(String id, String __connection_id__) throws PortCallError + { boolean retval = false; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerDeviceControl(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerDeviceControl(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerDeviceControl(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public String getTunerGroupId(String id) { + public String getTunerGroupId(String id) throws PortCallError + { + return this.getTunerGroupId(id, ""); + } + public String getTunerGroupId(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerGroupId(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerGroupId(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerGroupId(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new 
RuntimeException(e); } } } return retval; } - public String getTunerRfFlowId(String id) { + public String getTunerRfFlowId(String id) throws PortCallError + { + return this.getTunerRfFlowId(id, ""); + } + public String getTunerRfFlowId(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerRfFlowId(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerRfFlowId(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerRfFlowId(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public CF.DataType[] getTunerStatus(String id) { + public CF.DataType[] getTunerStatus(String id) throws PortCallError + { + return this.getTunerStatus(id, ""); + } + public CF.DataType[] getTunerStatus(String id, String __connection_id__) throws PortCallError + { CF.DataType[] retval = null; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerStatus(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerStatus(id); + } 
else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerStatus(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerCenterFrequency(String id, double freq) { - synchronized(this.updatingPortsLock) { + public void setTunerCenterFrequency(String id, double data) throws PortCallError + { + this.setTunerCenterFrequency(id, data, ""); + } + + public void setTunerCenterFrequency(String id, double data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - p.setTunerCenterFrequency(id, freq); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerCenterFrequency(id, data); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + p.setTunerCenterFrequency(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public double getTunerCenterFrequency(String id) { + public double getTunerCenterFrequency(String id) throws PortCallError + { + return this.getTunerCenterFrequency(id, ""); + } + public double getTunerCenterFrequency(String id, String __connection_id__) throws PortCallError + { double retval = 0.0; - synchronized(this.updatingPortsLock) { + + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = 
p.getTunerCenterFrequency(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerCenterFrequency(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerCenterFrequency(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerBandwidth(String id, double bw) { - synchronized(this.updatingPortsLock) { + public void setTunerBandwidth(String id, double data) throws PortCallError + { + this.setTunerBandwidth(id, data, ""); + } + + public void setTunerBandwidth(String id, double data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - p.setTunerBandwidth(id, bw); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerBandwidth(id, data); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + p.setTunerBandwidth(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public double getTunerBandwidth(String id) { + public double getTunerBandwidth(String id) throws PortCallError + { + return this.getTunerBandwidth(id, ""); + } + public double getTunerBandwidth(String id, String __connection_id__) throws PortCallError + { double retval = 0.0; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + 
__evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerBandwidth(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerBandwidth(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerBandwidth(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerAgcEnable(String id, boolean enable) { - synchronized(this.updatingPortsLock) { + public void setTunerAgcEnable(String id, boolean data) throws PortCallError + { + this.setTunerAgcEnable(id, data, ""); + } + + public void setTunerAgcEnable(String id, boolean data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - p.setTunerAgcEnable(id, enable); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerAgcEnable(id, data); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + p.setTunerAgcEnable(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public boolean getTunerAgcEnable(String id) { + public boolean getTunerAgcEnable(String id) throws PortCallError + { + return this.getTunerAgcEnable(id, ""); + } 
+ public boolean getTunerAgcEnable(String id, String __connection_id__) throws PortCallError + { boolean retval = false; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerAgcEnable(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerAgcEnable(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerAgcEnable(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerGain(String id, float gain) { - synchronized(this.updatingPortsLock) { + public void setTunerGain(String id, float data) throws PortCallError + { + this.setTunerGain(id, data, ""); + } + + public void setTunerGain(String id, float data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - p.setTunerGain(id, gain); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerGain(id, data); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + p.setTunerGain(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - 
public float getTunerGain(String id) { - float retval = 0.0F; + public float getTunerGain(String id) throws PortCallError + { + return this.getTunerGain(id, ""); + } + public float getTunerGain(String id, String __connection_id__) throws PortCallError + { + float retval = (float)0.0; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerGain(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerGain(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerGain(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerReferenceSource(String id, int source) { - synchronized(this.updatingPortsLock) { + public void setTunerReferenceSource(String id, int data) throws PortCallError + { + this.setTunerReferenceSource(id, data, ""); + } + + public void setTunerReferenceSource(String id, int data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - p.setTunerReferenceSource(id, source); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerReferenceSource(id, data); + } else { + for 
(DigitalTunerOperations p : this.outConnections.values()) { + p.setTunerReferenceSource(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public int getTunerReferenceSource(String id) { + public int getTunerReferenceSource(String id) throws PortCallError + { + return this.getTunerReferenceSource(id, ""); + } + public int getTunerReferenceSource(String id, String __connection_id__) throws PortCallError + { int retval = 0; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerReferenceSource(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerReferenceSource(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerReferenceSource(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerEnable(String id, boolean enable) { - synchronized(this.updatingPortsLock) { + public void setTunerEnable(String id, boolean data) throws PortCallError + { + this.setTunerEnable(id, data, ""); + } + + public void setTunerEnable(String id, boolean data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - p.setTunerEnable(id, enable); - } 
catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerEnable(id, data); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + p.setTunerEnable(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public boolean getTunerEnable(String id) { + public boolean getTunerEnable(String id) throws PortCallError + { + return this.getTunerEnable(id, ""); + } + public boolean getTunerEnable(String id, String __connection_id__) throws PortCallError + { boolean retval = false; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerEnable(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerEnable(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerEnable(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void setTunerOutputSampleRate(String id, double sr) { - synchronized(this.updatingPortsLock) { + public void setTunerOutputSampleRate(String id, double data) throws PortCallError + { + this.setTunerOutputSampleRate(id, data, ""); + } + + public void setTunerOutputSampleRate(String id, double data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + 
__evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - p.setTunerOutputSampleRate(id, sr); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).setTunerOutputSampleRate(id, data); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + p.setTunerOutputSampleRate(id, data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public double getTunerOutputSampleRate(String id) { + public double getTunerOutputSampleRate(String id) throws PortCallError + { + return this.getTunerOutputSampleRate(id, ""); + } + public double getTunerOutputSampleRate(String id, String __connection_id__) throws PortCallError + { double retval = 0.0; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (DigitalTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerOutputSampleRate(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerOutputSampleRate(id); + } else { + for (DigitalTunerOperations p : this.outConnections.values()) { + retval = p.getTunerOutputSampleRate(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } diff --git a/frontendInterfaces/libsrc/java/src/frontend/OutFrontendTunerPort.java 
b/frontendInterfaces/libsrc/java/src/frontend/OutFrontendTunerPort.java index 6f37426f5..2b1009775 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/OutFrontendTunerPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/OutFrontendTunerPort.java @@ -28,8 +28,9 @@ import FRONTEND.BadParameterException; import FRONTEND.NotSupportedException; import org.ossie.component.PortBase; +import org.ossie.redhawk.PortCallError; -public class OutFrontendTunerPort extends QueryableUsesPort implements FrontendTunerOperations, PortBase { +public class OutFrontendTunerPort extends QueryableUsesPort implements PortBase { /** * Map of connection Ids to port objects @@ -68,97 +69,165 @@ public void disconnectPort(final String connectionId) { } } - public String getTunerType(String id) { + public String getTunerType(String id) throws PortCallError + { + return this.getTunerType(id, ""); + } + public String getTunerType(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (FrontendTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerType(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerType(id); + } else { + for (FrontendTunerOperations p : this.outConnections.values()) { + retval = p.getTunerType(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public boolean getTunerDeviceControl(String id) { + public boolean getTunerDeviceControl(String id) throws PortCallError + { + return this.getTunerDeviceControl(id, ""); + } + 
public boolean getTunerDeviceControl(String id, String __connection_id__) throws PortCallError + { boolean retval = false; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (FrontendTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerDeviceControl(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerDeviceControl(id); + } else { + for (FrontendTunerOperations p : this.outConnections.values()) { + retval = p.getTunerDeviceControl(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public String getTunerGroupId(String id) { + public String getTunerGroupId(String id) throws PortCallError + { + return this.getTunerGroupId(id, ""); + } + public String getTunerGroupId(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (FrontendTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerGroupId(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerGroupId(id); + } else { + for (FrontendTunerOperations p : this.outConnections.values()) { + retval = p.getTunerGroupId(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + 
throw new RuntimeException(e); } } - } + } return retval; } - public String getTunerRfFlowId(String id) { + public String getTunerRfFlowId(String id) throws PortCallError + { + return this.getTunerRfFlowId(id, ""); + } + public String getTunerRfFlowId(String id, String __connection_id__) throws PortCallError + { String retval = ""; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - - for (FrontendTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerRfFlowId(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).getTunerRfFlowId(id); + } else { + for (FrontendTunerOperations p : this.outConnections.values()) { + retval = p.getTunerRfFlowId(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } - } - + } return retval; } - public CF.DataType[] getTunerStatus(String id) { + public CF.DataType[] getTunerStatus(String id) throws PortCallError + { + return this.getTunerStatus(id, ""); + } + public CF.DataType[] getTunerStatus(String id, String __connection_id__) throws PortCallError + { CF.DataType[] retval = null; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (FrontendTunerOperations p : this.outConnections.values()) { - try { - retval = p.getTunerStatus(id); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = 
this.outPorts.get(__connection_id__).getTunerStatus(id); + } else { + for (FrontendTunerOperations p : this.outConnections.values()) { + retval = p.getTunerStatus(id); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } diff --git a/frontendInterfaces/libsrc/java/src/frontend/OutGPSPort.java b/frontendInterfaces/libsrc/java/src/frontend/OutGPSPort.java index c4c2a14b3..bdde6a19b 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/OutGPSPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/OutGPSPort.java @@ -27,8 +27,9 @@ import FRONTEND.GPSInfo; import FRONTEND.GpsTimePos; import org.ossie.component.PortBase; +import org.ossie.redhawk.PortCallError; -public class OutGPSPort extends QueryableUsesPort implements GPSOperations, PortBase { +public class OutGPSPort extends QueryableUsesPort implements PortBase { /** * Map of connection Ids to port objects @@ -69,75 +70,131 @@ public void disconnectPort(final String connectionId) { } } - public GPSInfo gps_info() + public GPSInfo gps_info() throws PortCallError + { + return this._get_gps_info(""); + } + + public GPSInfo _get_gps_info(String __connection_id__) throws PortCallError { GPSInfo retval = null; synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (GPSOperations p : this.outConnections.values()) { - try { - retval = p.gps_info(); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).gps_info(); + } else { + for (GPSOperations p : this.outConnections.values()) { + retval = p.gps_info(); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void 
gps_info(GPSInfo data) + public void gps_info(GPSInfo data) throws PortCallError + { + this.gps_info(data, ""); + } + + public void gps_info(GPSInfo data, String __connection_id__) throws PortCallError { synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (GPSOperations p : this.outConnections.values()) { - try { - p.gps_info(data); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).gps_info(data); + } else { + for (GPSOperations p : this.outConnections.values()) { + p.gps_info(data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public GpsTimePos gps_time_pos() + public GpsTimePos gps_time_pos() throws PortCallError + { + return this._get_gps_time_pos(""); + } + + public GpsTimePos _get_gps_time_pos(String __connection_id__) throws PortCallError { GpsTimePos retval = null; - synchronized(this.updatingPortsLock) { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (GPSOperations p : this.outConnections.values()) { - try { - retval = p.gps_time_pos(); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).gps_time_pos(); + } else { + for (GPSOperations p : this.outConnections.values()) { + retval = p.gps_time_pos(); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void gps_time_pos(GpsTimePos data) + public void 
gps_time_pos(GpsTimePos data) throws PortCallError { - synchronized(this.updatingPortsLock) { + this.gps_time_pos(data, ""); + } + + public void gps_time_pos(GpsTimePos data, String __connection_id__) throws PortCallError + { + synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (GPSOperations p : this.outConnections.values()) { - try { - p.gps_time_pos(data); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).gps_time_pos(data); + } else { + for (GPSOperations p : this.outConnections.values()) { + p.gps_time_pos(data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } diff --git a/frontendInterfaces/libsrc/java/src/frontend/OutNavDataPort.java b/frontendInterfaces/libsrc/java/src/frontend/OutNavDataPort.java index 001e9fb49..30e1fc1db 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/OutNavDataPort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/OutNavDataPort.java @@ -26,8 +26,9 @@ import FRONTEND.NavDataHelper; import FRONTEND.NavigationPacket; import org.ossie.component.PortBase; +import org.ossie.redhawk.PortCallError; -public class OutNavDataPort extends QueryableUsesPort implements NavDataOperations, PortBase { +public class OutNavDataPort extends QueryableUsesPort implements PortBase { /** * Map of connection Ids to port objects @@ -68,38 +69,65 @@ public void disconnectPort(final String connectionId) { } } - public NavigationPacket nav_packet() + public NavigationPacket nav_packet() throws PortCallError + { + return this._get_nav_packet(""); + } + public NavigationPacket _get_nav_packet(String __connection_id__) throws PortCallError { NavigationPacket retval = null; 
synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (NavDataOperations p : this.outConnections.values()) { - try { - retval = p.nav_packet(); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).nav_packet(); + } else { + for (NavDataOperations p : this.outConnections.values()) { + retval = p.nav_packet(); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void nav_packet(NavigationPacket data) + public void nav_packet(NavigationPacket data) throws PortCallError + { + this.nav_packet(data, ""); + } + + public void nav_packet(NavigationPacket data, String __connection_id__) throws PortCallError { synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (NavDataOperations p : this.outConnections.values()) { - try { - p.nav_packet(data); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).nav_packet(data); + } else { + for (NavDataOperations p : this.outConnections.values()) { + p.nav_packet(data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } diff --git a/frontendInterfaces/libsrc/java/src/frontend/OutRFInfoPort.java b/frontendInterfaces/libsrc/java/src/frontend/OutRFInfoPort.java index 68a0044ed..9c04cf8a4 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/OutRFInfoPort.java +++ 
b/frontendInterfaces/libsrc/java/src/frontend/OutRFInfoPort.java @@ -26,8 +26,9 @@ import FRONTEND.RFInfoHelper; import FRONTEND.RFInfoPkt; import org.ossie.component.PortBase; +import org.ossie.redhawk.PortCallError; -public class OutRFInfoPort extends QueryableUsesPort implements RFInfoOperations, PortBase { +public class OutRFInfoPort extends QueryableUsesPort implements PortBase { /** * Map of connection Ids to port objects @@ -66,71 +67,129 @@ public void disconnectPort(final String connectionId) { } } - public String rf_flow_id() { + public String rf_flow_id() throws PortCallError + { + return this._get_rf_flow_id(""); + } + public String _get_rf_flow_id(String __connection_id__) throws PortCallError + { String retval = ""; synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (RFInfoOperations p : this.outConnections.values()) { - try { - retval = p.rf_flow_id(); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).rf_flow_id(); + } else { + for (RFInfoOperations p : this.outConnections.values()) { + retval = p.rf_flow_id(); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void rf_flow_id(String data) { + public void rf_flow_id(String data) throws PortCallError + { + this.rf_flow_id(data, ""); + } + + public void rf_flow_id(String data, String __connection_id__) throws PortCallError + { synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (RFInfoOperations p : this.outConnections.values()) { - try { - p.rf_flow_id(data); - } 
catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).rf_flow_id(data); + } else { + for (RFInfoOperations p : this.outConnections.values()) { + p.rf_flow_id(data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public RFInfoPkt rfinfo_pkt() { + public RFInfoPkt rfinfo_pkt() throws PortCallError + { + return this._get_rfinfo_pkt(""); + } + public RFInfoPkt _get_rfinfo_pkt(String __connection_id__) throws PortCallError + { RFInfoPkt retval = null; synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (RFInfoOperations p : this.outConnections.values()) { - try { - retval = p.rfinfo_pkt(); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).rfinfo_pkt(); + } else { + for (RFInfoOperations p : this.outConnections.values()) { + retval = p.rfinfo_pkt(); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void rfinfo_pkt(RFInfoPkt data) { + public void rfinfo_pkt(RFInfoPkt data) throws PortCallError + { + this.rfinfo_pkt(data, ""); + } + + public void rfinfo_pkt(RFInfoPkt data, String __connection_id__) throws PortCallError + { synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (RFInfoOperations p : this.outConnections.values()) { - try { - p.rfinfo_pkt(data); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } 
catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).rfinfo_pkt(data); + } else { + for (RFInfoOperations p : this.outConnections.values()) { + p.rfinfo_pkt(data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } diff --git a/frontendInterfaces/libsrc/java/src/frontend/OutRFSourcePort.java b/frontendInterfaces/libsrc/java/src/frontend/OutRFSourcePort.java index 71938ecd0..bead47507 100644 --- a/frontendInterfaces/libsrc/java/src/frontend/OutRFSourcePort.java +++ b/frontendInterfaces/libsrc/java/src/frontend/OutRFSourcePort.java @@ -26,8 +26,9 @@ import FRONTEND.RFSourceHelper; import FRONTEND.RFInfoPkt; import org.ossie.component.PortBase; +import org.ossie.redhawk.PortCallError; -public class OutRFSourcePort extends QueryableUsesPort implements RFSourceOperations, PortBase { +public class OutRFSourcePort extends QueryableUsesPort implements PortBase { /** * Map of connection Ids to port objects @@ -66,71 +67,129 @@ public void disconnectPort(final String connectionId) { } } - public RFInfoPkt[] available_rf_inputs() { + public RFInfoPkt[] available_rf_inputs() throws PortCallError + { + return this._get_available_rf_inputs(""); + } + public RFInfoPkt[] _get_available_rf_inputs(String __connection_id__) throws PortCallError + { RFInfoPkt[] retval = null; synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (RFSourceOperations p : this.outConnections.values()) { - try { - retval = p.available_rf_inputs(); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = this.outPorts.get(__connection_id__).available_rf_inputs(); + } else { + for (RFSourceOperations p : 
this.outConnections.values()) { + retval = p.available_rf_inputs(); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void available_rf_inputs(RFInfoPkt[] data) { + public void available_rf_inputs(RFInfoPkt[] data) throws PortCallError + { + this.available_rf_inputs(data, ""); + } + + public void available_rf_inputs(RFInfoPkt[] data, String __connection_id__) throws PortCallError + { synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (RFSourceOperations p : this.outConnections.values()) { - try { - p.available_rf_inputs(data); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).available_rf_inputs(data); + } else { + for (RFSourceOperations p : this.outConnections.values()) { + p.available_rf_inputs(data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } } - public RFInfoPkt current_rf_input() { + public RFInfoPkt current_rf_input() throws PortCallError + { + return this._get_current_rf_input(""); + } + public RFInfoPkt _get_current_rf_input(String __connection_id__) throws PortCallError + { RFInfoPkt retval = null; synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, true, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (RFSourceOperations p : this.outConnections.values()) { - try { - retval = p.current_rf_input(); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + retval = 
this.outPorts.get(__connection_id__).current_rf_input(); + } else { + for (RFSourceOperations p : this.outConnections.values()) { + retval = p.current_rf_input(); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } return retval; } - public void current_rf_input(RFInfoPkt data) { + public void current_rf_input(RFInfoPkt data) throws PortCallError + { + this.current_rf_input(data, ""); + } + + public void current_rf_input(RFInfoPkt data, String __connection_id__) throws PortCallError + { synchronized(updatingPortsLock){ + try { + __evaluateRequestBasedOnConnections(__connection_id__, false, false, false); + } catch (PortCallError e) { + throw e; + } if (this.active) { - for (RFSourceOperations p : this.outConnections.values()) { - try { - p.current_rf_input(data); - } catch(org.omg.CORBA.SystemException e) { - throw e; - } catch(Throwable e) { - throw new RuntimeException(e); + try { + if (!__connection_id__.isEmpty()) { + this.outPorts.get(__connection_id__).current_rf_input(data); + } else { + for (RFSourceOperations p : this.outConnections.values()) { + p.current_rf_input(data); + } } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } catch(Throwable e) { + throw new RuntimeException(e); } } } diff --git a/frontendInterfaces/libsrc/pom.xml b/frontendInterfaces/libsrc/pom.xml deleted file mode 100644 index 32aee32f0..000000000 --- a/frontendInterfaces/libsrc/pom.xml +++ /dev/null @@ -1,103 +0,0 @@ - - 4.0.0 - - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../../pom.xml - - frontend - bundle - - - ${project.groupId} - ossie - ${project.version} - - - ${project.groupId} - bulkio-interfaces - ${project.version} - - - ${project.groupId} - frontend-interfaces - ${project.version} - - - ${project.groupId} - cf-interfaces - ${project.version} - - - log4j - log4j - 1.2.15 - - - com.sun.jmx - jmxri - - - com.sun.jdmk - jmxtools - - - javax.jms - jms - - - - - - java/src - - - 
org.codehaus.gmaven - gmaven-plugin - 1.3 - - - set-main-artifact - package - - execute - - - - project.artifact.setFile(new - File("${project.basedir}/frontend.jar")) - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifacts - package - - attach-artifact - - - - - ${project.build.directory}/${project.artifactId}-${project.version}.jar - beta - jar - - - - - - - - - - diff --git a/frontendInterfaces/libsrc/python/fe_types.py b/frontendInterfaces/libsrc/python/fe_types.py index dd7f20052..6b85e5088 100644 --- a/frontendInterfaces/libsrc/python/fe_types.py +++ b/frontendInterfaces/libsrc/python/fe_types.py @@ -19,8 +19,9 @@ # import bulkio +from redhawk.frontendInterfaces import FRONTEND from ossie.cf import CF -from ossie.properties import simple_property +from ossie.properties import simple_property, simpleseq_property from omniORB import any as _any # Time Type Definition @@ -28,6 +29,9 @@ J1970 = 2 JCY = 3 +class allocationException(Exception): + pass + class tuner_allocation_ids_struct(object): def __init__(self): self.reset() @@ -121,7 +125,7 @@ class CartesianPositionInfo(object): def __init__(self,valid=False,datum="",x=0.0,y=0.0,z=0.0): self.valid = valid self.datum = datum - self.x = x + self.x = x self.y = y self.z = z @@ -288,6 +292,52 @@ def getProp(self): def getMembers(self): return [("FRONTEND::listener_allocation::existing_allocation_id",self.existing_allocation_id),("FRONTEND::listener_allocation::listener_allocation_id",self.listener_allocation_id)] +class frontend_scanner_allocation(object): + min_freq = simple_property(id_="FRONTEND::scanner_allocation::min_freq", + name="min_freq", + type_="double") + max_freq = simple_property(id_="FRONTEND::scanner_allocation::max_freq", + name="max_freq", + type_="double") + mode = simple_property(id_="FRONTEND::scanner_allocation::mode", + name="mode", + type_="string") + control_mode = simple_property(id_="FRONTEND::scanner_allocation::control_mode", + name="control_mode", + 
type_="string") + control_limit = simple_property(id_="FRONTEND::scanner_allocation::control_limit", + name="control_limit", + type_="double") + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["min_freq"] = self.min_freq + d["max_freq"] = self.max_freq + d["mode"] = self.mode + d["control_mode"] = self.control_mode + d["control_limit"] = self.control_limit + return str(d) + + @classmethod + def getId(cls): + return "FRONTEND::scanner_allocation" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("min_freq",self.min_freq),("max_freq",self.max_freq),("mode",self.mode),("control_mode",self.control_mode),("control_limit",self.control_limit)] + class default_frontend_tuner_status_struct_struct(object): tuner_type = simple_property(id_="FRONTEND::tuner_status::tuner_type", name="tuner_type", diff --git a/frontendInterfaces/libsrc/python/input_ports.py b/frontendInterfaces/libsrc/python/input_ports.py index b0e96cf38..118cf4090 100644 --- a/frontendInterfaces/libsrc/python/input_ports.py +++ b/frontendInterfaces/libsrc/python/input_ports.py @@ -44,7 +44,7 @@ def setTunerCenterFrequency(self, id, freq): raise FRONTEND.NotSupportedException("setTunerCenterFrequency not supported") def getTunerCenterFrequency(self, id): raise FRONTEND.NotSupportedException("getTunerCenterFrequency not supported") - def setTunerBandwidth(self, bw): + def setTunerBandwidth(self, id, bw): raise FRONTEND.NotSupportedException("setTunerBandwidth not supported") def getTunerBandwidth(self, id): raise FRONTEND.NotSupportedException("getTunerBandwidth not supported") @@ -52,7 +52,7 @@ def setTunerAgcEnable(self, id, enable): raise 
FRONTEND.NotSupportedException("setTunerAgcEnable not supported") def getTunerAgcEnable(self, id): raise FRONTEND.NotSupportedException("getTunerAgcEnable not supported") - def setTunerGain(self, id,gain): + def setTunerGain(self, id, gain): raise FRONTEND.NotSupportedException("setTunerGain not supported") def getTunerGain(self, id): raise FRONTEND.NotSupportedException("getTunerGain not supported") @@ -71,6 +71,22 @@ def setTunerOutputSampleRate(self, id, sr): def getTunerOutputSampleRate(self, id): raise FRONTEND.NotSupportedException("getTunerOutputSampleRate not supported") +class analog_scanning_tuner_delegation(analog_tuner_delegation): + def getScanStatus(self, id): + raise FRONTEND.NotSupportedException("getScanStatus not supported") + def setScanStartTime(self, id, start_time): + raise FRONTEND.NotSupportedException("setScanStartTime not supported") + def setScanStrategy(self, id, scan_strategy): + raise FRONTEND.NotSupportedException("setScanStrategy not supported") + +class digital_scanning_tuner_delegation(digital_tuner_delegation): + def getScanStatus(self, id): + raise FRONTEND.NotSupportedException("getScanStatus not supported") + def setScanStartTime(self, id, start_time): + raise FRONTEND.NotSupportedException("setScanStartTime not supported") + def setScanStrategy(self, id, scan_strategy): + raise FRONTEND.NotSupportedException("setScanStrategy not supported") + class InFrontendTunerPort(FRONTEND__POA.FrontendTuner): def __init__(self, name, parent=tuner_delegation()): self.name = name @@ -222,6 +238,33 @@ def getTunerOutputSampleRate(self, id): finally: self.port_lock.release() +class InDigitalScanningTunerPort(FRONTEND__POA.DigitalScanningTuner, InDigitalTunerPort): + def __init__(self, name, parent=digital_scanning_tuner_delegation()): + self.name = name + self.port_lock = threading.Lock() + self.parent = parent + + def getScanStatus(self, id): + self.port_lock.acquire() + try: + return self.parent.getScanStatus(id) + finally: + 
self.port_lock.release() + + def setScanStartTime(self, id, start_time): + self.port_lock.acquire() + try: + return self.parent.setScanStartTime(id, start_time) + finally: + self.port_lock.release() + + def setScanStrategy(self, id, scan_strategy): + self.port_lock.acquire() + try: + return self.parent.setScanStrategy(id, scan_strategy) + finally: + self.port_lock.release() + class gps_delegation(object): def get_gps_info(self, port_name): _gpsinfo = FRONTEND.GPSInfo('','','',1L,1L,1L,1.0,1.0,1.0,1.0,1,1.0,'',BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0),[]) diff --git a/frontendInterfaces/libsrc/python/output_ports.py b/frontendInterfaces/libsrc/python/output_ports.py index 5e52054e3..f90131b2f 100644 --- a/frontendInterfaces/libsrc/python/output_ports.py +++ b/frontendInterfaces/libsrc/python/output_ports.py @@ -24,6 +24,7 @@ import threading from redhawk.frontendInterfaces import FRONTEND +from ossie.resource import PortCallError class OutPort (CF__POA.Port ): @@ -52,6 +53,9 @@ def disconnectPort(self, connectionId): finally: self.port_lock.release() + def getConnectionIds(self): + return self.outConnections.keys() + def _get_connections(self): currentConnections = [] self.port_lock.acquire() @@ -60,16 +64,39 @@ def _get_connections(self): self.port_lock.release() return currentConnections + def _evaluateRequestBasedOnConnections(self, __connection_id__, returnValue, inOut, out): + if not __connection_id__ and len(self.outConnections) > 1: + if (out or inOut or returnValue): + raise PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", self.getConnectionIds()) + + if len(self.outConnections) == 0: + if (out or inOut or returnValue): + raise PortCallError("No connections available.", self.getConnectionIds()) + else: + if __connection_id__: + raise PortCallError("The requested connection id ("+__connection_id__+") does not exist.", self.getConnectionIds()) + if __connection_id__ and 
len(self.outConnections) > 0: + foundConnection = False + for connId, port in self.outConnections.items(): + if __connection_id__ == connId: + foundConnection = True + break + if not foundConnection: + raise PortCallError("The requested connection id ("+__connection_id__+") does not exist.", self.getConnectionIds()) + class OutFrontendTunerPort(OutPort): def __init__(self, name): OutPort.__init__(self, name, FRONTEND.FrontendTuner) - def getTunerType(self, id): + def getTunerType(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: try: retVal = port.getTunerType(id) @@ -77,15 +104,18 @@ def getTunerType(self, id): pass finally: self.port_lock.release() - + return retVal - def getTunerDeviceControl(self, id): + def getTunerDeviceControl(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: try: retVal = port.getTunerDeviceControl(id) @@ -96,12 +126,15 @@ def getTunerDeviceControl(self, id): return retVal - def getTunerGroupId(self, id): + def getTunerGroupId(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: try: retVal = port.getTunerGroupId(id) @@ -112,12 +145,15 @@ def getTunerGroupId(self, id): return retVal - def getTunerRfFlowId(self, id): + def getTunerRfFlowId(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + 
self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: try: retVal = port.getTunerRfFlowId(id) @@ -128,12 +164,15 @@ def getTunerRfFlowId(self, id): return retVal - def getTunerStatus(self, id): + def getTunerStatus(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: try: retVal = port.getTunerStatus(id) @@ -148,12 +187,15 @@ class OutAnalogTunerPort(OutPort): def __init__(self, name): OutPort.__init__(self, name, FRONTEND.AnalogTuner) - def getTunerType(self, id): + def getTunerType(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerType(id) finally: @@ -161,12 +203,15 @@ def getTunerType(self, id): return retVal - def getTunerDeviceControl(self, id): + def getTunerDeviceControl(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerDeviceControl(id) finally: @@ -174,12 +219,15 @@ def getTunerDeviceControl(self, id): return retVal - def getTunerGroupId(self, id): + def getTunerGroupId(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port 
in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerGroupId(id) finally: @@ -187,12 +235,15 @@ def getTunerGroupId(self, id): return retVal - def getTunerRfFlowId(self, id): + def getTunerRfFlowId(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerRfFlowId(id) finally: @@ -200,12 +251,15 @@ def getTunerRfFlowId(self, id): return retVal - def getTunerStatus(self, id): + def getTunerStatus(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, true, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerStatus(id) finally: @@ -213,22 +267,28 @@ def getTunerStatus(self, id): return retVal - def setTunerCenterFrequency(self, id, freq): + def setTunerCenterFrequency(self, id, freq, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, false, false); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerCenterFrequency(id, freq) finally: self.port_lock.release() - def getTunerCenterFrequency(self, id): + def getTunerCenterFrequency(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerCenterFrequency(id) finally: @@ 
-236,22 +296,28 @@ def getTunerCenterFrequency(self, id): return retVal - def setTunerBandwidth(self, id, bw): + def setTunerBandwidth(self, id, bw, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerBandwidth(id, bw) finally: self.port_lock.release() - def getTunerBandwidth(self, id): + def getTunerBandwidth(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerBandwidth(id) finally: @@ -259,22 +325,28 @@ def getTunerBandwidth(self, id): return retVal - def setTunerAgcEnable(self, id, enable): + def setTunerAgcEnable(self, id, enable, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerAgcEnable(id, enable) finally: self.port_lock.release() - def getTunerAgcEnable(self, id): + def getTunerAgcEnable(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerAgcEnable(id) finally: @@ -282,22 +354,28 @@ def getTunerAgcEnable(self, id): return retVal - def setTunerGain(self, id, gain): + def setTunerGain(self, id, gain, __connection_id__=""): self.port_lock.acquire() try: + 
self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerGain(id, gain) finally: self.port_lock.release() - def getTunerGain(self, id): + def getTunerGain(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerGain(id) finally: @@ -305,22 +383,28 @@ def getTunerGain(self, id): return retVal - def setTunerReferenceSource(self, id, source): + def setTunerReferenceSource(self, id, source, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerReferenceSource(id, source) finally: self.port_lock.release() - def getTunerReferenceSource(self, id): + def getTunerReferenceSource(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerReferenceSource(id) finally: @@ -328,22 +412,28 @@ def getTunerReferenceSource(self, id): return retVal - def setTunerEnable(self, id, enable): + def setTunerEnable(self, id, enable, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port 
!= None: port.setTunerEnable(id, enable) finally: self.port_lock.release() - def getTunerEnable(self, id): + def getTunerEnable(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerEnable(id) finally: @@ -355,12 +445,15 @@ class OutDigitalTunerPort(OutPort): def __init__(self, name): OutPort.__init__(self, name, FRONTEND.DigitalTuner) - def getTunerType(self, id): + def getTunerType(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerType(id) finally: @@ -368,12 +461,15 @@ def getTunerType(self, id): return retVal - def getTunerDeviceControl(self, id): + def getTunerDeviceControl(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerDeviceControl(id) finally: @@ -381,12 +477,15 @@ def getTunerDeviceControl(self, id): return retVal - def getTunerGroupId(self, id): + def getTunerGroupId(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerGroupId(id) finally: @@ -394,12 +493,15 @@ def getTunerGroupId(self, id): return retVal - def 
getTunerRfFlowId(self, id): + def getTunerRfFlowId(self, id, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerRfFlowId(id) finally: @@ -407,12 +509,15 @@ def getTunerRfFlowId(self, id): return retVal - def getTunerStatus(self, id): + def getTunerStatus(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerStatus(id) finally: @@ -420,22 +525,28 @@ def getTunerStatus(self, id): return retVal - def setTunerCenterFrequency(self, id, freq): + def setTunerCenterFrequency(self, id, freq, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerCenterFrequency(id, freq) finally: self.port_lock.release() - def getTunerCenterFrequency(self, id): + def getTunerCenterFrequency(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerCenterFrequency(id) finally: @@ -443,22 +554,28 @@ def getTunerCenterFrequency(self, id): return retVal - def setTunerBandwidth(self, id, bw): + def setTunerBandwidth(self, id, bw, __connection_id__=""): self.port_lock.acquire() try: + 
self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerBandwidth(id, bw) finally: self.port_lock.release() - def getTunerBandwidth(self, id): + def getTunerBandwidth(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerBandwidth(id) finally: @@ -466,22 +583,28 @@ def getTunerBandwidth(self, id): return retVal - def setTunerAgcEnable(self, id, enable): + def setTunerAgcEnable(self, id, enable, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerAgcEnable(id, enable) finally: self.port_lock.release() - def getTunerAgcEnable(self, id): + def getTunerAgcEnable(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerAgcEnable(id) finally: @@ -489,22 +612,28 @@ def getTunerAgcEnable(self, id): return retVal - def setTunerGain(self, id, gain): + def setTunerGain(self, id, gain, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: 
port.setTunerGain(id, gain) finally: self.port_lock.release() - def getTunerGain(self, id): + def getTunerGain(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerGain(id) finally: @@ -512,22 +641,28 @@ def getTunerGain(self, id): return retVal - def setTunerReferenceSource(self, id, source): + def setTunerReferenceSource(self, id, source, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerReferenceSource(id, source) finally: self.port_lock.release() - def getTunerReferenceSource(self, id): + def getTunerReferenceSource(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerReferenceSource(id) finally: @@ -535,22 +670,28 @@ def getTunerReferenceSource(self, id): return retVal - def setTunerEnable(self, id, enable): + def setTunerEnable(self, id, enable, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerEnable(id, enable) finally: self.port_lock.release() - def getTunerEnable(self, id): + def getTunerEnable(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + 
self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerEnable(id) finally: @@ -558,22 +699,28 @@ def getTunerEnable(self, id): return retVal - def setTunerOutputSampleRate(self, id, sr): + def setTunerOutputSampleRate(self, id, sr, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port.setTunerOutputSampleRate(id, sr) finally: self.port_lock.release() - def getTunerOutputSampleRate(self, id): + def getTunerOutputSampleRate(self, id, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port.getTunerOutputSampleRate(id) finally: @@ -585,12 +732,15 @@ class OutGPSPort(OutPort): def __init__(self, name): OutPort.__init__(self, name, FRONTEND.GPS) - def _get_gps_info(self): + def _get_gps_info(self, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port._get_gps_info() finally: @@ -598,22 +748,28 @@ def _get_gps_info(self): return retVal - def _set_gps_info(self, data): + def _set_gps_info(self, data, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if 
(__connection_id__ and __connection_id__ != connId): + continue if port != None: port._set_gps_info(data) finally: self.port_lock.release() - def _get_gps_time_pos(self): + def _get_gps_time_pos(self, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port._get_gps_time_pos() finally: @@ -621,11 +777,14 @@ def _get_gps_time_pos(self): return retVal - def _set_gps_time_pos(self, data): + def _set_gps_time_pos(self, data, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port._set_gps_time_pos(data) finally: @@ -635,12 +794,15 @@ class OutRFInfoPort(OutPort): def __init__(self, name): OutPort.__init__(self, name, FRONTEND.RFInfo) - def _get_rf_flow_id(self): + def _get_rf_flow_id(self, __connection_id__=""): retVal = "" self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port._get_rf_flow_id() finally: @@ -648,22 +810,28 @@ def _get_rf_flow_id(self): return retVal - def _set_rf_flow_id(self, data): + def _set_rf_flow_id(self, data, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port._set_rf_flow_id(data) finally: self.port_lock.release() - def _get_rfinfo_pkt(self): + def _get_rfinfo_pkt(self, 
__connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port._get_rfinfo_pkt() finally: @@ -671,11 +839,14 @@ def _get_rfinfo_pkt(self): return retVal - def _set_rfinfo_pkt(self, data): + def _set_rfinfo_pkt(self, data, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port._set_rfinfo_pkt(data) finally: @@ -685,12 +856,15 @@ class OutRFSourcePort(OutPort): def __init__(self, name): OutPort.__init__(self, name, FRONTEND.RFSource) - def _get_available_rf_inputs(self): + def _get_available_rf_inputs(self, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port._get_available_rf_inputs() finally: @@ -698,22 +872,28 @@ def _get_available_rf_inputs(self): return retVal - def _set_available_rf_inputs(self, data): + def _set_available_rf_inputs(self, data, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port._set_available_rf_inputs(data) finally: self.port_lock.release() - def _get_current_rf_input(self): + def _get_current_rf_input(self, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, 
True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port._get_current_rf_input() finally: @@ -721,11 +901,14 @@ def _get_current_rf_input(self): return retVal - def _set_current_rf_input(self, data): + def _set_current_rf_input(self, data, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port._set_current_rf_input(data) finally: @@ -735,12 +918,15 @@ class OutNavDataPort(OutPort): def __init__(self, name): OutPort.__init__(self, name, FRONTEND.NavData) - def _get_nav_packet(self): + def _get_nav_packet(self, __connection_id__=""): retVal = None self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, True, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: retVal = port._get_nav_packet() finally: @@ -748,11 +934,14 @@ def _get_nav_packet(self): return retVal - def _set_nav_packet(self, data): + def _set_nav_packet(self, data, __connection_id__=""): self.port_lock.acquire() try: + self._evaluateRequestBasedOnConnections(__connection_id__, False, False, False); for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: port._set_nav_packet(data) finally: diff --git a/frontendInterfaces/libsrc/python/tuner_device.py b/frontendInterfaces/libsrc/python/tuner_device.py index 79000d18d..e22c77949 100644 --- a/frontendInterfaces/libsrc/python/tuner_device.py +++ b/frontendInterfaces/libsrc/python/tuner_device.py @@ -27,6 +27,7 @@ from ossie.properties import simple_property from ossie.properties import struct_property from ossie.properties 
import structseq_property +from ossie.properties import struct_to_props from ossie.utils import model import threading @@ -86,7 +87,7 @@ def floatingPointCompare(lhs, rhs, places = 1): true if the value requested_val falls within the range [available_min:available_max] False is returned if min > max ''' -def validateRequest(available_min, available_max, requested_val): +def validateRequestSingle(available_min, available_max, requested_val): if floatingPointCompare(requested_val,available_min) < 0: return False if floatingPointCompare(requested_val,available_max) > 0: return False if floatingPointCompare(available_min,available_max) > 0: return False @@ -97,8 +98,9 @@ def validateRequest(available_min, available_max, requested_val): False is returned if min > max for either available for requested values ''' def validateRequest(available_min, available_max, requested_min, requested_max): - if floatingPointCompare(requested_min,available_min) < 0: return False - if floatingPointCompare(requested_max,available_max) > 0: return False + center_request = (requested_max+requested_min)/2.0 + if floatingPointCompare(center_request,available_min) < 0: return False + if floatingPointCompare(center_request,available_max) > 0: return False if floatingPointCompare(available_min,available_max) > 0: return False if floatingPointCompare(requested_min,requested_max) > 0: return False return True @@ -120,12 +122,12 @@ def validateRequestVsSRI(request,upstream_sri,output_mode): found_bw = False key_size = len(upstream_sri.keywords) for i in range(key_size): - if upstream_sri.keywords[i].id != "CHAN_RF": + if upstream_sri.keywords[i].id == "CHAN_RF": upstream_cf = any.from_any(upstream_sri.keywords[i].value) - found_cf = true - elif upstream_sri.keywords[i].id != "FRONTEND.BANDWIDTH": + found_cf = True + elif upstream_sri.keywords[i].id == "FRONTEND::BANDWIDTH": upstream_bw = any.from_any(upstream_sri.keywords[i].value) - found_bw = true + found_bw = True if not found_cf or not 
found_bw: raise FRONTEND.BadParameterException("CANNOT VERIFY REQUEST -- SRI missing required keywords") @@ -165,7 +167,7 @@ def validateRequestVsSRI(request,upstream_sri,output_mode): If the CHAN_RF and FRONTEND.BANDWIDTH keywords are not found in the sri, FRONTEND.BadParameterException is thrown. ''' -def validateRequestVsDevice(request, upstream_sri, output_mode, min_device_center_freq, max_device_center_freq, max_device_bandwidth, max_device_sample_rate): +def validateRequestVsDeviceStream(request, upstream_sri, output_mode, min_device_center_freq, max_device_center_freq, max_device_bandwidth, max_device_sample_rate): # check if request can be satisfied using the available upstream data if not validateRequestVsSRI(request,upstream_sri, output_mode): @@ -173,15 +175,15 @@ def validateRequestVsDevice(request, upstream_sri, output_mode, min_device_cente # check device constraints # check vs. device center frequency capability (ensure 0 <= request <= max device capability) - if not validateRequest(min_device_center_freq,max_device_center_freq,request.center_frequency): + if not validateRequestSingle(min_device_center_freq,max_device_center_freq,request.center_frequency): raise FRONTEND.BadParameterException("INVALID REQUEST -- device capabilities cannot support freq request") # check vs. device bandwidth capability (ensure 0 <= request <= max device capability) - if not validateRequest(0,max_device_bandwidth,request.bandwidth): + if not validateRequestSingle(0,max_device_bandwidth,request.bandwidth): raise FRONTEND.BadParameterException("INVALID REQUEST -- device capabilities cannot support bw request") # check vs. 
device sample rate capability (ensure 0 <= request <= max device capability) - if not validateRequest(0,max_device_sample_rate,request.sample_rate): + if not validateRequestSingle(0,max_device_sample_rate,request.sample_rate): raise FRONTEND.BadParameterException("INVALID REQUEST -- device capabilities cannot support sr request") # calculate overall frequency range of the device (not just CF range) @@ -226,9 +228,9 @@ def validateRequestVsRFInfo(request, rfinfo, mode): raise FRONTEND.BadParameterException("INVALID REQUEST -- analog freq range (RFinfo) cannot support freq/bw request") # check sample rate - scaling_factor = 2 - if mode == 1: - scaling_factor = 4 # adjust for complex data + scaling_factor = 4 + if mode: + scaling_factor = 2 # adjust for complex data min_requested_freq = request.center_frequency-(request.sample_rate/scaling_factor) max_requested_freq = request.center_frequency+(request.sample_rate/scaling_factor) @@ -253,18 +255,21 @@ def validateRequestVsDevice(request, rfinfo, mode, min_device_center_freq, max_d # see if IF center frequency is set in rfinfo packet request_if_center_freq = request.center_frequency if request.tuner_type != "TX" and floatingPointCompare(rfinfo.if_center_freq,0) > 0 and floatingPointCompare(rfinfo.rf_center_freq,rfinfo.if_center_freq) > 0: - request_if_center_freq = request.center_frequency - (rfinfo.rf_center_freq-rfinfo.if_center_freq) + if rfinfo.spectrum_inverted: + request_if_center_freq = rfinfo.if_center_freq - (request.center_frequency - rfinfo.rf_center_freq) + else: + request_if_center_freq = rfinfo.if_center_freq + (request.center_frequency - rfinfo.rf_center_freq) # check vs. 
device center freq capability (ensure 0 <= request <= max device capability) - if not validateRequest(min_device_center_freq,max_device_center_freq,request_if_center_freq): + if not validateRequestSingle(min_device_center_freq, max_device_center_freq, request_if_center_freq): raise FRONTEND.BadParameterException("INVALID REQUEST -- device capabilities cannot support freq request") # check vs. device bandwidth capability (ensure 0 <= request <= max device capability) - if not validateRequest(0,max_device_bandwidth,request.bandwidth): + if not validateRequestSingle(0, max_device_bandwidth, request.bandwidth): raise FRONTEND.BadParameterException("INVALID REQUEST -- device capabilities cannot support bw request") # check vs. device sample rate capability (ensure 0 <= request <= max device capability) - if not validateRequest(0,max_device_sample_rate,request.sample_rate): + if not validateRequestSingle(0, max_device_sample_rate, request.sample_rate): raise FRONTEND.BadParameterException("INVALID REQUEST -- device capabilities cannot support sr request") # calculate overall frequency range of the device (not just CF range) @@ -278,19 +283,37 @@ def validateRequestVsDevice(request, rfinfo, mode, min_device_center_freq, max_d min_requested_freq = request_if_center_freq-(request.bandwidth/2) max_requested_freq = request_if_center_freq+(request.bandwidth/2) - if not validateRequest(min_device_freq,max_device_freq,min_requested_freq,max_requested_freq): + if not validateRequest(min_device_freq, max_device_freq, min_requested_freq, max_requested_freq): raise FRONTEND.BadParameterException("INVALID REQUEST -- device capabilities cannot support freq/bw request") # check based on sample rate min_requested_freq = request_if_center_freq-(request.sample_rate/scaling_factor) max_requested_freq = request_if_center_freq+(request.sample_rate/scaling_factor) - if not validateRequest(min_device_freq,max_device_freq,min_requested_freq,max_requested_freq): + if not 
validateRequest(min_device_freq, max_device_freq, min_requested_freq, max_requested_freq): raise FRONTEND.BadParameterException("INVALID REQUEST -- device capabilities cannot support freq/sr request") return True -def createTunerAllocation(tuner_type='DDC',allocation_id=None,center_frequency=0.0,bandwidth=0.0,sample_rate=1.0, +def createScannerAllocation(min_freq=0.0, max_freq=0.0, mode="", control_mode="", control_limit=0.0, returnDict=True): + if returnDict: + retval = {'FRONTEND::scanner_allocation':{'FRONTEND::scanner_allocation::min_freq':min_freq,'FRONTEND::scanner_allocation::max_freq':max_freq, + 'FRONTEND::scanner_allocation::mode':mode,'FRONTEND::scanner_allocation::control_mode':control_mode, + 'FRONTEND::scanner_allocation::control_limit':control_limit}} + else: + alloc=[] + alloc.append(CF.DataType(id='FRONTEND::scanner_allocation::min_freq',value=any.to_any(min_freq))) + alloc[-1].value._t = CORBA.TC_double + alloc.append(CF.DataType(id='FRONTEND::scanner_allocation::max_freq',value=any.to_any(max_freq))) + alloc[-1].value._t = CORBA.TC_double + alloc.append(CF.DataType(id='FRONTEND::scanner_allocation::mode',value=any.to_any(mode))) + alloc.append(CF.DataType(id='FRONTEND::scanner_allocation::control_mode',value=any.to_any(control_mode))) + alloc.append(CF.DataType(id='FRONTEND::scanner_allocation::control_limit',value=any.to_any(control_limit))) + alloc[-1].value._t = CORBA.TC_double + retval = CF.DataType(id='FRONTEND::scanner_allocation',value=CORBA.Any(CF._tc_Properties,alloc)) + return retval + +def createTunerAllocation(tuner_type='RX_DIGITIZER',allocation_id=None,center_frequency=0.0,bandwidth=0.0,sample_rate=0.0, device_control=True,group_id='',rf_flow_id='',bandwidth_tolerance=0.0,sample_rate_tolerance=0.0,returnDict=True): if returnDict: retval = {'FRONTEND::tuner_allocation':{'FRONTEND::tuner_allocation::tuner_type':tuner_type,'FRONTEND::tuner_allocation::allocation_id':allocation_id, @@ -323,7 +346,7 @@ def 
createTunerAllocation(tuner_type='DDC',allocation_id=None,center_frequency=0 retval = CF.DataType(id='FRONTEND::tuner_allocation',value=CORBA.Any(CF._tc_Properties,alloc)) return retval -def createTunerGenericListenerAllocation(tuner_type='DDC',allocation_id=None,center_frequency=0.0,bandwidth=0.0,sample_rate=1.0, +def createTunerGenericListenerAllocation(tuner_type='RX_DIGITIZER',allocation_id=None,center_frequency=0.0,bandwidth=0.0,sample_rate=1.0, device_control=False,group_id='',rf_flow_id='',bandwidth_tolerance=0.0,sample_rate_tolerance=0.0,returnDict=True): if returnDict: retval = {'FRONTEND::tuner_allocation':{'FRONTEND::tuner_allocation::tuner_type':tuner_type,'FRONTEND::tuner_allocation::allocation_id':allocation_id, @@ -375,6 +398,7 @@ def createTunerListenerAllocation(existing_allocation_id,listener_allocation_id= def tune(device,tuner_type='RX_DIGITIZER',allocation_id=None,center_frequency=None,bandwidth=256000,sample_rate=None,device_control=True,group_id='',rf_flow_id='',bandwidth_tolerance=0.0,sample_rate_tolerance=0.0,returnDict=True,gain=None): numTuners = len(device.frontend_tuner_status) newAllocation = False + allAllocated = False #No tuners found on device if numTuners == 0: print "No Available Tuner" @@ -382,6 +406,8 @@ def tune(device,tuner_type='RX_DIGITIZER',allocation_id=None,center_frequency=No if numTuners >= 1: for index, key in enumerate(device.frontend_tuner_status): id_csv = device.frontend_tuner_status[index].allocation_id_csv + if len(id_csv) != 0: + allAllocated = True if allocation_id != None and allocation_id in id_csv: break if id_csv == '': @@ -410,14 +436,17 @@ def tune(device,tuner_type='RX_DIGITIZER',allocation_id=None,center_frequency=No break if allocation_id == None and not newAllocation and numTuners >= 1: - print "tune(): All tuners (", len(device.frontend_tuner_status), ") have been allocated. 
Specify an allocation_id to change tuning properties" + if allAllocated: + print "tune(): All tuners (", len(device.frontend_tuner_status), ") have been allocated. Specify an allocation_id to change tuning properties" + else: + print "tune(): unable to allocate a tuner with the given parameters" elif not newAllocation: tuner=None tuner_type=None allocation_status = _getAllocationStatus(device, numTuners, allocation_id) if allocation_status == None: - print "tune(): no matching allocation ID's for ",allocation_id + print "tune(): unable to allocate a tuner with the given parameters" return allocation_status elif "DigitalTuner_in" in device._providesPortDict.keys(): tuner_type = "DigitalTuner" @@ -492,6 +521,7 @@ def __init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execpa # Provides mapping from unique allocation ID to internal tuner (channel) number self.allocation_id_to_tuner_id = {} self.allocation_id_mapping_lock = threading.RLock() + self.supports_scan = False def deallocateCapacity(self, properties): """ @@ -504,7 +534,7 @@ def deallocateCapacity(self, properties): Output: None """ - self._log.debug("deallocateCapacity(%s)", properties) + self._deviceLog.debug("deallocateCapacity(%s)", properties) # Validate self._validateAllocProps(properties) # Consume @@ -535,7 +565,7 @@ def deallocateCapacity(self, properties): # Update usage state self._usageState = self.updateUsageState() - self._log.debug("deallocateCapacity() -->") + self._deviceLog.debug("deallocateCapacity() -->") def getControlAllocationId(self, idx): return self.tuner_allocation_ids[idx].control_allocation_id @@ -586,6 +616,11 @@ def updateUsageState(self): # as well as the tuner_allocation_ids vector. 
Call this function during initialization def setNumChannels(self,num,tuner_type='RX_DIGITIZER'): self.frontend_tuner_status = [] + self.addChannels(num, tuner_type) + + # This sets the number of entries in the frontend_tuner_status struct sequence property + # as well as the tuner_allocation_ids vector. Call this function during initialization + def addChannels(self,num,tuner_type='RX_DIGITIZER'): for ii in range(num): tuner_status = self.frontend_tuner_status_struct_struct() tuner_status.tuner_type = tuner_type @@ -597,18 +632,54 @@ def setNumChannels(self,num,tuner_type='RX_DIGITIZER'): finally: self.allocation_id_mapping_lock.release() - """ Allocation handlers """ - def allocate_frontend_tuner_allocation(self, frontend_tuner_allocation): + def allocateCapacity(self, capacities): + return False + + def _checkValidIds(self, propdict): + self._has_scanner = False + for prop_key in propdict: + if prop_key == "FRONTEND::scanner_allocation": + raise CF.Device.InvalidCapacity("FRONTEND::scanner_allocation found in allocation; this is not a scanning device", [CF.DataType(id=prop_key,value=any.to_any(propdict[prop_key]))]) + + for prop_key in propdict: + if prop_key != "FRONTEND::tuner_allocation" and prop_key != "FRONTEND::listener_allocation": + raise CF.Device.InvalidCapacity("UNKNOWN ALLOCATION PROPERTY "+prop_key, [CF.DataType(id=prop_key,value=any.to_any(propdict[prop_key]))]) + self._props[prop_key] = propdict[prop_key] + + def allocateCapacity(self, properties): + self._deviceLog.debug("allocateCapacity(%s)", properties) + + self._validateAllocProps(properties) + + propdict = {} + for prop in properties: + propdef = self._props.getPropDef(prop.id) + propdict[prop.id] = propdef._fromAny(prop.value) + + self._checkValidIds(propdict) + scanner_prop = None + if propdict.has_key('FRONTEND::scanner_allocation'): + scanner_prop = propdict['FRONTEND::scanner_allocation'] + + if propdict.has_key('FRONTEND::tuner_allocation'): + return 
self._allocate_frontend_tuner_allocation(propdict['FRONTEND::tuner_allocation'], scanner_prop) + if propdict.has_key('FRONTEND::listener_allocation'): + return self._allocate_frontend_listener_allocation(propdict['FRONTEND::listener_allocation']) + + raise CF.Device.InvalidCapacity("Unable to allocate this FEI device because FRONTEND::tuner_allocation and FRONTEND::listener_allocation not present", properties) + + def _allocate_frontend_tuner_allocation(self, frontend_tuner_allocation, scanner_prop = None): + exception_raised = False try: # Check allocation_id if not frontend_tuner_allocation.allocation_id: - self._log.info("allocate_frontend_tuner_allocation: MISSING ALLOCATION_ID") - raise CF.Device.InvalidCapacity("MISSING ALLOCATION_ID", frontend_tuner_allocation) - + self._deviceLog.info("allocate_frontend_tuner_allocation: MISSING ALLOCATION_ID") + raise CF.Device.InvalidCapacity("MISSING ALLOCATION_ID", struct_to_props(frontend_tuner_allocation)) + # Check if allocation ID has already been used if self.getTunerMapping(frontend_tuner_allocation.allocation_id) >= 0: - self._log.info("allocate_frontend_tuner_allocation: ALLOCATION_ID "+frontend_tuner_allocation.allocation_id+" ALREADY IN USE") - raise CF.Device.InvalidCapacity("ALLOCATION_ID "+frontend_tuner_allocation.allocation_id+" ALREADY IN USE", frontend_tuner_allocation) + self._deviceLog.info("allocate_frontend_tuner_allocation: ALLOCATION_ID "+frontend_tuner_allocation.allocation_id+" ALREADY IN USE") + raise CF.Device.InvalidCapacity("ALLOCATION_ID "+frontend_tuner_allocation.allocation_id+" ALREADY IN USE", struct_to_props(frontend_tuner_allocation)) self.allocation_id_mapping_lock.acquire() # Next, try to allocate a new tuner @@ -618,18 +689,18 @@ def allocate_frontend_tuner_allocation(self, frontend_tuner_allocation): self.tuner_allocation_ids.append(tuner_allocation_ids_struct()) for tuner_id in range(len(self.tuner_allocation_ids)): if self.frontend_tuner_status[tuner_id].tuner_type != 
frontend_tuner_allocation.tuner_type: - self._log.debug("allocate_frontend_tuner_allocation: Requested tuner type '" + str(frontend_tuner_allocation.tuner_type) + "' does not match tuner[" + str(tuner_id) + "].tuner_type ("+str(self.frontend_tuner_status[tuner_id].tuner_type)+")") + self._deviceLog.debug("allocate_frontend_tuner_allocation: Requested tuner type '" + str(frontend_tuner_allocation.tuner_type) + "' does not match tuner[" + str(tuner_id) + "].tuner_type ("+str(self.frontend_tuner_status[tuner_id].tuner_type)+")") continue if len(frontend_tuner_allocation.group_id) > 0 and frontend_tuner_allocation.group_id != self.frontend_tuner_status[tuner_id].group_id: - self._log.debug("allocate_frontend_tuner_allocation: Requested group_id '"+str(frontend_tuner_allocation.group_id)+"' does not match tuner[" + str(tuner_id) + "].group_id ("+str(self.frontend_tuner_status[tuner_id].group_id)+")") + self._deviceLog.debug("allocate_frontend_tuner_allocation: Requested group_id '"+str(frontend_tuner_allocation.group_id)+"' does not match tuner[" + str(tuner_id) + "].group_id ("+str(self.frontend_tuner_status[tuner_id].group_id)+")") continue # special case because allocation is specifying the input stream, which determines the rf_flow_id, etc. 
if len(frontend_tuner_allocation.rf_flow_id) > 0 and \ frontend_tuner_allocation.rf_flow_id != self.frontend_tuner_status[tuner_id].rf_flow_id and \ frontend_tuner_allocation.tuner_type != "CHANNELIZER": - self._log.debug("allocate_frontend_tuner_allocation: Requested rf_flow_id '"+str(frontend_tuner_allocation.rf_flow_id)+"' does not match tuner[" +str(tuner_id)+ "].rf_flow_id ("+str(self.frontend_tuner_status[tuner_id].rf_flow_id)+")") + self._deviceLog.debug("allocate_frontend_tuner_allocation: Requested rf_flow_id '"+str(frontend_tuner_allocation.rf_flow_id)+"' does not match tuner[" +str(tuner_id)+ "].rf_flow_id ("+str(self.frontend_tuner_status[tuner_id].rf_flow_id)+")") continue if frontend_tuner_allocation.device_control: @@ -640,11 +711,16 @@ def allocate_frontend_tuner_allocation(self, frontend_tuner_allocation): self.frontend_tuner_status[tuner_id].center_frequency = frontend_tuner_allocation.center_frequency self.frontend_tuner_status[tuner_id].bandwidth = frontend_tuner_allocation.bandwidth self.frontend_tuner_status[tuner_id].sample_rate = frontend_tuner_allocation.sample_rate - if len(self.tuner_allocation_ids[tuner_id].control_allocation_id)>0 or \ - not self.deviceSetTuning(frontend_tuner_allocation, self.frontend_tuner_status[tuner_id], tuner_id): - # either not available or didn't succeed setting tuning, try next tuner - self._log.debug("allocate_frontend_tuner_allocation: Tuner["+str(tuner_id)+"] is either not available or didn't succeed while setting tuning ") - continue + if self.supports_scan and self._has_scanner: + if len(self.tuner_allocation_ids[tuner_id].control_allocation_id)>0 or not self.deviceSetTuningScan(frontend_tuner_allocation, scanner_prop, self.frontend_tuner_status[tuner_id], tuner_id): + # either not available or didn't succeed setting tuning, try next tuner + self._deviceLog.debug("allocate_frontend_tuner_allocation: Tuner["+str(tuner_id)+"] is either not available or didn't succeed while setting tuning ") + continue + 
else: + if len(self.tuner_allocation_ids[tuner_id].control_allocation_id)>0 or not self.deviceSetTuning(frontend_tuner_allocation, self.frontend_tuner_status[tuner_id], tuner_id): + # either not available or didn't succeed setting tuning, try next tuner + self._deviceLog.debug("allocate_frontend_tuner_allocation: Tuner["+str(tuner_id)+"] is either not available or didn't succeed while setting tuning ") + continue if center_frequency == self.frontend_tuner_status[tuner_id].center_frequency and \ bandwidth == self.frontend_tuner_status[tuner_id].bandwidth and \ @@ -652,7 +728,7 @@ def allocate_frontend_tuner_allocation(self, frontend_tuner_allocation): self.frontend_tuner_status[tuner_id].center_frequency = frontend_tuner_allocation.center_frequency self.frontend_tuner_status[tuner_id].bandwidth = frontend_tuner_allocation.bandwidth self.frontend_tuner_status[tuner_id].sample_rate = frontend_tuner_allocation.sample_rate - + self.tuner_allocation_ids[tuner_id].control_allocation_id = frontend_tuner_allocation.allocation_id self.allocation_id_to_tuner_id[frontend_tuner_allocation.allocation_id] = tuner_id self.frontend_tuner_status[tuner_id].allocation_id_csv = self.createAllocationIdCsv(tuner_id) @@ -660,14 +736,15 @@ def allocate_frontend_tuner_allocation(self, frontend_tuner_allocation): # channelizer allocations must specify device control = true if frontend_tuner_allocation.tuner_type == "CHANNELIZER" or frontend_tuner_allocation.tuner_type == "TX": eout = str(frontend_tuner_allocation.tuner_type) + " allocation with device_control=false is invalid." 
- self._log.debug(eout) - raise CF.Device.InvalidCapacity(eout, frontend_tuner_allocation) + self._deviceLog.debug(eout) + raise CF.Device.InvalidCapacity(eout, struct_to_props(frontend_tuner_allocation)) # listener - if len(self.tuner_allocation_ids[tuner_id].control_allocation_id) == 0 or not listenerRequestValidation(frontend_tuner_allocation, tuner_id): + if len(self.tuner_allocation_ids[tuner_id].control_allocation_id) == 0 or not self.listenerRequestValidation(frontend_tuner_allocation, tuner_id): # either not allocated or can't support listener request - self._log.debug("allocate_frontend_tuner_allocation: Tuner["+str(tuner_id)+"] is either not available or can not support listener request ") + self._deviceLog.debug("allocate_frontend_tuner_allocation: Tuner["+str(tuner_id)+"] is either not available or can not support listener request ") continue self.tuner_allocation_ids[tuner_id].listener_allocation_ids.append(frontend_tuner_allocation.allocation_id) + self.allocation_id_to_tuner_id[frontend_tuner_allocation.allocation_id] = tuner_id self.frontend_tuner_status[tuner_id].allocation_id_csv = self.createAllocationIdCsv(tuner_id) self.assignListener(frontend_tuner_allocation.allocation_id,self.tuner_allocation_ids[tuner_id].control_allocation_id) @@ -675,46 +752,44 @@ def allocate_frontend_tuner_allocation(self, frontend_tuner_allocation): # check tolerances # only check when sample_rate was not set to don't care - self._log.debug(" allocate_frontend_tuner_allocation - SR requested: " + str(frontend_tuner_allocation.sample_rate) + " SR got: " +str(self.frontend_tuner_status[tuner_id].sample_rate)) + self._deviceLog.debug(" allocate_frontend_tuner_allocation - SR requested: " + str(frontend_tuner_allocation.sample_rate) + " SR got: " +str(self.frontend_tuner_status[tuner_id].sample_rate)) if floatingPointCompare(frontend_tuner_allocation.sample_rate,0)!=0 and \ 
(floatingPointCompare(self.frontend_tuner_status[tuner_id].sample_rate,frontend_tuner_allocation.sample_rate)<0 or floatingPointCompare(self.frontend_tuner_status[tuner_id].sample_rate,frontend_tuner_allocation.sample_rate+frontend_tuner_allocation.sample_rate * frontend_tuner_allocation.sample_rate_tolerance/100.0)>0 ): eout = "allocate_frontend_tuner_allocation(" + str(int(tuner_id)) +"): returned sr "+str(self.frontend_tuner_status[tuner_id].sample_rate)+" does not meet tolerance criteria of "+str(frontend_tuner_allocation.sample_rate_tolerance)+" percent" - self._log.info(eout) - raise RuntimeError(eout) + self._deviceLog.info(eout) + raise allocationException(eout) - self._log.debug(" allocate_frontend_tuner_allocation - BW requested: " + str(frontend_tuner_allocation.bandwidth) + " BW got: " +str(self.frontend_tuner_status[tuner_id].bandwidth)) + self._deviceLog.debug(" allocate_frontend_tuner_allocation - BW requested: " + str(frontend_tuner_allocation.bandwidth) + " BW got: " +str(self.frontend_tuner_status[tuner_id].bandwidth)) # Only check when bandwidth was not set to don't care if floatingPointCompare(frontend_tuner_allocation.bandwidth,0)!=0 and \ (floatingPointCompare(self.frontend_tuner_status[tuner_id].bandwidth,frontend_tuner_allocation.bandwidth)<0 or \ floatingPointCompare(self.frontend_tuner_status[tuner_id].bandwidth,frontend_tuner_allocation.bandwidth+frontend_tuner_allocation.bandwidth * frontend_tuner_allocation.bandwidth_tolerance/100.0)>0 ): - eout = "allocate_frontend_tuner_allocation("<= 0: + self.deallocate_frontend_tuner_allocation(frontend_tuner_allocation) return False except AllocationAlreadyExists, e: @@ -730,7 +805,7 @@ def allocate_frontend_tuner_allocation(self, frontend_tuner_allocation): return False except Exception, e: - self._log.info('The following error occurred on allocation:',e) + self._deviceLog.exception('The following error occurred on allocation:',e) #self.deallocateCapacity([frontend_tuner_allocation.getProp()]) 
raise e @@ -740,20 +815,32 @@ def deallocate_frontend_tuner_allocation(self, frontend_tuner_allocation): # Try to remove control of the device tuner_id = self.getTunerMapping(frontend_tuner_allocation.allocation_id) if tuner_id < 0: - self._log.debug("deallocate_frontend_tuner_allocation: ALLOCATION_ID NOT FOUND: [" + str(frontend_tuner_allocation.allocation_id) + "]") - raise CF.Device.InvalidCapacity("ALLOCATION_ID NOT FOUND: [" + str(frontend_tuner_allocation.allocation_id) + "]",frontend_tuner_allocation) + self._deviceLog.debug("deallocate_frontend_tuner_allocation: ALLOCATION_ID NOT FOUND: [" + str(frontend_tuner_allocation.allocation_id) + "]") + raise CF.Device.InvalidCapacity("ALLOCATION_ID NOT FOUND: [" + str(frontend_tuner_allocation.allocation_id) + "]",struct_to_props(frontend_tuner_allocation)) self.allocation_id_mapping_lock.acquire() try: - while self.frontend_tuner_status[self.allocation_id_to_tuner_id[frontend_tuner_allocation.allocation_id]].allocation_id_csv != frontend_tuner_allocation.allocation_id: + if self.frontend_tuner_status[self.allocation_id_to_tuner_id[frontend_tuner_allocation.allocation_id]].allocation_id_csv.split(',')[0] == frontend_tuner_allocation.allocation_id: + # remove if it is controlling + while self.frontend_tuner_status[self.allocation_id_to_tuner_id[frontend_tuner_allocation.allocation_id]].allocation_id_csv != frontend_tuner_allocation.allocation_id: + split_id = self.frontend_tuner_status[self.allocation_id_to_tuner_id[frontend_tuner_allocation.allocation_id]].allocation_id_csv.split(',') + for idx in range(len(split_id)): + if split_id[idx] == frontend_tuner_allocation.allocation_id: + continue + else: + self.removeTunerMappingByAllocationId(split_id[idx]) + self.removeListenerId(tuner_id, split_id[idx]) + break + else: + # remove if it is not controlling split_id = self.frontend_tuner_status[self.allocation_id_to_tuner_id[frontend_tuner_allocation.allocation_id]].allocation_id_csv.split(',') for idx in 
range(len(split_id)): - if split_id[idx] == frontend_tuner_allocation.allocation_id: + if split_id[idx] != frontend_tuner_allocation.allocation_id: continue else: self.removeTunerMappingByAllocationId(split_id[idx]) self.removeListenerId(tuner_id, split_id[idx]) - break + return finally: self.allocation_id_mapping_lock.release() @@ -772,39 +859,33 @@ def deallocate_frontend_tuner_allocation(self, frontend_tuner_allocation): self.frontend_tuner_status[tuner_id].allocation_id_csv = '' - def allocate_frontend_listener_allocation(self, frontend_listener_allocation): + def _allocate_frontend_listener_allocation(self, frontend_listener_allocation): try: # Check validity of allocation_id's if not frontend_listener_allocation.existing_allocation_id: - self._log.info("allocate_frontend_listener_allocation: MISSING EXISTING ALLOCATION ID") - raise CF.Device.InvalidCapacity("MISSING EXISTING ALLOCATION ID", frontend_listener_allocation) + self._deviceLog.info("allocate_frontend_listener_allocation: MISSING EXISTING ALLOCATION ID") + raise CF.Device.InvalidCapacity("MISSING EXISTING ALLOCATION ID", struct_to_props(frontend_listener_allocation)) if not frontend_listener_allocation.listener_allocation_id: - self._log.info("allocate_frontend_listener_allocation: MISSING LISTENER ALLOCATION ID") - raise CF.Device.InvalidCapacity("MISSING LISTENER ALLOCATION ID", frontend_listener_allocation) - - # Check if listener allocation ID has already been used - if self.getTunerMapping(frontend_listener_allocation.listener_allocation_id) >= 0: - self._log.info("allocate_frontend_listener_allocation: LISTENER ALLOCATION ID ALREADY IN USE") - raise CF.Device.InvalidCapacity("LISTENER ALLOCATION ID ALREADY IN USE", frontend_listener_allocation) + self._deviceLog.info("allocate_frontend_listener_allocation: MISSING LISTENER ALLOCATION ID") + raise CF.Device.InvalidCapacity("MISSING LISTENER ALLOCATION ID", struct_to_props(frontend_listener_allocation)) - 
#self.tuner_allocation_ids[tuner_id].lock.acquire() # Check if listener allocation ID has already been used if self.getTunerMapping(frontend_listener_allocation.listener_allocation_id) >= 0: - self._log.info("allocate_frontend_listener_allocation: LISTENER ALLOCATION ID ALREADY IN USE: [" + str(frontend_listener_allocation.listener_allocation_id << "]")) + self._deviceLog.info("allocate_frontend_listener_allocation: LISTENER ALLOCATION ID ALREADY IN USE: [" + str(frontend_listener_allocation.listener_allocation_id + "]")) raise AllocationAlreadyExists("LISTENER ALLOCATION ID ALREADY IN USE", frontend_listener_allocation) # Do not allocate if existing allocation ID does not exist tuner_id = self.getTunerMapping(frontend_listener_allocation.existing_allocation_id) if tuner_id < 0: - self._log.debug("allocate_frontend_listener_allocation: UNKNOWN CONTROL ALLOCATION ID: [" + str(frontend_listener_allocation.existing_allocation_id)+"]") + self._deviceLog.debug("allocate_frontend_listener_allocation: UNKNOWN CONTROL ALLOCATION ID: [" + str(frontend_listener_allocation.existing_allocation_id)+"]") raise FRONTEND.BadParameterException("UNKNOWN CONTROL ALLOCATION ID") # listener allocations are not permitted for channelizers or TX if self.frontend_tuner_status[tuner_id].tuner_type == "CHANNELIZER" or self.frontend_tuner_status[tuner_id].tuner_type == "TX": eout = "allocate_frontend_listener_allocation: listener allocations are not permitted for " + str(self.frontend_tuner_status[tuner_id].tuner_type) + " tuner type" - self._log.debug(eout) - raise CF.Device.InvalidCapacity(eout, frontend_listener_allocation) + self._deviceLog.debug(eout) + raise CF.Device.InvalidCapacity(eout, struct_to_props(frontend_listener_allocation)) self.allocation_id_mapping_lock.acquire() try: @@ -822,7 +903,7 @@ def allocate_frontend_listener_allocation(self, frontend_listener_allocation): except AllocationAlreadyExists, e: # Don't call deallocateCapacity if the allocationId already exists # - 
Would end up deallocating a valid tuner/listener - raise CF.Device.InvalidCapacity(e) + raise CF.Device.InvalidCapacity(str(e), struct_to_props(frontend_listener_allocation)) except CF.Device.InvalidCapacity, e: raise e @@ -831,7 +912,7 @@ def allocate_frontend_listener_allocation(self, frontend_listener_allocation): return False except Exception, e: - self._log.info('The following error occurred on allocation:',e) + self._deviceLog.info('The following error occurred on allocation:',e) raise e return False @@ -847,12 +928,12 @@ def removeListenerId(self, tuner_id, allocation_id): def deallocate_frontend_listener_allocation(self, frontend_listener_allocation): tuner_id = self.getTunerMapping(frontend_listener_allocation.listener_allocation_id) if tuner_id < 0: - self._log.debug("ALLOCATION_ID NOT FOUND: [" + str(frontend_listener_allocation.listener_allocation_id) + "]") + self._deviceLog.debug("ALLOCATION_ID NOT FOUND: [" + str(frontend_listener_allocation.listener_allocation_id) + "]") retval_struct = [CF.DataType(id='FRONTEND::listener_allocation::existing_allocation_id',value=any.to_any(frontend_listener_allocation.existing_allocation_id)),CF.DataType(id='FRONTEND::listener_allocation::listener_allocation_id',value=any.to_any(frontend_listener_allocation.existing_allocation_id))] retval = CF.DataType(id='FRONTEND::listener_allocation',value=any.to_any(retval_struct)) raise CF.Device.InvalidCapacity("ALLOCATION_ID NOT FOUND", [retval]) if self.tuner_allocation_ids[tuner_id].control_allocation_id == frontend_listener_allocation.listener_allocation_id: - self._log.debug("Controlling allocation id cannot be used as a listener id in a deallocation: [" + str(frontend_listener_allocation.listener_allocation_id) + "]") + self._deviceLog.debug("Controlling allocation id cannot be used as a listener id in a deallocation: [" + str(frontend_listener_allocation.listener_allocation_id) + "]") retval_struct = 
[CF.DataType(id='FRONTEND::listener_allocation::existing_allocation_id',value=any.to_any(frontend_listener_allocation.existing_allocation_id)),CF.DataType(id='FRONTEND::listener_allocation::listener_allocation_id',value=any.to_any(frontend_listener_allocation.existing_allocation_id))] retval = CF.DataType(id='FRONTEND::listener_allocation',value=any.to_any(retval_struct)) raise CF.Device.InvalidCapacity("Controlling allocation id cannot be used as a listener id", [retval]) @@ -878,7 +959,7 @@ def enableTuner(self, tuner_id, enable): return True def listenerRequestValidation(self, request, tuner_id): - self._log.trace("listenerRequestValidation(): request " + str(request) + " ,tuner_id " + str(tuner_id)) + self._deviceLog.trace("listenerRequestValidation(): request " + str(request) + " ,tuner_id " + str(tuner_id)) # ensure requested values are non-negative if floatingPointCompare(request.center_frequency,0)<0 or \ @@ -890,12 +971,12 @@ def listenerRequestValidation(self, request, tuner_id): # ensure lower end of requested band fits if floatingPointCompare((request.center_frequency-(request.bandwidth*0.5)),(self.frontend_tuner_status[tuner_id].center_frequency-(self.frontend_tuner_status[tuner_id].bandwidth*0.5))) < 0: - self._log.trace("listenerRequestValidation(): FAILED LOWER END TEST") + self._deviceLog.trace("listenerRequestValidation(): FAILED LOWER END TEST") return False # ensure upper end of requested band fits if floatingPointCompare((request.center_frequency + (request.bandwidth*0.5)),(self.frontend_tuner_status[tuner_id].center_frequency + (self.frontend_tuner_status[tuner_id].bandwidth*0.5))) > 0: - self._log.trace("listenerRequestValidation(): FAILED UPPER END TEST") + self._deviceLog.trace("listenerRequestValidation(): FAILED UPPER END TEST") return False # ensure tuner bandwidth meets requested tolerance @@ -948,7 +1029,7 @@ def sendEOS(self, allocation_id): break def removeTunerMappingByAllocationId(self, allocation_id): - 
self._log.trace("removeTunerMapping(allocation_id) allocation_id " + str(allocation_id)) + self._deviceLog.trace("removeTunerMapping(allocation_id) allocation_id " + str(allocation_id)) self.allocation_id_mapping_lock.acquire() try: if self.frontend_tuner_status[self.allocation_id_to_tuner_id[allocation_id]].allocation_id_csv.split(',')[0] == allocation_id: @@ -1040,7 +1121,7 @@ def printSRI(self, sri, strHeader = "DEBUG SRI"): print "\tmode:", sri.mode print "\tstreamID:", sri.streamID for keyword in sri.keywords: - print "\t KEYWORD KEY/VAL ::", keywords.id << ":", any.from_any(keywords.value) + print "\t KEYWORD KEY/VAL ::", keywords.id + ":", any.from_any(keywords.value) ###################################################################### # PROPERTIES @@ -1069,14 +1150,14 @@ def printSRI(self, sri, strHeader = "DEBUG SRI"): name="frontend_tuner_allocation", structdef=frontend_tuner_allocation, configurationkind=("allocation",), - mode="readwrite", + mode="writeonly", description="""Frontend Interfaces v2.0 main allocation structure""" ) frontend_listener_allocation = struct_property(id_="FRONTEND::listener_allocation", name="frontend_listener_allocation", structdef=frontend_listener_allocation, configurationkind=("allocation",), - mode="readwrite", + mode="writeonly", description="""Allocates a listener (subscriber) based off a previous allocation """ ) frontend_tuner_status = structseq_property(id_="FRONTEND::tuner_status", @@ -1089,4 +1170,26 @@ def printSRI(self, sri, strHeader = "DEBUG SRI"): ) - +class FrontendScannerDevice(FrontendTunerDevice): + + def __init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams): + FrontendTunerDevice.__init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams) + self.supports_scan = True + + frontend_scanner_allocation = struct_property(id_="FRONTEND::scanner_allocation", + name="frontend_scanner_allocation", + structdef=frontend_scanner_allocation, + 
configurationkind=("allocation",), + mode="writeonly", + description="""Frontend Interfaces v2.0 scanner allocation structure""" + ) + + def _checkValidIds(self, propdict): + self._has_scanner = False + for prop_key in propdict: + if prop_key != "FRONTEND::tuner_allocation" and prop_key != "FRONTEND::listener_allocation" and prop_key != "FRONTEND::scanner_allocation": + raise CF.Device.InvalidCapacity("UNKNOWN ALLOCATION PROPERTY "+prop_key, [CF.DataType(id=prop_key,value=any.to_any(propdict[prop_key]))]) + if prop_key == "FRONTEND::scanner_allocation": + self._has_scanner = True + self._props[prop_key] = propdict[prop_key] + diff --git a/frontendInterfaces/libsrc/setup.py b/frontendInterfaces/libsrc/setup.py index fd347a82e..c42f95127 100644 --- a/frontendInterfaces/libsrc/setup.py +++ b/frontendInterfaces/libsrc/setup.py @@ -29,7 +29,7 @@ # replaces it (i.e. a developer does a command-line build), use 1.X.X version='__VERSION__' if version.find('__') == 0: - version = '2.3.9' + version = '2.4.4' setup( name='frontend', diff --git a/frontendInterfaces/libsrc/testing/tests/cpp/Makefile.am b/frontendInterfaces/libsrc/testing/tests/cpp/Makefile.am new file mode 100644 index 000000000..258a3babe --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/cpp/Makefile.am @@ -0,0 +1,27 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +# Rules for the test code (use `make check` to execute) +TESTS = Frontend +check_PROGRAMS = $(TESTS) + +Frontend_SOURCES = main.cpp +Frontend_SOURCES += ValidateRequest.cpp Ports.cpp +Frontend_CXXFLAGS = -I../../../cpp/ $(redhawk_INCLUDES_auto) $(BULKIO_CFLAGS) $(BOOST_CPPFLAGS) $(OSSIE_CFLAGS) $(CPPUNIT_CFLAGS) +Frontend_LDADD = -L../../.. -lfrontend-@FRONTEND_API_VERSION@ $(BULKIO_LIBS) $(BOOST_LDFLAGS) $(BOOST_SYSTEM_LIB) $(OSSIE_LIBS) $(CPPUNIT_LIBS) $(LOG4CXX_LIBS) diff --git a/frontendInterfaces/libsrc/testing/tests/cpp/Ports.cpp b/frontendInterfaces/libsrc/testing/tests/cpp/Ports.cpp new file mode 100644 index 000000000..4b701da1d --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/cpp/Ports.cpp @@ -0,0 +1,434 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "Ports.h" + +CPPUNIT_TEST_SUITE_REGISTRATION(PortsTest); + +void PortsTest::testGPSGetter() +{ + gps_port_sample input_parent; + frontend::InGPSPort *input_port_1 = new frontend::InGPSPort("input_1", &input_parent); + frontend::OutGPSPort *output_port = new frontend::OutGPSPort("output"); + + input_parent.set_source_id("newvalue"); + + CPPUNIT_ASSERT_THROW(output_port->gps_info(), redhawk::PortCallError); + CPPUNIT_ASSERT_THROW(output_port->_get_gps_info("hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + frontend::GPSInfo gpsinfo = output_port->gps_info(); + CPPUNIT_ASSERT(gpsinfo.source_id == "newvalue"); + gpsinfo = output_port->_get_gps_info("hello"); + CPPUNIT_ASSERT(gpsinfo.source_id == "newvalue"); + CPPUNIT_ASSERT_THROW(output_port->_get_gps_info("foo"), redhawk::PortCallError); + + gps_port_sample input_parent_2; + input_parent_2.set_source_id("newvalue_2"); + frontend::InGPSPort *input_port_2 = new frontend::InGPSPort("input_2", &input_parent_2); + + output_port->connectPort(input_port_2->_this(), "foo"); + CPPUNIT_ASSERT_THROW(output_port->gps_info(), redhawk::PortCallError); + gpsinfo = output_port->_get_gps_info("hello"); + CPPUNIT_ASSERT(gpsinfo.source_id == "newvalue"); + CPPUNIT_ASSERT_THROW(output_port->_get_gps_info("something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testGPSSetter() +{ + gps_port_sample input_parent; + gps_port_sample input_parent_2; + frontend::InGPSPort *input_port_1 = new frontend::InGPSPort("input_1", &input_parent); + frontend::OutGPSPort *output_port = new frontend::OutGPSPort("output"); + + CPPUNIT_ASSERT(input_parent.get_source_id()=="original"); + + frontend::GPSInfo gpsinfo; + gpsinfo.source_id = "newvalue"; + + output_port->gps_info(gpsinfo); + CPPUNIT_ASSERT(input_parent.get_source_id() == "original"); + CPPUNIT_ASSERT_THROW(output_port->gps_info(gpsinfo, "hello"), 
redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + output_port->gps_info(gpsinfo); + CPPUNIT_ASSERT(input_parent.get_source_id() == "newvalue"); + + gpsinfo.source_id = "newvalue_2"; + output_port->gps_info(gpsinfo, "hello"); + CPPUNIT_ASSERT(input_parent.get_source_id() == "newvalue_2"); + + CPPUNIT_ASSERT_THROW(output_port->gps_info(gpsinfo, "foo"), redhawk::PortCallError); + + gpsinfo.source_id = "newvalue_3"; + frontend::InGPSPort *input_port_2 = new frontend::InGPSPort("input_2", &input_parent_2); + output_port->connectPort(input_port_2->_this(), "foo"); + + output_port->gps_info(gpsinfo); + CPPUNIT_ASSERT(input_parent.get_source_id() == "newvalue_3"); + CPPUNIT_ASSERT(input_parent_2.get_source_id() == "newvalue_3"); + + gpsinfo.source_id = "newvalue_4"; + output_port->gps_info(gpsinfo, "hello"); + CPPUNIT_ASSERT(input_parent.get_source_id() == "newvalue_4"); + CPPUNIT_ASSERT(input_parent_2.get_source_id() == "newvalue_3"); + + CPPUNIT_ASSERT_THROW(output_port->gps_info(gpsinfo, "something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testNavGetter() +{ + nav_port_sample input_parent; + frontend::InNavDataPort *input_port_1 = new frontend::InNavDataPort("input_1", &input_parent); + frontend::OutNavDataPort *output_port = new frontend::OutNavDataPort("output"); + + input_parent.set_source_id("newvalue"); + + CPPUNIT_ASSERT_THROW(output_port->nav_packet(), redhawk::PortCallError); + CPPUNIT_ASSERT_THROW(output_port->_get_nav_packet("hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + frontend::NavigationPacket navinfo = output_port->nav_packet(); + CPPUNIT_ASSERT(navinfo.source_id == "newvalue"); + navinfo = output_port->_get_nav_packet("hello"); + CPPUNIT_ASSERT(navinfo.source_id == "newvalue"); + CPPUNIT_ASSERT_THROW(output_port->_get_nav_packet("foo"), redhawk::PortCallError); + + 
nav_port_sample input_parent_2; + input_parent_2.set_source_id("newvalue_2"); + frontend::InNavDataPort *input_port_2 = new frontend::InNavDataPort ("input_2", &input_parent_2); + + output_port->connectPort(input_port_2->_this(), "foo"); + CPPUNIT_ASSERT_THROW(output_port->nav_packet(), redhawk::PortCallError); + navinfo = output_port->_get_nav_packet("hello"); + CPPUNIT_ASSERT(navinfo.source_id == "newvalue"); + CPPUNIT_ASSERT_THROW(output_port->_get_nav_packet("something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testNavSetter() +{ + nav_port_sample input_parent; + nav_port_sample input_parent_2; + frontend::InNavDataPort *input_port_1 = new frontend::InNavDataPort("input_1", &input_parent); + frontend::OutNavDataPort *output_port = new frontend::OutNavDataPort("output"); + + CPPUNIT_ASSERT(input_parent.get_source_id()=="original"); + + frontend::NavigationPacket navinfo; + navinfo.source_id = "newvalue"; + + output_port->nav_packet(navinfo); + CPPUNIT_ASSERT(input_parent.get_source_id() == "original"); + CPPUNIT_ASSERT_THROW(output_port->nav_packet(navinfo, "hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + output_port->nav_packet(navinfo); + CPPUNIT_ASSERT(input_parent.get_source_id() == "newvalue"); + + navinfo.source_id = "newvalue_2"; + output_port->nav_packet(navinfo, "hello"); + CPPUNIT_ASSERT(input_parent.get_source_id() == "newvalue_2"); + + CPPUNIT_ASSERT_THROW(output_port->nav_packet(navinfo, "foo"), redhawk::PortCallError); + + navinfo.source_id = "newvalue_3"; + frontend::InNavDataPort *input_port_2 = new frontend::InNavDataPort("input_2", &input_parent_2); + output_port->connectPort(input_port_2->_this(), "foo"); + + output_port->nav_packet(navinfo); + CPPUNIT_ASSERT(input_parent.get_source_id() == "newvalue_3"); + CPPUNIT_ASSERT(input_parent_2.get_source_id() == "newvalue_3"); + + navinfo.source_id = "newvalue_4"; + 
output_port->nav_packet(navinfo, "hello"); + CPPUNIT_ASSERT(input_parent.get_source_id() == "newvalue_4"); + CPPUNIT_ASSERT(input_parent_2.get_source_id() == "newvalue_3"); + + CPPUNIT_ASSERT_THROW(output_port->nav_packet(navinfo, "something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testRFInfoGetter() +{ + rfinfo_port_sample input_parent; + frontend::InRFInfoPort *input_port_1 = new frontend::InRFInfoPort("input_1", &input_parent); + frontend::OutRFInfoPort *output_port = new frontend::OutRFInfoPort("output"); + + input_parent.set_rf_flow_id("newvalue"); + + CPPUNIT_ASSERT_THROW(output_port->rfinfo_pkt(), redhawk::PortCallError); + CPPUNIT_ASSERT_THROW(output_port->_get_rfinfo_pkt("hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + frontend::RFInfoPkt rfinfo = output_port->rfinfo_pkt(); + CPPUNIT_ASSERT(rfinfo.rf_flow_id == "newvalue"); + rfinfo = output_port->_get_rfinfo_pkt("hello"); + CPPUNIT_ASSERT(rfinfo.rf_flow_id == "newvalue"); + CPPUNIT_ASSERT_THROW(output_port->_get_rfinfo_pkt("foo"), redhawk::PortCallError); + + rfinfo_port_sample input_parent_2; + input_parent_2.set_rf_flow_id("newvalue_2"); + frontend::InRFInfoPort *input_port_2 = new frontend::InRFInfoPort ("input_2", &input_parent_2); + + output_port->connectPort(input_port_2->_this(), "foo"); + CPPUNIT_ASSERT_THROW(output_port->rfinfo_pkt(), redhawk::PortCallError); + rfinfo = output_port->_get_rfinfo_pkt("hello"); + CPPUNIT_ASSERT(rfinfo.rf_flow_id == "newvalue"); + CPPUNIT_ASSERT_THROW(output_port->_get_rfinfo_pkt("something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testRFInfoSetter() +{ + rfinfo_port_sample input_parent; + rfinfo_port_sample input_parent_2; + frontend::InRFInfoPort *input_port_1 = new frontend::InRFInfoPort("input_1", &input_parent); + frontend::OutRFInfoPort 
*output_port = new frontend::OutRFInfoPort("output"); + + CPPUNIT_ASSERT(input_parent.get_rf_flow_id()=="original"); + + frontend::RFInfoPkt rfinfo; + rfinfo.rf_flow_id = "newvalue"; + + output_port->rfinfo_pkt(rfinfo); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "original"); + CPPUNIT_ASSERT_THROW(output_port->rfinfo_pkt(rfinfo, "hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + output_port->rfinfo_pkt(rfinfo); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "newvalue"); + + rfinfo.rf_flow_id = "newvalue_2"; + output_port->rfinfo_pkt(rfinfo, "hello"); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "newvalue_2"); + + CPPUNIT_ASSERT_THROW(output_port->rfinfo_pkt(rfinfo, "foo"), redhawk::PortCallError); + + rfinfo.rf_flow_id = "newvalue_3"; + frontend::InRFInfoPort *input_port_2 = new frontend::InRFInfoPort("input_2", &input_parent_2); + output_port->connectPort(input_port_2->_this(), "foo"); + + output_port->rfinfo_pkt(rfinfo); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "newvalue_3"); + CPPUNIT_ASSERT(input_parent_2.get_rf_flow_id() == "newvalue_3"); + + rfinfo.rf_flow_id = "newvalue_4"; + output_port->rfinfo_pkt(rfinfo, "hello"); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "newvalue_4"); + CPPUNIT_ASSERT(input_parent_2.get_rf_flow_id() == "newvalue_3"); + + CPPUNIT_ASSERT_THROW(output_port->rfinfo_pkt(rfinfo, "something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testRFSourceGetter() +{ + rfsource_port_sample input_parent; + frontend::InRFSourcePort *input_port_1 = new frontend::InRFSourcePort("input_1", &input_parent); + frontend::OutRFSourcePort *output_port = new frontend::OutRFSourcePort("output"); + + input_parent.set_rf_flow_id("newvalue"); + + CPPUNIT_ASSERT_THROW(output_port->current_rf_input(), redhawk::PortCallError); + CPPUNIT_ASSERT_THROW(output_port->_get_current_rf_input("hello"), 
redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + frontend::RFInfoPkt* rfsource = output_port->current_rf_input(); + CPPUNIT_ASSERT(rfsource->rf_flow_id == "newvalue"); + rfsource = output_port->_get_current_rf_input("hello"); + CPPUNIT_ASSERT(rfsource->rf_flow_id == "newvalue"); + CPPUNIT_ASSERT_THROW(output_port->_get_current_rf_input("foo"), redhawk::PortCallError); + + rfsource_port_sample input_parent_2; + input_parent_2.set_rf_flow_id("newvalue_2"); + frontend::InRFSourcePort *input_port_2 = new frontend::InRFSourcePort ("input_2", &input_parent_2); + + output_port->connectPort(input_port_2->_this(), "foo"); + CPPUNIT_ASSERT_THROW(output_port->current_rf_input(), redhawk::PortCallError); + rfsource = output_port->_get_current_rf_input("hello"); + CPPUNIT_ASSERT(rfsource->rf_flow_id == "newvalue"); + CPPUNIT_ASSERT_THROW(output_port->_get_current_rf_input("something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testRFSourceSetter() +{ + rfsource_port_sample input_parent; + rfsource_port_sample input_parent_2; + frontend::InRFSourcePort *input_port_1 = new frontend::InRFSourcePort("input_1", &input_parent); + frontend::OutRFSourcePort *output_port = new frontend::OutRFSourcePort("output"); + + CPPUNIT_ASSERT(input_parent.get_rf_flow_id()=="original"); + + frontend::RFInfoPkt rfsource; + rfsource.rf_flow_id = "newvalue"; + + output_port->current_rf_input(rfsource); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "original"); + CPPUNIT_ASSERT_THROW(output_port->current_rf_input(rfsource, "hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + output_port->current_rf_input(rfsource); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "newvalue"); + + rfsource.rf_flow_id = "newvalue_2"; + output_port->current_rf_input(rfsource, "hello"); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "newvalue_2"); 
+ + CPPUNIT_ASSERT_THROW(output_port->current_rf_input(rfsource, "foo"), redhawk::PortCallError); + + rfsource.rf_flow_id = "newvalue_3"; + frontend::InRFSourcePort *input_port_2 = new frontend::InRFSourcePort("input_2", &input_parent_2); + output_port->connectPort(input_port_2->_this(), "foo"); + + output_port->current_rf_input(rfsource); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "newvalue_3"); + CPPUNIT_ASSERT(input_parent_2.get_rf_flow_id() == "newvalue_3"); + + rfsource.rf_flow_id = "newvalue_4"; + output_port->current_rf_input(rfsource, "hello"); + CPPUNIT_ASSERT(input_parent.get_rf_flow_id() == "newvalue_4"); + CPPUNIT_ASSERT(input_parent_2.get_rf_flow_id() == "newvalue_3"); + + CPPUNIT_ASSERT_THROW(output_port->current_rf_input(rfsource, "something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testTunerGetter() +{ + tuner_port_sample input_parent; + frontend::InDigitalScanningTunerPort *input_port_1 = new frontend::InDigitalScanningTunerPort("input_1", &input_parent); + frontend::OutDigitalTunerPort *output_port = new frontend::OutDigitalTunerPort("output"); + + input_parent.set_bw(1); + + CPPUNIT_ASSERT_THROW(output_port->getTunerBandwidth("first_tuner"), redhawk::PortCallError); + CPPUNIT_ASSERT_THROW(output_port->getTunerBandwidth("first_tuner", "hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + double bw = output_port->getTunerBandwidth("first_tuner"); + CPPUNIT_ASSERT(bw == 1); + bw = output_port->getTunerBandwidth("first_tuner", "hello"); + CPPUNIT_ASSERT(bw == 1); + CPPUNIT_ASSERT_THROW(output_port->getTunerBandwidth("first_tuner", "foo"), redhawk::PortCallError); + + tuner_port_sample input_parent_2; + input_parent_2.set_bw(2); + frontend::InDigitalScanningTunerPort *input_port_2 = new frontend::InDigitalScanningTunerPort ("input_2", &input_parent_2); + + output_port->connectPort(input_port_2->_this(), "foo"); + 
CPPUNIT_ASSERT_THROW(output_port->getTunerBandwidth("first_tuner"), redhawk::PortCallError); + bw = output_port->getTunerBandwidth("first_tuner", "hello"); + CPPUNIT_ASSERT(bw == 1); + CPPUNIT_ASSERT_THROW(output_port->getTunerBandwidth("first_tuner", "something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::testTunerSetter() +{ + tuner_port_sample input_parent; + tuner_port_sample input_parent_2; + frontend::InDigitalScanningTunerPort *input_port_1 = new frontend::InDigitalScanningTunerPort("input_1", &input_parent); + frontend::OutDigitalTunerPort *output_port = new frontend::OutDigitalTunerPort("output"); + + CPPUNIT_ASSERT(input_parent.get_bw()==0); + + double bw = 1; + output_port->setTunerBandwidth("first_tuner", bw); + CPPUNIT_ASSERT(input_parent.get_bw() == 0); + CPPUNIT_ASSERT_THROW(output_port->setTunerBandwidth("first_tuner", bw, "hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + output_port->setTunerBandwidth("first_tuner", bw); + CPPUNIT_ASSERT(input_parent.get_bw() == bw); + + bw = 2; + output_port->setTunerBandwidth("first_tuner", bw, "hello"); + CPPUNIT_ASSERT(input_parent.get_bw() == bw); + + CPPUNIT_ASSERT_THROW(output_port->setTunerBandwidth("first_tuner", bw, "foo"), redhawk::PortCallError); + + bw = 3; + frontend::InDigitalScanningTunerPort *input_port_2 = new frontend::InDigitalScanningTunerPort("input_2", &input_parent_2); + output_port->connectPort(input_port_2->_this(), "foo"); + + output_port->setTunerBandwidth("first_tuner", bw); + CPPUNIT_ASSERT(input_parent.get_bw() == bw); + CPPUNIT_ASSERT(input_parent_2.get_bw() == bw); + + bw = 4; + output_port->setTunerBandwidth("first_tuner", bw, "hello"); + CPPUNIT_ASSERT(input_parent.get_bw() == 4); + CPPUNIT_ASSERT(input_parent_2.get_bw() == 3); + + CPPUNIT_ASSERT_THROW(output_port->setTunerBandwidth("first_tuner", bw, "something"), redhawk::PortCallError); + + 
output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} + +void PortsTest::setUp() +{ +} + +void PortsTest::tearDown() +{ +} diff --git a/frontendInterfaces/libsrc/testing/tests/cpp/Ports.h b/frontendInterfaces/libsrc/testing/tests/cpp/Ports.h new file mode 100644 index 000000000..9b5adbf5b --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/cpp/Ports.h @@ -0,0 +1,182 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef FRONTEND_PORTSTTEST_H +#define FRONTEND_PORTSTTEST_H + +#include +#include +#include +#include + +class gps_port_sample: public frontend::gps_delegation { + public: + gps_port_sample() { + id = "original"; + } + void set_source_id(std::string in_id) { + id = in_id; + } + std::string get_source_id() { + return id; + } + frontend::GPSInfo get_gps_info(const std::string& port_name) { + frontend::GPSInfo gpsinfo; + gpsinfo.source_id = id; + return gpsinfo; + } + void set_gps_info(const std::string& port_name, const frontend::GPSInfo &gps_info) { + id = gps_info.source_id; + } + frontend::GpsTimePos get_gps_time_pos(const std::string& port_name) { + return frontend::GpsTimePos(); + } + void set_gps_time_pos(const std::string& port_name, const frontend::GpsTimePos &gps_time_pos) { + } + std::string id; +}; + +class nav_port_sample: public frontend::nav_delegation { + public: + nav_port_sample() { + id = "original"; + } + void set_source_id(std::string in_id) { + id = in_id; + } + std::string get_source_id() { + return id; + } + frontend::NavigationPacket get_nav_packet(const std::string& port_name) { + frontend::NavigationPacket nav_info; + nav_info.source_id = id; + return nav_info; + } + void set_nav_packet(const std::string& port_name, const frontend::NavigationPacket &nav_info) { + id = nav_info.source_id; + } + std::string id; +}; + +class rfinfo_port_sample: public frontend::rfinfo_delegation { + public: + rfinfo_port_sample() { + id = "original"; + } + void set_rf_flow_id(std::string in_id) { + id = in_id; + } + std::string get_rf_flow_id() { + return id; + } + std::string get_rf_flow_id(const std::string& port_name) { + return std::string("none"); + } + void set_rf_flow_id(const std::string& port_name, const std::string& id) { + } + frontend::RFInfoPkt get_rfinfo_pkt(const std::string& port_name) { + frontend::RFInfoPkt rfinfo; + rfinfo.rf_flow_id = id; + return rfinfo; + } + void set_rfinfo_pkt(const std::string& port_name, const 
frontend::RFInfoPkt &pkt) { + id = pkt.rf_flow_id; + } + std::string id; +}; +class rfsource_port_sample: public frontend::rfsource_delegation { + public: + rfsource_port_sample() { + id = "original"; + } + void set_rf_flow_id(std::string in_id) { + id = in_id; + } + std::string get_rf_flow_id() { + return id; + } + std::vector get_available_rf_inputs(const std::string& port_name) { + return std::vector(); + } + void set_available_rf_inputs(const std::string& port_name, std::vector &inputs) { + } + frontend::RFInfoPkt get_current_rf_input(const std::string& port_name) { + frontend::RFInfoPkt rfinfo; + rfinfo.rf_flow_id = id; + return rfinfo; + } + void set_current_rf_input(const std::string& port_name, const frontend::RFInfoPkt &input) { + id = input.rf_flow_id; + } + std::string id; +}; + +class tuner_port_sample: public frontend::digital_scanning_tuner_delegation { + public: + tuner_port_sample() { + bw = 0; + } + void set_bw(double in_bw) { + bw = in_bw; + } + double get_bw() { + return bw; + } + void setTunerBandwidth(const std::string& id, double in_bw) { + bw = in_bw; + } + double getTunerBandwidth(const std::string& id) { + return bw; + } + CF::Properties* getTunerStatus(const std::string &id) {}; + double bw; +}; + +class PortsTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(PortsTest); + CPPUNIT_TEST(testGPSGetter); + CPPUNIT_TEST(testGPSSetter); + CPPUNIT_TEST(testNavGetter); + CPPUNIT_TEST(testNavSetter); + CPPUNIT_TEST(testRFInfoGetter); + CPPUNIT_TEST(testRFInfoSetter); + CPPUNIT_TEST(testRFSourceGetter); + CPPUNIT_TEST(testRFSourceSetter); + CPPUNIT_TEST(testTunerGetter); + CPPUNIT_TEST(testTunerSetter); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testGPSGetter(); + void testGPSSetter(); + void testNavGetter(); + void testNavSetter(); + void testRFInfoGetter(); + void testRFInfoSetter(); + void testRFSourceGetter(); + void testRFSourceSetter(); + void testTunerGetter(); + void testTunerSetter(); +}; + 
+#endif // FRONTEND_PORTSTTEST_H diff --git a/frontendInterfaces/libsrc/testing/tests/cpp/ValidateRequest.cpp b/frontendInterfaces/libsrc/testing/tests/cpp/ValidateRequest.cpp new file mode 100644 index 000000000..adf45b018 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/cpp/ValidateRequest.cpp @@ -0,0 +1,159 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "ValidateRequest.h" + +#include +#include +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(ValidateRequestTest); + +void ValidateRequestTest::testSRI() +{ + frontend::frontend_tuner_allocation_struct request; + BULKIO::StreamSRI upstream_sri; + request.center_frequency = 100e6; + request.bandwidth = 1e6; + request.sample_rate = 2e6; + upstream_sri.xdelta = 1/2e6; + upstream_sri.mode = 0; + redhawk::PropertyMap& keywords = redhawk::PropertyMap::cast(upstream_sri.keywords); + double cf, bw; + cf = 100e6; + bw = 1e6; + keywords["CHAN_RF"] = cf; + keywords["FRONTEND::BANDWIDTH"] = bw; + CPPUNIT_ASSERT(validateRequestVsSRI(request, upstream_sri, false)); + cf = 100.49e6; + keywords["CHAN_RF"] = cf; + CPPUNIT_ASSERT(validateRequestVsSRI(request, upstream_sri, false)); + cf = 99.51e6; + keywords["CHAN_RF"] = cf; + CPPUNIT_ASSERT(validateRequestVsSRI(request, upstream_sri, false)); + cf = 100.51e6; + keywords["CHAN_RF"] = cf; + CPPUNIT_ASSERT_THROW(validateRequestVsSRI(request, upstream_sri, false), FRONTEND::BadParameterException); + cf = 99.49e6; + keywords["CHAN_RF"] = cf; + CPPUNIT_ASSERT_THROW(validateRequestVsSRI(request, upstream_sri, false), FRONTEND::BadParameterException); +} + +void ValidateRequestTest::testDeviceSRI() +{ + frontend::frontend_tuner_allocation_struct request; + BULKIO::StreamSRI upstream_sri; + request.center_frequency = 100e6; + request.bandwidth = 1e6; + request.sample_rate = 2e6; + upstream_sri.xdelta = 1/2e6; + upstream_sri.mode = 0; + redhawk::PropertyMap& keywords = redhawk::PropertyMap::cast(upstream_sri.keywords); + double cf, bw; + cf = 100e6; + bw = 1e6; + keywords["CHAN_RF"] = cf; + keywords["FRONTEND::BANDWIDTH"] = bw; + double min_dev_cf = 99e6; + double max_dev_cf = 100e6; + double max_dev_bw = 3e6; + double max_dev_sr = 6e6; + CPPUNIT_ASSERT(validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)); + cf = 100.49e6; + keywords["CHAN_RF"] = cf; + 
CPPUNIT_ASSERT(validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)); + cf = 99.51e6; + keywords["CHAN_RF"] = cf; + CPPUNIT_ASSERT(validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)); + cf = 100.51e6; + keywords["CHAN_RF"] = cf; + CPPUNIT_ASSERT_THROW(validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr), FRONTEND::BadParameterException); + cf = 99.49e6; + keywords["CHAN_RF"] = cf; + CPPUNIT_ASSERT_THROW(validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr), FRONTEND::BadParameterException); +} + +void ValidateRequestTest::testRFInfo() +{ + frontend::frontend_tuner_allocation_struct request; + frontend::RFInfoPkt rfinfo; + request.center_frequency = 100e6; + request.bandwidth = 1e6; + request.sample_rate = 2e6; + double cf, bw; + cf = 100e6; + bw = 1e6; + rfinfo.rf_center_freq = cf; + rfinfo.rf_bandwidth = bw; + CPPUNIT_ASSERT(validateRequestVsRFInfo(request, rfinfo, false)); + cf = 100.49e6; + rfinfo.rf_center_freq = cf; + CPPUNIT_ASSERT(validateRequestVsRFInfo(request, rfinfo, false)); + cf = 99.51e6; + rfinfo.rf_center_freq = cf; + CPPUNIT_ASSERT(validateRequestVsRFInfo(request, rfinfo, false)); + cf = 100.51e6; + rfinfo.rf_center_freq = cf; + CPPUNIT_ASSERT_THROW(validateRequestVsRFInfo(request, rfinfo, false), FRONTEND::BadParameterException); + cf = 99.49e6; + rfinfo.rf_center_freq = cf; + CPPUNIT_ASSERT_THROW(validateRequestVsRFInfo(request, rfinfo, false), FRONTEND::BadParameterException); +} + +void ValidateRequestTest::testDeviceRFInfo() +{ + frontend::frontend_tuner_allocation_struct request; + frontend::RFInfoPkt rfinfo; + request.center_frequency = 100e6; + request.bandwidth = 1e6; + request.sample_rate = 2e6; + rfinfo.rf_center_freq = 100e6; + rfinfo.rf_bandwidth = 1e6; + double cf, bw; + cf = 100e6; + bw = 1e6; + rfinfo.rf_center_freq = cf; + 
rfinfo.rf_bandwidth = bw; + double min_dev_cf = 99e6; + double max_dev_cf = 100e6; + double max_dev_bw = 3e6; + double max_dev_sr = 6e6; + CPPUNIT_ASSERT(validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)); + cf = 100.49e6; + rfinfo.rf_center_freq = cf; + CPPUNIT_ASSERT(validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)); + cf = 99.51e6; + rfinfo.rf_center_freq = cf; + CPPUNIT_ASSERT(validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)); + cf = 100.51e6; + rfinfo.rf_center_freq = cf; + CPPUNIT_ASSERT_THROW(validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr), FRONTEND::BadParameterException); + cf = 99.49e6; + rfinfo.rf_center_freq = cf; + CPPUNIT_ASSERT_THROW(validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr), FRONTEND::BadParameterException); +} + +void ValidateRequestTest::setUp() +{ +} + +void ValidateRequestTest::tearDown() +{ +} diff --git a/frontendInterfaces/libsrc/testing/tests/cpp/ValidateRequest.h b/frontendInterfaces/libsrc/testing/tests/cpp/ValidateRequest.h new file mode 100644 index 000000000..5b8c51d50 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/cpp/ValidateRequest.h @@ -0,0 +1,44 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef FRONTEND_VALIDATEREQUESTTEST_H +#define FRONTEND_VALIDATEREQUESTTEST_H + +#include + +class ValidateRequestTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(ValidateRequestTest); + CPPUNIT_TEST(testSRI); + CPPUNIT_TEST(testDeviceSRI); + CPPUNIT_TEST(testRFInfo); + CPPUNIT_TEST(testDeviceRFInfo); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testSRI(); + void testDeviceSRI(); + void testRFInfo(); + void testDeviceRFInfo(); +}; + +#endif // FRONTEND_VALIDATEREQUESTTEST_H diff --git a/frontendInterfaces/libsrc/testing/tests/cpp/main.cpp b/frontendInterfaces/libsrc/testing/tests/cpp/main.cpp new file mode 100644 index 000000000..ccb79b0b8 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/cpp/main.cpp @@ -0,0 +1,126 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// log4cxx includes need to follow CorbaUtils, otherwise "ossie/debug.h" will +// issue warnings about the logging macros +#include +#include + +int main(int argc, char* argv[]) +{ + const char* short_options = "v"; + struct option long_options[] = { + { "xunit-file", required_argument, 0, 'x' }, + { "log-level", required_argument, 0, 'l' }, + { "log-config", required_argument, 0, 'c' }, + { "verbose", no_argument, 0, 'v' }, + { 0, 0, 0, 0 } + }; + + bool verbose = false; + const char* xunit_file = 0; + const char* log_config = 0; + std::string log_level; + int status; + while ((status = getopt_long(argc, argv, short_options, long_options, NULL)) >= 0) { + switch (status) { + case '?': // Invalid option + return -1; + case 'x': + xunit_file = optarg; + break; + case 'l': + log_level = optarg; + break; + case 'c': + log_config = optarg; + break; + case 'v': + verbose = true; + break; + } + } + + // Many tests require CORBA, and possibly the REDHAWK ORB singleton, so + // initialize up front. + ossie::corba::CorbaInit(0,0); + + // If a log4j configuration file was given, read it. + if (log_config) { + log4cxx::PropertyConfigurator::configure(log_config); + } else { + // Set up a simple configuration that logs on the console. + log4cxx::BasicConfigurator::configure(); + } + + // Apply the log level (can override config file). + log4cxx::LevelPtr level = log4cxx::Level::toLevel(log_level, log4cxx::Level::getInfo()); + log4cxx::Logger::getRootLogger()->setLevel(level); + + // Create the test runner. + CppUnit::TextTestRunner runner; + + // Enable verbose output, displaying the name of each test as it runs. 
+ if (verbose) { + runner.eventManager().addListener(new CppUnit::BriefTestProgressListener()); + } + + // Use a compiler outputter instead of the default text one. + runner.setOutputter(new CppUnit::CompilerOutputter(&runner.result(), std::cerr)); + + // Get the top level suite from the registry. + CppUnit::Test* suite = CppUnit::TestFactoryRegistry::getRegistry().makeTest(); + runner.addTest(suite); + + // If an argument was given, assume it was the name of a test or suite. + std::string test_path; + if (optind < argc) { + test_path = argv[optind]; + } + + // Run the tests: don't pause, write output, don't print progress in + // verbose mode (which seems ironic, but the test progress listener will + // print each test name) + bool success = runner.run(test_path, false, true, !verbose); + + // Write XML file, if requested. + if (xunit_file) { + std::ofstream file(xunit_file); + CppUnit::XmlOutputter xml_outputter(&runner.result(), file); + xml_outputter.write(); + } + + // Return error code 1 if the one of test failed. + return success ? 0 : 1; +} diff --git a/frontendInterfaces/libsrc/testing/tests/cpp/runtests b/frontendInterfaces/libsrc/testing/tests/cpp/runtests new file mode 100755 index 000000000..b0f3bb1f2 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/cpp/runtests @@ -0,0 +1,5 @@ +# +# +make -j 10 Frontend +./Frontend --xunit-file ../cppunit-results.xml + diff --git a/frontendInterfaces/libsrc/testing/tests/java/AllTests.java b/frontendInterfaces/libsrc/testing/tests/java/AllTests.java new file mode 100644 index 000000000..61bf6835a --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/AllTests.java @@ -0,0 +1,31 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.junit.runners.Suite.SuiteClasses; + +@RunWith(Suite.class) +@SuiteClasses({ + ValidateRequestTest.class, + PortsTest.class +}) +public class AllTests { +} diff --git a/frontendInterfaces/libsrc/testing/tests/java/Main.java b/frontendInterfaces/libsrc/testing/tests/java/Main.java new file mode 100644 index 000000000..404c7c55d --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/Main.java @@ -0,0 +1,150 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +import org.apache.log4j.BasicConfigurator; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.PropertyConfigurator; + +import org.junit.Test; +import org.junit.runner.JUnitCore; +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.Request; + +import utils.ChainFilter; +import utils.TestFilter; +import utils.TextListener; + +public class Main { + + public static Description getTestDescription(String target) throws ClassNotFoundException, NoSuchMethodException + { + // Try to see if it's a class with tests first + try { + return getClassDescription(target); + } catch (ClassNotFoundException exc) { + // The target might be "class.method" + } + + // Split package/class from method name + int pos = target.lastIndexOf('.'); + if (pos < 0) { + // No dots, must be an invalid class + throw new ClassNotFoundException(target); + } + String suite = target.substring(0, pos); + String name = target.substring(pos+1); + + // Class and method lookup may throw exceptions, but it's up to the + // caller to handle them + Class clazz = Class.forName(suite); + clazz.getMethod(name); + return Description.createTestDescription(clazz, name); + } + + public static Description getClassDescription(String target) throws ClassNotFoundException + { + Class clazz = Class.forName(target); + + // Create a suite description + Description desc = Description.createSuiteDescription(clazz); + for (Method method : clazz.getMethods()) { + // Find all methods that are annotated as tests + if (method.getAnnotation(Test.class) != null) { + desc.addChild(Description.createTestDescription(clazz, method.getName(), 
method.getAnnotations())); + } + } + + return desc; + } + + public static void main(String[] args) { + List tests = new ArrayList<>(); + + boolean verbose = false; + Level log_level = null; + String log_config = null; + + Iterator iter = Arrays.asList(args).iterator(); + while (iter.hasNext()) { + String arg = iter.next(); + if (arg.startsWith("-")) { + // Option argument + if (arg.equals("--log-level")) { + log_level = Level.toLevel(iter.next()); + } else if (arg.equals("--log-config")) { + log_config = iter.next(); + } else if (arg.equals("-v") || arg.equals("--verbose")) { + verbose = true; + } else { + System.err.println("Unrecognized option \"" + arg + "\""); + System.exit(1); + } + } else { + // First non-option argument, add remaining arguments to the + // list of tests + tests.add(arg); + while (iter.hasNext()) { + tests.add(iter.next()); + } + } + } + + if (log_config != null) { + PropertyConfigurator.configure(log_config); + } else { + BasicConfigurator.configure(); + if (log_level == null) { + log_level = Level.INFO; + } + } + + if (log_level != null) { + Logger.getRootLogger().setLevel(log_level); + } + + Request request = Request.aClass(AllTests.class); + if (!tests.isEmpty()) { + ChainFilter filter = new ChainFilter(); + for (String test : tests) { + try { + Description desc = getTestDescription(test); + filter.addFilter(new TestFilter(desc)); + } catch (ClassNotFoundException|NoSuchMethodException exc) { + System.err.println("ERROR: No test '" + test + "'"); + System.exit(1); + } + } + request = request.filterWith(filter); + } + + JUnitCore runner = new JUnitCore(); + runner.addListener(new TextListener(verbose)); + Result result = runner.run(request); + System.exit(result.wasSuccessful() ? 
0 : 1); + } +} diff --git a/frontendInterfaces/libsrc/testing/tests/java/Makefile.am b/frontendInterfaces/libsrc/testing/tests/java/Makefile.am new file mode 100644 index 000000000..06277f7f0 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/Makefile.am @@ -0,0 +1,44 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK frontendInterfaces. +# +# REDHAWK burstioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK burstioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +@rh_jarfile_rules@ + +TESTS = Frontend +check_SCRIPTS = Frontend + +noinst_java_JARFILES = frontend-tests.jar + +frontend_tests_jar_SOURCE = utils/TestFilter.java +frontend_tests_jar_SOURCE += utils/ChainFilter.java +frontend_tests_jar_SOURCE += utils/TextListener.java +frontend_tests_jar_SOURCE += AllTests.java +frontend_tests_jar_SOURCE += Main.java +frontend_tests_jar_SOURCE += ValidateRequestTest.java + +frontend_tests_jar_CLASSPATH = $(BULKIO_CLASSPATH):$(OSSIE_CLASSPATH):$(JUNIT_CLASSPATH):../../../frontend.jar:../../../../FRONTENDInterfaces.jar:. 
+frontend_tests_jar_JAVACFLAGS = -g -Xlint + +Frontend : frontend-tests.jar Makefile + @echo "#!/bin/bash" > $@ + @echo "export LD_LIBRARY_PATH=$(top_builddir)/jni/.libs:$(OSSIE_HOME)/lib:$(OSSIE_HOME)/lib64" >> $@ + @echo "exec java -cp frontend-tests.jar:$(frontend_tests_jar_CLASSPATH) -Dlog4j.configuration=file:$(srcdir)/log4j_config.txt Main \$$*" >> $@ + @chmod +x $@ + +CLEANFILES = Frontend diff --git a/frontendInterfaces/libsrc/testing/tests/java/PortsTest.java b/frontendInterfaces/libsrc/testing/tests/java/PortsTest.java new file mode 100644 index 000000000..f5dfcb5c4 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/PortsTest.java @@ -0,0 +1,967 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +import utils.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import frontend.*; +import FRONTEND.*; +/*import FRONTEND.GPSInfo; +import FRONTEND.GpsTimePos; +import FRONTEND.FrontendException; +import FRONTEND.NotSupportedException; +import FRONTEND.BadParameterException;*/ +import java.util.ArrayList; +import org.omg.CORBA.ORB; +import org.ossie.redhawk.PortCallError; +import CF.PortPackage.InvalidPort; +import CF.PortPackage.OccupiedPort; +import org.omg.PortableServer.POA; +import org.omg.PortableServer.POAPackage.ServantAlreadyActive; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; +import org.omg.PortableServer.POAPackage.ObjectNotActive; + +@RunWith(JUnit4.class) +public class PortsTest +{ + public class gps_port_sample implements frontend.GPSDelegate { + + public String id; + + public gps_port_sample() { + id = "original"; + }; + public void set_source_id(String in_id) { + id = in_id; + }; + public String get_source_id() { + return id; + }; + public GPSInfo get_gps_info(String port_name) throws FrontendException, BadParameterException, NotSupportedException { + GPSInfo _gps = new GPSInfo(); + _gps.additional_info = new CF.DataType[0]; + _gps.mode = new String(""); + _gps.rf_flow_id = new String(""); + _gps.source_id = new String(id); + _gps.status_message = new String(""); + _gps.timestamp = new BULKIO.PrecisionUTCTime(); + return _gps; + }; + public void set_gps_info(String port_name, GPSInfo data) throws FrontendException, BadParameterException, NotSupportedException { + id = data.source_id; + }; + public GpsTimePos get_gps_time_pos(String port_name) throws FrontendException, BadParameterException, NotSupportedException { + return new GpsTimePos(); + }; + public void set_gps_time_pos(String port_name, GpsTimePos data) throws FrontendException, BadParameterException, NotSupportedException { + }; + } + + public class nav_port_sample 
implements frontend.NavDataDelegate { + + public String id; + + public nav_port_sample() { + id = "original"; + }; + public void set_source_id(String in_id) { + id = in_id; + }; + public String get_source_id() { + return id; + }; + public NavigationPacket get_nav_packet(String port_name) throws FrontendException, BadParameterException, NotSupportedException { + NavigationPacket _nav = new NavigationPacket(); + _nav.acceleration = new AccelerationInfo(); + _nav.acceleration.coordinate_system = new String(""); + _nav.acceleration.datum = new String(""); + _nav.additional_info = new CF.DataType[0]; + _nav.attitude = new AttitudeInfo(); + _nav.cposition = new CartesianPositionInfo(); + _nav.cposition.datum = new String(""); + _nav.position = new PositionInfo(); + _nav.position.datum = new String(""); + _nav.rf_flow_id = new String(""); + _nav.source_id = new String(id); + _nav.timestamp = new BULKIO.PrecisionUTCTime(); + _nav.velocity = new VelocityInfo(); + _nav.velocity.coordinate_system = new String(""); + _nav.velocity.datum = new String(""); + return _nav; + }; + public void set_nav_packet(String port_name, NavigationPacket data) throws FrontendException, BadParameterException, NotSupportedException { + id = data.source_id; + }; + } + + public class rfinfo_port_sample implements frontend.RFInfoDelegate { + + public String id; + + public rfinfo_port_sample() { + id = "original"; + }; + public void set_source_id(String in_id) { + id = in_id; + }; + public String get_source_id() { + return id; + }; + public String get_rf_flow_id(String port_name) throws FrontendException, BadParameterException, NotSupportedException { + return new String(""); + }; + public void set_rf_flow_id(String port_name, String data) throws FrontendException, BadParameterException, NotSupportedException { + }; + public RFInfoPkt get_rfinfo_pkt(String port_name) throws FrontendException, BadParameterException, NotSupportedException { + RFInfoPkt foo = new RFInfoPkt(); + foo.rf_flow_id = new 
String(id); + foo.sensor = new SensorInfo(); + foo.sensor.collector = new String(""); + foo.sensor.antenna = new AntennaInfo(); + foo.sensor.antenna.description = new String(""); + foo.sensor.antenna.name = new String(""); + foo.sensor.antenna.size = new String(""); + foo.sensor.antenna.type = new String(""); + foo.sensor.feed = new FeedInfo(); + foo.sensor.feed.name = new String(""); + foo.sensor.feed.polarization = new String(""); + foo.sensor.feed.freq_range = new FreqRange(); + foo.sensor.feed.freq_range.values = new double[0]; + foo.sensor.mission = new String(""); + foo.sensor.rx = new String(""); + foo.ext_path_delays = new PathDelay[0]; + foo.capabilities = new RFCapabilities(); + foo.capabilities.freq_range = new FreqRange(); + foo.capabilities.freq_range.values = new double[0]; + foo.capabilities.bw_range = new FreqRange(); + foo.capabilities.bw_range.values = new double[0]; + foo.additional_info = new CF.DataType[0]; + return foo; + }; + public void set_rfinfo_pkt(String port_name, RFInfoPkt data) throws FrontendException, BadParameterException, NotSupportedException { + id = data.rf_flow_id; + }; + } + public class rfsource_port_sample implements frontend.RFSourceDelegate { + + public String id; + + public rfsource_port_sample() { + id = "original"; + }; + public void set_source_id(String in_id) { + id = in_id; + }; + public String get_source_id() { + return id; + }; + public RFInfoPkt[] get_available_rf_inputs(String port_name) throws FrontendException, BadParameterException, NotSupportedException { + return new RFInfoPkt[0]; + }; + public void set_available_rf_inputs(String port_name, RFInfoPkt[] data) throws FrontendException, BadParameterException, NotSupportedException { + }; + public RFInfoPkt get_current_rf_input(String port_name) throws FrontendException, BadParameterException, NotSupportedException { + RFInfoPkt foo = new RFInfoPkt(); + foo.rf_flow_id = new String(id); + foo.sensor = new SensorInfo(); + foo.sensor.collector = new String(""); + 
foo.sensor.antenna = new AntennaInfo(); + foo.sensor.antenna.description = new String(""); + foo.sensor.antenna.name = new String(""); + foo.sensor.antenna.size = new String(""); + foo.sensor.antenna.type = new String(""); + foo.sensor.feed = new FeedInfo(); + foo.sensor.feed.name = new String(""); + foo.sensor.feed.polarization = new String(""); + foo.sensor.feed.freq_range = new FreqRange(); + foo.sensor.feed.freq_range.values = new double[0]; + foo.sensor.mission = new String(""); + foo.sensor.rx = new String(""); + foo.ext_path_delays = new PathDelay[0]; + foo.capabilities = new RFCapabilities(); + foo.capabilities.freq_range = new FreqRange(); + foo.capabilities.freq_range.values = new double[0]; + foo.capabilities.bw_range = new FreqRange(); + foo.capabilities.bw_range.values = new double[0]; + foo.additional_info = new CF.DataType[0]; + return foo; + }; + public void set_current_rf_input(String port_name, RFInfoPkt data) throws FrontendException, BadParameterException, NotSupportedException { + id = data.rf_flow_id; + }; + } + public class digitaltuner_port_sample implements frontend.DigitalTunerDelegate { + + public double bw; + + public digitaltuner_port_sample() { + bw = 0; + }; + public void set_bw(double in_bw) { + bw = in_bw; + }; + public double get_bw() { + return bw; + }; + public String getTunerType(String id) throws FrontendException, BadParameterException, NotSupportedException { + return ""; + }; + + public boolean getTunerDeviceControl(String id) throws FrontendException, BadParameterException, NotSupportedException { + return false; + }; + + public String getTunerGroupId(String id) throws FrontendException, BadParameterException, NotSupportedException { + return ""; + }; + + public String getTunerRfFlowId(String id) throws FrontendException, BadParameterException, NotSupportedException { + return ""; + }; + + public CF.DataType[] getTunerStatus(String id) throws FrontendException, BadParameterException, NotSupportedException { + return null; + 
}; + + public void setTunerCenterFrequency(String id, double freq) throws FrontendException, BadParameterException, NotSupportedException {}; + + public double getTunerCenterFrequency(String id) throws FrontendException, BadParameterException, NotSupportedException { + return 0.0; + } + + public void setTunerBandwidth(String id, double _bw) throws FrontendException, BadParameterException, NotSupportedException { + bw = _bw; + }; + + public double getTunerBandwidth(String id) throws FrontendException, BadParameterException, NotSupportedException { + return bw; + } + + public void setTunerAgcEnable(String id, boolean enable) throws FrontendException, BadParameterException, NotSupportedException {}; + + public boolean getTunerAgcEnable(String id) throws FrontendException, BadParameterException, NotSupportedException { + return true; + } + + public void setTunerGain(String id, float gain) throws FrontendException, BadParameterException, NotSupportedException {}; + + public float getTunerGain(String id) throws FrontendException, BadParameterException, NotSupportedException { + return (float)0.0; + } + + public void setTunerReferenceSource(String id, int source) throws FrontendException, BadParameterException, NotSupportedException {}; + + public int getTunerReferenceSource(String id) throws FrontendException, BadParameterException, NotSupportedException { + return 0; + } + + public void setTunerEnable(String id, boolean enable) throws FrontendException, BadParameterException, NotSupportedException {}; + + public boolean getTunerEnable(String id) throws FrontendException, BadParameterException, NotSupportedException { + return true; + } + + public void setTunerOutputSampleRate(String id, double sr) throws FrontendException, BadParameterException, NotSupportedException {}; + + public double getTunerOutputSampleRate(String id) throws FrontendException, BadParameterException, NotSupportedException { + return 0.0; + } + } + + @Test + public void testGPSGetter() { + String[] 
args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + gps_port_sample input_parent = new gps_port_sample(); + InGPSPort input_port_1 = new InGPSPort("input_1", input_parent); + OutGPSPort output_port = new OutGPSPort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + input_parent.set_source_id("newvalue"); + + Assert.assertThrows(PortCallError.class, () -> output_port.gps_info()); + Assert.assertThrows(PortCallError.class, () -> output_port._get_gps_info("hello")); + + try { + output_port.connectPort(input_port_1._this(), "hello"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + + GPSInfo gpsinfo = null; + try { + gpsinfo = output_port.gps_info(); + Assert.assertEquals(gpsinfo.source_id, "newvalue"); + gpsinfo = output_port._get_gps_info("hello"); + Assert.assertEquals(gpsinfo.source_id, "newvalue"); + Assert.assertThrows(PortCallError.class, () -> output_port._get_gps_info("foo")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + + gps_port_sample input_parent_2 = new gps_port_sample(); + input_parent_2.set_source_id("newvalue_2"); + InGPSPort input_port_2 = new InGPSPort("input_2", input_parent); + try { + byte[] oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + try { + output_port.connectPort(input_port_2._this(), "foo"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + Assert.assertThrows(PortCallError.class, () -> output_port.gps_info()); + try { + gpsinfo = output_port._get_gps_info("hello"); + Assert.assertEquals(gpsinfo.source_id, "newvalue"); + Assert.assertThrows(PortCallError.class, () -> output_port._get_gps_info("something")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + output_port.disconnectPort("hello"); + } + + @Test + public void 
testGPSSetter() + { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + gps_port_sample input_parent = new gps_port_sample(); + gps_port_sample input_parent_2 = new gps_port_sample(); + InGPSPort input_port_1 = new InGPSPort("input_1", input_parent); + InGPSPort input_port_2 = new InGPSPort("input_2", input_parent_2); + OutGPSPort output_port = new OutGPSPort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + Assert.assertEquals(input_parent.get_source_id(), "original"); + + GPSInfo gpsinfo = new GPSInfo(); + gpsinfo.additional_info = new CF.DataType[0]; + gpsinfo.mode = new String(""); + gpsinfo.rf_flow_id = new String(""); + gpsinfo.source_id = new String("newvalue"); + gpsinfo.status_message = new String(""); + gpsinfo.timestamp = new BULKIO.PrecisionUTCTime(); + + try { + output_port.gps_info(gpsinfo); + Assert.assertEquals(input_parent.get_source_id(), "original"); + Assert.assertThrows(PortCallError.class, () -> output_port.gps_info(gpsinfo, "hello")); + + output_port.connectPort(input_port_1._this(), "hello"); + + output_port.gps_info(gpsinfo); + Assert.assertEquals(input_parent.get_source_id(), "newvalue"); + + gpsinfo.source_id = new String("newvalue_2"); + output_port.gps_info(gpsinfo, "hello"); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_2"); + + Assert.assertThrows(PortCallError.class, () -> output_port.gps_info(gpsinfo, "foo")); + + gpsinfo.source_id = new String("newvalue_3"); + output_port.connectPort(input_port_2._this(), "foo"); + + output_port.gps_info(gpsinfo); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_3"); + Assert.assertEquals(input_parent_2.get_source_id(), "newvalue_3"); + gpsinfo.source_id = new String("newvalue_4"); + output_port.gps_info(gpsinfo, "hello"); + 
Assert.assertEquals(input_parent.get_source_id(), "newvalue_4"); + Assert.assertEquals(input_parent_2.get_source_id(), "newvalue_3"); + } catch (InvalidPort | OccupiedPort | PortCallError e) { + Assert.assertTrue(false); + }; + } + + @Test + public void testNavGetter() { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + nav_port_sample input_parent = new nav_port_sample(); + InNavDataPort input_port_1 = new InNavDataPort("input_1", input_parent); + OutNavDataPort output_port = new OutNavDataPort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + input_parent.set_source_id("newvalue"); + + Assert.assertThrows(PortCallError.class, () -> output_port.nav_packet()); + Assert.assertThrows(PortCallError.class, () -> output_port._get_nav_packet("hello")); + + try { + output_port.connectPort(input_port_1._this(), "hello"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + + NavigationPacket navdata = null; + try { + navdata = output_port.nav_packet(); + Assert.assertEquals(navdata.source_id, "newvalue"); + navdata = output_port._get_nav_packet("hello"); + Assert.assertEquals(navdata.source_id, "newvalue"); + Assert.assertThrows(PortCallError.class, () -> output_port._get_nav_packet("foo")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + + nav_port_sample input_parent_2 = new nav_port_sample(); + input_parent_2.set_source_id("newvalue_2"); + InNavDataPort input_port_2 = new InNavDataPort("input_2", input_parent); + try { + byte[] oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + try { + output_port.connectPort(input_port_2._this(), "foo"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + Assert.assertThrows(PortCallError.class, () -> output_port.nav_packet()); + 
try { + navdata = output_port._get_nav_packet("hello"); + Assert.assertEquals(navdata.source_id, "newvalue"); + Assert.assertThrows(PortCallError.class, () -> output_port._get_nav_packet("something")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + output_port.disconnectPort("hello"); + } + + @Test + public void testNavSetter() + { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + nav_port_sample input_parent = new nav_port_sample(); + nav_port_sample input_parent_2 = new nav_port_sample(); + InNavDataPort input_port_1 = new InNavDataPort("input_1", input_parent); + InNavDataPort input_port_2 = new InNavDataPort("input_2", input_parent_2); + OutNavDataPort output_port = new OutNavDataPort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + Assert.assertEquals(input_parent.get_source_id(), "original"); + + NavigationPacket navdata = new NavigationPacket(); + navdata.acceleration = new AccelerationInfo(); + navdata.acceleration.coordinate_system = new String(""); + navdata.acceleration.datum = new String(""); + navdata.additional_info = new CF.DataType[0]; + navdata.attitude = new AttitudeInfo(); + navdata.cposition = new CartesianPositionInfo(); + navdata.cposition.datum = new String(""); + navdata.position = new PositionInfo(); + navdata.position.datum = new String(""); + navdata.rf_flow_id = new String(""); + navdata.source_id = new String("newvalue"); + navdata.timestamp = new BULKIO.PrecisionUTCTime(); + navdata.velocity = new VelocityInfo(); + navdata.velocity.coordinate_system = new String(""); + navdata.velocity.datum = new String(""); + + try { + output_port.nav_packet(navdata); + Assert.assertEquals(input_parent.get_source_id(), "original"); + Assert.assertThrows(PortCallError.class, () -> 
output_port.nav_packet(navdata, "hello")); + + output_port.connectPort(input_port_1._this(), "hello"); + + output_port.nav_packet(navdata); + Assert.assertEquals(input_parent.get_source_id(), "newvalue"); + + navdata.source_id = new String("newvalue_2"); + output_port.nav_packet(navdata, "hello"); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_2"); + + Assert.assertThrows(PortCallError.class, () -> output_port.nav_packet(navdata, "foo")); + + navdata.source_id = new String("newvalue_3"); + output_port.connectPort(input_port_2._this(), "foo"); + + output_port.nav_packet(navdata); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_3"); + Assert.assertEquals(input_parent_2.get_source_id(), "newvalue_3"); + navdata.source_id = new String("newvalue_4"); + output_port.nav_packet(navdata, "hello"); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_4"); + Assert.assertEquals(input_parent_2.get_source_id(), "newvalue_3"); + } catch (InvalidPort | OccupiedPort | PortCallError e) { + Assert.assertTrue(false); + }; + } + + @Test + public void testRFInfoGetter() { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + rfinfo_port_sample input_parent = new rfinfo_port_sample(); + InRFInfoPort input_port_1 = new InRFInfoPort("input_1", input_parent); + OutRFInfoPort output_port = new OutRFInfoPort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + input_parent.set_source_id("newvalue"); + + Assert.assertThrows(PortCallError.class, () -> output_port.rfinfo_pkt()); + Assert.assertThrows(PortCallError.class, () -> output_port._get_rfinfo_pkt("hello")); + + try { + output_port.connectPort(input_port_1._this(), "hello"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + + RFInfoPkt rfinfo = null; + try { + rfinfo = output_port.rfinfo_pkt(); 
+ Assert.assertEquals(rfinfo.rf_flow_id, "newvalue"); + rfinfo = output_port._get_rfinfo_pkt("hello"); + Assert.assertEquals(rfinfo.rf_flow_id, "newvalue"); + Assert.assertThrows(PortCallError.class, () -> output_port._get_rfinfo_pkt("foo")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + + rfinfo_port_sample input_parent_2 = new rfinfo_port_sample(); + input_parent_2.set_source_id("newvalue_2"); + InRFInfoPort input_port_2 = new InRFInfoPort("input_2", input_parent); + try { + byte[] oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + try { + output_port.connectPort(input_port_2._this(), "foo"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + Assert.assertThrows(PortCallError.class, () -> output_port.rfinfo_pkt()); + try { + rfinfo = output_port._get_rfinfo_pkt("hello"); + Assert.assertEquals(rfinfo.rf_flow_id, "newvalue"); + Assert.assertThrows(PortCallError.class, () -> output_port._get_rfinfo_pkt("something")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + output_port.disconnectPort("hello"); + } + + @Test + public void testRFInfoSetter() + { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + rfinfo_port_sample input_parent = new rfinfo_port_sample(); + rfinfo_port_sample input_parent_2 = new rfinfo_port_sample(); + InRFInfoPort input_port_1 = new InRFInfoPort("input_1", input_parent); + InRFInfoPort input_port_2 = new InRFInfoPort("input_2", input_parent_2); + OutRFInfoPort output_port = new OutRFInfoPort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + Assert.assertEquals(input_parent.get_source_id(), "original"); + + RFInfoPkt rfinfo = new RFInfoPkt(); + rfinfo.rf_flow_id = new String("newvalue"); + 
rfinfo.sensor = new SensorInfo(); + rfinfo.sensor.collector = new String(""); + rfinfo.sensor.antenna = new AntennaInfo(); + rfinfo.sensor.antenna.description = new String(""); + rfinfo.sensor.antenna.name = new String(""); + rfinfo.sensor.antenna.size = new String(""); + rfinfo.sensor.antenna.type = new String(""); + rfinfo.sensor.feed = new FeedInfo(); + rfinfo.sensor.feed.name = new String(""); + rfinfo.sensor.feed.polarization = new String(""); + rfinfo.sensor.feed.freq_range = new FreqRange(); + rfinfo.sensor.feed.freq_range.values = new double[0]; + rfinfo.sensor.mission = new String(""); + rfinfo.sensor.rx = new String(""); + rfinfo.ext_path_delays = new PathDelay[0]; + rfinfo.capabilities = new RFCapabilities(); + rfinfo.capabilities.freq_range = new FreqRange(); + rfinfo.capabilities.freq_range.values = new double[0]; + rfinfo.capabilities.bw_range = new FreqRange(); + rfinfo.capabilities.bw_range.values = new double[0]; + rfinfo.additional_info = new CF.DataType[0]; + + try { + output_port.rfinfo_pkt(rfinfo); + Assert.assertEquals(input_parent.get_source_id(), "original"); + Assert.assertThrows(PortCallError.class, () -> output_port.rfinfo_pkt(rfinfo, "hello")); + + output_port.connectPort(input_port_1._this(), "hello"); + + output_port.rfinfo_pkt(rfinfo); + Assert.assertEquals(input_parent.get_source_id(), "newvalue"); + + rfinfo.rf_flow_id = new String("newvalue_2"); + output_port.rfinfo_pkt(rfinfo, "hello"); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_2"); + + Assert.assertThrows(PortCallError.class, () -> output_port.rfinfo_pkt(rfinfo, "foo")); + + rfinfo.rf_flow_id = new String("newvalue_3"); + output_port.connectPort(input_port_2._this(), "foo"); + + output_port.rfinfo_pkt(rfinfo); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_3"); + Assert.assertEquals(input_parent_2.get_source_id(), "newvalue_3"); + rfinfo.rf_flow_id = new String("newvalue_4"); + output_port.rfinfo_pkt(rfinfo, "hello"); + 
Assert.assertEquals(input_parent.get_source_id(), "newvalue_4"); + Assert.assertEquals(input_parent_2.get_source_id(), "newvalue_3"); + } catch (InvalidPort | OccupiedPort | PortCallError e) { + Assert.assertTrue(false); + }; + } + + @Test + public void testRFSourceGetter() { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + rfsource_port_sample input_parent = new rfsource_port_sample(); + InRFSourcePort input_port_1 = new InRFSourcePort("input_1", input_parent); + OutRFSourcePort output_port = new OutRFSourcePort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + input_parent.set_source_id("newvalue"); + + Assert.assertThrows(PortCallError.class, () -> output_port.current_rf_input()); + Assert.assertThrows(PortCallError.class, () -> output_port._get_current_rf_input("hello")); + + try { + output_port.connectPort(input_port_1._this(), "hello"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + + RFInfoPkt rfinfo = null; + try { + rfinfo = output_port.current_rf_input(); + Assert.assertEquals(rfinfo.rf_flow_id, "newvalue"); + rfinfo = output_port._get_current_rf_input("hello"); + Assert.assertEquals(rfinfo.rf_flow_id, "newvalue"); + Assert.assertThrows(PortCallError.class, () -> output_port._get_current_rf_input("foo")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + + rfsource_port_sample input_parent_2 = new rfsource_port_sample(); + input_parent_2.set_source_id("newvalue_2"); + InRFSourcePort input_port_2 = new InRFSourcePort("input_2", input_parent); + try { + byte[] oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + try { + output_port.connectPort(input_port_2._this(), "foo"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + 
Assert.assertThrows(PortCallError.class, () -> output_port.current_rf_input()); + try { + rfinfo = output_port._get_current_rf_input("hello"); + Assert.assertEquals(rfinfo.rf_flow_id, "newvalue"); + Assert.assertThrows(PortCallError.class, () -> output_port._get_current_rf_input("something")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + output_port.disconnectPort("hello"); + } + + @Test + public void testRFSourceSetter() + { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + rfsource_port_sample input_parent = new rfsource_port_sample(); + rfsource_port_sample input_parent_2 = new rfsource_port_sample(); + InRFSourcePort input_port_1 = new InRFSourcePort("input_1", input_parent); + InRFSourcePort input_port_2 = new InRFSourcePort("input_2", input_parent_2); + OutRFSourcePort output_port = new OutRFSourcePort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + Assert.assertEquals(input_parent.get_source_id(), "original"); + + RFInfoPkt rfinfo = new RFInfoPkt(); + rfinfo.rf_flow_id = new String("newvalue"); + rfinfo.sensor = new SensorInfo(); + rfinfo.sensor.collector = new String(""); + rfinfo.sensor.antenna = new AntennaInfo(); + rfinfo.sensor.antenna.description = new String(""); + rfinfo.sensor.antenna.name = new String(""); + rfinfo.sensor.antenna.size = new String(""); + rfinfo.sensor.antenna.type = new String(""); + rfinfo.sensor.feed = new FeedInfo(); + rfinfo.sensor.feed.name = new String(""); + rfinfo.sensor.feed.polarization = new String(""); + rfinfo.sensor.feed.freq_range = new FreqRange(); + rfinfo.sensor.feed.freq_range.values = new double[0]; + rfinfo.sensor.mission = new String(""); + rfinfo.sensor.rx = new String(""); + rfinfo.ext_path_delays = new PathDelay[0]; + rfinfo.capabilities = new 
RFCapabilities(); + rfinfo.capabilities.freq_range = new FreqRange(); + rfinfo.capabilities.freq_range.values = new double[0]; + rfinfo.capabilities.bw_range = new FreqRange(); + rfinfo.capabilities.bw_range.values = new double[0]; + rfinfo.additional_info = new CF.DataType[0]; + + try { + output_port.current_rf_input(rfinfo); + Assert.assertEquals(input_parent.get_source_id(), "original"); + Assert.assertThrows(PortCallError.class, () -> output_port.current_rf_input(rfinfo, "hello")); + + output_port.connectPort(input_port_1._this(), "hello"); + + output_port.current_rf_input(rfinfo); + Assert.assertEquals(input_parent.get_source_id(), "newvalue"); + + rfinfo.rf_flow_id = new String("newvalue_2"); + output_port.current_rf_input(rfinfo, "hello"); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_2"); + + Assert.assertThrows(PortCallError.class, () -> output_port.current_rf_input(rfinfo, "foo")); + + rfinfo.rf_flow_id = new String("newvalue_3"); + output_port.connectPort(input_port_2._this(), "foo"); + + output_port.current_rf_input(rfinfo); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_3"); + Assert.assertEquals(input_parent_2.get_source_id(), "newvalue_3"); + rfinfo.rf_flow_id = new String("newvalue_4"); + output_port.current_rf_input(rfinfo, "hello"); + Assert.assertEquals(input_parent.get_source_id(), "newvalue_4"); + Assert.assertEquals(input_parent_2.get_source_id(), "newvalue_3"); + } catch (InvalidPort | OccupiedPort | PortCallError e) { + Assert.assertTrue(false); + }; + } + + @Test + public void testTunerGetter() { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + digitaltuner_port_sample input_parent = new digitaltuner_port_sample(); + InDigitalTunerPort input_port_1 = new InDigitalTunerPort("input_1", input_parent); + OutDigitalTunerPort output_port = new OutDigitalTunerPort("output"); + try { + byte[] oid = 
rootpoa.activate_object(input_port_1); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + input_parent.set_bw(1); + + Assert.assertThrows(PortCallError.class, () -> output_port.getTunerBandwidth("first_tuner")); + Assert.assertThrows(PortCallError.class, () -> output_port.getTunerBandwidth("first_tuner", "hello")); + + try { + output_port.connectPort(input_port_1._this(), "hello"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + + double bw = 0; + try { + bw = output_port.getTunerBandwidth("first_tuner"); + Assert.assertTrue(bw==1); + bw = output_port.getTunerBandwidth("first_tuner", "hello"); + Assert.assertTrue(bw==1); + Assert.assertThrows(PortCallError.class, () -> output_port.getTunerBandwidth("first_tuner", "foo")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + + digitaltuner_port_sample input_parent_2 = new digitaltuner_port_sample(); + input_parent_2.set_bw(2); + InDigitalTunerPort input_port_2 = new InDigitalTunerPort("input_2", input_parent); + try { + byte[] oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + try { + output_port.connectPort(input_port_2._this(), "foo"); + } catch (InvalidPort | OccupiedPort e) { + Assert.assertTrue(false); + }; + Assert.assertThrows(PortCallError.class, () -> output_port.getTunerBandwidth("first_tuner")); + try { + bw = output_port.getTunerBandwidth("first_tuner", "hello"); + Assert.assertTrue(bw==1); + Assert.assertThrows(PortCallError.class, () -> output_port.getTunerBandwidth("first_tuner", "something")); + } catch (PortCallError e) { + Assert.assertTrue(false); + }; + output_port.disconnectPort("hello"); + } + + @Test + public void testTunerSetter() + { + String[] args = null; + final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, null ); + final POA rootpoa = org.ossie.corba.utils.RootPOA(); + + digitaltuner_port_sample input_parent = new digitaltuner_port_sample(); + digitaltuner_port_sample 
input_parent_2 = new digitaltuner_port_sample(); + InDigitalTunerPort input_port_1 = new InDigitalTunerPort("input_1", input_parent); + InDigitalTunerPort input_port_2 = new InDigitalTunerPort("input_2", input_parent_2); + OutDigitalTunerPort output_port = new OutDigitalTunerPort("output"); + try { + byte[] oid = rootpoa.activate_object(input_port_1); + oid = rootpoa.activate_object(input_port_2); + } catch (ServantAlreadyActive | WrongPolicy e) { + } + + Assert.assertTrue(input_parent.get_bw()==0); + double bw = 1; + + try { + output_port.setTunerBandwidth("first_tuner", bw); + Assert.assertTrue(input_parent.get_bw()==0); + final double testbw = bw; + Assert.assertThrows(PortCallError.class, () -> output_port.setTunerBandwidth("first_tuner", testbw, "hello")); + + output_port.connectPort(input_port_1._this(), "hello"); + + output_port.setTunerBandwidth("first_tuner", bw); + Assert.assertTrue(input_parent.get_bw() == bw); + + bw = 2; + output_port.setTunerBandwidth("first_tuner", bw, "hello"); + Assert.assertTrue(input_parent.get_bw() == bw); + + final double testbw_2 = bw; + Assert.assertThrows(PortCallError.class, () -> output_port.setTunerBandwidth("first_tuner", testbw_2, "foo")); + + bw = 3; + output_port.connectPort(input_port_2._this(), "foo"); + + output_port.setTunerBandwidth("first_tuner", bw); + Assert.assertTrue(input_parent.get_bw() == bw); + Assert.assertTrue(input_parent_2.get_bw() == bw); + bw = 4; + output_port.setTunerBandwidth("first_tuner", bw, "hello"); + Assert.assertTrue(input_parent.get_bw() == bw); + Assert.assertTrue(input_parent_2.get_bw() == 3); + } catch (InvalidPort | OccupiedPort | PortCallError e) { + Assert.assertTrue(false); + }; + } + +/*void PortsTest::testTunerSetter() +{ + tuner_port_sample input_parent; + tuner_port_sample input_parent_2; + frontend::InDigitalScanningTunerPort *input_port_1 = new frontend::InDigitalScanningTunerPort("input_1", &input_parent); + frontend::OutDigitalTunerPort *output_port = new 
frontend::OutDigitalTunerPort("output"); + + CPPUNIT_ASSERT(input_parent.get_bw()==0); + + double bw = 1; + output_port->setTunerBandwidth("first_tuner", bw); + CPPUNIT_ASSERT(input_parent.get_bw() == 0); + CPPUNIT_ASSERT_THROW(output_port->setTunerBandwidth("first_tuner", bw, "hello"), redhawk::PortCallError); + + output_port->connectPort(input_port_1->_this(), "hello"); + + output_port->setTunerBandwidth("first_tuner", bw); + CPPUNIT_ASSERT(input_parent.get_bw() == bw); + + bw = 2; + output_port->setTunerBandwidth("first_tuner", bw, "hello"); + CPPUNIT_ASSERT(input_parent.get_bw() == bw); + + CPPUNIT_ASSERT_THROW(output_port->setTunerBandwidth("first_tuner", bw, "foo"), redhawk::PortCallError); + + bw = 3; + frontend::InDigitalScanningTunerPort *input_port_2 = new frontend::InDigitalScanningTunerPort("input_2", &input_parent_2); + output_port->connectPort(input_port_2->_this(), "foo"); + + output_port->setTunerBandwidth("first_tuner", bw); + CPPUNIT_ASSERT(input_parent.get_bw() == bw); + CPPUNIT_ASSERT(input_parent_2.get_bw() == bw); + + bw = 4; + output_port->setTunerBandwidth("first_tuner", bw, "hello"); + CPPUNIT_ASSERT(input_parent.get_bw() == 4); + CPPUNIT_ASSERT(input_parent_2.get_bw() == 3); + + CPPUNIT_ASSERT_THROW(output_port->setTunerBandwidth("first_tuner", bw, "something"), redhawk::PortCallError); + + output_port->disconnectPort("hello"); + + input_port_1->_remove_ref(); +} +*/ + +} diff --git a/frontendInterfaces/libsrc/testing/tests/java/ValidateRequestTest.java b/frontendInterfaces/libsrc/testing/tests/java/ValidateRequestTest.java new file mode 100644 index 000000000..a10350fdf --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/ValidateRequestTest.java @@ -0,0 +1,297 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; +import frontend.*; +import FRONTEND.BadParameterException; +import java.util.ArrayList; +import org.omg.CORBA.ORB; + +@RunWith(JUnit4.class) +public class ValidateRequestTest +{ + + @Test + public void testSRI() { + FETypes.frontend_tuner_allocation_struct request = new FETypes.frontend_tuner_allocation_struct(); + BULKIO.StreamSRI upstream_sri = new BULKIO.StreamSRI(); + request.center_frequency.setValue(100e6); + request.bandwidth.setValue(1e6); + request.sample_rate.setValue(2e6); + upstream_sri.xdelta = 1/2e6; + upstream_sri.mode = 0; + upstream_sri.keywords = new CF.DataType[2]; + CF.DataType chanrf = new CF.DataType(); + CF.DataType chanbw = new CF.DataType(); + chanrf.id = "CHAN_RF"; + chanrf.value = ORB.init().create_any(); + chanbw.id = "FRONTEND::BANDWIDTH"; + chanbw.value = ORB.init().create_any(); + int cf_idx = 0; + int bw_idx = 1; + double cf, bw; + cf = 100e6; + chanrf.value.insert_double(cf); + upstream_sri.keywords[cf_idx] = chanrf; + bw = 1e6; + chanbw.value.insert_double(bw); + upstream_sri.keywords[bw_idx] = chanbw; + boolean retval = false; + try { + retval = FrontendTunerDevice.validateRequestVsSRI(request, upstream_sri, false); + 
} catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 100.49e6; + upstream_sri.keywords[cf_idx].value.insert_double(cf); + try { + retval = FrontendTunerDevice.validateRequestVsSRI(request, upstream_sri, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 99.51e6; + upstream_sri.keywords[cf_idx].value.insert_double(cf); + try { + retval = FrontendTunerDevice.validateRequestVsSRI(request, upstream_sri, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 100.51e6; + upstream_sri.keywords[cf_idx].value.insert_double(cf); + try { + retval = FrontendTunerDevice.validateRequestVsSRI(request, upstream_sri, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertFalse(retval); + + cf = 99.49e6; + upstream_sri.keywords[cf_idx].value.insert_double(cf); + try { + retval = FrontendTunerDevice.validateRequestVsSRI(request, upstream_sri, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertFalse(retval); + } + + @Test + public void testDeviceSRI() { + FETypes.frontend_tuner_allocation_struct request = new FETypes.frontend_tuner_allocation_struct(); + BULKIO.StreamSRI upstream_sri = new BULKIO.StreamSRI(); + request.center_frequency.setValue(100e6); + request.bandwidth.setValue(1e6); + request.sample_rate.setValue(2e6); + request.tuner_type.setValue("RX"); + upstream_sri.xdelta = 1/2e6; + upstream_sri.mode = 0; + upstream_sri.keywords = new CF.DataType[2]; + CF.DataType chanrf = new CF.DataType(); + CF.DataType chanbw = new CF.DataType(); + chanrf.id = "CHAN_RF"; + chanrf.value = ORB.init().create_any(); + chanbw.id = "FRONTEND::BANDWIDTH"; + chanbw.value = ORB.init().create_any(); + int cf_idx = 0; + int bw_idx = 1; + double cf, bw; + cf = 100e6; + chanrf.value.insert_double(cf); + upstream_sri.keywords[cf_idx] = chanrf; + bw = 
1e6; + chanbw.value.insert_double(bw); + upstream_sri.keywords[bw_idx] = chanbw; + double min_dev_cf = 99e6; + double max_dev_cf = 101e6; + double max_dev_bw = 3e6; + double max_dev_sr = 6e6; + boolean retval = false; + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 100.49e6; + upstream_sri.keywords[cf_idx].value.insert_double(cf); + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 99.51e6; + upstream_sri.keywords[cf_idx].value.insert_double(cf); + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 100.51e6; + upstream_sri.keywords[cf_idx].value.insert_double(cf); + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertFalse(retval); + + cf = 99.49e6; + upstream_sri.keywords[cf_idx].value.insert_double(cf); + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, upstream_sri, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertFalse(retval); + } + + @Test + public void testRFInfo() { + FETypes.frontend_tuner_allocation_struct request = new FETypes.frontend_tuner_allocation_struct(); + FRONTEND.RFInfoPkt rfinfo = new FRONTEND.RFInfoPkt(); + request.center_frequency.setValue(100e6); + 
request.bandwidth.setValue(1e6); + request.sample_rate.setValue(2e6); + double cf, bw; + cf = 100e6; + bw = 1e6; + rfinfo.rf_center_freq = cf; + rfinfo.rf_bandwidth = bw; + boolean retval = false; + try { + retval = FrontendTunerDevice.validateRequestVsRFInfo(request, rfinfo, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 100.49e6; + rfinfo.rf_center_freq = cf; + try { + retval = FrontendTunerDevice.validateRequestVsRFInfo(request, rfinfo, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 99.51e6; + rfinfo.rf_center_freq = cf; + try { + retval = FrontendTunerDevice.validateRequestVsRFInfo(request, rfinfo, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 100.51e6; + rfinfo.rf_center_freq = cf; + try { + retval = FrontendTunerDevice.validateRequestVsRFInfo(request, rfinfo, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertFalse(retval); + + cf = 99.49e6; + rfinfo.rf_center_freq = cf; + try { + retval = FrontendTunerDevice.validateRequestVsRFInfo(request, rfinfo, false); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertFalse(retval); + } + + @Test + public void testDeviceRFInfo() { + FETypes.frontend_tuner_allocation_struct request = new FETypes.frontend_tuner_allocation_struct(); + FRONTEND.RFInfoPkt rfinfo = new FRONTEND.RFInfoPkt(); + request.center_frequency.setValue(100e6); + request.bandwidth.setValue(1e6); + request.sample_rate.setValue(2e6); + request.tuner_type.setValue("RX"); + double cf, bw; + cf = 100e6; + bw = 1e6; + rfinfo.rf_center_freq = cf; + rfinfo.rf_bandwidth = bw; + double min_dev_cf = 99e6; + double max_dev_cf = 101e6; + double max_dev_bw = 3e6; + double max_dev_sr = 6e6; + boolean retval = false; + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, rfinfo, 
false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 100.49e6; + rfinfo.rf_center_freq = cf; + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 99.51e6; + rfinfo.rf_center_freq = cf; + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertTrue(retval); + + cf = 100.51e6; + rfinfo.rf_center_freq = cf; + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertFalse(retval); + + cf = 99.49e6; + rfinfo.rf_center_freq = cf; + try { + retval = FrontendTunerDevice.validateRequestVsDevice(request, rfinfo, false, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr); + } catch (FRONTEND.BadParameterException e) { + retval = false; + } + Assert.assertFalse(retval); + } +} diff --git a/frontendInterfaces/libsrc/testing/tests/java/build.xml b/frontendInterfaces/libsrc/testing/tests/java/build.xml new file mode 100644 index 000000000..9207af8b1 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/build.xml @@ -0,0 +1,57 @@ + + + + + Builds and runs the JUnit tests and produces XML output + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/frontendInterfaces/libsrc/testing/tests/java/log4j_config.txt b/frontendInterfaces/libsrc/testing/tests/java/log4j_config.txt new file mode 100644 index 000000000..d8d5d84e4 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/log4j_config.txt @@ -0,0 +1,5 @@ 
+log4j.rootLogger=WARN,stdout + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%m%n diff --git a/frontendInterfaces/libsrc/testing/tests/java/utils/Assert.java b/frontendInterfaces/libsrc/testing/tests/java/utils/Assert.java new file mode 100644 index 000000000..f267896ac --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/utils/Assert.java @@ -0,0 +1,43 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +/** + * Extended JUnit assertion class that adds an assert for checking that an + * exception is thrown by an expression. 
+ */ +public class Assert extends org.junit.Assert { + private Assert() + { + } + + public static void assertThrows(Class exception, RunnableWithException runnable) + { + try { + runnable.run(); + } catch (Exception exc) { + assertTrue("expected exception:<"+ exception.getName() + + "> but got:<" + exc.getClass().getName() + ">", + exception.isInstance(exc)); + return; + } + fail("exception not raised"); + } +}; diff --git a/frontendInterfaces/libsrc/testing/tests/java/utils/ChainFilter.java b/frontendInterfaces/libsrc/testing/tests/java/utils/ChainFilter.java new file mode 100644 index 000000000..b4d51f5f6 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/utils/ChainFilter.java @@ -0,0 +1,63 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.runner.Description; +import org.junit.runner.manipulation.Filter; + +/** + * JUnit test filter that combines multiple filters, selecting any test that + * satisfies one of the filters. 
+ */ +public class ChainFilter extends Filter { + public void addFilter(Filter filter) + { + filters.add(filter); + } + + @Override + public boolean shouldRun(Description description) + { + for (Filter filter : this.filters) { + if (filter.shouldRun(description)) { + return true; + } + } + return false; + } + + @Override + public String describe() + { + String result = ""; + for (Filter filter : this.filters) { + if (!result.isEmpty()) { + result = result + ", "; + } + result += filter.describe(); + } + return "[" + result + "]"; + } + + private List filters = new ArrayList<>(); +} diff --git a/frontendInterfaces/libsrc/testing/tests/java/utils/RunnableWithException.java b/frontendInterfaces/libsrc/testing/tests/java/utils/RunnableWithException.java new file mode 100644 index 000000000..7bdb41b8b --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/utils/RunnableWithException.java @@ -0,0 +1,30 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +/** + * Equivalent to java.lang.Runnable, but declared to throw all exceptions. 
This + * is useful for testing that the correct exception is thrown without going + * through a lot of boilerplate try/catch or JUnit's expected exceptions. + */ +@FunctionalInterface +public interface RunnableWithException { + public void run() throws Exception; +} diff --git a/frontendInterfaces/libsrc/testing/tests/java/utils/TestFilter.java b/frontendInterfaces/libsrc/testing/tests/java/utils/TestFilter.java new file mode 100644 index 000000000..82d76e9b0 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/utils/TestFilter.java @@ -0,0 +1,69 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +import org.junit.runner.Description; +import org.junit.runner.manipulation.Filter; + +/** + * JUnit test filter that selects a single test, or a suite of tests from a + * single class. 
+ */ +public class TestFilter extends Filter { + public TestFilter(Description description) + { + test = description; + } + + @Override + public boolean shouldRun(Description description) + { + // Suite-to-suite or test-to-test comparison + if (test.equals(description)) { + return true; + } + if (description.isTest()) { + for (Description child : test.getChildren()) { + if (child.equals(description)) { + return true; + } + } + } else { + for (Description child : description.getChildren()) { + if (shouldRun(child)) { + return true; + } + } + } + return false; + } + + @Override + public String describe() + { + if (test.isTest()) { + return "Method " + test.getDisplayName(); + } else { + return "Class " + test.getDisplayName(); + } + } + + private Description test; +} diff --git a/frontendInterfaces/libsrc/testing/tests/java/utils/TextListener.java b/frontendInterfaces/libsrc/testing/tests/java/utils/TextListener.java new file mode 100644 index 000000000..5e02c378e --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/java/utils/TextListener.java @@ -0,0 +1,107 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +package utils; + +import java.io.PrintStream; +import java.text.NumberFormat; + +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.notification.Failure; +import org.junit.runner.notification.RunListener; + +/** + * JUnit RunListener to provide similar output to CppUnit and Python: mainly, + * printing the name of each test as it runs with verbose mode enabled. + */ +public class TextListener extends RunListener { + public TextListener(boolean verbose) + { + this.verbose = verbose; + this.stream = System.out; + this.testPassed = false; + } + + public void testRunFinished(Result result) + { + stream.println(); + stream.println("Time: " + elapsedTimeAsString(result.getRunTime())); + + for (Failure failure : result.getFailures()) { + stream.println(failure.getTestHeader()); + stream.println(failure.getTrace()); + } + + if (result.wasSuccessful()) { + stream.println("OK (" + result.getRunCount() + " tests)"); + } else { + stream.println("FAILURES!!!"); + stream.println("Tests run: " + result.getRunCount() + ", Failures: " + result.getFailureCount()); + } + } + + public void testStarted(Description description) + { + if (verbose) { + stream.print(description.getDisplayName() + " : "); + } else { + stream.print("."); + } + testPassed = true; + } + + public void testIgnored(Description description) + { + if (verbose) { + stream.print("IGNORED"); + } else { + stream.print("I"); + } + testPassed = false; + } + + public void testFailure(Failure failure) + { + if (verbose) { + stream.print("FAILED"); + } else { + stream.print("F"); + } + testPassed = false; + } + + public void testFinished(Description description) + { + if (verbose) { + if (testPassed) { + stream.print("OK"); + } + stream.println(); + } + } + + protected String elapsedTimeAsString(long runTime) { + return NumberFormat.getInstance().format((double) runTime / 1000); + } + + private boolean verbose; + private PrintStream stream; + private boolean testPassed; +} 
diff --git a/frontendInterfaces/libsrc/testing/tests/log4j.ex1 b/frontendInterfaces/libsrc/testing/tests/log4j.ex1 new file mode 100644 index 000000000..2aa29ab84 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/log4j.ex1 @@ -0,0 +1,25 @@ + + +# Set root logger default levels and appender +log4j.rootLogger=INFO, CONSOLE + + +# Appender layout +log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout +log4j.appender.CONSOLE.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n + +log4j.appender.STDERR=org.apache.log4j.ConsoleAppender +log4j.appender.STDERR.layout=org.apache.log4j.PatternLayout +log4j.appender.STDERR.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +log4j.appender.STDERR.Threshold=WARN +log4j.appender.STDERR.Target=System.err + +log4j.appender.NULL=org.apache.log4j.FileAppender +log4j.appender.NULL.layout=org.apache.log4j.PatternLayout +log4j.appender.NULL.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n +log4j.appender.NULL.File=/dev/null + +log4j.category.CPP_Ports_i=ERROR,CONSOLE +log4j.logger.redhawk.bulkio.output=TRACE, CONSOLE +#log4j.additivity.redhawk.bulkio=true diff --git a/frontendInterfaces/libsrc/testing/tests/python/runtests.py b/frontendInterfaces/libsrc/testing/tests/python/runtests.py new file mode 100755 index 000000000..183987689 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/python/runtests.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest +import sys +import getopt + +from omniORB import CORBA + +from ossie.utils.log4py import logging +import ossie.utils.log4py.config + +class MultiTestLoader(unittest.TestLoader): + """ + Extend the default TestLoader to support a list of modules, at least for + the purposes of loadTestsFromName and loadTestsFromNames. + """ + def loadTestsFromName(self, name, modules): + if not isinstance(modules, list): + return unittest.TestLoader.loadTestsFromName(self, name, modules) + else: + # Try all modules in order, returning the first one that has + # matching tests + for mod in modules: + try: + return unittest.TestLoader.loadTestsFromName(self, name, mod) + except AttributeError: + pass + raise AttributeError("test '%s' not found" % (name,)) + +class TestProgram(object): + def __init__(self, modules=None): + if modules is None: + self.modules = [sys.modules['__main__']] + else: + self.modules = modules + self.verbosity = 1 + self.testRunner = None + + self.parseArgs(sys.argv[1:]) + self.createTests() + self.runTests() + + def createTests(self): + # Load tests, filtering by name (if arguments were given). 
+ loader = MultiTestLoader() + if self.testNames: + self.test = loader.loadTestsFromNames(self.testNames, self.modules) + else: + self.test = unittest.TestSuite() + for mod in self.modules: + self.test.addTests(loader.loadTestsFromModule(mod)) + + def parseArgs(self, argv): + import getopt + short_options = 'vx' + long_options = ['xunit', 'log-level=', 'log-config=', 'verbose'] + + xunit = False + log_level = None + log_config = None + options, args = getopt.getopt(argv, short_options, long_options) + for opt, value in options: + if opt in ('-v', '--verbose'): + self.verbosity = 2 + elif opt in ('-x', '--xunit'): + xunit = True + elif opt == '--log-level': + # Map from string names to Python levels (this does not appear to + # be built into Python's logging module) + log_level = ossie.utils.log4py.config._LEVEL_TRANS.get(value.upper(), None) + elif opt == '--log-config': + log_config = value + + + # If requested, use XML output (but the module is non-standard, so it + # may not be available). + if xunit: + try: + import xmlrunner + self.testRunner = xmlrunner.XMLTestRunner(verbosity=self.verbosity) + except ImportError: + print >>sys.stderr, 'WARNING: XML test runner module is not installed' + except TypeError: + # Maybe it didn't like the verbosity argument + self.testRunner = xmlrunner.XMLTestRunner() + + # If a log4j configuration file was given, read it. + if log_config: + ossie.utils.log4py.config.fileConfig(log_config) + else: + # Set up a simple configuration that logs on the console. + logging.basicConfig() + + # Apply the log level (can override config file). + if log_level: + logging.getLogger().setLevel(log_level) + + # Any additional arguments are test names + self.testNames = args + + def runTests(self): + # Many tests require CORBA, so initialize up front + orb = CORBA.ORB_init() + root_poa = orb.resolve_initial_references("RootPOA") + manager = root_poa._get_the_POAManager() + manager.activate() + + # Default: use text output. 
+ if not self.testRunner: + self.testRunner = unittest.TextTestRunner(verbosity=self.verbosity) + + result = self.testRunner.run(self.test) + + orb.shutdown(True) + + sys.exit(not result.wasSuccessful()) + +main = TestProgram + +if __name__ == '__main__': + import os + import glob + import imp + + # Find all Python files in the current directory and import them, adding + # their tests to the overall test suite. + modules = [] + for filename in glob.glob('*.py'): + modname, ext = os.path.splitext(filename) + fd = None + try: + fd, fn, desc = imp.find_module(modname) + mod = imp.load_module(modname, fd, fn, desc) + modules.append(mod) + finally: + if fd: + fd.close() + + main(modules) diff --git a/frontendInterfaces/libsrc/testing/tests/python/test_create_tuner_allocation.py b/frontendInterfaces/libsrc/testing/tests/python/test_create_tuner_allocation.py new file mode 100644 index 000000000..86d2394d0 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/python/test_create_tuner_allocation.py @@ -0,0 +1,57 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import unittest +import copy + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +import sys +sys.path.insert(0, '../../../../src/python') +sys.path.insert(0, '../../../../libsrc/python') + +from omniORB import any as _any +from ossie.cf import CF + +from redhawk.frontendInterfaces import FRONTEND +import tuner_device +import fe_types +from ossie.properties import struct_property + +class ValidateRequestTest(unittest.TestCase): + + def testFrontendAllocationStruct(self): + frontend_allocation = tuner_device.createTunerAllocation(tuner_type="RX_DIGITIZER",bandwidth=24.576, center_frequency=30000000, bandwidth_tolerance=100, allocation_id='hello') + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::sample_rate_tolerance'], 0.0) + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::group_id'], '') + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::tuner_type'], 'RX_DIGITIZER') + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::bandwidth'], 24.576) + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::rf_flow_id'], '') + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::sample_rate'], 0.0) + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::allocation_id'], 'hello') + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::device_control'], True) + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::center_frequency'], 30000000) + self.assertEquals(frontend_allocation['FRONTEND::tuner_allocation']['FRONTEND::tuner_allocation::bandwidth_tolerance'], 100) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git 
a/frontendInterfaces/libsrc/testing/tests/python/test_frontend_scanner_allocation.py b/frontendInterfaces/libsrc/testing/tests/python/test_frontend_scanner_allocation.py new file mode 100644 index 000000000..54ea68dda --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/python/test_frontend_scanner_allocation.py @@ -0,0 +1,52 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import unittest +import copy + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +import sys +sys.path.insert(0, '../../../../src/python') +sys.path.insert(0, '../../../../libsrc/python') + +from omniORB import any as _any +from ossie.cf import CF + +from redhawk.frontendInterfaces import FRONTEND +import tuner_device +import fe_types +from ossie.properties import struct_property + +class ValidateRequestTest(unittest.TestCase): + + def testFrontendAllocationStruct(self): + frontend_scanner_allocation = struct_property(id_="FRONTEND::scanner_allocation",name="frontend_scanner_allocation",structdef=fe_types.frontend_scanner_allocation,configurationkind=("allocation",),mode="writeonly",description="""Frontend Interfaces v2.0 scanner allocation structure""") + self.assertEquals(frontend_scanner_allocation.fields['FRONTEND::scanner_allocation::min_freq'][1].type_, 'double') + self.assertEquals(frontend_scanner_allocation.fields['FRONTEND::scanner_allocation::max_freq'][1].type_, 'double') + self.assertEquals(frontend_scanner_allocation.fields['FRONTEND::scanner_allocation::mode'][1].type_, 'string') + self.assertEquals(frontend_scanner_allocation.fields['FRONTEND::scanner_allocation::control_mode'][1].type_, 'string') + self.assertEquals(frontend_scanner_allocation.fields['FRONTEND::scanner_allocation::control_limit'][1].type_, 'double') + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/frontendInterfaces/libsrc/testing/tests/python/test_ports.py b/frontendInterfaces/libsrc/testing/tests/python/test_ports.py new file mode 100644 index 000000000..a98275e91 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/python/test_ports.py @@ -0,0 +1,519 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. 
+# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest +import copy + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +import sys +sys.path.insert(0, '../../../../src/python') +sys.path.insert(0, '../../../../libsrc/python') + +from omniORB import any as _any +from ossie.cf import CF + +from redhawk.frontendInterfaces import FRONTEND +import tuner_device +from input_ports import * +from output_ports import * +import fe_types +from ossie.resource import PortCallError + +class PortsTest(unittest.TestCase): + class gps_port_sample(gps_delegation): + def __init__(self): + self._id = "original" + def set_source_id(self, in_id): + self._id = in_id + def get_source_id(self): + return self._id; + def get_gps_info(self, port_name): + _gpsinfo = FRONTEND.GPSInfo('','','',1L,1L,1L,1.0,1.0,1.0,1.0,1,1.0,'',BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0),[]) + _gpsinfo.source_id = self._id + return _gpsinfo + def set_gps_info(self, port_name, gps_info): + self._id = gps_info.source_id; + def get_gps_time_pos(self, port_name): + _positioninfo = FRONTEND.PositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _gpstimepos = FRONTEND.GpsTimePos(_positioninfo,BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0)) + return _gpstimepos + def set_gps_time_pos(self, port_name, gps_time_pos): + pass + + class nav_port_sample(nav_delegation): + def 
__init__(self): + self._id = "original" + def set_source_id(self, in_id): + self._id = in_id + def get_source_id(self): + return self._id; + def get_nav_packet(self, port_name): + _time = BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0) + _positioninfo = FRONTEND.PositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _cartesianpos=FRONTEND.CartesianPositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _velocityinfo=FRONTEND.VelocityInfo(False,'DATUM_WGS84','',0.0,0.0,0.0) + _accelerationinfo=FRONTEND.AccelerationInfo(False,'DATUM_WGS84','',0.0,0.0,0.0) + _attitudeinfo=FRONTEND.AttitudeInfo(False,0.0,0.0,0.0) + _navpacket=FRONTEND.NavigationPacket(self._id,'',_positioninfo,_cartesianpos,_velocityinfo,_accelerationinfo,_attitudeinfo,_time,[]) + return _navpacket + def set_nav_packet(self, port_name, nav_info): + self._id = nav_info.source_id + + class rfinfo_port_sample(rfinfo_delegation): + def __init__(self): + self._id = "original" + def set_source_id(self, in_id): + self._id = in_id + def get_source_id(self): + return self._id; + def get_rf_flow_id(self, port_name): + return "" + def set_rf_flow_id(self, port_name, id): + pass + def get_rfinfo_pkt(self, port_name): + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + _rfinfopkt=FRONTEND.RFInfoPkt(self._id,0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + return _rfinfopkt + def set_rfinfo_pkt(self, port_name, pkt): + self._id = pkt.rf_flow_id + + class rfsource_port_sample(rfsource_delegation): + def __init__(self): + self._id = "original" + def set_rf_flow_id(self, in_id): + self._id = in_id + def get_rf_flow_id(self): + return self._id; + def get_available_rf_inputs(self, port_name): + return [] + def set_available_rf_inputs(self, port_name, inputs): + pass + def get_current_rf_input(self, port_name): + 
_antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + _rfinfopkt=FRONTEND.RFInfoPkt(self._id,0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + return _rfinfopkt + def set_current_rf_input(self, port_name, _input): + self._id = _input.rf_flow_id + + class tuner_port_sample(digital_scanning_tuner_delegation): + def __init__(self): + self._bw = 0 + def set_bw(self, in_bw): + self._bw= in_bw + def get_bw(self): + return self._bw + def setTunerBandwidth(self, id, in_bw): + self._bw = in_bw + def getTunerBandwidth(self, id): + return self._bw; + + def testGPSGetter(self): + input_parent = self.gps_port_sample() + input_port_1 = InGPSPort("input_1", input_parent) + output_port = OutGPSPort("output") + + input_parent.set_source_id("newvalue"); + + self.assertRaises(PortCallError, output_port._get_gps_info); + self.assertRaises(PortCallError, output_port._get_gps_info, 'hello') + + output_port.connectPort(input_port_1._this(), "hello") + + gpsinfo = output_port._get_gps_info() + self.assertEquals(gpsinfo.source_id, "newvalue") + gpsinfo = output_port._get_gps_info("hello") + self.assertEquals(gpsinfo.source_id, "newvalue") + self.assertRaises(PortCallError, output_port._get_gps_info, 'foo') + + input_parent_2 = self.gps_port_sample() + input_parent_2.set_source_id("newvalue_2") + input_port_2 = InGPSPort("input_2", input_parent_2) + + output_port.connectPort(input_port_2._this(), "foo") + self.assertRaises(PortCallError, output_port._get_gps_info) + gpsinfo = output_port._get_gps_info("hello") + self.assertEquals(gpsinfo.source_id, "newvalue") + self.assertRaises(PortCallError, output_port._get_gps_info, "something") + + output_port.disconnectPort("hello") + + def testGPSSetter(self): + input_parent = self.gps_port_sample() + input_parent_2 = 
self.gps_port_sample() + input_port_1 = InGPSPort("input_1", input_parent) + output_port = OutGPSPort("output") + + self.assertEquals(input_parent.get_source_id(),"original") + + gpsinfo = FRONTEND.GPSInfo('','','',1L,1L,1L,1.0,1.0,1.0,1.0,1,1.0,'',BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0),[]) + gpsinfo.source_id = "newvalue" + + output_port._set_gps_info(gpsinfo) + self.assertEquals(input_parent.get_source_id(), "original") + self.assertRaises(PortCallError, output_port._set_gps_info, gpsinfo, "hello") + + output_port.connectPort(input_port_1._this(), "hello") + + output_port._set_gps_info(gpsinfo) + self.assertEquals(input_parent.get_source_id(), "newvalue") + + gpsinfo.source_id = "newvalue_2"; + output_port._set_gps_info(gpsinfo, "hello") + self.assertEquals(input_parent.get_source_id(), "newvalue_2") + + self.assertRaises(PortCallError, output_port._set_gps_info, gpsinfo, "foo") + + gpsinfo.source_id = "newvalue_3" + input_port_2 = InGPSPort("input_2", input_parent_2) + output_port.connectPort(input_port_2._this(), "foo") + + output_port._set_gps_info(gpsinfo); + self.assertEquals(input_parent.get_source_id(), "newvalue_3") + self.assertEquals(input_parent_2.get_source_id(), "newvalue_3") + + gpsinfo.source_id = "newvalue_4"; + output_port._set_gps_info(gpsinfo, "hello") + self.assertEquals(input_parent.get_source_id(), "newvalue_4") + self.assertEquals(input_parent_2.get_source_id(), "newvalue_3") + + self.assertRaises(PortCallError, output_port._set_gps_info, gpsinfo, "something") + + output_port.disconnectPort("hello") + + def testNavGetter(self): + input_parent = self.nav_port_sample() + input_port_1 = InNavDataPort("input_1", input_parent) + output_port = OutNavDataPort("output") + + input_parent.set_source_id("newvalue"); + + self.assertRaises(PortCallError, output_port._get_nav_packet); + self.assertRaises(PortCallError, output_port._get_nav_packet, 'hello') + + output_port.connectPort(input_port_1._this(), "hello") + + navinfo = 
output_port._get_nav_packet() + self.assertEquals(navinfo.source_id, "newvalue") + navinfo = output_port._get_nav_packet("hello") + self.assertEquals(navinfo.source_id, "newvalue") + self.assertRaises(PortCallError, output_port._get_nav_packet, 'foo') + + input_parent_2 = self.nav_port_sample() + input_parent_2.set_source_id("newvalue_2") + input_port_2 = InNavDataPort("input_2", input_parent_2) + + output_port.connectPort(input_port_2._this(), "foo") + self.assertRaises(PortCallError, output_port._get_nav_packet) + navinfo = output_port._get_nav_packet("hello") + self.assertEquals(navinfo.source_id, "newvalue") + self.assertRaises(PortCallError, output_port._get_nav_packet, "something") + + output_port.disconnectPort("hello") + + def testNavSetter(self): + input_parent = self.nav_port_sample() + input_parent_2 = self.nav_port_sample() + input_port_1 = InNavDataPort("input_1", input_parent) + output_port = OutNavDataPort("output") + + self.assertEquals(input_parent.get_source_id(),"original") + + _time = BULKIO.PrecisionUTCTime(1,1,1.0,1.0,1.0) + _positioninfo = FRONTEND.PositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _cartesianpos=FRONTEND.CartesianPositionInfo(False,'DATUM_WGS84',0.0,0.0,0.0) + _velocityinfo=FRONTEND.VelocityInfo(False,'DATUM_WGS84','',0.0,0.0,0.0) + _accelerationinfo=FRONTEND.AccelerationInfo(False,'DATUM_WGS84','',0.0,0.0,0.0) + _attitudeinfo=FRONTEND.AttitudeInfo(False,0.0,0.0,0.0) + navpacket=FRONTEND.NavigationPacket('','',_positioninfo,_cartesianpos,_velocityinfo,_accelerationinfo,_attitudeinfo,_time,[]) + navpacket.source_id = "newvalue" + + output_port._set_nav_packet(navpacket) + self.assertEquals(input_parent.get_source_id(), "original") + self.assertRaises(PortCallError, output_port._set_nav_packet, navpacket, "hello") + + output_port.connectPort(input_port_1._this(), "hello") + + output_port._set_nav_packet(navpacket) + self.assertEquals(input_parent.get_source_id(), "newvalue") + + navpacket.source_id = "newvalue_2"; + 
output_port._set_nav_packet(navpacket, "hello") + self.assertEquals(input_parent.get_source_id(), "newvalue_2") + + self.assertRaises(PortCallError, output_port._set_nav_packet, navpacket, "foo") + + navpacket.source_id = "newvalue_3" + input_port_2 = InNavDataPort("input_2", input_parent_2) + output_port.connectPort(input_port_2._this(), "foo") + + output_port._set_nav_packet(navpacket); + self.assertEquals(input_parent.get_source_id(), "newvalue_3") + self.assertEquals(input_parent_2.get_source_id(), "newvalue_3") + + navpacket.source_id = "newvalue_4"; + output_port._set_nav_packet(navpacket, "hello") + self.assertEquals(input_parent.get_source_id(), "newvalue_4") + self.assertEquals(input_parent_2.get_source_id(), "newvalue_3") + + self.assertRaises(PortCallError, output_port._set_nav_packet, navpacket, "something") + + output_port.disconnectPort("hello") + + def testRFInfoGetter(self): + input_parent = self.rfinfo_port_sample() + input_port_1 = InRFInfoPort("input_1", input_parent) + output_port = OutRFInfoPort("output") + + input_parent.set_source_id("newvalue"); + + self.assertRaises(PortCallError, output_port._get_rfinfo_pkt); + self.assertRaises(PortCallError, output_port._get_rfinfo_pkt, 'hello') + + output_port.connectPort(input_port_1._this(), "hello") + + rfinfo = output_port._get_rfinfo_pkt() + self.assertEquals(rfinfo.rf_flow_id, "newvalue") + rfinfo = output_port._get_rfinfo_pkt("hello") + self.assertEquals(rfinfo.rf_flow_id, "newvalue") + self.assertRaises(PortCallError, output_port._get_rfinfo_pkt, 'foo') + + input_parent_2 = self.rfinfo_port_sample() + input_parent_2.set_source_id("newvalue_2") + input_port_2 = InRFInfoPort("input_2", input_parent_2) + + output_port.connectPort(input_port_2._this(), "foo") + self.assertRaises(PortCallError, output_port._get_rfinfo_pkt) + rfinfo = output_port._get_rfinfo_pkt("hello") + self.assertEquals(rfinfo.rf_flow_id, "newvalue") + self.assertRaises(PortCallError, output_port._get_rfinfo_pkt, "something") + + 
output_port.disconnectPort("hello") + + def testRFInfoSetter(self): + input_parent = self.rfinfo_port_sample() + input_parent_2 = self.rfinfo_port_sample() + input_port_1 = InRFInfoPort("input_1", input_parent) + output_port = OutRFInfoPort("output") + + self.assertEquals(input_parent.get_source_id(),"original") + + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + rfinfo=FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + rfinfo.rf_flow_id = "newvalue" + + output_port._set_rfinfo_pkt(rfinfo) + self.assertEquals(input_parent.get_source_id(), "original") + self.assertRaises(PortCallError, output_port._set_rfinfo_pkt, rfinfo, "hello") + + output_port.connectPort(input_port_1._this(), "hello") + + output_port._set_rfinfo_pkt(rfinfo) + self.assertEquals(input_parent.get_source_id(), "newvalue") + + rfinfo.rf_flow_id = "newvalue_2"; + output_port._set_rfinfo_pkt(rfinfo, "hello") + self.assertEquals(input_parent.get_source_id(), "newvalue_2") + + self.assertRaises(PortCallError, output_port._set_rfinfo_pkt, rfinfo, "foo") + + rfinfo.rf_flow_id = "newvalue_3" + input_port_2 = InRFInfoPort("input_2", input_parent_2) + output_port.connectPort(input_port_2._this(), "foo") + + output_port._set_rfinfo_pkt(rfinfo); + self.assertEquals(input_parent.get_source_id(), "newvalue_3") + self.assertEquals(input_parent_2.get_source_id(), "newvalue_3") + + rfinfo.rf_flow_id = "newvalue_4"; + output_port._set_rfinfo_pkt(rfinfo, "hello") + self.assertEquals(input_parent.get_source_id(), "newvalue_4") + self.assertEquals(input_parent_2.get_source_id(), "newvalue_3") + + self.assertRaises(PortCallError, output_port._set_rfinfo_pkt, rfinfo, "something") + + output_port.disconnectPort("hello") + + def testRFSourceGetter(self): + input_parent = 
self.rfsource_port_sample() + input_port_1 = InRFSourcePort("input_1", input_parent) + output_port = OutRFSourcePort("output") + + input_parent.set_rf_flow_id("newvalue"); + + self.assertRaises(PortCallError, output_port._get_current_rf_input); + self.assertRaises(PortCallError, output_port._get_current_rf_input, 'hello') + + output_port.connectPort(input_port_1._this(), "hello") + + rfsource = output_port._get_current_rf_input() + self.assertEquals(rfsource.rf_flow_id, "newvalue") + rfsource = output_port._get_current_rf_input("hello") + self.assertEquals(rfsource.rf_flow_id, "newvalue") + self.assertRaises(PortCallError, output_port._get_current_rf_input, 'foo') + + input_parent_2 = self.rfsource_port_sample() + input_parent_2.set_rf_flow_id("newvalue_2") + input_port_2 = InRFSourcePort("input_2", input_parent_2) + + output_port.connectPort(input_port_2._this(), "foo") + self.assertRaises(PortCallError, output_port._get_current_rf_input) + rfsource = output_port._get_current_rf_input("hello") + self.assertEquals(rfsource.rf_flow_id, "newvalue") + self.assertRaises(PortCallError, output_port._get_current_rf_input, "something") + + output_port.disconnectPort("hello") + + def testRFSourceSetter(self): + input_parent = self.rfsource_port_sample() + input_parent_2 = self.rfsource_port_sample() + input_port_1 = InRFSourcePort("input_1", input_parent) + output_port = OutRFSourcePort("output") + + self.assertEquals(input_parent.get_rf_flow_id(),"original") + + _antennainfo=FRONTEND.AntennaInfo('','','','') + _freqrange=FRONTEND.FreqRange(0,0,[]) + _feedinfo=FRONTEND.FeedInfo('','',_freqrange) + _sensorinfo=FRONTEND.SensorInfo('','','',_antennainfo,_feedinfo) + _rfcapabilities=FRONTEND.RFCapabilities(_freqrange,_freqrange) + rfsource=FRONTEND.RFInfoPkt('',0.0,0.0,0.0,False,_sensorinfo,[],_rfcapabilities,[]) + rfsource.rf_flow_id = "newvalue" + + output_port._set_current_rf_input(rfsource) + self.assertEquals(input_parent.get_rf_flow_id(), "original") + 
self.assertRaises(PortCallError, output_port._set_current_rf_input, rfsource, "hello") + + output_port.connectPort(input_port_1._this(), "hello") + + output_port._set_current_rf_input(rfsource) + self.assertEquals(input_parent.get_rf_flow_id(), "newvalue") + + rfsource.rf_flow_id = "newvalue_2"; + output_port._set_current_rf_input(rfsource, "hello") + self.assertEquals(input_parent.get_rf_flow_id(), "newvalue_2") + + self.assertRaises(PortCallError, output_port._set_current_rf_input, rfsource, "foo") + + rfsource.rf_flow_id = "newvalue_3" + input_port_2 = InRFSourcePort("input_2", input_parent_2) + output_port.connectPort(input_port_2._this(), "foo") + + output_port._set_current_rf_input(rfsource); + self.assertEquals(input_parent.get_rf_flow_id(), "newvalue_3") + self.assertEquals(input_parent_2.get_rf_flow_id(), "newvalue_3") + + rfsource.rf_flow_id = "newvalue_4"; + output_port._set_current_rf_input(rfsource, "hello") + self.assertEquals(input_parent.get_rf_flow_id(), "newvalue_4") + self.assertEquals(input_parent_2.get_rf_flow_id(), "newvalue_3") + + self.assertRaises(PortCallError, output_port._set_current_rf_input, rfsource, "something") + + output_port.disconnectPort("hello") + + def testTunerGetter(self): + input_parent = self.tuner_port_sample() + input_port_1 = InDigitalScanningTunerPort("input_1", input_parent) + output_port = OutDigitalTunerPort("output") + + input_parent.set_bw(1); + + self.assertRaises(PortCallError, output_port.getTunerBandwidth, 'first_tuner'); + self.assertRaises(PortCallError, output_port.getTunerBandwidth, 'first_tuner', 'hello') + + output_port.connectPort(input_port_1._this(), "hello") + + bw = output_port.getTunerBandwidth('first_tuner') + self.assertEquals(bw, 1) + bw = output_port.getTunerBandwidth('first_tuner', "hello") + self.assertEquals(bw, 1) + self.assertRaises(PortCallError, output_port.getTunerBandwidth, 'first_tuner', 'foo') + + input_parent_2 = self.tuner_port_sample() + input_parent_2.set_bw(2) + input_port_2 = 
InDigitalScanningTunerPort("input_2", input_parent_2) + + output_port.connectPort(input_port_2._this(), "foo") + self.assertRaises(PortCallError, output_port.getTunerBandwidth, 'first_tuner') + bw = output_port.getTunerBandwidth('first_tuner', "hello") + self.assertEquals(bw, 1) + self.assertRaises(PortCallError, output_port.getTunerBandwidth, 'first_tuner', "something") + + output_port.disconnectPort("hello") + + def testTunerSetter(self): + input_parent = self.tuner_port_sample() + input_parent_2 = self.tuner_port_sample() + input_port_1 = InDigitalScanningTunerPort("input_1", input_parent) + output_port = OutDigitalTunerPort("output") + + self.assertEquals(input_parent.get_bw(),0) + + bw = 1 + output_port.setTunerBandwidth("first_tuner", bw) + self.assertEquals(input_parent.get_bw(), 0) + self.assertRaises(PortCallError, output_port.setTunerBandwidth, "first_tuner", bw, "hello") + + output_port.connectPort(input_port_1._this(), "hello") + + output_port.setTunerBandwidth("first_tuner", bw) + self.assertEquals(input_parent.get_bw(), bw) + + bw = 2 + output_port.setTunerBandwidth("first_tuner", bw, "hello") + self.assertEquals(input_parent.get_bw(), bw) + + self.assertRaises(PortCallError, output_port.setTunerBandwidth, "first_tuner", bw, "foo") + + bw = 3 + input_port_2 = InDigitalScanningTunerPort("input_2", input_parent_2) + output_port.connectPort(input_port_2._this(), "foo") + + output_port.setTunerBandwidth("first_tuner", bw); + self.assertEquals(input_parent.get_bw(), bw) + self.assertEquals(input_parent_2.get_bw(), bw) + + bw = 4 + output_port.setTunerBandwidth("first_tuner", bw, "hello") + self.assertEquals(input_parent.get_bw(), bw) + self.assertEquals(input_parent_2.get_bw(), 3) + + self.assertRaises(PortCallError, output_port.setTunerBandwidth, "first_tuner", bw, "something") + + output_port.disconnectPort("hello") + +if __name__ == '__main__': + import runtests + runtests.main() diff --git 
a/frontendInterfaces/libsrc/testing/tests/python/test_validaterequest.py b/frontendInterfaces/libsrc/testing/tests/python/test_validaterequest.py new file mode 100644 index 000000000..8599e5575 --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/python/test_validaterequest.py @@ -0,0 +1,159 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import unittest +import copy + +import bulkio +from bulkio.bulkioInterfaces import BULKIO + +import sys +sys.path.insert(0, '../../../../src/python') +sys.path.insert(0, '../../../../libsrc/python') + +from omniORB import any as _any +from ossie.cf import CF + +from redhawk.frontendInterfaces import FRONTEND +import tuner_device +import fe_types + +class ValidateRequestTest(unittest.TestCase): + + def testSRI(self): + request = fe_types.frontend_tuner_allocation() + upstream_sri = BULKIO.StreamSRI(hversion=1, xstart=0.0, xdelta=1/2e6, + xunits=BULKIO.UNITS_TIME, subsize=0, ystart=0.0, ydelta=0.0, + yunits=BULKIO.UNITS_NONE, mode=0, streamID="", blocking=False, keywords=[]) + request.center_frequency = 100e6 + request.bandwidth = 1e6 + request.sample_rate = 2e6 + cf = 100e6 + bw = 1e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + self.assertTrue(tuner_device.validateRequestVsSRI(request, upstream_sri, False)) + cf = 100.49e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + self.assertTrue(tuner_device.validateRequestVsSRI(request, upstream_sri, False)) + cf = 99.51e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + self.assertTrue(tuner_device.validateRequestVsSRI(request, upstream_sri, False)) + cf = 100.51e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + self.assertRaises(FRONTEND.BadParameterException, tuner_device.validateRequestVsSRI, request, upstream_sri, False) + cf = 99.49e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + 
upstream_sri.keywords = _keywords + self.assertRaises(FRONTEND.BadParameterException, tuner_device.validateRequestVsSRI, request, upstream_sri, False) + + def testDeviceSRI(self): + request = fe_types.frontend_tuner_allocation() + upstream_sri = BULKIO.StreamSRI(hversion=1, xstart=0.0, xdelta=1/2e6, + xunits=BULKIO.UNITS_TIME, subsize=0, ystart=0.0, ydelta=0.0, + yunits=BULKIO.UNITS_NONE, mode=0, streamID="", blocking=False, keywords=[]) + request.center_frequency = 100e6 + request.bandwidth = 1e6 + request.sample_rate = 2e6 + cf = 100e6 + bw = 1e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + min_dev_cf = 99.5e6 + max_dev_cf = 100.5e6 + max_dev_bw = 3e6 + max_dev_sr = 6e6 + self.assertTrue(tuner_device.validateRequestVsDeviceStream(request, upstream_sri, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)) + cf = 100.49e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + self.assertTrue(tuner_device.validateRequestVsDeviceStream(request, upstream_sri, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)) + cf = 99.51e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + self.assertTrue(tuner_device.validateRequestVsDeviceStream(request, upstream_sri, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)) + cf = 100.51e6 + _keywords=[CF.DataType(id="CHAN_RF", value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + self.assertRaises(FRONTEND.BadParameterException, tuner_device.validateRequestVsDeviceStream, request, upstream_sri, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr) + cf = 99.49e6 + _keywords=[CF.DataType(id="CHAN_RF", 
value=_any.to_any(cf)), CF.DataType(id="FRONTEND::BANDWIDTH", value=_any.to_any(bw))] + upstream_sri.keywords = _keywords + self.assertRaises(FRONTEND.BadParameterException, tuner_device.validateRequestVsDeviceStream, request, upstream_sri, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr) + + def testRFInfo(self): + request = fe_types.frontend_tuner_allocation() + rfinfo = fe_types.RFInfoPkt() + request.center_frequency = 100e6 + request.bandwidth = 1e6 + request.sample_rate = 2e6 + cf = 100e6 + bw = 1e6 + rfinfo.rf_center_freq = cf + rfinfo.rf_bandwidth = bw + self.assertTrue(tuner_device.validateRequestVsRFInfo(request, rfinfo, False)) + cf = 100.49e6 + rfinfo.rf_center_freq = cf + self.assertTrue(tuner_device.validateRequestVsRFInfo(request, rfinfo, False)) + cf = 99.51e6 + rfinfo.rf_center_freq = cf + self.assertTrue(tuner_device.validateRequestVsRFInfo(request, rfinfo, False)) + cf = 100.51e6 + rfinfo.rf_center_freq = cf + self.assertRaises(FRONTEND.BadParameterException, tuner_device.validateRequestVsRFInfo, request, rfinfo, False) + cf = 99.49e6 + rfinfo.rf_center_freq = cf + self.assertRaises(FRONTEND.BadParameterException, tuner_device.validateRequestVsRFInfo, request, rfinfo, False) + + def testDeviceRFInfo(self): + request = fe_types.frontend_tuner_allocation() + rfinfo = fe_types.RFInfoPkt() + request.center_frequency = 100e6 + request.bandwidth = 1e6 + request.sample_rate = 2e6 + cf = 100e6 + bw = 1e6 + rfinfo.rf_center_freq = cf + rfinfo.rf_bandwidth = bw + min_dev_cf = 99.5e6 + max_dev_cf = 100.5e6 + max_dev_bw = 3e6 + max_dev_sr = 6e6 + self.assertTrue(tuner_device.validateRequestVsDevice(request, rfinfo, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)) + cf = 100.49e6 + rfinfo.rf_center_freq = cf + self.assertTrue(tuner_device.validateRequestVsDevice(request, rfinfo, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)) + cf = 99.51e6 + rfinfo.rf_center_freq = cf + self.assertTrue(tuner_device.validateRequestVsDevice(request, rfinfo, 
False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr)) + cf = 100.51e6 + rfinfo.rf_center_freq = cf + self.assertRaises(FRONTEND.BadParameterException, tuner_device.validateRequestVsDevice, request, rfinfo, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr) + cf = 99.49e6 + rfinfo.rf_center_freq = cf + self.assertRaises(FRONTEND.BadParameterException, tuner_device.validateRequestVsDevice, request, rfinfo, False, min_dev_cf, max_dev_cf, max_dev_bw, max_dev_sr) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/frontendInterfaces/libsrc/testing/tests/runtests b/frontendInterfaces/libsrc/testing/tests/runtests new file mode 100755 index 000000000..1ec6262cf --- /dev/null +++ b/frontendInterfaces/libsrc/testing/tests/runtests @@ -0,0 +1,53 @@ +#!/bin/bash +# +# Runs relative to frontend project +# + +# can't use relative path for subdir's +# must get actual path so that subdir's use the correct path +frontend_top=$(cd ../../..;pwd) +frontend_libsrc_top=$frontend_top/libsrc +export LD_LIBRARY_PATH=$frontend_libsrc_top/.libs:$frontend_top/.libs:$frontend_top/jni/.libs:${LD_LIBRARY_PATH} +export PYTHONPATH=$frontend_libsrc_top/python:${PYTHONPATH} +export CLASSPATH=${frontend_libsrc_top}/frontend.jar:${frontend_top}/frontendInterfaces.jar:${CLASSPATH} + +# Limit the number of threads Java uses for the garbage collector to avoid +# misleading Java "out of memory" errors that in all actuality appear to be +# due to hitting the per-user process limit +export _JAVA_OPTIONS="-XX:ParallelGCThreads=1" + +# +# Run Python Sandbox based testing (this is a placeholder, there are none right now) +# + +if [ $# -gt 0 ] +then + # run an associated test script + python $* + exit +else + if test -n "$(find . 
-maxdepth 1 -name 'test_*.py' -print -quit)" + then + for pt in test_*.py ; do + python $pt + done + fi +fi + +# +# Run Java unit tests +# +ant -f java/build.xml + + +# +# Run C++ unit tests +# +cd cpp +./runtests +cd - + +# +# Run Python unit tests with XML output +# +(cd python && ./runtests.py -x) diff --git a/frontendInterfaces/pom.xml b/frontendInterfaces/pom.xml deleted file mode 100644 index fba3e0fa3..000000000 --- a/frontendInterfaces/pom.xml +++ /dev/null @@ -1,100 +0,0 @@ - - 4.0.0 - - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../pom.xml - - frontend-interfaces - bundle - - - ${project.groupId} - bulkio-interfaces - ${project.version} - - - ${project.groupId} - cf-interfaces - ${project.version} - - - - idl - - - - - idl - - - src/java - - - org.codehaus.gmaven - gmaven-plugin - 1.3 - - - set-main-artifact - package - - execute - - - - project.artifact.setFile(new - File("${project.basedir}/FRONTENDInterfaces.jar")) - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifacts - package - - attach-artifact - - - - - ${project.build.directory}/${project.artifactId}-${project.version}.jar - beta - jar - - - - - - - - - maven-assembly-plugin - 2.2-beta-5 - - - attach-idlzip - package - - single - - - - assembly.xml - - - - - - - - diff --git a/pom.xml b/pom.xml deleted file mode 100644 index fb260e9ac..000000000 --- a/pom.xml +++ /dev/null @@ -1,82 +0,0 @@ - - 4.0.0 - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - pom - - Used to add maven coordinates to jars produced via make and generate source jar and other artifacts. 
- - - Releases - file://${project.basedir}/repo/releases - Snapshots - file://${project.basedir}/repo/snapshots - - - redhawk/src/omnijni - redhawk/src/base/framework/java - redhawk/src/base/framework/java/ossie - bulkioInterfaces - bulkioInterfaces/libsrc - frontendInterfaces - frontendInterfaces/libsrc - burstioInterfaces - burstioInterfaces/src/java - - - - - - org.apache.felix - maven-bundle-plugin - true - - - - org.apache.maven.plugins - maven-source-plugin - 3.0.1 - - - cf-attach-sources - verify - - jar-no-fork - - - - - - maven-javadoc-plugin - 2.10.4 - - false - - - - attach-javadoc - package - - jar - - - - - - - - - Releases - ${distributionManagement.repository.id} - ${distributionManagement.repository.url} - - - Snapshots - ${distributionManagement.snapshotRepository.id} - ${distributionManagement.snapshotRepository.url} - - - diff --git a/redhawk-codegen/redhawk-codegen b/redhawk-codegen/redhawk-codegen index 6fd97c058..42e2b1892 100755 --- a/redhawk-codegen/redhawk-codegen +++ b/redhawk-codegen/redhawk-codegen @@ -90,7 +90,8 @@ if __name__ == '__main__': # Deprecated options shortopts += 'm:' - longopts = ['help', 'template=', 'impl=', 'impldir=', 'lang=', 'variant=', 'check-template=', 'version'] + longopts = ['help', 'template=', 'impl=', 'impldir=', 'lang=', 'variant=', + 'check-template=', 'header=', 'version'] # add predefined template settings a full word options longopts.extend(TemplateSetup.keys()) # Deprecated options @@ -106,6 +107,7 @@ if __name__ == '__main__': resource_type=None language=None variant="" + headerFile = None checkSupport = False for key, value in opts: if key == '--help': @@ -135,6 +137,8 @@ if __name__ == '__main__': break elif key == '--variant': variant = value + elif key == '--header': + headerFile = value elif key == '-l': action = Function.LIST elif key == '-f': @@ -271,6 +275,15 @@ if __name__ == '__main__': else: projectTemplate = 'redhawk.codegen.jinja.project.component' + + header = None + if headerFile: + try: 
+ with open(headerFile, 'r') as fp: + header = fp.read().rstrip() + except Exception as exc: + raise SystemExit("Unable to read license header file '%s': %s" % (headerFile, exc.strerror)) + try: package = importTemplate(projectTemplate) except: @@ -279,13 +292,14 @@ if __name__ == '__main__': outputdir='', overwrite=overwrite, crcs=toplevelCRCs, - variant=variant) + variant=variant, + header=header) generators.append(('Component '+softpkg.name(), projectGenerator)) if softpkg.type() != ComponentTypes.SHAREDPACKAGE: # Generate unit tests from redhawk.codegen.jinja.unitTests.resource import sca - generator = sca.factory(outputdir='tests', overwrite=overwrite) + generator = sca.factory(outputdir='tests', overwrite=overwrite, header=header) generators.append(('Tests '+softpkg.name(), generator)) # Set up generation for each requested implementation. @@ -376,6 +390,7 @@ if __name__ == '__main__': overwrite = overwrite, crcs = implSettings.generatedFileCRCs, variant = variant, + header = header, **implSettings.properties) # Add this generator to the top-level project generator, so that it can diff --git a/redhawk-codegen/redhawk-codegen.spec b/redhawk-codegen/redhawk-codegen.spec index 68de887ad..2035575da 100644 --- a/redhawk-codegen/redhawk-codegen.spec +++ b/redhawk-codegen/redhawk-codegen.spec @@ -23,8 +23,8 @@ Prefix: %{_prefix} Name: redhawk-codegen -Version: 2.0.9 -Release: 1%{?dist} +Version: 2.2.1 +Release: 2%{?dist} Summary: Redhawk Code Generators Group: Applications/Engineering @@ -38,7 +38,7 @@ BuildArch: noarch BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot Requires: python -Requires: redhawk >= 2.0 +Requires: redhawk = %{version} %if 0%{?rhel} >= 7 Requires: python-jinja2 BuildRequires: python-jinja2 @@ -93,6 +93,12 @@ rm -rf $RPM_BUILD_ROOT %endif %changelog +* Wed Jun 28 2017 Ryan Bauman - 2.1.2-1 +- Bump for 2.1.2-rc1 + +* Wed Jun 28 2017 Ryan Bauman - 2.1.1-2 +- Bump for 2.1.1-rc2 + * Thu May 21 2015 - 2.0.0-2 - Update python-jinja2 
package for el7 diff --git a/redhawk-codegen/redhawk/codegen/generate.py b/redhawk-codegen/redhawk/codegen/generate.py index 52de58d68..56ba334e1 100644 --- a/redhawk-codegen/redhawk/codegen/generate.py +++ b/redhawk-codegen/redhawk/codegen/generate.py @@ -36,7 +36,11 @@ def importTemplate(template): """ Imports a code generation module from the given fully-qualified name. """ - package = __import__(template) + try: + package = __import__(template) + except Exception, e: + print e + raise # Since the module name probably has dots, get the most specific module # (e.g. 'component' from 'template.cpp.component'). diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/mapping.py b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/mapping.py index 539a3b32b..517c32dd5 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/mapping.py +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/mapping.py @@ -29,6 +29,15 @@ def _mapComponent(self, softpkg): cppcomp['interfacedeps'] = tuple(self.getInterfaceDependencies(softpkg)) return cppcomp + def _mapImplementation(self, impl): + impldict = {} + if impl.isModule(): + impldict['module'] = True + impldict['target'] = impl.entrypoint().replace('.so', '.la') + else: + impldict['target'] = impl.entrypoint() + return impldict + def getInterfaceDependencies(self, softpkg): for namespace in self.getInterfaceNamespaces(softpkg): yield libraries.getPackageRequires(namespace) diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/Makefile.am b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/Makefile.am index ada219a54..156acb440 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/Makefile.am +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/Makefile.am @@ -19,30 +19,46 @@ #} #% set outputdir = generator.getOutputDir() #% set componentdir = component.name.replace('.','/') -#% set executable = 
component.impl.entrypoint|relpath(outputdir) -#% set target = automake.canonicalName(executable) +#% set target = component.impl.target|relpath(outputdir) +#% set installdir = '/'.join(['$(prefix)', component.sdrpath, componentdir]) +#% set impldir = installdir + '/' + outputdir +#% set amtarget = automake.canonicalName(target) #{% block license %} #{# Allow child templates to include license #} #{% endblock %} +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + #{% block binInfo %} ossieName = {{component.name}} -bindir = $(prefix)/{{component.sdrpath}}/{{componentdir}}/{{outputdir}}/ -bin_PROGRAMS = {{executable}} +#{% if component.impl.module %} +libdir = {{impldir}} +lib_LTLIBRARIES = {{target}} +#{% else %} +bindir = {{impldir}} +bin_PROGRAMS = {{target}} +#{% endif %} #{% endblock %} #{% block xmlInfo %} -xmldir = $(prefix)/{{component.sdrpath}}/{{componentdir}}/ +xmldir = {{installdir}} dist_xml_DATA = {{component.profile.values()|relpath(outputdir)|join(' ')}} #{% endblock %} -ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie -AUTOMAKE_OPTIONS = subdir-objects +#{% if component.impl.module %} +#{% set solib = component.impl.entrypoint|relpath(outputdir) %} +.PHONY: convenience-link clean-convenience-link -#{% if component.mFunction != None %} -mdir = $(prefix)/{{component.sdrpath}}/{{componentdir}}/{{outputdir}}/ -dist_m_DATA = *.m -#{% endif %} +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : {{ target }} + @ln -fs .libs/{{solib}} +clean-convenience-link: + @rm -f {{solib}} + +#{% endif %} #{% block distClean %} distclean-local: rm -rf m4 @@ -68,16 +84,20 @@ distclean-local: # generated by the REDHAWK IDE. You can remove/modify the following lines if # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide -{{target}}_SOURCES = $(redhawk_SOURCES_auto) -{{target}}_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) -{{target}}_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +{{amtarget}}_SOURCES = $(redhawk_SOURCES_auto) +#{% if component.impl.module %} +{{amtarget}}_LIBADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +#{% else %} +{{amtarget}}_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +#{% endif %} +{{amtarget}}_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +#{% if component.impl.module %} +{{amtarget}}_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) +#{% else %} #{% if component is programmabledevice %} {{target}}_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) -ldl #{% else %} {{target}}_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) #{% endif %} -#{% if component.mFunction %} -{{target}}_CXXFLAGS += $(M_FUNCTION_INTERPRETER_INCLUDE) -{{target}}_LDFLAGS += $(BOOST_FILESYSTEM_LIB) $(M_FUNCTION_INTERPRETER_LOAD) #{% endif %} #{% endblock %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/configure.ac b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/configure.ac index 175720981..8db7869a1 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/configure.ac +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/configure.ac @@ -24,6 +24,9 @@ AC_INIT({{component.name}}, {{component.version}}) 
AM_INIT_AUTOMAKE([nostdinc foreign]) AC_CONFIG_MACRO_DIR([m4]) +#{% if component.impl.module %} +LT_INIT([dlopen]) +#{% endif %} #{% endblock %} #{% block acChecks %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/main.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/main.cpp index 179e2c860..e82d6a53c 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/main.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/base/templates/main.cpp @@ -40,6 +40,14 @@ void signal_catcher(int sig) } } /*{% endif %}*/ +/*{% if not component is device and component.impl.module %}*/ +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new ${component.userclass.name}(uuid.c_str(), identifier.c_str()); + } +} +/*{% else %}*/ int main(int argc, char* argv[]) { /*{% if component is device %}*/ @@ -56,4 +64,5 @@ int main(int argc, char* argv[]) /*{% endif %}*/ return 0; } +/*{% endif %}*/ /*{% endblock %}*/ diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/generator.py b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/generator.py index cdacde947..e932f55ac 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/generator.py +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/generator.py @@ -47,6 +47,7 @@ def map(self, softpkg): if prop['cppname'] in ('device_kind', 'device_model', 'frontend_tuner_allocation', 'frontend_listener_allocation', + 'frontend_scanner_allocation', 'frontend_tuner_status'): prop['inherited'] = True return component diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/mapping.py b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/mapping.py index d1fcd1d49..ec5404b08 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/mapping.py +++ 
b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/mapping.py @@ -40,7 +40,9 @@ def getImplementedInterfaces(softpkg): # Ensure that parent interfaces also gets added (so, e.g., a device # with a DigitalTuner should also report that it's an AnalogTuner # and FrontendTuner) - inherits = { 'DigitalTuner': ('AnalogTuner', 'FrontendTuner'), + inherits = { 'DigitalScanningTuner': ('ScanningTuner', 'DigitalTuner', 'AnalogTuner', 'FrontendTuner'), + 'AnalogScanningTuner': ('ScanningTuner', 'AnalogTuner', 'FrontendTuner'), + 'DigitalTuner': ('AnalogTuner', 'FrontendTuner'), 'AnalogTuner': ('FrontendTuner',) } for port in softpkg.providesPorts(): @@ -74,7 +76,13 @@ def superClasses(softpkg): # Add the most specific tuner delegate interface: # (Digital > Analog > Frontend) - if 'DigitalTuner' in deviceinfo: + if 'DigitalScanningTuner' in deviceinfo: + classes.append({'name': 'virtual frontend::digital_scanning_tuner_delegation', 'header': ''}) + parent['name'] = 'frontend::FrontendScanningTunerDevice' + elif 'AnalogScanningTuner' in deviceinfo: + classes.append({'name': 'virtual frontend::analog_scanning_tuner_delegation', 'header': ''}) + parent['name'] = 'frontend::FrontendScanningTunerDevice' + elif 'DigitalTuner' in deviceinfo: classes.append({'name': 'virtual frontend::digital_tuner_delegation', 'header': ''}) elif 'AnalogTuner' in deviceinfo: classes.append({'name': 'virtual frontend::analog_tuner_delegation', 'header': ''}) @@ -107,7 +115,8 @@ class FrontendPropertyMapper(CppPropertyMapper): FRONTEND_BUILTINS = ( 'FRONTEND::tuner_allocation', - 'FRONTEND::listener_allocation' + 'FRONTEND::listener_allocation', + 'FRONTEND::scanner_allocation' ) def mapStructProperty(self, prop, fields): diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource.cpp index 3be1aedde..d058c2ceb 100644 --- 
a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource.cpp @@ -49,8 +49,32 @@ void ${className}::deviceDisable(frontend_tuner_status_struct_struct &fts, size_ fts.enabled = false; return; } +/*{% if 'ScanningTuner' in component.implements %}*/ +bool ${className}::deviceSetTuningScan(const frontend::frontend_tuner_allocation_struct &request, const frontend::frontend_scanner_allocation_struct &scan_request, frontend_tuner_status_struct_struct &fts, size_t tuner_id){ + /************************************************************ + + This function is called when the allocation request contains a scanner allocation + + modify fts, which corresponds to this->frontend_tuner_status[tuner_id] + At a minimum, bandwidth, center frequency, and sample_rate have to be set + If the device is tuned to exactly what the request was, the code should be: + fts.bandwidth = request.bandwidth; + fts.center_frequency = request.center_frequency; + fts.sample_rate = request.sample_rate; + + return true if the tuning succeeded, and false if it failed + ************************************************************/ + #warning deviceSetTuning(): Evaluate whether or not a tuner is added ********* + return true; +} +/*{% endif %}*/ bool ${className}::deviceSetTuning(const frontend::frontend_tuner_allocation_struct &request, frontend_tuner_status_struct_struct &fts, size_t tuner_id){ /************************************************************ +/*{% if 'ScanningTuner' in component.implements %}*/ + + This function is called when the allocation request does not contain a scanner allocation + +/*{% endif %}*/ modify fts, which corresponds to this->frontend_tuner_status[tuner_id] At a minimum, bandwidth, center frequency, and sample_rate have to be set If the device is tuned to exactly what the request was, the code should be: @@ -108,7 +132,7 @@ void 
${className}::setTunerCenterFrequency(const std::string& allocation_id, dou if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); if(allocation_id != getControlAllocationId(idx)) throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); - if (freq<0) throw FRONTEND::BadParameterException(); + if (freq<0) throw FRONTEND::BadParameterException("Center frequency cannot be less than 0"); // set hardware to new value. Raise an exception if it's not possible this->frontend_tuner_status[idx].center_frequency = freq; } @@ -124,7 +148,7 @@ void ${className}::setTunerBandwidth(const std::string& allocation_id, double bw if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); if(allocation_id != getControlAllocationId(idx)) throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); - if (bw<0) throw FRONTEND::BadParameterException(); + if (bw<0) throw FRONTEND::BadParameterException("Bandwidth cannot be less than 0"); // set hardware to new value. Raise an exception if it's not possible this->frontend_tuner_status[idx].bandwidth = bw; } @@ -187,7 +211,7 @@ void ${className}::setTunerOutputSampleRate(const std::string& allocation_id, do if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); if(allocation_id != getControlAllocationId(idx)) throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); - if (sr<0) throw FRONTEND::BadParameterException(); + if (sr<0) throw FRONTEND::BadParameterException("Sample rate cannot be less than 0"); // set hardware to new value. 
Raise an exception if it's not possible this->frontend_tuner_status[idx].sample_rate = sr; } @@ -198,6 +222,29 @@ double ${className}::getTunerOutputSampleRate(const std::string& allocation_id){ return frontend_tuner_status[idx].sample_rate; } /*{% endif %}*/ +/*{% if 'ScanningTuner' in component.implements %}*/ +frontend::ScanStatus ${className}::getScanStatus(const std::string& allocation_id) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + frontend::ManualStrategy* tmp = new frontend::ManualStrategy(0); + frontend::ScanStatus retval(tmp); + return retval; +} + +void ${className}::setScanStartTime(const std::string& allocation_id, const BULKIO::PrecisionUTCTime& start_time) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); +} + +void ${className}::setScanStrategy(const std::string& allocation_id, const frontend::ScanStrategy* scan_strategy) { + long idx = getTunerMapping(allocation_id); + if (idx < 0) throw FRONTEND::FrontendException("Invalid allocation id"); + if(allocation_id != getControlAllocationId(idx)) + throw FRONTEND::FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner").c_str()); +} +/*{% endif %}*/ /*{% if 'GPS' in component.implements %}*/ frontend::GPSInfo ${className}::get_gps_info(const std::string& port_name) diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource.h b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource.h index 41d8a4e78..9c3d1d75b 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource.h +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource.h @@ -52,6 
+52,11 @@ double getTunerOutputSampleRate(const std::string& allocation_id); void setTunerOutputSampleRate(const std::string& allocation_id, double sr); /*{% endif %}*/ +/*{% if 'ScanningTuner' in component.implements %}*/ + frontend::ScanStatus getScanStatus(const std::string& allocation_id); + void setScanStartTime(const std::string& allocation_id, const BULKIO::PrecisionUTCTime& start_time); + void setScanStrategy(const std::string& allocation_id, const frontend::ScanStrategy* scan_strategy); +/*{% endif %}*/ /*{% if 'GPS' in component.implements %}*/ frontend::GPSInfo get_gps_info(const std::string& port_name); void set_gps_info(const std::string& port_name, const frontend::GPSInfo &gps_info); @@ -84,6 +89,9 @@ // these are pure virtual, must be implemented here void deviceEnable(frontend_tuner_status_struct_struct &fts, size_t tuner_id); void deviceDisable(frontend_tuner_status_struct_struct &fts, size_t tuner_id); +/*{% if 'ScanningTuner' in component.implements %}*/ + bool deviceSetTuningScan(const frontend::frontend_tuner_allocation_struct &request, const frontend::frontend_scanner_allocation_struct &scan_request, frontend_tuner_status_struct_struct &fts, size_t tuner_id); +/*{% endif %}*/ bool deviceSetTuning(const frontend::frontend_tuner_allocation_struct &request, frontend_tuner_status_struct_struct &fts, size_t tuner_id); bool deviceDeleteTuning(frontend_tuner_status_struct_struct &fts, size_t tuner_id); diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource_base.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource_base.cpp index 4a4b3c322..2f6af0eaf 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource_base.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource_base.cpp @@ -20,29 +20,6 @@ //% extends "pull/resource_base.cpp" /*{% block extensions %}*/ /*{% if 'FrontendTuner' in component.implements %}*/ -/* 
This sets the number of entries in the frontend_tuner_status struct sequence property - * as well as the tuner_allocation_ids vector. Call this function during initialization - */ -void ${className}::setNumChannels(size_t num) -{ - this->setNumChannels(num, "RX_DIGITIZER"); -} -/* This sets the number of entries in the frontend_tuner_status struct sequence property - * as well as the tuner_allocation_ids vector. Call this function during initialization - */ - -void ${className}::setNumChannels(size_t num, std::string tuner_type) -{ - frontend_tuner_status.clear(); - frontend_tuner_status.resize(num); - tuner_allocation_ids.clear(); - tuner_allocation_ids.resize(num); - for (std::vector::iterator iter=frontend_tuner_status.begin(); iter!=frontend_tuner_status.end(); iter++) { - iter->enabled = false; - iter->tuner_type = tuner_type; - } -} - void ${className}::frontendTunerStatusChanged(const std::vector* oldValue, const std::vector* newValue) { this->tuner_allocation_ids.resize(this->frontend_tuner_status.size()); diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource_base.h b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource_base.h index c9bcd8ee0..8e5187e9f 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource_base.h +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/resource_base.h @@ -48,9 +48,4 @@ /*{% endblock %}*/ /*{% block extendedProtected%}*/ - -/*{% if 'FrontendTuner' in component.implements %}*/ - virtual void setNumChannels(size_t num); - virtual void setNumChannels(size_t num, std::string tuner_type); -/*{% endif %}*/ /*{% endblock %}*/ diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/template_impl.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/template_impl.cpp index 0ec4d77b3..48ca0a264 100644 --- 
a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/template_impl.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/frontend/templates/template_impl.cpp @@ -25,5 +25,9 @@ #include "struct_props.h" #include +/*{% if 'ScanningTuner' in component.implements %}*/ +template class frontend::FrontendScanningTunerDevice; +/*{% else %}*/ template class frontend::FrontendTunerDevice; +/*{% endif %}*/ diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/generator.py b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/generator.py index a2a3b0091..6ff8d2218 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/generator.py +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/generator.py @@ -59,7 +59,7 @@ class OctaveComponentGenerator(PullComponentGenerator): def templates(self, component): templates = [ CppTemplate('mFunction/main.cpp'), - AutomakeTemplate('base/Makefile.am'), + AutomakeTemplate('Makefile.am'), AutomakeTemplate('base/Makefile.am.ide', userfile=True), AutoconfTemplate('configure.ac'), diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/Makefile.am b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/Makefile.am new file mode 100644 index 000000000..f59fe6ca9 --- /dev/null +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/Makefile.am @@ -0,0 +1,31 @@ +#{# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +#} +#{% extends "base/Makefile.am" %} + +#{% block extensions %} +mdir = {{impldir}} +dist_m_DATA = *.m +#{% endblock %} + +#{% block compileFlags %} +{{ super() -}} +{{amtarget}}_CXXFLAGS += $(OCTAVE_CPPFLAGS) +{{amtarget}}_LDFLAGS += $(BOOST_FILESYSTEM_LIB) $(OCTAVE_LIBS) +#{% endblock %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/configure.ac b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/configure.ac index ac0604fd5..9ce5cc77f 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/configure.ac +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/configure.ac @@ -19,14 +19,10 @@ #} #{% extends "base/configure.ac" %} -#{% block acChecks %} -AC_HEADER_M_FUNCTION() -AC_LIB_M_FUNCTION() -{{ super() -}} -#{% endblock %} #{% block coreDeps %} {{ super() -}} AX_BOOST_FILESYSTEM +RH_OCTAVE([{{versions.octave}}]) #{% endblock %} #{% block softpkgDeps %} #{% endblock %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/octaveResource_base.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/octaveResource_base.cpp index 4177700b3..852c07e52 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/octaveResource_base.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/mFunction/templates/octaveResource_base.cpp @@ -90,7 +90,7 @@ const octave_value_list ${className}::_feval( errorStr += _diaryFile; // Log the error and throw an exception - 
LOG_ERROR(${className}, errorStr); + RH_ERROR(this->_baseLog, errorStr); throw std::invalid_argument(""); } return result; diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/octave.py b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/octave.py index 764cf783b..6c04d69f4 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/octave.py +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/octave.py @@ -19,20 +19,34 @@ # import commands +import os from redhawk.codegen.jinja.cpp.component.mFunction.generator import OctaveComponentGenerator, loader +from redhawk.codegen import versions def factory(**opts): return OctaveComponentGenerator(**opts) -def check(): +def _version_tuple(ver): + return tuple(int(n) for n in ver.split('.')) + +def _check_octave(): # Attempt to determine if octave-devel v3.4 or greater is installed. - findCommand = 'find /usr -regextype posix-extended -regex ".*include\/octave\-[3]+\.[4-9]+\.[0-9]+$" -print -quit 2>/dev/null' - (status,output) = commands.getstatusoutput(findCommand) - if output == "": - # suitable octave header files were not found - print "Could not find suitable Octave installation. Octave-devel v3.4 or greater is required." + (status, output) = commands.getstatusoutput('octave-config -v') + if status: return False - else: - # suitable octave header files were found + + # Check the version against the minimum + version = _version_tuple(output) + if version < _version_tuple(versions.octave): + return False + + incdir = commands.getoutput('octave-config -p OCTINCLUDEDIR') + return os.path.exists(incdir) + +def check(): + if _check_octave(): return True + else: + print "Could not find suitable Octave installation. Octave-devel v%s or greater is required." 
% versions.octave + return False diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/persona/templates/persona_base.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/persona/templates/persona_base.cpp index 7b3601068..04dac833e 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/persona/templates/persona_base.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/persona/templates/persona_base.cpp @@ -147,9 +147,9 @@ void ${className}::releaseObject() releaseOutPorts(); // SR:419 - LOG_DEBUG(${className}, __FUNCTION__ << ": Receive releaseObject call"); + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Receive releaseObject call"); if (_adminState == CF::Device::UNLOCKED) { - LOG_DEBUG(${className}, __FUNCTION__ << ": Releasing Device") + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Releasing Device") setAdminState(CF::Device::SHUTTING_DOWN); // SR:418 @@ -161,7 +161,7 @@ void ${className}::releaseObject() } } - LOG_DEBUG(${className}, __FUNCTION__ << ": Done Releasing Device") + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Done Releasing Device") } } @@ -170,14 +170,14 @@ CORBA::Boolean ${className}::attemptToProgramParent() { // Return false if there is no reference to the parent if (_parentDevice == NULL) { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": No reference to parent exists!"); return false; } if (_parentAllocated == false) { - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": About to allocate parent device"); beforeHardwareProgrammed(); @@ -205,7 +205,7 @@ CORBA::Boolean ${className}::attemptToUnprogramParent() { // Return false if there is no reference to the parent if (_parentDevice == NULL) { - LOG_ERROR(${className}, __FUNCTION__ << ": No reference to parent exists!"); + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": No reference to parent exists!"); return false; } @@ -214,7 +214,7 @@ CORBA::Boolean 
${className}::attemptToUnprogramParent() // Grab previous user-defined allocation request if (_previousRequestProps.length() == 0) { - LOG_ERROR(${className}, __FUNCTION__ << ": Previously requested hw_load Props empty!"); + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Previously requested hw_load Props empty!"); return false; } @@ -249,14 +249,14 @@ CF::ExecutableDevice::ProcessID_Type ${className}::execute ( for (unsigned int ii = 0; ii < parameters.length(); ii++) { propId = parameters[ii].id; propValue = ossie::any_to_string(parameters[ii].value); - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": InstantiateResourceProp: ID['" << propId << "'] = " << propValue); } // Attempt to create and verify the resource resourcePtr = instantiateResource(name, options, parameters); if (resourcePtr == NULL) { - LOG_FATAL(${className}, __FUNCTION__ << + RH_FATAL(this->_deviceLog, __FUNCTION__ << ": Unable to instantiate '" << name << "'"); throw (CF::ExecutableDevice::ExecuteFail()); } @@ -333,7 +333,7 @@ Resource_impl* ${className}::instantiateResource( pHandle = dlopen(absPath.c_str(), RTLD_NOW); if (!pHandle) { errorMsg = dlerror(); - LOG_FATAL(${className}, __FUNCTION__ << + RH_FATAL(this->_deviceLog, __FUNCTION__ << ": Unable to open library '" << absPath.c_str() << "': " << errorMsg); return NULL; } @@ -363,7 +363,7 @@ Resource_impl* ${className}::instantiateResource( fnPtr = dlsym(pHandle, symbol); if (!fnPtr) { errorMsg = dlerror(); - LOG_FATAL(${className}, __FUNCTION__ << + RH_FATAL(this->_deviceLog, __FUNCTION__ << ": Unable to find symbol '" << symbol << "': " << errorMsg); return NULL; } @@ -375,7 +375,7 @@ Resource_impl* ${className}::instantiateResource( try { resourcePtr = generateResource(argc, argv, constructorPtr, libraryName); } catch (...) 
{ - LOG_FATAL(${className}, __FUNCTION__ << + RH_FATAL(this->_deviceLog, __FUNCTION__ << ": Unable to construct persona device: '" << argv[0] << "'"); } @@ -401,7 +401,7 @@ void ${className}::formatRequestProps( // Sanity check - Kick out if properties are empty if (requestProps.length() == 0) { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to format hw_load_request properties. Properties are empty!"); return; } @@ -410,7 +410,7 @@ void ${className}::formatRequestProps( if (requestProps.length() == 1) { propId = requestProps[0].id; if (propId == "hw_load_requests") { - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": No formatting occurred - Request properties are properly formatted!"); formattedProps = requestProps; return; @@ -429,7 +429,7 @@ void ${className}::formatRequestProps( // Case 2 - Properties are multiple hw_load_request structs if (allPropsAreHwLoadRequest) { - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Found hw_load_request array - Formatting to structseq"); formattedProps.length(1); formattedProps[0].id = "hw_load_requests"; @@ -439,7 +439,7 @@ void ${className}::formatRequestProps( // Case 3 - Properties reprensent the contents of a single hw_load_request if (foundRequestId) { - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Found hw_load_request contents - Formatting to struct and structseq"); hwLoadRequest.length(1); @@ -452,6 +452,6 @@ void ${className}::formatRequestProps( return; } - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to format hw_load_request properties - Format unknown!"); } diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/persona/templates/resource.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/persona/templates/resource.cpp index 9f8ed284f..41de2c610 100644 --- 
a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/persona/templates/resource.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/persona/templates/resource.cpp @@ -218,12 +218,23 @@ PREPARE_LOGGING(${className}) //Add to ${component.userclass.header} void scaleChanged(const std::string&); - - + + Logging: + + The member _baseLog is a logger whose base name is the component (or device) instance name. + New logs should be created based on this logger name. + + To create a new logger, + rh_logger::LoggerPtr my_logger = this->_baseLog->getChildLogger("foo"); + + Assuming component instance name abc_1, my_logger will then be created with the + name "abc_1.user.foo". + + ************************************************************************************************/ int ${className}::serviceFunction() { - LOG_DEBUG(${className}, "serviceFunction() example log message"); + RH_DEBUG(this->_baseLog, "serviceFunction() example log message"); return NOOP; } @@ -235,7 +246,7 @@ CORBA::Boolean ${className}::allocateCapacity(const CF::Properties& capacities) bool allocationSuccess = false; if (isBusy() || isLocked()) { - LOG_WARN(${className}, __FUNCTION__ << + RH_WARN(this->_baseLog, __FUNCTION__ << ": Cannot allocate capacities... 
Device state is locked and/or busy"); return false; } } diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/programmable/templates/programmable_base.h b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/programmable/templates/programmable_base.h index 1b2f9bd55..ab81a2172 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/programmable/templates/programmable_base.h +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/programmable/templates/programmable_base.h @@ -61,7 +61,7 @@ namespace HW_LOAD { }; static std::string getId() { - return std::string("hw_load_request"); + return std::string("hw_load_status"); }; std::string request_id; @@ -84,16 +84,16 @@ inline bool operator>>= (const CORBA::Any& a, HW_LOAD::default_hw_load_request_s if (!(a >>= temp)) return false; CF::Properties& props = *temp; for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("request_id", props[idx].id)) { + if (!strcmp("hw_load_request::request_id", props[idx].id)) { if (!(props[idx].value >>= s.request_id)) return false; } - if (!strcmp("requester_id", props[idx].id)) { + if (!strcmp("hw_load_request::requester_id", props[idx].id)) { if (!(props[idx].value >>= s.requester_id)) return false; } - if (!strcmp("hardware_id", props[idx].id)) { + if (!strcmp("hw_load_request::hardware_id", props[idx].id)) { if (!(props[idx].value >>= s.hardware_id)) return false; } - if (!strcmp("load_filepath", props[idx].id)) { + if (!strcmp("hw_load_request::load_filepath", props[idx].id)) { if (!(props[idx].value >>= s.load_filepath)) return false; } } @@ -103,17 +103,60 @@ inline bool operator>>= (const CORBA::Any& a, HW_LOAD::default_hw_load_request_s inline void operator<<= (CORBA::Any& a, const HW_LOAD::default_hw_load_request_struct& s) { CF::Properties props; props.length(4); - props[0].id = CORBA::string_dup("request_id"); + props[0].id = CORBA::string_dup("hw_load_request::request_id"); + props[0].value <<= s.request_id; + props[1].id = 
CORBA::string_dup("hw_load_request::requester_id"); + props[1].value <<= s.requester_id; + props[2].id = CORBA::string_dup("hw_load_request::hardware_id"); + props[2].value <<= s.hardware_id; + props[3].id = CORBA::string_dup("hw_load_request::load_filepath"); + props[3].value <<= s.load_filepath; + a <<= props; +}; + + + +inline bool operator>>= (const CORBA::Any& a, HW_LOAD::default_hw_load_status_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + CF::Properties& props = *temp; + for (unsigned int idx = 0; idx < props.length(); idx++) { + if (!strcmp("hw_load_status::request_id", props[idx].id)) { + if (!(props[idx].value >>= s.request_id)) return false; + } + if (!strcmp("hw_load_status::requester_id", props[idx].id)) { + if (!(props[idx].value >>= s.requester_id)) return false; + } + if (!strcmp("hw_load_status::hardware_id", props[idx].id)) { + if (!(props[idx].value >>= s.hardware_id)) return false; + } + if (!strcmp("hw_load_status::load_filepath", props[idx].id)) { + if (!(props[idx].value >>= s.load_filepath)) return false; + } + if (!strcmp("hw_load_status::state", props[idx].id)) { + if (!(props[idx].value >>= s.state)) return false; + } + } + return true; +}; + +inline void operator<<= (CORBA::Any& a, const HW_LOAD::default_hw_load_status_struct& s) { + CF::Properties props; + props.length(5); + props[0].id = CORBA::string_dup("hw_load_status::request_id"); props[0].value <<= s.request_id; - props[1].id = CORBA::string_dup("requester_id"); + props[1].id = CORBA::string_dup("hw_load_status::requester_id"); props[1].value <<= s.requester_id; - props[2].id = CORBA::string_dup("hardware_id"); + props[2].id = CORBA::string_dup("hw_load_status::hardware_id"); props[2].value <<= s.hardware_id; - props[3].id = CORBA::string_dup("load_filepath"); + props[3].id = CORBA::string_dup("hw_load_status::load_filepath"); props[3].value <<= s.load_filepath; + props[4].id = CORBA::string_dup("hw_load_status::state"); + props[4].value <<= s.state; a 
<<= props; }; + /*{% if component is device %}*/ typedef std::string ${executeType.capitalize()}Id; typedef std::map<${executeType.capitalize()}Id, ${executeClass}*> ${executeType.capitalize()}Map; @@ -241,7 +284,7 @@ class ${className} : public ${baseClass} // system, use the dev filesystem to copy into cache if (isSharedLibrary && existsOnDevFS) { fs = _deviceManager->fileSys();; - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": File-system switched to dev"); } @@ -261,7 +304,7 @@ class ${className} : public ${baseClass} CF::Device::InvalidState, CORBA::SystemException ) { - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Instantiating ${executeType} '" << name << "'... "); // Initialize local variables @@ -271,7 +314,7 @@ class ${className} : public ${baseClass} // Attempt to instantiate the object contained in the shared library ${executeType} = instantiate${executeType.capitalize()}(name, options, parameters); if (${executeType} == NULL) { - LOG_FATAL(${className}, __FUNCTION__ << + RH_FATAL(this->_deviceLog, __FUNCTION__ << ": Unable to instantiate '" << name << "'"); throw (CF::ExecutableDevice::ExecuteFail()); } @@ -283,7 +326,7 @@ class ${className} : public ${baseClass} _${executeType}Map[${executeType}Id] = ${executeType}; _processMap[++_processIdIncrement] = ${executeType}Id; - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": ${executeType.capitalize()} '" << ${executeType}Id << "' has been successfully instantiated"); return _processIdIncrement; @@ -316,7 +359,7 @@ class ${className} : public ${baseClass} return; } } - LOG_WARN(${className}, __FUNCTION__ << + RH_WARN(this->_deviceLog, __FUNCTION__ << ": Unable to locate ${executeType} using pid '" << processId <<"'"); } @@ -351,7 +394,7 @@ class ${className} : public ${baseClass} // Grab the current hw_load_request struct loadRequestsPtr = getHwLoadRequests(); if (loadRequestsPtr == 
NULL) { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to get HwLoadRequest vector! Pointer is NULL"); continue; } @@ -370,7 +413,7 @@ class ${className} : public ${baseClass} (*cfPropsPtr)[iv].value >>= (*loadRequestsPtr)[iv]; } } else { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to convert HW_LOAD_REQUEST prop!"); continue; } @@ -381,7 +424,7 @@ class ${className} : public ${baseClass} // Grab the current hw_load_status struct statusVecPtr = getHwLoadStatuses(); if (statusVecPtr == NULL) { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to get HwLoadStatus vector! Pointer is NULL"); continue; } @@ -397,7 +440,7 @@ class ${className} : public ${baseClass} } updateAdminStates(); - LOG_DEBUG(${className}, __FUNCTION__ << ": Allocation Result: " << allocationSuccess); + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Allocation Result: " << allocationSuccess); return allocationSuccess; } @@ -421,7 +464,7 @@ class ${className} : public ${baseClass} id = capacities[ii].id; if (id == HW_LOAD_REQUEST_PROP()) { - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Deallocating hw_load_requests..."); // Attempt to Convert Any to unwrappable type @@ -438,7 +481,7 @@ class ${className} : public ${baseClass} (*cfPropsPtr)[iv].value >>= loadRequestsToRemove[iv]; } } else { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to convert HW_LOAD_REQUEST property"); continue; } @@ -446,7 +489,7 @@ class ${className} : public ${baseClass} // Grab the current hw_load_status struct statusVecPtr = getHwLoadStatuses(); if (statusVecPtr == NULL) { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to get HwLoadStatus vector! 
Pointer is NULL"); continue; } @@ -493,7 +536,7 @@ class ${className} : public ${baseClass} void setHwLoadRequestsPtr(HwLoadRequestVec* propPtr) { if (propPtr == NULL) { - LOG_ERROR(${className}, "CANNOT SET HW_LOAD_REQUESTS_PTR: PROPERTY IS NULL"); + RH_ERROR(this->_deviceLog, "CANNOT SET HW_LOAD_REQUESTS_PTR: PROPERTY IS NULL"); return; } _hwLoadRequestsPtr = propPtr; @@ -501,7 +544,7 @@ class ${className} : public ${baseClass} void setHwLoadStatusesPtr(HwLoadStatusVec* propPtr) { if (propPtr == NULL) { - LOG_ERROR(${className}, "CANNOT SET HW_LOAD_STATUSES_PTR: PROPERTY IS NULL"); + RH_ERROR(this->_deviceLog, "CANNOT SET HW_LOAD_STATUSES_PTR: PROPERTY IS NULL"); return; } _hwLoadStatusesPtr = propPtr; @@ -535,7 +578,7 @@ class ${className} : public ${baseClass} void* pHandle = dlopen(absPath.c_str(), RTLD_NOW); if (!pHandle) { char* errorMsg = dlerror(); - LOG_FATAL(${className}, __FUNCTION__ << + RH_FATAL(this->_deviceLog, __FUNCTION__ << ": Unable to open library '" << absPath.c_str() << "': " << errorMsg); return NULL; } @@ -551,7 +594,7 @@ class ${className} : public ${baseClass} for (size_t ii = 0; ii < combinedProps.length(); ii++) { std::string id(combinedProps[ii].id); std::string val = ossie::any_to_string(combinedProps[ii].value); - LOG_DEBUG(${className}, "ARGV[" << id << "]: " << val); + RH_DEBUG(this->_deviceLog, "ARGV[" << id << "]: " << val); } // Convert combined properties into ARGV/ARGC format @@ -576,7 +619,7 @@ class ${className} : public ${baseClass} void* fnPtr = dlsym(pHandle, symbol); if (!fnPtr) { char* errorMsg = dlerror(); - LOG_FATAL(${className}, __FUNCTION__ << + RH_FATAL(this->_deviceLog, __FUNCTION__ << ": Unable to find symbol '" << symbol << "': " << errorMsg); return NULL; } @@ -589,7 +632,7 @@ class ${className} : public ${baseClass} try { ${executeType}Ptr = generate${executeType.capitalize()}(argc, argv, constructPtr, libraryName); } catch (...) 
{ - LOG_FATAL(${className}, __FUNCTION__ << + RH_FATAL(this->_deviceLog, __FUNCTION__ << ": Unable to construct ${executeType} device: '" << argv[0] << "'"); } @@ -642,7 +685,7 @@ class ${className} : public ${baseClass} success |= applyHwLoadRequest(loadRequestVec[ii], loadStatusVec[availableStatusIndex]); usedStatusIndices[ii] = availableStatusIndex;; } else { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Device cannot be allocated against. No load capacity"); success = false; } @@ -721,7 +764,7 @@ class ${className} : public ${baseClass} { HwLoadStatusVec* statusVecPtr = getHwLoadStatuses(); if (statusVecPtr == NULL) { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to get HwLoadStatus vector! Pointer is NULL"); return false; } @@ -746,7 +789,7 @@ class ${className} : public ${baseClass} // Grab the current hw_load_status struct HwLoadStatusVec* statusVecPtr = getHwLoadStatuses(); if (statusVecPtr == NULL) { - LOG_ERROR(${className}, __FUNCTION__ << + RH_ERROR(this->_deviceLog, __FUNCTION__ << ": Unable to get HwLoadStatus vector! 
Pointer is NULL"); return; } @@ -764,7 +807,7 @@ class ${className} : public ${baseClass} if (strVecContainsStr(allRequesterIds, iter->first)) { continue; // Skip the running ${executeType}s } - LOG_DEBUG(${className}, __FUNCTION__ << + RH_DEBUG(this->_deviceLog, __FUNCTION__ << ": Locking device '" << ossie::corba::returnString(iter->second->identifier()) << "'"); iter->second->adminState(CF::Device::LOCKED); } diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/programmable/templates/resource.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/programmable/templates/resource.cpp index 022357305..0ee38c34e 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/programmable/templates/resource.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/programmable/templates/resource.cpp @@ -244,7 +244,7 @@ void ${className}::initialize() throw (CF::LifeCycle::InitializeError, CORBA::Sy ************************************************************************************************/ int ${className}::serviceFunction() { - LOG_DEBUG(${className}, "serviceFunction() example log message"); + RH_DEBUG(this->_baseLog, "serviceFunction() example log message"); return NOOP; } diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/port_impl.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/port_impl.cpp index 05d750c4f..92b643c4c 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/port_impl.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/port_impl.cpp @@ -27,6 +27,17 @@ #include "${component.userclass.header}" + +/****************************************** + * + * Logging: + * To log, use the _portLog member (not available in the constructor) + * + * For example, + * RH_DEBUG(_portLog, "this is a debug message"); + * + ******************************************/ + /*{% for portgen in component.portgenerators if portgen.hasImplementation() 
%}*/ // ---------------------------------------------------------------------------------------- // ${portgen.className()} definition diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource.cpp index 88a9d56b9..b2de95eb7 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource.cpp @@ -87,16 +87,18 @@ void ${className}::constructor() The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER For example, if this device has 5 physical - tuners, each an RX_DIGITIZER, then the code in the construct function should look like this: + tuners, 3 RX_DIGITIZER and 2 CHANNELIZER, then the code in the construct function + should look like this: - this->setNumChannels(5, "RX_DIGITIZER"); + this->addChannels(3, "RX_DIGITIZER"); + this->addChannels(2, "CHANNELIZER"); The incoming request for tuning contains a string describing the requested tuner type. The string for the request must match the string in the tuner status. /*{% endif %}*/ ***********************************************************************************/ /*{% if 'FrontendTuner' in component.implements %}*/ - this->setNumChannels(1, "RX_DIGITIZER"); + this->addChannels(1, "RX_DIGITIZER"); /*{% endif %}*/ } @@ -151,8 +153,7 @@ void ${className}::updateUsageState() Data is passed to the serviceFunction through by reading from input streams (BulkIO only). The input stream class is a port-specific class, so each port implementing the BulkIO interface will have its own type-specific input stream. - UDP multicast (dataSDDS and dataVITA49) and string-based (dataString, dataXML and - dataFile) do not support streams. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. 
The input stream from which to read can be requested with the getCurrentStream() method. The optional argument to getCurrentStream() is a floating point number that @@ -180,15 +181,18 @@ void ${className}::updateUsageState() // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out // The mapping between the port and the class is found // in the ${artifactType} base class header file - // The ${artifactType} class must have an output stream member; add to - // ${component.userclass.header}: - // bulkio::OutFloatStream outputStream; bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); if (!inputStream) { // No streams are available return NOOP; } + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + bulkio::ShortDataBlock block = inputStream.read(); if (!block) { // No data available // Propagate end-of-stream @@ -198,42 +202,40 @@ void ${className}::updateUsageState() return NOOP; } - short* inputData = block.data(); - std::vector outputData; - outputData.resize(block.size()); - for (size_t index = 0; index < block.size(); ++index) { - outputData[index] = (float) inputData[index]; - } - - // If there is no output stream open, create one - if (!outputStream) { - outputStream = dataFloat_out->createStream(block.sri()); - } else if (block.sriChanged()) { + if (block.sriChanged()) { // Update output SRI outputStream.sri(block.sri()); } - // Write to the output stream - outputStream.write(outputData, block.getTimestamps()); + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); - // Propagate end-of-stream - if (inputStream.eos()) { - outputStream.close(); + // Transform input data into output data + for 
(size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; } + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + return NORMAL; If working with complex data (i.e., the "mode" on the SRI is set to true), the data block's complex() method will return true. Data blocks - provide functions that return the correct interpretation of the data - buffer and number of complex elements: + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: if (block.complex()) { - std::complex* data = block.cxdata(); - for (size_t index = 0; index < block.cxsize(); ++index) { - data[index] = std::abs(data[index]); + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; } - outputStream.write(data, block.cxsize(), bulkio::time::utils::now()); + outputStream.write(outData, block.getStartTime()); } Interactions with non-BULKIO ports are left up to the ${artifactType} developer's discretion @@ -335,18 +337,29 @@ void ${className}::updateUsageState() void ${className}::scaleChanged(float oldValue, float newValue) { - LOG_DEBUG(${className}, "scaleValue changed from" << oldValue << " to " << newValue); + RH_DEBUG(this->_baseLog, "scaleValue changed from" << oldValue << " to " << newValue); } void ${className}::statusChanged(const status_struct& oldValue, const status_struct& newValue) { - LOG_DEBUG(${className}, "status changed"); + RH_DEBUG(this->_baseLog, "status changed"); } //Add to ${component.userclass.header} void scaleChanged(float oldValue, float newValue); void statusChanged(const status_struct& oldValue, const status_struct& newValue); - + + Logging: + + The member _baseLog is a logger whose base name is the component (or device) instance name. 
+ New logs should be created based on this logger name. + + To create a new logger, + rh_logger::LoggerPtr my_logger = this->_baseLog->getChildLogger("foo"); + + Assuming component instance name abc_1, my_logger will then be created with the + name "abc_1.user.foo". + /*{% if component is device %}*/ Allocation: @@ -378,7 +391,7 @@ void ${className}::updateUsageState() ************************************************************************************************/ int ${className}::serviceFunction() { - LOG_DEBUG(${className}, "serviceFunction() example log message"); + RH_DEBUG(this->_baseLog, "serviceFunction() example log message"); return NOOP; } diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource_base.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource_base.cpp index 99d336bc1..4d56d7b8b 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource_base.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource_base.cpp @@ -88,6 +88,8 @@ ${baseClass}(uuid, label), ThreadedComponent() { + setThreadName(label); + /*{% block constructorBody %}*/ loadProperties(); /*{% for port in component.ports %}*/ @@ -95,6 +97,7 @@ /*{% endif %}*/ ${port.cppname} = new ${port.constructor}; + ${port.cppname}->setLogger(this->_baseLog->getChildLogger("${port.name}", "ports")); /*{% if port.hasDescription %}*/ addPort("${port.name}", "${port.description}", ${port.cppname}); /*{% else %}*/ @@ -120,7 +123,7 @@ { /*{% block destructorBody %}*/ /*{% for port in component.ports %}*/ - delete ${port.cppname}; + ${port.cppname}->_remove_ref(); ${port.cppname} = 0; /*{% endfor %}*/ /*{% endblock %}*/ diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource_base.h b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource_base.h index a1bc4d598..3bdf6f382 100644 --- 
a/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource_base.h +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/component/pull/templates/resource_base.h @@ -50,6 +50,17 @@ /*# Allow child templates to add #define statements #*/ /*{% endblock %}*/ +/*{% from "properties/properties.cpp" import enumvalues %}*/ +/*{% for prop in component.properties if prop.enums %}*/ +/*{% if loop.first %}*/ +namespace enums { +/*{% endif %}*/ + ${enumvalues(prop)|indent(4)} +/*{% if loop.last %}*/ +} + +/*{% endif %}*/ +/*{% endfor %}*/ /*{% block classPrototype %}*/ class ${className} : public ${component.superclasses|join(', public ', attribute='name')}, protected ThreadedComponent /*{% endblock %}*/ diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/generic.py b/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/generic.py index 9862b1e7b..0d815e71e 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/generic.py +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/generic.py @@ -231,11 +231,37 @@ def dependencies(self): def loader(self): return jinja2.PackageLoader(__package__) + def hasOut(self): + for op in self.idl.operations(): + for p in op.params: + if p.direction == 'out': + return True + return False + + def hasInOut(self): + for op in self.idl.operations(): + for p in op.params: + if p.direction == 'inout': + return True + return False + def operations(self): for op in self.idl.operations(): + _out = False + for p in op.params: + if p.direction == 'out': + _out = True + break + _inout = False + for p in op.params: + if p.direction == 'inout': + _inout = True + break yield {'name': op.name, 'arglist': ', '.join('%s %s' % (argumentType(p.paramType,p.direction), p.name) for p in op.params), 'argnames': ', '.join(p.name for p in op.params), + 'hasout': _out, + 'hasinout': _inout, 'temporary': temporaryType(op.returnType), 'initializer': temporaryValue(op.returnType), 'returns': baseReturnType(op.returnType)} @@ -243,9 +269,13 @@ def 
operations(self): # for attributes of an interface...provide manipulator methods # for attr in self.idl.attributes(): + readwrite_attr = False + if not attr.readonly: + readwrite_attr = True yield {'name': attr.name, 'arglist': '', 'argnames': '', + 'readwrite_attr': readwrite_attr, 'temporary': temporaryType(attr.attrType), 'initializer': temporaryValue(attr.attrType), 'returns': baseReturnType(attr.attrType)} diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/templates/generic.uses.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/templates/generic.uses.cpp index 95248181a..6c6baf6a9 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/templates/generic.uses.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/templates/generic.uses.cpp @@ -32,10 +32,38 @@ Port_Uses_base_impl(port_name) { } /*{% for operation in portgen.operations() %}*/ +//% set hasreturn = operation.returns != 'void' +/*{% if hasreturn %}*/ +/*{% set returnstate='true' %}*/ +/*{% else %}*/ +/*{% set returnstate='false' %}*/ +/*{% endif %}*/ +//% set hasout = operation.hasout +/*{% if hasout %}*/ +/*{% set _hasout='true' %}*/ +/*{% else %}*/ +/*{% set _hasout='false' %}*/ +/*{% endif %}*/ +//% set hasinout = operation.hasinout +/*{% if hasinout %}*/ +/*{% set _hasinout='true' %}*/ +/*{% else %}*/ +/*{% set _hasinout='false' %}*/ +/*{% endif %}*/ +/*{% if operation.readwrite_attr %}*/ +${operation.returns} ${classname}::${operation.name}() { + return _get_${operation.name}(""); +} -${operation.returns} ${classname}::${operation.name}(${operation.arglist}) +${operation.returns} ${classname}::_get_${operation.name}(const std::string __connection_id__) +/*{% else %}*/ +/*{% if operation.arglist %}*/ +${operation.returns} ${classname}::${operation.name}(${operation.arglist}, const std::string __connection_id__) +/*{% else %}*/ +${operation.returns} ${classname}::${operation.name}(const std::string __connection_id__) +/*{% endif %}*/ +/*{% endif %}*/ { -//% set hasreturn = 
operation.returns != 'void' /*{% if hasreturn %}*/ ${operation.temporary} retval${' = %s' % operation.initializer if operation.initializer}; /*{% endif %}*/ @@ -43,11 +71,14 @@ Port_Uses_base_impl(port_name) boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in - if (active) { - for (i = outConnections.begin(); i != outConnections.end(); ++i) { + __evaluateRequestBasedOnConnections(__connection_id__, ${returnstate}, ${_hasinout}, ${_hasout}); + if (this->active) { + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if (not __connection_id__.empty() and __connection_id__ != (*i).second) + continue; try { ${"retval = " if hasreturn}((*i).first)->${operation.name}(${operation.argnames}); - } catch(...) { + } catch (...) { LOG_ERROR(${classname},"Call to ${operation.name} by ${classname} failed"); throw; } diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/templates/generic.uses.h b/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/templates/generic.uses.h index a939f63cd..f5edcc749 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/templates/generic.uses.h +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/ports/templates/generic.uses.h @@ -27,9 +27,62 @@ class ${classname} : public Port_Uses_base_impl, public POA_ExtendedCF::Queryabl ~${classname}(); /*{% for op in portgen.operations() %}*/ - ${op.returns} ${op.name}(${op.arglist}); +/*{% if op.readwrite_attr %}*/ + ${op.returns} ${op.name}(); + ${op.returns} _get_${op.name}(const std::string __connection_id__); +/*{% else %}*/ +/*{% if op.arglist %}*/ + ${op.returns} ${op.name}(${op.arglist}, const std::string __connection_id__ = ""); +/*{% else %}*/ + ${op.returns} ${op.name}(const std::string __connection_id__ = ""); +/*{% endif %}*/ +/*{% endif %}*/ /*{% endfor %}*/ + std::vector getConnectionIds() + { + std::vector retval; + for (unsigned int i = 0; i < outConnections.size(); i++) { + 
retval.push_back(outConnections[i].second); + } + return retval; + }; + + void __evaluateRequestBasedOnConnections(const std::string &__connection_id__, bool returnValue, bool inOut, bool out) { + if (__connection_id__.empty() and (this->outConnections.size() > 1)) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", + getConnectionIds()); + } + } + if (this->outConnections.empty()) { + if (out or inOut or returnValue) { + throw redhawk::PortCallError("No connections available.", std::vector()); + } else { + if (not __connection_id__.empty()) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + if ((not __connection_id__.empty()) and (not this->outConnections.empty())) { + bool foundConnection = false; + std::vector < std::pair < ${vartype}, std::string > >::iterator i; + for (i = this->outConnections.begin(); i != this->outConnections.end(); ++i) { + if ((*i).second == __connection_id__) { + foundConnection = true; + break; + } + } + if (not foundConnection) { + std::ostringstream eout; + eout<<"The requested connection id ("<<__connection_id__<<") does not exist."; + throw redhawk::PortCallError(eout.str(), getConnectionIds()); + } + } + } + ExtendedCF::UsesConnectionSequence * connections() { boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/properties/mapping.py b/redhawk-codegen/redhawk/codegen/jinja/cpp/properties/mapping.py index 25b9151c3..21ad00e48 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/properties/mapping.py +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/properties/mapping.py @@ -19,11 +19,38 @@ # from redhawk.codegen.lang import cpp -from 
redhawk.codegen.lang.idl import IDLInterface +from redhawk.codegen.lang.idl import IDLInterface, CorbaTypes from redhawk.codegen.jinja.mapping import PropertyMapper +_formats = { + CorbaTypes.OCTET : 'o', + CorbaTypes.BOOLEAN : 'b', + CorbaTypes.CHAR : 'c', + CorbaTypes.SHORT : 'h', + CorbaTypes.USHORT : 'H', + CorbaTypes.LONG : 'i', + CorbaTypes.ULONG : 'I', + CorbaTypes.LONGLONG : 'l', + CorbaTypes.ULONGLONG : 'L', + CorbaTypes.FLOAT : 'f', + CorbaTypes.DOUBLE : 'd', + CorbaTypes.STRING : 's', + CorbaTypes.OBJREF : 's', + CorbaTypes.UTCTIME : 'u', +} + class CppPropertyMapper(PropertyMapper): + def _getSimpleFormat(self, prop, isSequence): + format = _formats[prop.type()] + if prop.isComplex(): + format = '2' + format; + if isSequence: + format = '[' + format + ']' + if prop.isOptional(): + format += '?' + return format + def mapProperty(self, prop): cppprop = {} if prop.hasName(): @@ -39,20 +66,39 @@ def mapSimpleProperty(self, prop): cppprop['isOptional'] = prop.isOptional() cppprop['cpptype'] = cpp.cppType(prop.type(), prop.isComplex()) if prop.hasValue(): - cppprop['cppvalue'] = cpp.literal(prop.value(), + _prepend = '' + _append = '' + if prop.type() == 'utctime': + _prepend = '"' + _append = '"' + cppprop['cppvalue'] = _prepend+cpp.literal(prop.value(), prop.type(), - prop.isComplex()) + prop.isComplex())+_append + cppprop['format'] = self._getSimpleFormat(prop, False) return cppprop + def mapEnumeration(self, prop, label, value): + cppenum = {} + cppenum['cpplabel'] = cpp.identifier(label) + cppenum['cppvalue'] = cpp.literal(value, prop.type(), prop.isComplex()) + return cppenum + def mapSimpleSequenceProperty(self, prop): cppprop = self.mapProperty(prop) cppprop['cpptype'] = cpp.sequenceType(prop.type(), prop.isComplex()) + cppprop['iscomplex'] = prop.isComplex() cppprop['isOptional'] = prop.isOptional() if prop.hasValue(): - cppprop['cppvalues'] = [cpp.literal(v, + _prepend = '' + _append = '' + if prop.type() == 'utctime': + _prepend = 
'redhawk::time::utils::convert("' + _append = '")' + cppprop['cppvalues'] = [_prepend+cpp.literal(v, prop.type(), - prop.isComplex()) + prop.isComplex())+_append for v in prop.value()] + cppprop['format'] = self._getSimpleFormat(prop, True) return cppprop def mapStructProperty(self, prop, fields): @@ -60,6 +106,7 @@ def mapStructProperty(self, prop, fields): typename = self.getStructPropertyType(prop) cppprop['cpptype'] = typename cppprop['cppvalue'] = typename + '()' + cppprop['format'] = ''.join(f['format'] for f in fields) return cppprop def getStructPropertyType(self, prop): @@ -79,7 +126,7 @@ def mapStructValue(self, structdef, value): if type(value[identifier]) == list: newval[identifier] = [] for val in value[identifier]: - newval[identifier].append(cpp.literal(val, field['type'])) + newval[identifier].append(cpp.literal(val, field['type'], field['iscomplex'])) else: - newval[identifier] = cpp.literal(value[identifier], field['type']) + newval[identifier] = cpp.literal(value[identifier], field['type'], field['iscomplex']) return newval diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/properties/templates/properties.cpp b/redhawk-codegen/redhawk/codegen/jinja/cpp/properties/templates/properties.cpp index c8390644a..f3427828d 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/properties/templates/properties.cpp +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/properties/templates/properties.cpp @@ -72,7 +72,19 @@ addProperty(${prop.cppname}, /*{%- endmacro %}*/ /*{% macro structdef(struct) %}*/ -/*{% from "properties/properties.cpp" import initsequence %}*/ +/*{% for field in struct.fields if field.enums %}*/ +/*{% if loop.first %}*/ +namespace enums { + // Enumerated values for ${struct.identifier} + namespace ${struct.cppname} { +/*{% endif %}*/ + ${enumvalues(field)|indent(8)} +/*{% if loop.last %}*/ + } +} + +/*{% endif %}*/ +/*{% endfor %}*/ struct ${struct.cpptype}${' : public '+struct.baseclass if struct.baseclass} { ${struct.cpptype} ()${' : 
'+struct.baseclass+'()' if struct.baseclass} { @@ -87,11 +99,15 @@ struct ${struct.cpptype}${' : public '+struct.baseclass if struct.baseclass} { /*{% endif %}*/ /*{% endif %}*/ /*{% endfor %}*/ - }; + } static std::string getId() { return std::string("${struct.identifier}"); - }; + } + + static const char* getFormat() { + return "${struct.format}"; + } /*{% for field in struct.fields if not field.inherited %}*/ /*{% if loop.first %}*/ @@ -187,3 +203,12 @@ inline bool operator!= (const ${struct.cpptype}& s1, const ${struct.cpptype}& s2 return !(s1==s2); } /*{%- endmacro %}*/ + +/*{% macro enumvalues(prop) %}*/ +// Enumerated values for ${prop.identifier} +namespace ${prop.cppname} { +/*{% for enum in prop.enums %}*/ + static const ${prop.cpptype} ${enum.cpplabel} = ${enum.cppvalue}; +/*{% endfor %}*/ +} +/*{%- endmacro %}*/ diff --git a/redhawk-codegen/redhawk/codegen/jinja/cpp/template.py b/redhawk-codegen/redhawk/codegen/jinja/cpp/template.py index fa61f1b68..c1a5f4b99 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/cpp/template.py +++ b/redhawk-codegen/redhawk/codegen/jinja/cpp/template.py @@ -23,6 +23,10 @@ from redhawk.codegen.jinja.template import TemplateFile class CppTemplate(TemplateFile): + COMMENT_START = '/*' + COMMENT_LINE = ' *' + COMMENT_END = ' */' + def options(self): return { 'trim_blocks': True, diff --git a/redhawk-codegen/redhawk/codegen/jinja/environment.py b/redhawk-codegen/redhawk/codegen/jinja/environment.py index 17429496b..ee7055b78 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/environment.py +++ b/redhawk-codegen/redhawk/codegen/jinja/environment.py @@ -50,6 +50,7 @@ def __init__(self, *args, **kwargs): self.tests['simplesequence'] = tests.is_simplesequence self.tests['struct'] = tests.is_struct self.tests['structsequence'] = tests.is_structsequence + self.tests['enumerated'] = tests.is_enumerated self.tests['provides'] = tests.is_provides self.tests['uses'] = tests.is_uses self.tests['bidir'] = tests.is_bidir diff --git 
a/redhawk-codegen/redhawk/codegen/jinja/generator.py b/redhawk-codegen/redhawk/codegen/jinja/generator.py index 30a159290..6036fb9fb 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/generator.py +++ b/redhawk-codegen/redhawk/codegen/jinja/generator.py @@ -21,8 +21,9 @@ import os import sys import stat +import tempfile +import shutil -from redhawk.codegen.lang.idl import IDLInterface from redhawk.codegen import utils from redhawk.codegen import versions @@ -35,6 +36,7 @@ def __init__( overwrite=False, crcs={}, variant="", + header=None, **options): self.outputdir = outputdir @@ -73,6 +75,9 @@ def __init__( else: self.crcs[filename] = crcs[filename] + # Save the header (if given) + self.header = header + def parseopts(self): """ Parse additional options passed to the constructor. Subclasses should @@ -145,6 +150,10 @@ def fileinfo(self, softpkg): return files + def _addHeader(self, gen, header): + """ + """ + def generate(self, softpkg, *filenames): loader = self.loader(softpkg) @@ -189,8 +198,10 @@ def generate(self, softpkg, *filenames): env = CodegenEnvironment(loader=loader, **template.options()) env.filters.update(template.filters()) tmpl = env.get_template(template.template) - outfile = open(filename, 'w') - try: + + # Initially, write the output to a temporary file to avoid trashing + # the original file if the template is malformed + with tempfile.NamedTemporaryFile() as outfile: # Start with the template-specific context, then add the mapped # component and a reference to this generator with known names. context = template.context() @@ -199,18 +210,45 @@ def generate(self, softpkg, *filenames): context['versions'] = versions # Evaluate the template in streaming mode (rather than all at - # once), dumping to the output file. 
- tmpl.stream(**context).dump(outfile) + # once) + gen = tmpl.generate(**context) + if self.header: + # Define a generator function to insert the header at the + # top of the file + def generate(gen, header): + first = True + for chunk in gen: + if first: + # Take "shebang" into account for executable + # scripts + if chunk.startswith('#!'): + line, chunk = chunk.split('\n', 1) + yield line + '\n' + yield header + first = False + yield chunk + + # Wrap the template's generator with the header insertion + # generator + gen = generate(gen, template.comment(self.header)) + + # Write the stream to the output file + for chunk in gen: + outfile.write(chunk) + # Add a trailing newline to work around a Jinja bug. outfile.write('\n') + # Now that generation has succeeded, flush the temporary file + # to ensure the contents are completer, and copy to the target + # location + outfile.file.flush() + shutil.copy(outfile.name, filename) + # Set the executable bit, if requested by the template. if template.executable: - fd = outfile.fileno() - st = os.fstat(fd) + st = os.stat(filename) os.chmod(filename, st.st_mode|stat.S_IEXEC) - finally: - outfile.close() generated.append((template.filename, action)) diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/component/base/templates/properties.java b/redhawk-codegen/redhawk/codegen/jinja/java/component/base/templates/properties.java index d875ce1f0..18518530a 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/component/base/templates/properties.java +++ b/redhawk-codegen/redhawk/codegen/jinja/java/component/base/templates/properties.java @@ -173,3 +173,26 @@ public String getId() { ${structsequence(prop)} /*{% endif %}*/ /*{% endmacro %}*/ + +/*{% macro enumvalues(prop) %}*/ +/*{% if prop is structsequence %}*/ +/*{% set prop = prop.structdef %}*/ +/*{% endif %}*/ +/** + * Enumerated values for ${prop.identifier} + */ +public static class ${prop.javaname} { +/*{% if prop is struct %}*/ +/*{% for field in prop.fields if 
field.enums %}*/ +/*{% if not loop.first %}*/ + +/*{% endif %}*/ + ${enumvalues(field)|indent(4)} +/*{% endfor %}*/ +/*{% else %}*/ +/*{% for enum in prop.enums %}*/ + public static final ${enum.javatype} ${enum.javalabel} = ${enum.javavalue}; +/*{% endfor %}*/ +/*{% endif %}*/ +} +/*{% endmacro %}*/ diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/generator.py b/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/generator.py index db16eda4e..6c1565e88 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/generator.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/generator.py @@ -49,6 +49,7 @@ def map(self, softpkg): if prop['javaname'] in ('device_kind', 'device_model', 'frontend_tuner_allocation', 'frontend_listener_allocation', + 'frontend_scanner_allocation', 'frontend_tuner_status'): prop['inherited'] = True return component diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/mapping.py b/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/mapping.py index f044d13c8..e7d208449 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/mapping.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/mapping.py @@ -49,7 +49,9 @@ def getImplementedInterfaces(softpkg): # Ensure that parent interfaces also gets added (so, e.g., a device # with a DigitalTuner should also report that it's an AnalogTuner # and FrontendTuner) - inherits = { 'DigitalTuner': ('AnalogTuner', 'FrontendTuner'), + inherits = { 'DigitalScanningTuner': ('ScanningTuner', 'DigitalTuner', 'AnalogTuner', 'FrontendTuner'), + 'AnalogScanningTuner': ('ScanningTuner', 'AnalogTuner', 'FrontendTuner'), + 'DigitalTuner': ('AnalogTuner', 'FrontendTuner'), 'AnalogTuner': ('FrontendTuner',) } for port in softpkg.providesPorts(): @@ -78,6 +80,9 @@ def superclass(self,softpkg): if sc['name'] == 'ThreadedDevice': sc['name'] = 'frontend.FrontendTunerDevice' sc['header'] 
= '' + if 'ScanningTuner' in deviceinfo: + if sc['name'] == 'frontend.FrontendTunerDevice': + sc['name'] = 'frontend.FrontendScanningTunerDevice' return sc diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/templates/resource.java b/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/templates/resource.java index b662e17a2..891f32b3b 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/templates/resource.java +++ b/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/templates/resource.java @@ -64,9 +64,37 @@ public void deviceDisable(frontend_tuner_status_struct_struct fts, int tuner_id) fts.enabled.setValue(false); return; } +/*{% if 'ScanningTuner' in component.implements %}*/ + public boolean deviceSetTuningScan(final frontend.FETypes.frontend_tuner_allocation_struct request, final frontend.FETypes.frontend_scanner_allocation_struct scan_request, frontend_tuner_status_struct_struct fts, int tuner_id) + { + /************************************************************ + + This function is called when the allocation request contains a scanner allocation + + modify fts, which corresponds to this.frontend_tuner_status.getValue().get(tuner_id) + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets the tolerance requirement. 
For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth.setValue(request.bandwidth.getValue()); + fts.center_frequency.setValue(request.center_frequency.getValue()); + fts.sample_rate.setValue(request.sample_rate.getValue()); + + return true if the tuning succeeded, and false if it failed + ************************************************************/ + System.out.println("deviceSetTuning(): Evaluate whether or not a tuner is added *********"); + return true; + } +/*{% endif %}*/ public boolean deviceSetTuning(final frontend.FETypes.frontend_tuner_allocation_struct request, frontend_tuner_status_struct_struct fts, int tuner_id) { /************************************************************ +/*{% if 'ScanningTuner' in component.implements %}*/ + + This function is called when the allocation request does not contain a scanner allocation + +/*{% endif %}*/ modify fts, which corresponds to this.frontend_tuner_status.getValue().get(tuner_id) The bandwidth, center frequency, and sampling rate that the hardware was actually tuned @@ -131,9 +159,9 @@ public void setTunerCenterFrequency(final String allocation_id, double freq) thr { int idx = getTunerMapping(allocation_id); if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); - if(allocation_id != getControlAllocationId(idx)) + if (!allocation_id.equals(getControlAllocationId(idx))) throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); - if (freq<0) throw new FRONTEND.BadParameterException(); + if (freq<0) throw new FRONTEND.BadParameterException("Center frequency cannot be less than 0"); // set hardware to new value. 
Raise an exception if it's not possible this.frontend_tuner_status.getValue().get(idx).center_frequency.setValue(freq); } @@ -149,9 +177,9 @@ public void setTunerBandwidth(final String allocation_id, double bw) throws FRON { int idx = getTunerMapping(allocation_id); if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); - if(allocation_id != getControlAllocationId(idx)) + if (!allocation_id.equals(getControlAllocationId(idx))) throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); - if (bw<0) throw new FRONTEND.BadParameterException(); + if (bw<0) throw new FRONTEND.BadParameterException("Bandwidth cannot be less than 0"); // set hardware to new value. Raise an exception if it's not possible this.frontend_tuner_status.getValue().get(idx).bandwidth.setValue(bw); } @@ -197,7 +225,7 @@ public void setTunerEnable(final String allocation_id, boolean enable) throws FR { int idx = getTunerMapping(allocation_id); if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); - if(allocation_id != getControlAllocationId(idx)) + if (!allocation_id.equals(getControlAllocationId(idx))) throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); // set hardware to new value. 
Raise an exception if it's not possible this.frontend_tuner_status.getValue().get(idx).enabled.setValue(enable); @@ -216,9 +244,9 @@ public void setTunerOutputSampleRate(final String allocation_id, double sr) thro { int idx = getTunerMapping(allocation_id); if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); - if(allocation_id != getControlAllocationId(idx)) + if (!allocation_id.equals(getControlAllocationId(idx))) throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); - if (sr<0) throw new FRONTEND.BadParameterException(); + if (sr<0) throw new FRONTEND.BadParameterException("Sample rate cannot be less than 0"); // set hardware to new value. Raise an exception if it's not possible this.frontend_tuner_status.getValue().get(idx).sample_rate.setValue(sr); } @@ -230,6 +258,33 @@ public double getTunerOutputSampleRate(final String allocation_id) throws FRONTE return frontend_tuner_status.getValue().get(idx).sample_rate.getValue(); } /*{% endif %}*/ +/*{% if 'ScanningTuner' in component.implements %}*/ + + public FRONTEND.ScanningTunerPackage.ScanStatus getScanStatus(String allocation_id) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + FRONTEND.ScanningTunerPackage.ScanStatus status = null; + return status; + } + + public void setScanStartTime(String allocation_id, BULKIO.PrecisionUTCTime start_time) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if (!allocation_id.equals(getControlAllocationId(idx))) + throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); + } + + public void setScanStrategy(String allocation_id, 
FRONTEND.ScanningTunerPackage.ScanStrategy scan_strategy) throws FRONTEND.FrontendException, FRONTEND.BadParameterException + { + int idx = getTunerMapping(allocation_id); + if (idx < 0) throw new FRONTEND.FrontendException("Invalid allocation id"); + if (!allocation_id.equals(getControlAllocationId(idx))) + throw new FRONTEND.FrontendException(("ID "+allocation_id+" does not have authorization to modify the tuner")); + } + +/*{% endif %}*/ /*{% if 'GPS' in component.implements %}*/ public FRONTEND.GPSInfo get_gps_info(final String port_name) diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/templates/resource_base.java b/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/templates/resource_base.java index 1d9139ac2..f2857958e 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/templates/resource_base.java +++ b/redhawk-codegen/redhawk/codegen/jinja/java/component/frontend/templates/resource_base.java @@ -45,6 +45,12 @@ /*{% if component.hasfrontendprovides %}*/ /*{% set implementedClasses = "" %}*/ /*{% for port in component.ports if port is provides %}*/ +/*{% if port.javatype == "frontend.InAnalogScanningTunerPort" and "AnalogScanningTunerDelegate" not in implementedClasses %}*/ +/*{% set implementedClasses = implementedClasses + ",AnalogScanningTunerDelegate" %}*/ +/*{% endif %}*/ +/*{% if port.javatype == "frontend.InDigitalScanningTunerPort" and "DigitalScanningTunerDelegate" not in implementedClasses %}*/ +/*{% set implementedClasses = implementedClasses + ",DigitalScanningTunerDelegate" %}*/ +/*{% endif %}*/ /*{% if port.javatype == "frontend.InAnalogTunerPort" and "AnalogTunerDelegate" not in implementedClasses %}*/ /*{% set implementedClasses = implementedClasses + ",AnalogTunerDelegate" %}*/ /*{% endif %}*/ @@ -80,30 +86,6 @@ public abstract class ${classname} extends ${superClass} /*{% block extensions %}*/ /*{% if 'FrontendTuner' in component.implements %}*/ - /* This sets the number of entries 
in the frontend_tuner_status struct sequence property - * as well as the tuner_allocation_ids vector. Call this function during initialization - */ - public void setNumChannels(int num) - { - this.setNumChannels(num, "RX_DIGITIZER"); - } - - /* This sets the number of entries in the frontend_tuner_status struct sequence property - * as well as the tuner_allocation_ids vector. Call this function during initialization - */ - public void setNumChannels(int num, String tuner_type) - { - frontend_tuner_status.setValue(new ArrayList()); - tuner_allocation_ids = new ArrayList.tunerAllocationIdsStruct>(); - for (int idx=0;idx listeners = new HashMap(); public void frontendTunerStatusChanged(final List oldValue, final List newValue) @@ -271,7 +253,7 @@ public void matchAllocationIdToStreamId(final String allocation_id, final String connection_descriptor_struct tmp = new connection_descriptor_struct(); /*{% for port in component.ports if port.multiout %}*/ tmp.connection_id.setValue(allocation_id); - tmp.port_name.setValue("${port.javaname}"); + tmp.port_name.setValue("${port.name}"); tmp.stream_id.setValue(stream_id); this.connectionTable.getValue().add(tmp); /*{% endfor %}*/ diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/component/pull/templates/resource.java b/redhawk-codegen/redhawk/codegen/jinja/java/component/pull/templates/resource.java index 88ba53cf3..9000c7827 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/component/pull/templates/resource.java +++ b/redhawk-codegen/redhawk/codegen/jinja/java/component/pull/templates/resource.java @@ -26,6 +26,7 @@ package ${component.package}; import java.util.Properties; +import org.ossie.component.RHLogger; /*{% block mainadditionalimports %}*/ /*# Allow for child class imports #*/ /*{% endblock %}*/ @@ -69,7 +70,7 @@ public class ${classname} extends ${baseclass} { * //Add the following method to the class: * private void scaleValueChanged(Float oldValue, Float newValue) * { - * logger.debug("Changed 
scaleValue " + oldValue + " to " + newValue); + * _baseLog.debug("Changed scaleValue " + oldValue + " to " + newValue); * } * * The recommended practice is for the implementation of valueChanged() to @@ -191,22 +192,24 @@ public class ${classname} extends ${baseclass} { public void constructor() { /*{% if 'FrontendTuner' in component.implements %}*/ - /************************************************************************** + /************************************************************************** - For a tuner device, the structure frontend_tuner_status needs to match the number - of tuners that this device controls and what kind of device it is. - The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER + For a tuner device, the structure frontend_tuner_status needs to match the number + of tuners that this device controls and what kind of device it is. + The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER - For example, if this device has 5 physical - tuners, each an RX_DIGITIZER, then the code in the construct function should look like this: + For example, if this device has 5 physical + tuners, 3 RX_DIGITIZER and 2 CHANNELIZER, then the code in the construct function + should look like this: - this.setNumChannels(5, "RX_DIGITIZER"); + this.addChannels(3, "RX_DIGITIZER"); + this.addChannels(2, "CHANNELIZER"); - The incoming request for tuning contains a string describing the requested tuner - type. The string for the request must match the string in the tuner status. + The incoming request for tuning contains a string describing the requested tuner + type. The string for the request must match the string in the tuner status. 
- **************************************************************************/ - this.setNumChannels(1, "RX_DIGITIZER"); + **************************************************************************/ + this.addChannels(1, "RX_DIGITIZER"); /*{% endif %}*/ } @@ -313,6 +316,17 @@ protected void updateUsageState() * type. The standard Java type coercion rules apply (e.g., truncation * of floating point values when converting to integer types). * + * Logging: + * + * The member _baseLog is a logger whose base name is the component (or device) instance name. + * New logs should be created based on this logger name. + * + * To create a new logger, + * RHLogger my_logger = this._baseLog.getChildLogger("foo"); + * + * Assuming component instance name abc_1, my_logger will then be created with the + * name "abc_1.user.foo". + * * Example: * * This example assumes that the ${artifactType} has two ports: @@ -345,7 +359,7 @@ protected void updateUsageState() * */ protected int serviceFunction() { - logger.debug("serviceFunction() example log message"); + _baseLog.debug("serviceFunction() example log message"); return NOOP; } diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/component/pull/templates/resource_base.java b/redhawk-codegen/redhawk/codegen/jinja/java/component/pull/templates/resource_base.java index 0d90fdbc4..1adc5136d 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/component/pull/templates/resource_base.java +++ b/redhawk-codegen/redhawk/codegen/jinja/java/component/pull/templates/resource_base.java @@ -50,6 +50,7 @@ /*{% endif %}*/ /*{% endfor %}*/ import java.util.Properties; +import org.ossie.component.RHLogger; import org.apache.log4j.Logger; @@ -116,6 +117,19 @@ public abstract class ${classname} extends ${superClass} { public final static Logger logger = Logger.getLogger(${classname}.class.getName()); /*{% import "base/properties.java" as properties with context %}*/ +/*{% for prop in component.properties if prop is enumerated %}*/ +/*{% if 
loop.first %}*/ + /** + * Enumerated values for properties + */ + public static class enums { +/*{% endif %}*/ + ${properties.enumvalues(prop)|indent(8)} +/*{% if loop.last %}*/ + } +/*{% endif %}*/ + +/*{% endfor %}*/ /*{% for prop in component.properties %}*/ /*{% if not prop.inherited %}*/ ${properties.create(prop)|indent(4)} @@ -150,7 +164,11 @@ public abstract class ${classname} extends ${superClass} { */ public ${classname}() { +/*{% if 'FrontendTuner' in component.implements %}*/ + super(frontend_tuner_status_struct_struct.class); +/*{% else %}*/ super(); +/*{% endif %}*/ setLogger( logger, ${classname}.class.getName() ); @@ -204,6 +222,12 @@ public void valueChanged (List oldValue, List oldValue, List newValue) diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/ports/burstio.py b/redhawk-codegen/redhawk/codegen/jinja/java/ports/burstio.py index 32be05597..f68fb09ab 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/ports/burstio.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/ports/burstio.py @@ -47,11 +47,5 @@ def className(self): classname += 'Out' return classname - def start(self): - return 'start()' - - def stop(self): - return 'stop()' - def supportsMultiOut(self): return (self.direction == 'uses') diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/ports/generator.py b/redhawk-codegen/redhawk/codegen/jinja/java/ports/generator.py index 0caf49d8d..62befcda3 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/ports/generator.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/ports/generator.py @@ -53,12 +53,6 @@ def poaClass(self): def _ctorArgs(self, name): return tuple() - def start(self): - return None - - def stop(self): - return None - def constructor(self, name): return '%s(%s)' % (self.className(), ', '.join(self._ctorArgs(name))) diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/ports/generic.py b/redhawk-codegen/redhawk/codegen/jinja/java/ports/generic.py index 96667651d..bd1d54911 100644 --- 
a/redhawk-codegen/redhawk/codegen/jinja/java/ports/generic.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/ports/generic.py @@ -129,6 +129,13 @@ class GenericPortGenerator(JavaPortGenerator): def loader(self): return jinja2.PackageLoader(__package__) +class GenericProvidesPortGenerator(GenericPortGenerator): + def _implementation(self): + return JavaTemplate('generic.provides.java') + + def _ctorArgs(self, name): + return ('this', java.stringLiteral(name)) + def operations(self): for op in self.idl.operations(): yield {'name': op.name, @@ -148,14 +155,6 @@ def operations(self): 'arglist': baseType(attr.attrType)+ ' data', 'argnames': ('data',), 'returns': 'void'} - - -class GenericProvidesPortGenerator(GenericPortGenerator): - def _implementation(self): - return JavaTemplate('generic.provides.java') - - def _ctorArgs(self, name): - return ('this', java.stringLiteral(name)) class GenericUsesPortGenerator(GenericPortGenerator): def _implementation(self): @@ -163,3 +162,57 @@ def _implementation(self): def _ctorArgs(self, name): return (java.stringLiteral(name),) + + def hasOut(self): + for op in self.idl.operations(): + for p in op.params: + if p.direction == 'out': + return True + return False + + def hasInOut(self): + for op in self.idl.operations(): + for p in op.params: + if p.direction == 'inout': + return True + return False + + def operations(self): + for op in self.idl.operations(): + _out = False + for p in op.params: + if p.direction == 'out': + _out = True + break + _inout = False + for p in op.params: + if p.direction == 'inout': + _inout = True + break + _raises = [baseType(r) for r in op.raises] + _raises.append('PortCallError') + yield {'name': op.name, + 'arglist': ', '.join('%s %s' % (paramType(p), p.name) for p in op.params), + 'argnames': [p.name for p in op.params], + 'hasout': _out, + 'hasinout': _inout, + 'throws': ', '.join(_raises), + 'defaultval': defaultValue(op.returnType), + 'returns': baseType(op.returnType)} + for attr in 
self.idl.attributes(): + readwrite_attr = False + if not attr.readonly: + readwrite_attr = True + yield {'name': attr.name, + 'arglist': '', + 'argnames': tuple(), + 'readwrite_attr': readwrite_attr, + 'throws': 'PortCallError', + 'defaultval': defaultValue(attr.attrType), + 'returns': baseType(attr.attrType)} + if not attr.readonly: + yield {'name': attr.name, + 'arglist': baseType(attr.attrType)+ ' data', + 'throws': 'PortCallError', + 'argnames': ('data',), + 'returns': 'void'} diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/ports/mapping.py b/redhawk-codegen/redhawk/codegen/jinja/java/ports/mapping.py index ef23ea640..f4ad5a8aa 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/ports/mapping.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/ports/mapping.py @@ -28,7 +28,5 @@ def _mapPort(self, port, generator): javaport['javaname'] = java.identifier('port_'+port.name()) javaport['javatype'] = generator.className() javaport['constructor'] = generator.constructor(port.name()) - javaport['start'] = generator.start() - javaport['stop'] = generator.stop() javaport['multiout'] = generator.supportsMultiOut() return javaport diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/ports/message.py b/redhawk-codegen/redhawk/codegen/jinja/java/ports/message.py index e94596b0f..2811b7de5 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/ports/message.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/ports/message.py @@ -40,7 +40,7 @@ def __init__(self, port): BuiltinJavaPort.__init__(self, 'org.ossie.events.MessageConsumerPort', port) def _ctorArgs(self, name): - return (java.stringLiteral(name),"this.logger") + return (java.stringLiteral(name),"this._baseLog") class MessageSupplierPortGenerator(BuiltinJavaPort): def __init__(self, port): diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/ports/templates/generic.provides.java b/redhawk-codegen/redhawk/codegen/jinja/java/ports/templates/generic.provides.java index 3f41143e5..6f641f89f 100644 --- 
a/redhawk-codegen/redhawk/codegen/jinja/java/ports/templates/generic.provides.java +++ b/redhawk-codegen/redhawk/codegen/jinja/java/ports/templates/generic.provides.java @@ -23,6 +23,7 @@ import ${component.package}.${component.baseclass.name}; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; /** * @generated @@ -50,6 +51,12 @@ public class ${classname} extends ${portgenerator.poaClass()} implements PortBas //begin-user-code //end-user-code } + + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } /*{% for operation in portgenerator.operations() %}*/ /** diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/ports/templates/generic.uses.java b/redhawk-codegen/redhawk/codegen/jinja/java/ports/templates/generic.uses.java index 3dc9b67e9..8a44bd687 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/ports/templates/generic.uses.java +++ b/redhawk-codegen/redhawk/codegen/jinja/java/ports/templates/generic.uses.java @@ -26,11 +26,12 @@ import java.util.Map; import org.ossie.component.QueryableUsesPort; import org.ossie.component.PortBase; +import org.ossie.redhawk.PortCallError; /** * @generated */ -public class ${classname} extends QueryableUsesPort<${interface}> implements ${interface}, PortBase { +public class ${classname} extends QueryableUsesPort<${interface}> implements PortBase { /** * Map of connection Ids to port objects @@ -63,17 +64,69 @@ public class ${classname} extends QueryableUsesPort<${interface}> implements ${i public ${operation.returns} ${operation.name}(${operation.arglist})${" throws " + operation.throws if operation.throws} { //% set hasreturn = operation.returns != 'void' +/*{% if operation.argnames %}*/ + ${'return ' if hasreturn}this.${operation.name}(${operation.argnames|join(', ')}, ""); +/*{% else %}*/ +/*{% if operation.readwrite_attr %}*/ + ${'return ' if hasreturn}this._get_${operation.name}(""); +/*{% else %}*/ + ${'return ' if 
hasreturn}this.${operation.name}(""); +/*{% endif %}*/ +/*{% endif %}*/ + } + +/*{% if operation.arglist %}*/ + public ${operation.returns} ${operation.name}(${operation.arglist}, String __connection_id__)${" throws " + operation.throws if operation.throws} +/*{% else %}*/ +/*{% if operation.readwrite_attr %}*/ + public ${operation.returns} _get_${operation.name}(String __connection_id__)${" throws " + operation.throws if operation.throws} +/*{% else %}*/ + public ${operation.returns} ${operation.name}(String __connection_id__)${" throws " + operation.throws if operation.throws} +/*{% endif %}*/ +/*{% endif %}*/ + { +/*{% if hasreturn %}*/ +/*{% set returnstate='true' %}*/ +/*{% else %}*/ +/*{% set returnstate='false' %}*/ +/*{% endif %}*/ +//% set hasout = operation.hasout +/*{% if hasout %}*/ +/*{% set _hasout='true' %}*/ +/*{% else %}*/ +/*{% set _hasout='false' %}*/ +/*{% endif %}*/ +//% set hasinout = operation.hasinout +/*{% if hasinout %}*/ +/*{% set _hasinout='true' %}*/ +/*{% else %}*/ +/*{% set _hasinout='false' %}*/ +/*{% endif %}*/ /*{% if hasreturn %}*/ ${operation.returns} retval = ${java.defaultValue(operation.returns)}; /*{% endif %}*/ synchronized(this.updatingPortsLock) { // don't want to process while command information is coming in + try { + __evaluateRequestBasedOnConnections(__connection_id__, ${returnstate}, ${_hasinout}, ${_hasout}); + } catch (PortCallError e) { + throw e; + } if (this.active) { //begin-user-code //end-user-code - - for (${interface} p : this.outPorts.values()) { - ${'retval = ' if hasreturn}p.${operation.name}(${operation.argnames|join(', ')}); + try { + if (!__connection_id__.isEmpty()) { + ${'retval = ' if hasreturn}this.outPorts.get(__connection_id__).${operation.name}(${operation.argnames|join(', ')}); + } else { + for (${interface} p : this.outPorts.values()) { + ${'retval = ' if hasreturn}p.${operation.name}(${operation.argnames|join(', ')}); + } + } + } catch(org.omg.CORBA.SystemException e) { + throw e; + } 
catch(Throwable e) { + throw new RuntimeException(e); } } } // don't want to process while command information is coming in diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/properties.py b/redhawk-codegen/redhawk/codegen/jinja/java/properties.py index 3fd6d3769..be5912e7f 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/properties.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/properties.py @@ -38,6 +38,7 @@ CorbaTypes.ULONGLONG: java.Types.LONG, CorbaTypes.FLOAT: java.Types.FLOAT, CorbaTypes.DOUBLE: java.Types.DOUBLE, + CorbaTypes.UTCTIME: 'CF.UTCTime', CorbaTypes.STRING: 'String', CorbaTypes.OBJREF: 'String' } @@ -55,6 +56,7 @@ CorbaTypes.ULONGLONG: 'ULongLong', CorbaTypes.FLOAT: 'Float', CorbaTypes.DOUBLE: 'Double', + CorbaTypes.UTCTIME: 'UTCTime', CorbaTypes.STRING: 'String', CorbaTypes.OBJREF: 'Objref' } @@ -100,20 +102,31 @@ def mapSimpleProperty(self, prop): javatype, complex = prop.isComplex()) else: - value = java.NULL + if javaprop['javatype'] == 'CF.UTCTime': + value = '(CF.UTCTime)'+java.NULL + else: + value = java.NULL javaprop['javavalue'] = value javaprop['isOptional'] = prop.isOptional() return javaprop + def mapEnumeration(self, prop, label, value): + javaenum = {} + enumtype = self.javaType(prop.type()) + javaenum['javatype'] = enumtype + javaenum['javalabel'] = java.identifier(label) + javaenum['javavalue'] = java.literal(value, enumtype, prop.isComplex()) + return javaenum + def mapSimpleSequenceProperty(self, prop): javaprop, javatype = self._createComplexJavaProp(prop) - values = [] + values = [] if prop.hasValue(): - for value in prop.value(): + for value in prop.value(): values.append(java.literal(value, javatype, complex = prop.isComplex())) - javaprop['javavalues'] = values + javaprop['javavalues'] = values javaprop['isOptional'] = prop.isOptional() return javaprop diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/service/templates/service.java 
b/redhawk-codegen/redhawk/codegen/jinja/java/service/templates/service.java index 1ae74c990..db4feb9db 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/service/templates/service.java +++ b/redhawk-codegen/redhawk/codegen/jinja/java/service/templates/service.java @@ -33,6 +33,7 @@ import org.apache.log4j.Logger; import org.ossie.component.Service; +import org.ossie.component.RHLogger; import CF.InvalidObjectReference; import org.omg.PortableServer.POA; @@ -52,6 +53,10 @@ public class ${userclass} extends Service implements ${interface}Operations public ${userclass}(Map execparams) { setLogger( logger, ${userclass}.class.getName() ); + if (execparams.containsKey("SERVICE_NAME")) { + this.serviceName = execparams.get("SERVICE_NAME"); + _baseLog = RHLogger.getLogger(this.serviceName); + } } public void terminateService() diff --git a/redhawk-codegen/redhawk/codegen/jinja/java/template.py b/redhawk-codegen/redhawk/codegen/jinja/java/template.py index 9cc65c920..778cd9e88 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/java/template.py +++ b/redhawk-codegen/redhawk/codegen/jinja/java/template.py @@ -23,6 +23,10 @@ from redhawk.codegen.jinja.template import TemplateFile class JavaTemplate(TemplateFile): + COMMENT_START = '/*' + COMMENT_LINE = ' *' + COMMENT_END = ' */' + def __init__(self, template, filename=None, userfile=False, package=None, context={}): super(JavaTemplate,self).__init__(template, filename, userfile=userfile) self.package = package diff --git a/redhawk-codegen/redhawk/codegen/jinja/mapping.py b/redhawk-codegen/redhawk/codegen/jinja/mapping.py index 25ccd8900..4b242f2c6 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/mapping.py +++ b/redhawk-codegen/redhawk/codegen/jinja/mapping.py @@ -19,6 +19,7 @@ # import os +import warnings from redhawk.codegen.model.properties import Kinds from redhawk.codegen.model.softwarecomponent import ComponentTypes @@ -43,12 +44,24 @@ def _mapProperty(self, prop, propclass): def _mapSimple(self, prop): propdict = 
self._mapProperty(prop, 'simple') propdict['type'] = prop.type() + if prop.hasEnumerations(): + propdict['enums'] = [self._mapEnumeration(prop, l, v) for (l, v) in prop.enumerations()] propdict.update(self.mapSimpleProperty(prop)) return propdict def mapSimpleProperty(self, prop): return {} + def _mapEnumeration(self, prop, label, value): + enumdict = {} + enumdict['label'] = label + enumdict['value'] = value + enumdict.update(self.mapEnumeration(prop, label, value)) + return enumdict + + def mapEnumeration(self, prop, label, value): + return {} + def _mapSimpleSequence(self, prop): propdict = self._mapProperty(prop, 'simplesequence') propdict['type'] = prop.type() @@ -60,8 +73,7 @@ def mapSimpleSequenceProperty(self, prop): def _mapStruct(self, prop): propdict = self._mapProperty(prop, 'struct') - fields = [self._mapSimple(s) for s in prop.fields() if isinstance(s, redhawk.codegen.model.properties.SimpleProperty)] - fields += [self._mapSimpleSequence(s) for s in prop.fields() if isinstance(s, redhawk.codegen.model.properties.SimpleSequenceProperty)] + fields = [self._mapField(f) for f in prop.fields()] propdict['fields'] = fields propdict.update(self.mapStructProperty(prop, fields)) return propdict @@ -81,6 +93,16 @@ def _mapStructSequence(self, prop): def mapStructSequenceProperty(self, prop, structdef): return {} + def _mapField(self, prop): + # NB: This does not support struct or struct sequences as fields; + # however, if at some point the PRF is extended to add them, + if prop.isStruct(): + warnings.warn('Only simple and simplesequence properties may appear in a struct') + elif prop.isSequence(): + return self._mapSimpleSequence(prop) + else: + return self._mapSimple(prop) + def mapProperties(self, softpkg): simple = [self._mapSimple(s) for s in softpkg.getSimpleProperties()] simplesequence = [self._mapSimpleSequence(s) for s in softpkg.getSimpleSequenceProperties()] @@ -272,7 +294,6 @@ class ComponentMapper(SoftpkgMapper): def mapComponent(self, softpkg): 
component = self.mapSoftpkg(softpkg) component['license'] = None - component['mFunction'] = None if softpkg.descriptor(): if softpkg.descriptor().supports('IDL:CF/AggregateDevice:1.0'): component['aggregate'] = True diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/base/templates/properties.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/base/templates/properties.py index 9c125afb7..498e8963a 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/base/templates/properties.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/base/templates/properties.py @@ -243,3 +243,23 @@ def getMembers(self): ${structsequence(prop)} #{% endif %} #{% endmacro %} + +#{% macro enumvalues(prop) %} +#{% if prop is structsequence %} +#{% set prop = prop.structdef %} +#{% endif %} +# Enumerated values for ${prop.identifier} +class ${prop.pyname}: +#{% if prop is struct %} +#{% for field in prop.fields if field.enums %} +#{% if not loop.first %} + +#{% endif %} + ${enumvalues(field)|indent(4)} +#{% endfor %} +#{% else %} +#{% for enum in prop.enums %} + ${enum.pylabel} = ${enum.pyvalue} +#{% endfor %} +#{% endif %} +#{% endmacro %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/binary/templates/resource_base.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/binary/templates/resource_base.py index d4e0852b6..076f5c667 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/binary/templates/resource_base.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/binary/templates/resource_base.py @@ -51,7 +51,7 @@ #{% filter lines|unique|join('\n') %} #{% for portgen in component.portgenerators %} #{% if loop.first %} -from ossie.resource import usesport, providesport +from ossie.resource import usesport, providesport, PortCallError #{% endif %} #{% for statement in portgen.imports() %} ${statement} @@ -335,7 +335,7 @@ def releaseObject(self): try: self.stop() except Exception: - 
self._log.exception("Error stopping") + self._baseLog.exception("Error stopping") self.threadControlLock.acquire() try: ${superclass}.releaseObject(self) @@ -393,7 +393,7 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): #{% for portgen in component.portgenerators if portgen is provides and portgen.hasImplementation() %} #{% if loop.first %} -'''provides port(s)''' +'''provides port(s). Send logging to _portLog ''' #{% endif %} #{% include portgen.implementation() %} @@ -401,7 +401,7 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): #{% for portgen in component.portgenerators if portgen is uses and portgen.hasImplementation() %} #{% if loop.first %} -'''uses port(s)''' +'''uses port(s). Send logging to _portLog ''' #{% endif %} #{% include portgen.implementation() %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/generator.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/generator.py index 62bd5839a..051444d11 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/generator.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/generator.py @@ -47,6 +47,7 @@ def map(self, softpkg): if prop['pyname'] in ('device_kind', 'device_model', 'frontend_tuner_allocation', 'frontend_listener_allocation', + 'frontend_scanner_allocation', 'frontend_tuner_status'): prop['inherited'] = True return component diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/mapping.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/mapping.py index a1536fae8..267936231 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/mapping.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/mapping.py @@ -41,7 +41,9 @@ def getImplementedInterfaces(softpkg): # Ensure that parent interfaces also gets added (so, e.g., a device # with a DigitalTuner should also report that it's an AnalogTuner # and FrontendTuner) - 
inherits = { 'DigitalTuner': ('AnalogTuner', 'FrontendTuner'), + inherits = { 'DigitalScanningTuner': ('ScanningTuner', 'DigitalTuner', 'AnalogTuner', 'FrontendTuner'), + 'AnalogScanningTuner': ('ScanningTuner', 'AnalogTuner', 'FrontendTuner'), + 'DigitalTuner': ('AnalogTuner', 'FrontendTuner'), 'AnalogTuner': ('FrontendTuner',) } for port in softpkg.providesPorts(): @@ -75,7 +77,13 @@ def superClasses(softpkg): # Add the most specific tuner delegate interface: # (Digital > Analog > Frontend) - if 'DigitalTuner' in deviceinfo: + if 'DigitalScanningTuner' in deviceinfo: + classes.append({'name': 'digital_scanning_tuner_delegation', 'package': 'frontend'}) + parent['name'] = 'FrontendScannerDevice' + elif 'AnalogScanningTuner' in deviceinfo: + classes.append({'name': 'analog_scanning_tuner_delegation', 'package': 'frontend'}) + parent['name'] = 'FrontendScannerDevice' + elif 'DigitalTuner' in deviceinfo: classes.append({'name': 'digital_tuner_delegation', 'package': 'frontend'}) elif 'AnalogTuner' in deviceinfo: classes.append({'name': 'analog_tuner_delegation', 'package': 'frontend'}) @@ -108,6 +116,7 @@ class FrontendPropertyMapper(PythonPropertyMapper): FRONTEND_BUILTINS = ( 'FRONTEND::tuner_allocation', + 'FRONTEND::scanner_allocation', 'FRONTEND::listener_allocation' ) diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/templates/resource.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/templates/resource.py index 8677396d3..0fe514770 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/templates/resource.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/templates/resource.py @@ -51,9 +51,37 @@ def deviceDisable(self,fts, tuner_id): fts.enabled = False return +#{% if 'ScanningTuner' in component.implements %} + def deviceSetTuningScan(self,request, scan_request, fts, tuner_id): + ''' + ************************************************************ + + This function is 
called when the allocation request contains a scanner allocation + + modify fts, which corresponds to self.frontend_tuner_status[tuner_id] + + The bandwidth, center frequency, and sampling rate that the hardware was actually tuned + to needs to populate fts (to make sure that it meets the tolerance requirement). For example, + if the tuned values match the requested values, the code would look like this: + + fts.bandwidth = request.bandwidth + fts.center_frequency = request.center_frequency + fts.sample_rate = request.sample_rate + + return True if the tuning succeeded, and False if it failed + ************************************************************''' + print "deviceSetTuningScan(): Evaluate whether or not a tuner is added *********" + return True +#{% endif %} + def deviceSetTuning(self,request, fts, tuner_id): ''' + ************************************************************ +#{% if 'ScanningTuner' in component.implements %} + + This function is called when the allocation request does not contain a scanner allocation + +#{% endif %} modify fts, which corresponds to self.frontend_tuner_status[tuner_id] The bandwidth, center frequency, and sampling rate that the hardware was actually tuned @@ -112,7 +140,7 @@ def setTunerCenterFrequency(self,allocation_id, freq): if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") if allocation_id != self.getControlAllocationId(idx): raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) - if freq<0: raise FRONTEND.BadParameterException() + if freq<0: raise FRONTEND.BadParameterException("Center frequency cannot be less than 0") # set hardware to new value. 
Raise an exception if it's not possible self.frontend_tuner_status[idx].center_frequency = freq @@ -126,7 +154,7 @@ def setTunerBandwidth(self,allocation_id, bw): if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") if allocation_id != self.getControlAllocationId(idx): raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) - if bw<0: raise FRONTEND.BadParameterException() + if bw<0: raise FRONTEND.BadParameterException("Bandwidth cannot be less than 0") # set hardware to new value. Raise an exception if it's not possible self.frontend_tuner_status[idx].bandwidth = bw @@ -174,7 +202,7 @@ def setTunerOutputSampleRate(self,allocation_id, sr): if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") if allocation_id != self.getControlAllocationId(idx): raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) - if sr<0: raise FRONTEND.BadParameterException() + if sr<0: raise FRONTEND.BadParameterException("Sample rate cannot be less than 0") # set hardware to new value. Raise an exception if it's not possible self.frontend_tuner_status[idx].sample_rate = sr @@ -183,6 +211,36 @@ def getTunerOutputSampleRate(self,allocation_id): if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") return self.frontend_tuner_status[idx].sample_rate +#{% endif %} +#{% if 'ScanningTuner' in component.implements %} + + def getScanStatus(self, allocation_id): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + # set hardware to new value. 
Raise an exception if it's not possible + _scan_strategy=FRONTEND.ScanningTuner.ScanStrategy( + FRONTEND.ScanningTuner.MANUAL_SCAN, + FRONTEND.ScanningTuner.ScanModeDefinition(center_frequency=1.0), + FRONTEND.ScanningTuner.TIME_BASED, + 0.0) + _scan_status=FRONTEND.ScanningTuner.ScanStatus(_scan_strategy, + start_time=bulkio.timestamp.now(), + center_tune_frequencies=[], + started=False) + return _scan_status + + def setScanStartTime(self, allocation_id, start_time): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + + def setScanStrategy(self, allocation_id, scan_strategy): + idx = self.getTunerMapping(allocation_id) + if idx < 0: raise FRONTEND.FrontendException("Invalid allocation id") + if allocation_id != self.getControlAllocationId(idx): + raise FRONTEND.FrontendException(("ID "+str(allocation_id)+" does not have authorization to modify the tuner")) + #{% endif %} #{% if 'GPS' in component.implements %} @@ -228,7 +286,7 @@ def set_nav_packet(self,port_name, nav_info): def get_rf_flow_id(self,port_name): return "" - def set_rf_flow_id(self,port_name, id): + def set_rf_flow_id(self,port_name, _id): pass def get_rfinfo_pkt(self,port_name): diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/templates/resource_base.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/templates/resource_base.py index dcfe9213a..83b126d0c 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/templates/resource_base.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/frontend/templates/resource_base.py @@ -20,15 +20,21 @@ #% extends "pull/resource_base.py" #{% block baseadditionalimports %} import frontend +from omniORB import any as _any #{% if 'FrontendTuner' in 
component.implements %} from frontend import FRONTEND +from ossie.properties import struct_to_props BOOLEAN_VALUE_HERE=False #{% endif %} #{% endblock %} #{% block extensions %} #{% for prop in component.properties if prop.name == "frontend_tuner_status" %} # Rebind tuner status property with custom struct definition +#{% if 'ScanningTuner' in component.implements %} + frontend_tuner_status = FrontendScannerDevice.frontend_tuner_status.rebind() +#{% else %} frontend_tuner_status = FrontendTunerDevice.frontend_tuner_status.rebind() +#{% endif %} frontend_tuner_status.structdef = frontend_tuner_status_struct_struct #{% endfor %} @@ -40,7 +46,8 @@ def getTunerStatus(self,allocation_id): tuner_id = self.getTunerMapping(allocation_id) if tuner_id < 0: raise FRONTEND.FrontendException(("ERROR: ID: " + str(allocation_id) + " IS NOT ASSOCIATED WITH ANY TUNER!")) - return [CF.DataType(id=self.frontend_tuner_status[tuner_id].getId(),value=self.frontend_tuner_status[tuner_id]._toAny())] + _props = self.query([CF.DataType(id='FRONTEND::tuner_status',value=_any.to_any(None))]) + return _props[0].value._v[tuner_id]._v def assignListener(self,listen_alloc_id, allocation_id): # find control allocation_id @@ -87,11 +94,6 @@ def removeListener(self,listen_alloc_id): del self.listeners[listen_alloc_id] #{% if component.hasmultioutport %} - old_table = self.connectionTable - for entry in list(self.connectionTable): - if entry.connection_id == listen_alloc_id: - self.connectionTable.remove(entry) - #{% for port_out in component.ports if port_out.multiout %} # Check to see if port "${port_out.pyname}" has a connection for this listener tmp = self.${port_out.pyname}._get_connections() @@ -100,6 +102,12 @@ def removeListener(self,listen_alloc_id): if connection_id == listen_alloc_id: self.${port_out.pyname}.disconnectPort(connection_id) #{% endfor %} + + old_table = self.connectionTable + for entry in list(self.connectionTable): + if entry.connection_id == listen_alloc_id: + 
self.connectionTable.remove(entry) + self.connectionTableChanged(old_table, self.connectionTable) #{% endif %} @@ -164,7 +172,7 @@ def matchAllocationIdToStreamId(self,allocation_id, stream_id, port_name): tmp = bulkio.connection_descriptor_struct() #{% for port in component.ports if port.multiout %} tmp.connection_id = allocation_id - tmp.port_name = "${port.pyname}" + tmp.port_name = "${port.portname}" tmp.stream_id = stream_id self.connectionTable.append(tmp) #{% endfor %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/persona_base.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/persona_base.py index dec6a0ddb..cf518e999 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/persona_base.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/persona_base.py @@ -53,7 +53,7 @@ def releaseObject(self): self.terminate(pid) #{% endif %} - self._log.debug("releaseObject()") + self._baseLog.debug("releaseObject()") if self._adminState == CF.Device.UNLOCKED: self._adminState = CF.Device.SHUTTING_DOWN @@ -67,22 +67,22 @@ def releaseObject(self): objid = self._default_POA().servant_to_id(self) self._default_POA().deactivate_object(objid) except: - self._log.error("failed releaseObject()") + self._baseLog.error("failed releaseObject()") def attemptToProgramParent(self): if (not self._parentDevice): - self._log.error("Unable to Program parent: No reference to parent device exists!") + self._baseLog.error("Unable to Program parent: No reference to parent device exists!") return False if (not self._parentAllocated): - self._log.debug("About to allocate parent device!") + self._baseLog.debug("About to allocate parent device!") self.beforeHardwareProgrammed() requestProps = self.hwLoadRequest() formattedProps = self._formatRequestProps(requestProps) if not formattedProps: - self._log.error("Failed to format hw_load_request props for parent device") + 
self._baseLog.error("Failed to format hw_load_request props for parent device") return False self._parentAllocated = self._parentDevice.allocateCapacity(formattedProps) @@ -97,14 +97,14 @@ def attemptToProgramParent(self): def attemptToUnprogramParent(self): if (not self._parentDevice): - self._log.error("Unable to Program parent: No reference to parent device exists!") + self._baseLog.error("Unable to Program parent: No reference to parent device exists!") return False if self._parentAllocated: - self._log.debug("About to deallocate parent device!") + self._baseLog.debug("About to deallocate parent device!") if (not self._previousRequestProps): - self._log.error("Previously requested hw_load props empty!") + self._baseLog.error("Previously requested hw_load props empty!") return False self._parentDevice.deallocateCapacity(self._previousRequestProps) @@ -120,14 +120,14 @@ def execute(self, name, options, parameters): propId = param.id propVal = param.value.value() if type(propVal) == str: - self._log.debug("InstantiateResourceProp: ID['" + + self._baseLog.debug("InstantiateResourceProp: ID['" + str(propId) + "'] = " + str(propVal)) resource = instantiateResource(name, options, parameters) if not resource: msg = "Unable to dynamically instantiate resource!" - self._log.error(msg) + self._baseLog.error(msg) raise CF.ExecutableDevice.ExecuteFail(CF.CF_NOTSET, msg) resourceId = resource._get_identifier() @@ -203,18 +203,18 @@ def afterHardwareUnprogrammed(self): def _formatRequestProps(self, requestProps): # Sanity check... Can't format nothing! if not requestProps: - self._log.error("Unable to format hw_load_request_properties. Properties are empty!") + self._baseLog.error("Unable to format hw_load_request_properties. Properties are empty!") return None # Sanity check... Make sure the type has an id field! if not hasattr(requestProps[0], "id"): - self._log.error("Unable to format hw_load_request_properties. 
Properties must be of list of 'CF.Datatype'") + self._baseLog.error("Unable to format hw_load_request_properties. Properties must be of list of 'CF.Datatype'") return None # Case 1 - Properties are already formatted properly if len(requestProps) == 1: if requestProps[0].id == "hw_load_requests": - self._log.debug("No formatting occurred - Assumed formatting is proper") + self._baseLog.debug("No formatting occurred - Assumed formatting is proper") return requestProps # Further inspection of properties @@ -227,16 +227,16 @@ def _formatRequestProps(self, requestProps): # Case 2 - Properties are list of hw_load_request structs if allPropsAreHwLoadRequest: - self._log.debug("Found hw_load_request list - Formatting to structseq") + self._baseLog.debug("Found hw_load_request list - Formatting to structseq") return [CF.DataType(id="hw_load_requests", value=requestProps)] # Case 3 - Properties represent the contents of a single hw_load_request if foundRequestId: - self._log.debug("Found hw_load_request contents - Formatting to structseq") + self._baseLog.debug("Found hw_load_request contents - Formatting to structseq") structProp = CF.DataType(id="hw_load_request", value=requestProps) return [CF.DataType(id="hw_load_requests", value=structProp)] - self._log.error("Unable to format hw_load_request_properties. Properties are empty!") + self._baseLog.error("Unable to format hw_load_request_properties. Properties are empty!") return None diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/resource.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/resource.py index 1eda13c42..54318d058 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/resource.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/resource.py @@ -103,6 +103,18 @@ def process(self): Properties are accessed directly as member variables. 
If the property name is baudRate, then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + Logging: + + The member _baseLog is a logger whose base name is the component (or device) instance name. + New logs should be created based on this logger name. + + To create a new logger, + my_logger = self._baseLog.getChildLogger("foo") + + Assuming component instance name abc_1, my_logger will then be created with the + name "abc_1.user.foo". + Example: # This example assumes that the ${artifactType} has two ports: @@ -136,7 +148,7 @@ def process(self): """ # TODO fill in your code here - self._log.debug("process() example log message") + self._baseLog.debug("process() example log message") return NOOP def hwLoadRequest(self): diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/resource_base.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/resource_base.py index 61fcdf599..0dbb13131 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/resource_base.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/persona/templates/resource_base.py @@ -51,7 +51,7 @@ #{% filter lines|unique|join('\n') %} #{% for portgen in component.portgenerators %} #{% if loop.first %} -from ossie.resource import usesport, providesport +from ossie.resource import usesport, providesport, PortCallError #{% endif %} #{% for statement in portgen.imports() %} ${statement} @@ -172,7 +172,7 @@ def releaseObject(self): try: self.stop() except Exception: - self._log.exception("Error stopping") + self._baseLog.exception("Error stopping") self.threadControlLock.acquire() try: ${superclass}.releaseObject(self) @@ -230,7 +230,7 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): #{% for portgen in component.portgenerators if portgen is provides and portgen.hasImplementation() %} #{% if loop.first %} -'''provides port(s)''' +'''provides port(s). 
Send logging to _portLog ''' #{% endif %} #{% include portgen.implementation() %} @@ -238,7 +238,7 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): #{% for portgen in component.portgenerators if portgen is uses and portgen.hasImplementation() %} #{% if loop.first %} -'''uses port(s)''' +'''uses port(s). Send logging to _portLog ''' #{% endif %} #{% include portgen.implementation() %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/programmable_base.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/programmable_base.py index e7d53ff53..eb734eee0 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/programmable_base.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/programmable_base.py @@ -137,7 +137,7 @@ def execute(self, name, options, parameters): # Validate that the ${executeType} was instantiated properly if not ${executeType}: msg = "Unable to instantiate '%s'" % str(name) - self._log.error(msg) + self._baseLog.error(msg) raise CF.ExecutableDevice.ExecuteFail(CF.CF_NOTSET, msg) # Validate that we can set the parentDevice reference on the persona @@ -145,7 +145,7 @@ def execute(self, name, options, parameters): persona._parentDevice = self else: msg = "Unable to set parent device on persona '%s'" % str(name) - self._log.warning(msg) + self._baseLog.warning(msg) raise CF.ExecutableDevice.ExecuteFail(CF.CF_NOTSET, msg) # Setup variables for storing the mappings @@ -231,14 +231,14 @@ def allocateCapacity(self, capacities): self._populateHwLoadRequest(hwLoadRequestsContainer, capacity) if not (self.hwLoadRequestsAreValid(hwLoadRequestsContainer)): - self._log.warn("Received invalid hw_load_request - Not allocating hardware!") + self._baseLog.warn("Received invalid hw_load_request - Not allocating hardware!") continue; hwLoadStatusesContainer = self.getHwLoadStatusesContainer() allocationSuccess = 
self._applyHwLoadRequests(hwLoadRequestsContainer, hwLoadStatusesContainer) if (allocationSuccess): - self._log.warn("TODO: Figure out this callback in allocateCapacity") + self._baseLog.warn("TODO: Figure out this callback in allocateCapacity") # TODO: Figure out the callback finally: self.updateAdminStates() @@ -254,7 +254,7 @@ def deallocateCapacity(self, capacities): if capacity.id != HW_LOAD_REQUEST_PROP: continue; - self._log.debug("Deallocating hw_load_requests...") + self._baseLog.debug("Deallocating hw_load_requests...") # TODO: Populate hwLoadRequestsToRemove hwLoadStatusesContainer = self.getHwLoadStatusesContainer() @@ -267,12 +267,12 @@ def deallocateCapacity(self, capacities): if not deallocationSuccess: msg = "Unable to deallocate hw_load_requests!" - self._log.error(msg) + self._baseLog.error(msg) raise CF.Device.InvalidCapacity(msg, capacities) def releaseObject(self): - self._log.debug("Received release call") + self._baseLog.debug("Received release call") # Wrapped in list in order to use list copy for fakePid in list(self._processMap.keys()): self.terminate(fakePid) @@ -323,7 +323,7 @@ def _applyHwLoadRequests(self, loadRequestContainer, loadStatusContainer): success |= self._applyHwLoadRequest(loadRequest, availableStatusContainer) usedStatusIndices.append(availableStatusIndex) else: - self._log.error("Device cannot be allocated against. No more hw_load capacity available!") + self._baseLog.error("Device cannot be allocated against. 
No more hw_load capacity available!") success = False # Rollback all statuses that we're previously valid @@ -368,11 +368,11 @@ def _resetHwLoadStatus(self, loadStatusStruct): loadStatusStruct.state = HwLoadStates.INACTIVE def loadHardware(self, newStatus): - self._log.debug("Method 'loadHardware' is not implemented!") + self._baseLog.debug("Method 'loadHardware' is not implemented!") return True def unloadHardware(self, loadStatus): - self._log.debug("Method 'unloadHardware' is not implemented") + self._baseLog.debug("Method 'unloadHardware' is not implemented") def _removeHwLoadRequestFromStatus(self, hwLoadRequest, hwLoadStatusContainer): for hwLoadStatus in hwLoadStatusContainer: @@ -405,7 +405,7 @@ def updateAdminStates(self): for resource in self._${executeType}Map.values(): resourceId = resource._get_identifier() if runningPersonas.count(resourceId) < 1: - self._log.debug("Locking device: '%s'" % str(resourceId)) + self._baseLog.debug("Locking device: '%s'" % str(resourceId)) resource._set_adminState(CF.Device.LOCKED) #{% endif %} @@ -420,7 +420,7 @@ def _populateHwLoadRequest(self, container, requests): # Iterate through each request in list for request in requestVals: if request.id != "hw_load_request": - self._log.warn("Unable to convert incoming request - PropId must be 'hw_load_request'") + self._baseLog.warn("Unable to convert incoming request - PropId must be 'hw_load_request'") continue; request_id = "" @@ -467,7 +467,7 @@ def _locateHwLoadStatuses(self, container, capacities): for status in container: # Validate that our container has the 'request_id' field if not hasattr(status, "request_id"): - self._log.warn("Unable to locate status by request_id: \ + self._baseLog.warn("Unable to locate status by request_id: \ HwLoadStatuses does not have request_id field!") return retVal diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/resource.py 
b/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/resource.py index de22352ff..7e7c37e40 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/resource.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/resource.py @@ -102,6 +102,18 @@ def process(self): Properties are accessed directly as member variables. If the property name is baudRate, then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + Logging: + + The member _baseLog is a logger whose base name is the component (or device) instance name. + New logs should be created based on this logger name. + + To create a new logger, + my_logger = self._baseLog.getChildLogger("foo") + + Assuming component instance name abc_1, my_logger will then be created with the + name "abc_1.user.foo". + Example: # This example assumes that the ${artifactType} has two ports: @@ -135,7 +147,7 @@ def process(self): """ # TODO fill in your code here - self._log.debug("process() example log message") + self._baseLog.debug("process() example log message") return NOOP diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/resource_base.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/resource_base.py index 9d758c9b7..c2c4b4233 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/resource_base.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/programmable/templates/resource_base.py @@ -48,7 +48,7 @@ #{% filter lines|unique|join('\n') %} #{% for portgen in component.portgenerators %} #{% if loop.first %} -from ossie.resource import usesport, providesport +from ossie.resource import usesport, providesport, PortCallError #{% endif %} #{% for statement in portgen.imports() %} ${statement} @@ -157,7 +157,7 @@ def releaseObject(self): try: self.stop() except Exception: - 
self._log.exception("Error stopping") + self._baseLog.exception("Error stopping") self.threadControlLock.acquire() try: ${superclass}.releaseObject(self) @@ -215,7 +215,7 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): #{% for portgen in component.portgenerators if portgen is provides and portgen.hasImplementation() %} #{% if loop.first %} -'''provides port(s)''' +'''provides port(s). Send logging to _portLog ''' #{% endif %} #{% include portgen.implementation() %} @@ -223,7 +223,7 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): #{% for portgen in component.portgenerators if portgen is uses and portgen.hasImplementation() %} #{% if loop.first %} -'''uses port(s)''' +'''uses port(s). Send logging to _portLog ''' #{% endif %} #{% include portgen.implementation() %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/pull/templates/resource.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/pull/templates/resource.py index b76c2c37b..aa35492ea 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/pull/templates/resource.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/pull/templates/resource.py @@ -58,9 +58,11 @@ def constructor(self): The options for devices are: TX, RX, RX_DIGITIZER, CHANNELIZER, DDC, RC_DIGITIZER_CHANNELIZER For example, if this device has 5 physical - tuners, each an RX_DIGITIZER, then the code in the construct function should look like this: + tuners, 3 RX_DIGITIZER and 2 CHANNELIZER, then the code in the construct function + should look like this: - self.setNumChannels(5, "RX_DIGITIZER"); + self.addChannels(3, "RX_DIGITIZER"); + self.addChannels(2, "CHANNELIZER"); The incoming request for tuning contains a string describing the requested tuner type. The string for the request must match the string in the tuner status. @@ -68,7 +70,7 @@ def constructor(self): """ # TODO add customization here. 
#{% if 'FrontendTuner' in component.implements %} - self.setNumChannels(1, "RX_DIGITIZER"); + self.addChannels(1, "RX_DIGITIZER"); #{% endif %} #{% block updateUsageState %} @@ -112,27 +114,36 @@ def process(self): Each port instance is accessed through members of the following form: self.port_ - Data is obtained in the process function through the getPacket call (BULKIO only) on a - provides port member instance. The optional argument is a timeout value, in seconds. - A zero value is non-blocking, while a negative value is blocking. Constants have been - defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no - timeout is given, it defaults to non-blocking. - - The return value is a named tuple with the following fields: - - dataBuffer - - T - - EOS - - streamID - - SRI - - sriChanged - - inputQueueFlushed - If no data is available due to a timeout, all fields are None. + Data is passed to the serviceFunction by reading from input streams + (BulkIO only). UDP multicast (dataSDDS and dataVITA49) ports do not support + streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio.const.BLOCKING and + bulkio.const.NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that include the SRI that was in effect at the time + the data was received, and the time stamps associated with that data. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends an end-of- stream flag and cleans up. 
+ + If working with complex data (i.e., the "mode" on the SRI is set to 1), + the data block's complex attribute will return True. Data blocks provide a + cxdata attribute that gives the data as a list of complex values: + + if block.complex: + outData = [val.conjugate() for val in block.cxdata] + outputStream.write(outData, block.getStartTime()) - To send data, call the appropriate function in the port directly. In the case of BULKIO, - convenience functions have been added in the port classes that aid in output. - Interactions with non-BULKIO ports are left up to the ${artifactType} developer's discretion. - + Messages: To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described @@ -191,6 +202,17 @@ def mycallback(self, id, old_value, new_value): The callback is then registered on the component as: self.addPropertyChangeListener('baudRate', self.mycallback) + + Logging: + + The member _baseLog is a logger whose base name is the component (or device) instance name. + New logs should be created based on this logger name. + + To create a new logger, + my_logger = self._baseLog.getChildLogger("foo") + + Assuming component instance name abc_1, my_logger will then be created with the + name "abc_1.user.foo". 
#{% if component is device %} Allocation: @@ -223,29 +245,36 @@ def my_dealloc_fn(self, value): # - A float value called amplitude # - A boolean called increaseAmplitude - packet = self.port_dataShort_in.getPacket() - - if packet.dataBuffer is None: + inputStream = self.port_dataShort_in.getCurrentStream() + if not inputStream: return NOOP - - outData = range(len(packet.dataBuffer)) - for i in range(len(packet.dataBuffer)): - if self.increaseAmplitude: - outData[i] = float(packet.dataBuffer[i]) * self.amplitude - else: - outData[i] = float(packet.dataBuffer[i]) - - # NOTE: You must make at least one valid pushSRI call - if packet.sriChanged: - self.port_dataFloat_out.pushSRI(packet.SRI); - self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + outputStream = self.port_dataFloat_out.getStream(inputStream.streamID) + if not outputStream: + outputStream = self.port_dataFloat_out.createStream(inputStream.sri) + + block = inputStream.read() + if not block: + if inputStream.eos(): + outputStream.close() + return NOOP + + if self.increaseAmplitude: + scale = self.amplitude + else: + scale = 1.0 + outData = [float(val) * scale for val in block.data] + + if block.sriChanged: + outputStream.sri = block.sri + + outputStream.write(outData, block.getStartTime()) return NORMAL """ # TODO fill in your code here - self._log.debug("process() example log message") + self._baseLog.debug("process() example log message") return NOOP #{% block extensions %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/component/pull/templates/resource_base.py b/redhawk-codegen/redhawk/codegen/jinja/python/component/pull/templates/resource_base.py index 4242b9dc6..f245cf1ad 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/component/pull/templates/resource_base.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/component/pull/templates/resource_base.py @@ -52,7 +52,7 @@ import Queue, copy, time, threading #{% for portgen in component.portgenerators %} 
#{% if loop.first %} -from ossie.resource import usesport, providesport +from ossie.resource import usesport, providesport, PortCallError #{% endif %} #{% for statement in portgen.imports() %} ${statement} @@ -63,6 +63,14 @@ #{# Allow additional child class imports #} #{% endblock %} +#{% import "base/properties.py" as properties with context %} +#{% for prop in component.properties if prop is enumerated %} +#{% if loop.first %} +class enums: +#{% endif %} + ${properties.enumvalues(prop)|indent(4)} + +#{% endfor %} class ${className}(${component.poaclass}, ${component.superclasses|join(', ', attribute='name')}, ThreadedComponent): # These values can be altered in the __init__ of your derived class @@ -93,6 +101,7 @@ def __init__(self, identifier, execparams): # Instantiate the default implementations for all ports on this ${artifactType} #{% for port in component.ports %} self.${port.pyname} = ${port.constructor} + self.${port.pyname}._portLog = self._baseLog.getChildLogger('${port.name}', 'ports') #{% endfor %} #{% if component.hasmultioutport %} self.addPropertyChangeListener('connectionTable',self.updated_connectionTable) @@ -121,7 +130,7 @@ def releaseObject(self): try: self.stop() except Exception: - self._log.exception("Error stopping") + self._baseLog.exception("Error stopping") ${superclass}.releaseObject(self) ###################################################################### @@ -160,7 +169,6 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): # # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file # or by using the IDE. 
-#{% import "base/properties.py" as properties with context %} #{% filter codealign %} #{% for prop in component.properties %} #{% if prop is struct and not prop.builtin %} @@ -178,7 +186,7 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): #{% for portgen in component.portgenerators if portgen is provides and portgen.hasImplementation() %} #{% if loop.first %} -'''provides port(s)''' +'''provides port(s). Send logging to _portLog ''' #{% endif %} #{% include portgen.implementation() %} @@ -186,7 +194,7 @@ class ${portgen.templateClass()}(${portgen.poaClass()}): #{% for portgen in component.portgenerators if portgen is uses and portgen.hasImplementation() %} #{% if loop.first %} -'''uses port(s)''' +'''uses port(s). Send logging to _portLog ''' #{% endif %} #{% include portgen.implementation() %} diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/ports/frontend.py b/redhawk-codegen/redhawk/codegen/jinja/python/ports/frontend.py index 6502c3951..7dbd93b26 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/ports/frontend.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/ports/frontend.py @@ -50,7 +50,7 @@ def _ctorArgs(self, port): return [python.stringLiteral(port.name())] def constructor(self, name): - fei_ports = ['InDigitalTunerPort','InFrontendTunerPort','InAnalogTunerPort','InGPSPort','InRFInfoPort','InRFSourcePort','InNavDataPort'] + fei_ports = ['InDigitalTunerPort','InDigitalScanningTunerPort','InFrontendTunerPort','InAnalogTunerPort','InGPSPort','InRFInfoPort','InRFSourcePort','InNavDataPort'] for _port in fei_ports: if _port in self.className(): return '%s(%s, self)' % (self.className(), ', '.join(self._ctorArgs(name))) diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/ports/generic.py b/redhawk-codegen/redhawk/codegen/jinja/python/ports/generic.py index 2e20a012f..c3cb2c905 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/ports/generic.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/ports/generic.py @@ 
-80,16 +80,32 @@ def operations(self): args.append(param.name) if param.direction in ('inout', 'out'): returns.append(str(param.paramType)) + _out = False + for p in op.params: + if p.direction == 'out': + _out = True + break + _inout = False + for p in op.params: + if p.direction == 'inout': + _inout = True + break yield {'name': op.name, + 'hasout': _out, + 'hasinout': _inout, + 'hasreturnType': str(op.returnType), 'args': args, 'returns': returns} for attr in self.idl.attributes(): yield {'name': '_get_'+attr.name, 'args': [], + 'is_attribute': True, + 'base_attribute': attr.name, 'returns': [str(attr.attrType)]} if not attr.readonly: yield {'name': '_set_'+attr.name, 'args': ['data'], + 'hasreturnType': 'void', 'returns': []} def _implementation(self): diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/ports/mapping.py b/redhawk-codegen/redhawk/codegen/jinja/python/ports/mapping.py index b73ae8dd3..155f91fd3 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/ports/mapping.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/ports/mapping.py @@ -26,6 +26,7 @@ class PythonPortMapper(PortMapper): def _mapPort(self, port, generator): pyport = {} pyport['pyname'] = python.identifier('port_'+port.name()) + pyport['portname'] = python.identifier(port.name()) pyport['constructor'] = generator.constructor(port) pyport['multiout'] = generator.supportsMultiOut() return pyport diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/ports/templates/generic.uses.py b/redhawk-codegen/redhawk/codegen/jinja/python/ports/templates/generic.uses.py index 5fef611be..c7e471602 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/ports/templates/generic.uses.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/ports/templates/generic.uses.py @@ -25,6 +25,29 @@ def __init__(self, parent, name): self.outConnections = {} self.port_lock = threading.Lock() + def getConnectionIds(self): + return self.outConnections.keys() + + def 
_evaluateRequestBasedOnConnections(self, __connection_id__, returnValue, inOut, out): + if not __connection_id__ and len(self.outConnections) > 1: + if (out or inOut or returnValue): + raise PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", self.getConnectionIds()) + + if len(self.outConnections) == 0: + if (out or inOut or returnValue): + raise PortCallError("No connections available.", self.getConnectionIds()) + else: + if __connection_id__: + raise PortCallError("The requested connection id ("+__connection_id__+") does not exist.", self.getConnectionIds()) + if __connection_id__ and len(self.outConnections) > 0: + foundConnection = False + for connId, port in self.outConnections.items(): + if __connection_id__ == connId: + foundConnection = True + break + if not foundConnection: + raise PortCallError("The requested connection id ("+__connection_id__+") does not exist.", self.getConnectionIds()) + def connectPort(self, connection, connectionId): self.port_lock.acquire() try: @@ -49,7 +72,29 @@ def _get_connections(self): #{% for operation in portgen.operations() %} #{% set arglist = ['self'] + operation.args %} - def ${operation.name}(${arglist|join(', ')}): +#{% if arglist %} +#{% if operation.is_attribute %} + def ${operation.base_attribute}(self, __connection_id__ = ""): +#{% if operation.returns %} + return self.${operation.name}(__connection_id__) +#{% else %} + self.${operation.name}(__connection_id__) +#{% endif %} + +#{% endif %} + def ${operation.name}(${arglist|join(', ')}, __connection_id__ = ""): +#{% else %} +#{% if operation.is_attribute %} + def ${operation.base_attribute}(__connection_id__ = ""): +#{% if operation.returns %} + return self.${operation.name}(__connection_id__) +#{% else %} + self.${operation.name}(__connection_id__) +#{% endif %} + +#{% endif %} + def ${operation.name}(__connection_id__ = ""): +#{% endif %} #{% if operation.returns|length > 1 %} retVal 
= [] #{% elif operation.returns|first == 'string' %} @@ -59,13 +104,35 @@ def ${operation.name}(${arglist|join(', ')}): #{% endif %} self.port_lock.acquire() +#{% set hasreturn = (operation.hasreturnType != 'void') %} +#{% if hasreturn %} +#{% set returnstate='True' %} +#{% else %} +#{% set returnstate='False' %} +#{% endif %} +#{% set hasout = operation.hasout %} +#{% if hasout %} +#{% set _hasout='True' %} +#{% else %} +#{% set _hasout='False' %} +#{% endif %} +#{% set hasinout = operation.hasinout %} +#{% if hasinout %} +#{% set _hasinout='True' %} +#{% else %} +#{% set _hasinout='False' %} +#{% endif %} + try: + self._evaluateRequestBasedOnConnections(__connection_id__, ${returnstate}, ${_hasinout}, ${_hasout}) for connId, port in self.outConnections.items(): + if (__connection_id__ and __connection_id__ != connId): + continue if port != None: try: ${"retVal = " if operation.returns}port.${operation.name}(${operation.args|join(', ')}) except Exception: - self.parent._log.exception("The call to ${operation.name} failed on port %s connection %s instance %s", self.name, connId, port) + self.parent._baseLog.exception("The call to ${operation.name} failed on port %s connection %s instance %s", self.name, connId, port) raise finally: self.port_lock.release() diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/properties.py b/redhawk-codegen/redhawk/codegen/jinja/python/properties.py index f5582c531..4164e2bca 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/properties.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/properties.py @@ -47,6 +47,12 @@ def mapSimpleProperty(self, simple): pyprop['value'] = simple.value() return pyprop + def mapEnumeration(self, prop, label, value): + pyenum = {} + pyenum['pylabel'] = python.identifier(label) + pyenum['pyvalue'] = python.literal(value, prop.type(), prop.isComplex()) + return pyenum + def mapSimpleSequenceProperty(self, simplesequence): pyprop = self.mapProperty(simplesequence) pyprop['isComplex'] = 
simplesequence.isComplex() diff --git a/redhawk-codegen/redhawk/codegen/jinja/python/service/templates/service.py b/redhawk-codegen/redhawk/codegen/jinja/python/service/templates/service.py index 8fbaeda14..bcdd912f2 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/python/service/templates/service.py +++ b/redhawk-codegen/redhawk/codegen/jinja/python/service/templates/service.py @@ -40,6 +40,7 @@ class ${className}(${component.baseclass}): def __init__(self, name="${className}", execparams={}): self.name = name + self._baseLog = logging.getLogger(self.name) self._log = logging.getLogger(self.name) def terminateService(self): diff --git a/redhawk-codegen/redhawk/codegen/jinja/template.py b/redhawk-codegen/redhawk/codegen/jinja/template.py index fbcd11361..0e53bf96b 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/template.py +++ b/redhawk-codegen/redhawk/codegen/jinja/template.py @@ -21,6 +21,12 @@ import os class TemplateFile(object): + # Most file types that we generate use hash for comment lines, but + # subclasses can override if needed + COMMENT_START = '#' + COMMENT_LINE = '#' + COMMENT_END = '#' + def __init__(self, template, filename=None, executable=False, userfile=False): self.template = template if filename: @@ -38,3 +44,19 @@ def filters(self): def context(self): return {} + + def comment(self, text): + """ + Generates a comment block from 'text' suitable for this template type. 
+ """ + def generate(t): + yield self.COMMENT_START + for line in t.split('\n'): + # Add a space between the comment marker and the line, but only + # if the line is non-empty + if line: + line = ' ' + line + yield self.COMMENT_LINE + line + yield self.COMMENT_END + yield '' + return '\n'.join(generate(text)) diff --git a/redhawk-codegen/redhawk/codegen/jinja/tests.py b/redhawk-codegen/redhawk/codegen/jinja/tests.py index 7e4e6d966..00839fbd4 100644 --- a/redhawk-codegen/redhawk/codegen/jinja/tests.py +++ b/redhawk-codegen/redhawk/codegen/jinja/tests.py @@ -95,6 +95,23 @@ def is_structsequence(prop): """ return prop['class'] == 'structsequence' +def is_enumerated(prop): + """ + Returns True if the property, or one of its nested properties, has + enumerated values. + """ + if is_simple(prop): + return 'enums' in prop + elif is_struct(prop): + # If any field is enumerated, return True + return any(is_enumerated(f) for f in prop['fields']) + elif is_structsequence(prop): + # Check the struct defintion + return is_enumerated(prop['structdef']) + else: + # Simple sequence properties do not support enumerations + return False + def _getvalue(obj, name): """ Looks up the item or attribute 'name' in obj. 
If the item is callable, diff --git a/redhawk-codegen/redhawk/codegen/lang/cpp.py b/redhawk-codegen/redhawk/codegen/lang/cpp.py index 376968ae4..80a473ef0 100644 --- a/redhawk-codegen/redhawk/codegen/lang/cpp.py +++ b/redhawk-codegen/redhawk/codegen/lang/cpp.py @@ -55,6 +55,7 @@ CorbaTypes.FLOAT: 'float', CorbaTypes.DOUBLE: 'double', CorbaTypes.STRING: 'std::string', + CorbaTypes.UTCTIME: 'CF::UTCTime', CorbaTypes.OBJREF: 'std::string' } diff --git a/redhawk-codegen/redhawk/codegen/lang/idl.py b/redhawk-codegen/redhawk/codegen/lang/idl.py index f33b4e469..9cc269b3a 100644 --- a/redhawk-codegen/redhawk/codegen/lang/idl.py +++ b/redhawk-codegen/redhawk/codegen/lang/idl.py @@ -25,7 +25,7 @@ from redhawk.codegen.utils import strenum CorbaTypes = strenum('octet','boolean','char','short','ushort','long','ulong', - 'longlong','ulonglong','float','double','string','objref') + 'longlong','ulonglong','float','double','string','objref', 'utctime') idlRepo = IDLLibrary() idlRepo.addSearchPath(os.path.join(os.environ['OSSIEHOME'], 'share/idl')) diff --git a/redhawk-codegen/redhawk/codegen/lang/java.py b/redhawk-codegen/redhawk/codegen/lang/java.py index a906b9dfa..7f3418db1 100644 --- a/redhawk-codegen/redhawk/codegen/lang/java.py +++ b/redhawk-codegen/redhawk/codegen/lang/java.py @@ -28,8 +28,8 @@ TRUE = 'true' FALSE = 'false' -Types = strenum('boolean', 'char', 'byte', 'short', 'int', 'long', 'float', 'double') -BoxTypes = strenum('Boolean', 'Character', 'Byte', 'Short', 'Integer', 'Long', 'Float', 'Double') +Types = strenum('boolean', 'char', 'byte', 'short', 'int', 'long', 'float', 'double', 'utctime') +BoxTypes = strenum('Boolean', 'Character', 'Byte', 'Short', 'Integer', 'Long', 'Float', 'Double', 'UTCTime') _reservedKeywords = set(("abstract", "assert", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue", "default", @@ -49,7 +49,8 @@ Types.INT: BoxTypes.INTEGER, Types.LONG: BoxTypes.LONG, Types.FLOAT: BoxTypes.FLOAT, - Types.DOUBLE: 
BoxTypes.DOUBLE + Types.DOUBLE: BoxTypes.DOUBLE, + Types.UTCTIME: BoxTypes.UTCTIME } _typeSize = { @@ -121,9 +122,28 @@ def _complexLiteral(value, typename): return "new CF." + typename + "(" + str(real) + "," + str(imag) + ")" + +def checkValue(value): + base=10 + if type(value) == str: + _v=value.upper() + if _v.startswith('0X') or _v.startswith('X'): + if _v.startswith('X'): value='0'+value + base=16 + if _v.startswith('0O') or _v.startswith('O'): + if _v.startswith('O'): value='0'+value + base=8 + if _v.startswith('0B') or _v.startswith('B'): + if _v.startswith('B'): value='0'+value + base=2 + return value, base + + def literal(value, typename, complex=False): if complex: return _complexLiteral(value, typename) + elif typename == 'CF.UTCTime': + return stringLiteral(value) elif typename == 'String': return stringLiteral(value) elif typename == 'Object': @@ -135,23 +155,29 @@ def literal(value, typename, complex=False): else: return value elif typename in (Types.LONG, BoxTypes.LONG): - return value+'L' + value, base = checkValue(value) + return repr(long(value,base)) elif typename in (Types.BOOLEAN, BoxTypes.BOOLEAN): return translateBoolean(value) elif typename in (Types.BYTE, BoxTypes.BYTE): - return '(byte)%d' % int(value) + value, base = checkValue(value) + return '(byte)%d' % int(value,base) elif typename in (Types.SHORT, BoxTypes.SHORT): - return '(short)%d' % int(value) + value, base = checkValue(value) + return '(short)%d' % int(value,base) elif typename in (Types.CHAR, BoxTypes.CHARACTER): return charLiteral(value) elif typename in (Types.INT, BoxTypes.INTEGER): - return str(int(value)) + value, base = checkValue(value) + return str(int(value,base)) else: return NULL def defaultValue(typename): if typename == 'String': return stringLiteral('') + elif typename == 'CF.UTCTime': + return stringLiteral('(CF.UTCTime)'+NULL) elif typename == 'Object': return NULL elif typename == Types.BOOLEAN: diff --git a/redhawk-codegen/redhawk/codegen/lang/python.py 
b/redhawk-codegen/redhawk/codegen/lang/python.py index addd634b8..603d7fc83 100644 --- a/redhawk-codegen/redhawk/codegen/lang/python.py +++ b/redhawk-codegen/redhawk/codegen/lang/python.py @@ -40,14 +40,34 @@ def boolLiteral(value): value = bool(value) return str(value) +def checkValue(value): + base=10 + if type(value) == str: + _v=value.upper() + if _v.startswith('0X') or _v.startswith('X'): + if _v.startswith('X'): value='0'+value + base=16 + if _v.startswith('0O') or _v.startswith('O'): + if _v.startswith('O'): value='0'+value + base=8 + if _v.startswith('0B') or _v.startswith('B'): + if _v.startswith('B'): value='0'+value + base=2 + return value, base + def floatLiteral(value): return repr(float(value)) +def UTCTimeLiteral(value): + return '"%s"' % (value,) + def intLiteral(value): - return repr(int(value)) + value, base = checkValue(value) + return repr(int(value,base)) def longLiteral(value): - return repr(long(value)) + value, base = checkValue(value) + return repr(long(value,base)) def stringToBoolean(value): if value.lower() == 'true': @@ -69,6 +89,7 @@ def stringToBoolean(value): CorbaTypes.ULONGLONG: longLiteral, CorbaTypes.FLOAT: floatLiteral, CorbaTypes.DOUBLE: floatLiteral, + CorbaTypes.UTCTIME: UTCTimeLiteral, CorbaTypes.STRING: stringLiteral, CorbaTypes.OBJREF: stringLiteral, } diff --git a/redhawk-codegen/redhawk/codegen/model/properties.py b/redhawk-codegen/redhawk/codegen/model/properties.py index 456efd4c6..68d0f47d3 100644 --- a/redhawk-codegen/redhawk/codegen/model/properties.py +++ b/redhawk-codegen/redhawk/codegen/model/properties.py @@ -154,6 +154,9 @@ def hasValue(self): def value(self): return self.xml.value + def hasEnumerations(self): + return bool(self.xml.enumerations) + def enumerations(self): if self.xml.enumerations: return [(e.label, e.value) for e in self.xml.enumerations.enumeration] diff --git a/redhawk-codegen/redhawk/codegen/model/softpkg.py b/redhawk-codegen/redhawk/codegen/model/softpkg.py index c7532edbc..e47685c13 100644 
--- a/redhawk-codegen/redhawk/codegen/model/softpkg.py +++ b/redhawk-codegen/redhawk/codegen/model/softpkg.py @@ -59,6 +59,9 @@ def entrypoint(self): return self.__impl.code.localfile.name return self.__impl.code.entrypoint + def isModule(self): + return self.__impl.code.get_type() == 'SharedLibrary' + def localfile(self): return self.__impl.code.localfile.name diff --git a/redhawk-codegen/redhawk/codegen/versions.py b/redhawk-codegen/redhawk/codegen/versions.py index 1f90671e5..5f86b3569 100644 --- a/redhawk-codegen/redhawk/codegen/versions.py +++ b/redhawk-codegen/redhawk/codegen/versions.py @@ -17,16 +17,16 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # -codegen = '2.0.9' -redhawk = '2.0' +codegen = '2.2.1' +redhawk = '2.2' jinja2 = '2.6' boost = '1.41' omniORB4 = '4.1.0' python = '2.4' -java = '1.6' +java = '1.8' log4j = '1.2.15' octave = '3.4.3' bulkio = redhawk burstio = redhawk -frontend = '2.2' +frontend = '2.4' diff --git a/redhawk/.cproject b/redhawk/.cproject index 317a8b759..7fcef79ca 100644 --- a/redhawk/.cproject +++ b/redhawk/.cproject @@ -241,4 +241,5 @@ + diff --git a/redhawk/src/Makefile.am b/redhawk/src/Makefile.am index 7d0029d03..83f1183a1 100644 --- a/redhawk/src/Makefile.am +++ b/redhawk/src/Makefile.am @@ -34,7 +34,11 @@ if HAVE_JAVASUPPORT OMNIJNI = omnijni endif -SUBDIRS = acinclude etc $(OMNIJNI) base control tools xml idl testing +if BUILD_TESTS +TEST_DIR=testing +endif + +SUBDIRS = acinclude etc $(OMNIJNI) base control tools xml idl $(TEST_DIR) # Install makefile fragments amdir = $(datadir)/aminclude/redhawk diff --git a/redhawk/src/acinclude/Makefile.am b/redhawk/src/acinclude/Makefile.am index ded2755c6..cbcffefe1 100644 --- a/redhawk/src/acinclude/Makefile.am +++ b/redhawk/src/acinclude/Makefile.am @@ -55,4 +55,5 @@ dist_ac_DATA = \ pkg.m4 \ AC_M_FUNCTION_INTEGRATION.m4 \ rhpkg.m4 \ - redhawk.m4 + redhawk.m4 \ + octave.m4 diff --git 
a/redhawk/src/acinclude/java.m4 b/redhawk/src/acinclude/java.m4 index 38762d99c..0df1eaa8f 100644 --- a/redhawk/src/acinclude/java.m4 +++ b/redhawk/src/acinclude/java.m4 @@ -83,9 +83,14 @@ EOF else AC_MSG_RESULT([no]) AC_SUBST([JAVAC], [no]) + AS_EXIT(1) fi rm -f Test.java Test.class fi + if test "$JAVAC" == "no"; then + echo "Java set for required, but no Java is installed" + AS_EXIT(1) + fi ]) dnl RH_PROG_JAR diff --git a/redhawk/src/acinclude/octave.m4 b/redhawk/src/acinclude/octave.m4 new file mode 100644 index 000000000..7495fb2e0 --- /dev/null +++ b/redhawk/src/acinclude/octave.m4 @@ -0,0 +1,69 @@ +dnl +dnl This file is protected by Copyright. Please refer to the COPYRIGHT file +dnl distributed with this source distribution. +dnl +dnl This file is part of REDHAWK core. +dnl +dnl REDHAWK core is free software: you can redistribute it and/or modify it under +dnl the terms of the GNU Lesser General Public License as published by the Free +dnl Software Foundation, either version 3 of the License, or (at your option) any +dnl later version. +dnl +dnl REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +dnl FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +dnl details. +dnl +dnl You should have received a copy of the GNU Lesser General Public License +dnl along with this program. If not, see http://www.gnu.org/licenses/. 
+dnl + +# RH_PROG_OCTAVE_CONFIG +# --------------------- +AC_DEFUN([RH_PROG_OCTAVE_CONFIG], +[ + AC_ARG_VAR([OCTAVE_CONFIG], [path to octave-config utility]) + AS_IF([test "x$ac_cv_env_OCTAVE_CONFIG_set" != "xset"], [ + AC_PATH_TOOL([OCTAVE_CONFIG], [octave-config]) + ]) +]) + +# RH_OCTAVE([MINIMUM-VERSION]) +# +# Checks for Octave installation, with an optional minimum version +# ----------------------------------------------------------------------------- +AC_DEFUN([RH_OCTAVE], +[ + dnl Require octave-config to be able to get the include and library + dnl directories + AC_REQUIRE([RH_PROG_OCTAVE_CONFIG]) + AS_IF([test "x$OCTAVE_CONFIG" == "x"], [ + AC_ERROR([octave-config was not found]) + ]) + + dnl If a minimum version was given, get the Octave version from octave-config + dnl and compare; otherwise, just assume it should work + AS_IF([test x$1 != x], [ + AC_MSG_CHECKING([for Octave >= $1]) + rh_octave_version=`$OCTAVE_CONFIG -v` + AS_VERSION_COMPARE([$rh_octave_version], [$1], [ + AC_ERROR([Octave version $rh_octave_version found, $1 required]) + ], [], []) + ], [ + AC_MSG_CHECKING([for Octave]) + ]) + + dnl Get the include directory from octave-config, then format it into usable + dnl include paths + rh_octave_incdir=`$OCTAVE_CONFIG -p OCTINCLUDEDIR` + OCTAVE_CPPFLAGS="-I${rh_octave_incdir}/.. 
-I${rh_octave_incdir}" + AC_SUBST([OCTAVE_CPPFLAGS]) + + dnl Get the library directory from octave-config for use as a linker path, + dnl then add the "octave" an "octinterp" libraries to linker flags + rh_octave_libdir=`$OCTAVE_CONFIG -p OCTLIBDIR` + OCTAVE_LIBS="-L${rh_octave_libdir} -loctave -loctinterp" + AC_SUBST([OCTAVE_LIBS]) + + AC_MSG_RESULT([yes]) +]) diff --git a/redhawk/src/acinclude/ossie.m4 b/redhawk/src/acinclude/ossie.m4 index 52f10ce05..77bd4df03 100644 --- a/redhawk/src/acinclude/ossie.m4 +++ b/redhawk/src/acinclude/ossie.m4 @@ -180,21 +180,41 @@ ossie_cv_pyscheme, AC_SUBST(PYTHON_INSTALL_SCHEME, $ossie_cv_pyscheme) ]) -AC_DEFUN([OSSIE_ENABLE_PERSISTENCE], -[AC_MSG_CHECKING([to see if domain persistence should be enabled]) - AC_ARG_ENABLE(persistence, - AS_HELP_STRING([--enable-persistence=[persist_type]], [Enable persistence support. Supported types: bdb, gdbm, sqlite, none)]), - [ - AC_MSG_RESULT([$enableval]) - AX_BOOST_SERIALIZATION - if test "x$enableval" == "x" -o "x$enableval" == "xnone" ; then - AC_SUBST(PERSISTENCE_CFLAGS, "") - AC_SUBST(PERSISTENCE_LIBS, "") +AC_DEFUN([OSSIE_ENABLE_PERSISTENCE], [ + AC_MSG_CHECKING([to see if domain persistence should be enabled]) + AC_ARG_ENABLE(persistence, [ +AS_HELP_STRING([--enable-persistence@<:@=persist_type@:>@], [Enable persistence support. 
Supported types: bdb, gdbm, sqlite @<:@default=sqlite@:>@]) +AS_HELP_STRING([--disable-persistence], [Disable persistence support])], + [], + [ + dnl Default behavior is implicit yes + enable_persistence="yes" + ]) + + dnl Default backend is sqlite + AS_IF([test "x$enable_persistence" = "xyes"], [ + enable_persistence="sqlite" + ]) + + AS_IF([test "x$enable_persistence" = "xno" -o "x$enable_persistence" = "xnone"], [ + AC_MSG_RESULT([no]) + ], [ + AC_MSG_RESULT([$enable_persistence]) + if test "x$enable_persistence" = "xsqlite"; then + CHECK_SQLITE3_LIB + if test x"$ac_sqlite3_header" == "xyes" -a x"$ac_sqlite3_lib" == "xyes"; then + PERSISTENCE_CFLAGS="" + PERSISTENCE_LIBS="-lsqlite3" + AC_DEFINE(HAVE_SQLITE, 1, [Define if sqlite is available]) + AC_DEFINE(ENABLE_SQLITE_PERSISTENCE, 1, [enable SQLite-based persistence]) + else + AC_MSG_ERROR([System cannot support sqlite persistence]) + fi elif test "x$enableval" == "xbdb"; then CHECK_BDB_LIB if test x"$ac_bdb_header" == "xyes" -a x"$ac_bdb_lib" == "xyes"; then - AC_SUBST(PERSISTENCE_CFLAGS, "") - AC_SUBST(PERSISTENCE_LIBS, "-ldb_cxx") + PERSISTENCE_CFLAGS="" + PERSISTENCE_LIBS="-ldb_cxx" AC_DEFINE(HAVE_BDB, 1, [Define if bdb is available]) AC_DEFINE(ENABLE_BDB_PERSISTENCE, 1, [enable BDB-based persistence]) else @@ -203,30 +223,19 @@ AC_DEFUN([OSSIE_ENABLE_PERSISTENCE], elif test "x$enableval" == "xgdbm"; then CHECK_GDBM_LIB if test x"$ac_gdbm_header" == "xyes" -a x"$ac_gdbm_lib" == "xyes"; then - AC_SUBST(PERSISTENCE_CFLAGS, "") - AC_SUBST(PERSISTENCE_LIBS, "-lgdbm") + PERSISTENCE_CFLAGS="" + PERSISTENCE_LIBS="-lgdbm" AC_DEFINE(HAVE_GDBM, 1, [Define if gdbm is available]) AC_DEFINE(ENABLE_GDBM_PERSISTENCE, 1, [enable gdbm-based persistence]) else AC_MSG_ERROR([System cannot support gdbm persistence]) fi - elif test "x$enableval" == "xsqlite"; then - CHECK_SQLITE3_LIB - if test x"$ac_sqlite3_header" == "xyes" -a x"$ac_sqlite3_lib" == "xyes"; then - AC_SUBST(PERSISTENCE_CFLAGS, "") - AC_SUBST(PERSISTENCE_LIBS, 
"-lsqlite3") - AC_DEFINE(HAVE_SQLITE, 1, [Define if sqlite is available]) - AC_DEFINE(ENABLE_SQLITE_PERSISTENCE, 1, [enable SQLite-based persistence]) - else - AC_MSG_ERROR([System cannot support sqlite persistence]) - fi else AC_MSG_ERROR([Invalid persistence type specified]) fi - ], - [ - AC_MSG_RESULT([no]) - ]) + AC_SUBST(PERSISTENCE_CFLAGS) + AC_SUBST(PERSISTENCE_LIBS) + ]) ]) AC_DEFUN([CHECK_BDB_LIB], diff --git a/redhawk/src/acinclude/testdir.m4 b/redhawk/src/acinclude/testdir.m4 new file mode 100644 index 000000000..4935ca50a --- /dev/null +++ b/redhawk/src/acinclude/testdir.m4 @@ -0,0 +1,10 @@ +build_tests="yes" +AC_ARG_WITH(tests, + [AC_HELP_STRING([--without-tests], + [disables building of testing directory]) ], + [ test "$withval" = "no" && build_tests="no" || build_tests="yes" ], + [ build_tests="yes" ] + ) +AM_CONDITIONAL([BUILD_TESTS], [test "x$build_tests" = xyes] ) +AC_SUBST(build_tests) + diff --git a/redhawk/src/acinclude/unitdir.m4 b/redhawk/src/acinclude/unitdir.m4 new file mode 100644 index 000000000..9221c4f9f --- /dev/null +++ b/redhawk/src/acinclude/unitdir.m4 @@ -0,0 +1,20 @@ +unitdir=/usr/lib/systemd/system +AC_ARG_WITH(unitdir, + [AC_HELP_STRING([--with-unitdir@<:@=unit-dir-path@:>@], + [install systemd unit files @<:@Default: no, and path defaults to /usr/lib/systemd/system if not given@:>@])], + [ case "${withval}" in + no) + install_systemdunits=0 + ;; + yes) + install_systemdunits=1 + ;; + *) + install_systemdunits=1 + unitdir=${withval} + ;; + esac ], + [use_systemd=0] + ) +AM_CONDITIONAL([INSTALL_SYSTEMDUNITS], [test "x$install_systemdunits" = x1]) +AC_SUBST(unitdir) diff --git a/redhawk/src/aminclude/idlj.am b/redhawk/src/aminclude/idlj.am index cd3b0e183..ba0aa889e 100644 --- a/redhawk/src/aminclude/idlj.am +++ b/redhawk/src/aminclude/idlj.am @@ -17,11 +17,16 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. 
# + # Find all Java files in path referenced by first argument, replace that path # with a variable refernce to the second argument (e.g., output will contain # the unexpanded value '$(IDLJ_BUILDDIR)') and combine all lines into one. rh__idlfind = find $(1) -name '*.java' | sed 's|^$(1)|$$($2)|' | paste -s -d " " - +# Turns the leading path of a pattern-matched segment in an IDL file into an +# underscore-separated variable name +rh__idlvar = $(subst /,_,$*) + ############################################################################### # Generate Java source from IDL files using IDLJ ############################################################################### @@ -35,10 +40,10 @@ IDLJ_BUILDDIR ?= . .idlj/%.mk : %.idl $(AM_V_at)rm -rf $(rh__idljtemp); mkdir -p $(rh__idljtemp) $(AM_V_at)$(rh__idlj) -td $(rh__idljtemp) $< - $(AM_V_at)echo -n "$*_idlj_SOURCE = " > $@ + $(AM_V_at)echo -n "$(rh__idlvar)_idlj_SOURCE = " > $@ $(AM_V_at)$(call rh__idlfind,$(rh__idljtemp),IDLJ_BUILDDIR) >> $@ $(AM_V_at)rm -rf $(rh__idljtemp) - $(AM_V_at)echo '$$($*_idlj_SOURCE) : $(rh__idljtemp).idlj' >> $@ + $(AM_V_at)echo '$$($(rh__idlvar)_idlj_SOURCE) : $(rh__idljtemp).idlj' >> $@ $(AM_V_at)echo '.INTERMEDIATE : $(rh__idljtemp).idlj' >> $@ $(AM_V_at)echo '$(rh__idljtemp).idlj : $<' >> $@ $(AM_V_at)echo ' @mkdir -p $$(IDLJ_BUILDDIR)' >> $@ @@ -57,10 +62,10 @@ IDLJNI_BUILDDIR ?= . 
.idljni/%.mk : %.idl $(AM_V_at)rm -rf $(rh__idljnitemp); mkdir -p $(rh__idljnitemp) $(AM_V_at)$(rh__idljni) -C $(rh__idljnitemp) $< - $(AM_V_at)echo -n "$*_idljni_SOURCE = " > $@ + $(AM_V_at)echo -n "$(rh__idlvar)_idljni_SOURCE = " > $@ $(AM_V_at)$(call rh__idlfind,$(rh__idljnitemp),IDLJNI_BUILDDIR) >> $@ $(AM_V_at)rm -rf $(rh__idljnitemp) - $(AM_V_at)echo '$$($*_idljni_SOURCE) : $(rh__idljnitemp).idljni' >> $@ + $(AM_V_at)echo '$$($(rh__idlvar)_idljni_SOURCE) : $(rh__idljnitemp).idljni' >> $@ $(AM_V_at)echo '.INTERMEDIATE : $(rh__idljnitemp).idljni' >> $@ $(AM_V_at)echo '$(rh__idljnitemp).idljni : $<' >> $@ $(AM_V_at)echo ' @mkdir -p $$(IDLJNI_BUILDDIR)' >> $@ @@ -84,7 +89,7 @@ endef define JAVA_IDL_template # Include Java IDL makefile fragments. $(foreach mkfile,$($(1)_IDLSRC:%.idl=.$(1)/%.mk),$(eval $(call INCLUDE_IFEXIST,$(mkfile)))) -$(eval $(1)_SOURCE = $(foreach idlfile,$($(1)_IDLSRC:%.idl=%),$$($(idlfile)_$(1)_SOURCE))) +$(eval $(1)_SOURCE = $(foreach idlfile,$($(1)_IDLSRC:%.idl=%),$$($(subst /,_,$(idlfile))_$(1)_SOURCE))) # Chain clean and distclean off of Automake targets .PHONY: clean-$(1) distclean-$(1) diff --git a/redhawk/src/base/framework/AnyUtils.cpp b/redhawk/src/base/framework/AnyUtils.cpp index 97bf52f35..2031dde94 100644 --- a/redhawk/src/base/framework/AnyUtils.cpp +++ b/redhawk/src/base/framework/AnyUtils.cpp @@ -79,6 +79,24 @@ namespace { } } + // Specialization for boolean, first checking for literal values "true" and + // "false" (case insensitive), then convering via lexical cast to double + // (for widest range) and comparing with zero + template<> + inline bool stringToNumber (const std::string& str) + { + std::string out; + std::transform(str.begin(), str.end(), std::back_inserter(out), ::tolower); + if (out == "true") { + return true; + } else if (out == "false") { + return false; + } else { + double temp = boost::lexical_cast(str); + return (temp != 0.0); + } + } + // Specialization for CORBA::Octet (unsigned char), always converting 
via // double because lexical_cast throws a bad_lexical_cast exception with // CORBA::Octet @@ -145,12 +163,16 @@ namespace { #define ANY_TO_NUMERIC_TYPE(T,N) \ bool ossie::any::toNumber (const CORBA::Any& any, T& value) \ { \ - return anyToNumber(any, value); \ + try { \ + return ::anyToNumber(any, value); \ + } catch (...) { \ + return false; \ + } \ } \ T ossie::any::to##N (const CORBA::Any& any) \ { \ - T value; \ - if (ossie::any::toNumber(any, value)) { \ + T value = 0; \ + if (::anyToNumber(any, value)) { \ return value; \ } else { \ throw std::invalid_argument("Non-numeric Any type"); \ diff --git a/redhawk/src/base/framework/BufferManager.cpp b/redhawk/src/base/framework/BufferManager.cpp new file mode 100644 index 000000000..cd1537ae9 --- /dev/null +++ b/redhawk/src/base/framework/BufferManager.cpp @@ -0,0 +1,436 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include + +#include +#include "inplace_list.h" + +using redhawk::BufferManager; + +struct BufferManager::CacheBlock { + CacheBlock(size_t size) : + size(size), + cache(0) + { + } + + static CacheBlock* from_pointer(void* ptr) + { + CacheBlock* block = reinterpret_cast(ptr); + return block - 1; + } + + void* data() + { + return this + 1; + } + + static size_t required_bytes(size_t bytes) + { + return bytes + sizeof(CacheBlock); + } + + static size_t usable_bytes(size_t bytes) + { + return bytes - sizeof(CacheBlock); + } + + const size_t size; + BufferManager::BufferCache* cache; +}; + +class BufferManager::BufferCache { +public: + // The CacheNode structure contains fields that are only required when a + // memory block is being stored in the cache + struct CacheNode : public CacheBlock { + CacheNode* prev; + CacheNode* next; + size_t lastUsed; + }; + + BufferCache(BufferManager* manager) : + _manager(manager), + _enabled(true), + _time(0), + _maxBytes(-1), + _maxBlocks(-1), + _maxAge(-1), + _hits(0), + _misses(0), + _currentBytes(0), + _refcount(1) + { + } + + CacheBlock* fetch(size_t bytes) + { + boost::mutex::scoped_lock lock(_lock); + CacheNode* node = _fetch(bytes); + if (!node) { + ++_misses; + return 0; + } + + ++_hits; + _currentBytes -= node->size; + _manager->_decreaseSize(node->size); + return node; + } + + void store(CacheBlock* block) + { + // Overlay the CacheNode struct on the block + CacheNode* node = static_cast(block); + boost::mutex::scoped_lock lock(_lock); + node->lastUsed = ++_time; + _cache.push_front(*node); + _currentBytes += node->size; + _manager->_increaseSize(node->size); + _compact(); + } + + void enable(bool enabled) + { + boost::mutex::scoped_lock lock(_lock); + _enabled = enabled; + if (!_enabled) { + _compact(); + } + } + + void setMaxBytes(size_t bytes) + { + boost::mutex::scoped_lock lock(_lock); + _maxBytes = bytes; + _compact(); + } + + void setMaxBlocks(size_t blocks) + { + boost::mutex::scoped_lock lock(_lock); + 
_maxBlocks = blocks; + _compact(); + } + + void setMaxAge(size_t age) + { + boost::mutex::scoped_lock lock(_lock); + _maxAge = age; + _compact(); + } + + size_t hits() + { + return _hits; + } + + size_t misses() + { + return _misses; + } + + size_t size() + { + return _cache.size(); + } + + void incref() + { + __sync_fetch_and_add(&_refcount, 1); + } + + bool decref() + { + size_t count = __sync_sub_and_fetch(&_refcount, 1); + if (count == 0) { + delete this; + return false; + } + return true; + } + + static void release(BufferCache* cache) + { + cache->decref(); + } + +private: + ~BufferCache() + { + _enabled = false; + _compact(); + _manager->_removeCache(this); + } + + inline CacheNode* _fetch(size_t bytes) + { + for (CacheList::iterator iter = _cache.begin(); iter != _cache.end(); ++iter) { + if (iter->size == bytes) { + CacheNode* node = iter.get_node(); + _cache.erase(iter); + return node; + } + } + return 0; + } + + inline bool _overThreshold() + { + if (_cache.empty()) { + return false; + } + if (!_enabled) { + return true; + } + size_t age = _time - _cache.back().lastUsed; + return (age > _maxAge) || (_currentBytes > _maxBytes) || (_cache.size() > _maxBlocks); + } + + inline void _compact() + { + size_t previous = _currentBytes; + while (_overThreshold()) { + CacheNode* node = &_cache.back(); + _cache.pop_back(); + _currentBytes -= node->size; + _manager->_deallocate(node); + } + size_t delta = previous - _currentBytes; + if (delta > 0) { + _manager->_decreaseSize(delta); + } + } + + BufferManager* _manager; + boost::mutex _lock; + typedef redhawk::inplace_list CacheList; + CacheList _cache; + bool _enabled; + size_t _time; + + size_t _maxBytes; + size_t _maxBlocks; + size_t _maxAge; + size_t _hits; + size_t _misses; + size_t _currentBytes; + volatile size_t _refcount; +}; + +BufferManager::BufferManager() : + _threadCache(&BufferCache::release), + _enabled(true), + _maxThreadBytes(-1), + _maxThreadBlocks(-1), + _maxThreadAge(-1), + _hits(0), + _misses(0), 
+ _currentBytes(0), + _highWaterBytes(0) +{ +} + +BufferManager::~BufferManager() +{ +} + +BufferManager& BufferManager::Instance() +{ + return _instance; +} + +void* BufferManager::allocate(size_t bytes) +{ + bytes = _nearestSize(bytes); + + BufferCache* cache = 0; + CacheBlock* block = 0; + if (_enabled) { + cache = _getCache(); + cache->incref(); + block = cache->fetch(bytes); + } + if (!block) { + block = _allocate(bytes); + block->cache = cache; + } + return block->data(); +} + +void BufferManager::deallocate(void* ptr) +{ + CacheBlock* block = CacheBlock::from_pointer(ptr); + BufferCache* cache = block->cache; + if (cache) { + if (cache->decref() && _enabled) { + cache->store(block); + return; + } + } + _deallocate(block); +} + +size_t BufferManager::_nearestSize(size_t bytes) +{ + // Include cache overhead in rounding + bytes = CacheBlock::required_bytes(bytes); + if (bytes <= 128*1024) { + // Up to 128K, round to nearest 1K + bytes = (bytes + 1023) & ~1023; + } else { + // Round to nearest 4K + bytes = (bytes + 4095) & ~4095; + } + // Remove cache overhead, leaving usable bytes + return CacheBlock::usable_bytes(bytes); +} + +BufferManager::CacheBlock* BufferManager::_allocate(size_t bytes) +{ + void* buffer = ::operator new(CacheBlock::required_bytes(bytes)); + return new (buffer) CacheBlock(bytes); +} + +void BufferManager::_deallocate(CacheBlock* block) +{ + ::operator delete(block); +} + +bool BufferManager::isEnabled() const +{ + return _enabled; +} + +void BufferManager::enable(bool enabled) +{ + _enabled = enabled; + boost::mutex::scoped_lock lock(_lock); + for (CacheList::iterator ii = _caches.begin(); ii != _caches.end(); ++ii) { + (*ii)->enable(enabled); + } +} + +size_t BufferManager::getMaxThreadBytes() const +{ + return _maxThreadBytes; +} + +void BufferManager::setMaxThreadBytes(size_t bytes) +{ + boost::mutex::scoped_lock lock(_lock); + _maxThreadBytes = bytes; + for (CacheList::iterator ii = _caches.begin(); ii != _caches.end(); ++ii) { + 
(*ii)->setMaxBytes(bytes); + } +} + +size_t BufferManager::getMaxThreadBlocks() const +{ + return _maxThreadBlocks; +} + +void BufferManager::setMaxThreadBlocks(size_t blocks) +{ + boost::mutex::scoped_lock lock(_lock); + _maxThreadBlocks = blocks; + for (CacheList::iterator ii = _caches.begin(); ii != _caches.end(); ++ii) { + (*ii)->setMaxBlocks(blocks); + } +} + +size_t BufferManager::getMaxThreadAge() const +{ + return _maxThreadAge; +} + +void BufferManager::setMaxThreadAge(size_t age) +{ + boost::mutex::scoped_lock lock(_lock); + _maxThreadAge = age; + for (CacheList::iterator ii = _caches.begin(); ii != _caches.end(); ++ii) { + (*ii)->setMaxAge(age); + } +} + +BufferManager::Statistics BufferManager::getStatistics() +{ + boost::mutex::scoped_lock lock(_lock); + Statistics stats; + stats.caches = _caches.size(); + stats.hits = _hits; + stats.misses = _misses; + stats.blocks = 0; + stats.bytes = _currentBytes; + stats.highBytes = _highWaterBytes; + + for (CacheList::iterator iter = _caches.begin(); iter != _caches.end(); ++iter) { + BufferCache* cache = *iter; + stats.hits += cache->hits(); + stats.misses += cache->misses(); + stats.blocks += cache->size(); + } + return stats; +} + +BufferManager::BufferCache* BufferManager::_getCache() +{ + BufferCache* cache = _threadCache.get(); + if (!cache) { + cache = new BufferCache(this); + cache->setMaxBytes(_maxThreadBytes); + cache->setMaxBlocks(_maxThreadBlocks); + cache->setMaxAge(_maxThreadAge); + this->_addCache(cache); + _threadCache.reset(cache); + } + return cache; +} + +void BufferManager::_addCache(BufferCache* cache) +{ + boost::mutex::scoped_lock lock(_lock); + _caches.insert(cache); +} + +void BufferManager::_removeCache(BufferCache* cache) +{ + boost::mutex::scoped_lock lock(_lock); + _hits += cache->hits(); + _misses += cache->misses(); + _caches.erase(cache); +} + +void BufferManager::_increaseSize(size_t bytes) +{ + size_t current = __sync_add_and_fetch(&_currentBytes, bytes); + size_t high = 
_highWaterBytes; + while (current > high) { + high = __sync_val_compare_and_swap(&_highWaterBytes, high, current); + } +} + +void BufferManager::_decreaseSize(size_t bytes) +{ + __sync_fetch_and_sub(&_currentBytes, bytes); +} + +BufferManager BufferManager::_instance; diff --git a/redhawk/src/base/framework/Component.cpp b/redhawk/src/base/framework/Component.cpp index 4189e1bd8..140322779 100644 --- a/redhawk/src/base/framework/Component.cpp +++ b/redhawk/src/base/framework/Component.cpp @@ -19,34 +19,57 @@ */ #include "ossie/Component.h" -Component::Component(const char* _uuid) : Resource_impl (_uuid) { - this->_app = NULL; - this->_net = NULL; +Component::Component(const char* _uuid) : + Resource_impl(_uuid), + _app(new redhawk::ApplicationContainer()), + _net(new redhawk::NetworkContainer()) +{ } -Component::Component(const char* _uuid, const char *label) : Resource_impl (_uuid, label) { - this->_app = NULL; - this->_net = NULL; +Component::Component(const char* _uuid, const char *label) : + Resource_impl(_uuid, label), + _app(new redhawk::ApplicationContainer()), + _net(new redhawk::NetworkContainer()) +{ } -Component::~Component() { - if (this->_app != NULL) - delete this->_app; - if (this->_net != NULL) - delete this->_net; +Component::~Component() +{ } void Component::setAdditionalParameters(std::string &softwareProfile, std::string &application_registrar_ior, std::string &nic) { CORBA::ORB_ptr orb = ossie::corba::Orb(); Resource_impl::setAdditionalParameters(softwareProfile, application_registrar_ior, nic); - this->_net = new redhawk::NetworkContainer(nic); + this->_net.reset(new redhawk::NetworkContainer(nic)); CORBA::Object_var applicationRegistrarObject = orb->string_to_object(application_registrar_ior.c_str()); CF::ApplicationRegistrar_var applicationRegistrar = ossie::corba::_narrowSafe(applicationRegistrarObject); if (!CORBA::is_nil(applicationRegistrar)) { - CF::Application_var app = applicationRegistrar->app(); - this->_app = new 
redhawk::ApplicationContainer(app); - return; + CF::Application_var app = applicationRegistrar->app(); + this->_app.reset(new redhawk::ApplicationContainer(app)); + } +} + +redhawk::ApplicationContainer* Component::getApplication() +{ + return _app.get(); +} + +redhawk::NetworkContainer* Component::getNetwork() +{ + return _net.get(); +} + +void Component::setCommandLineProperty(const std::string& id, const redhawk::Value& value) +{ + if (id == "NIC") { + _net.reset(new redhawk::NetworkContainer(value.toString())); + } else { + Resource_impl::setCommandLineProperty(id, value); } - this->_app = new redhawk::ApplicationContainer(); +} + +void Component::setApplication(CF::Application_ptr application) +{ + _app.reset(new redhawk::ApplicationContainer(application)); } diff --git a/redhawk/src/base/framework/CorbaUtils.cpp b/redhawk/src/base/framework/CorbaUtils.cpp index e092bd039..9ea6a315b 100644 --- a/redhawk/src/base/framework/CorbaUtils.cpp +++ b/redhawk/src/base/framework/CorbaUtils.cpp @@ -24,9 +24,17 @@ #include #include #include +#include +#include #include "ossie/CorbaUtils.h" +#ifdef minor +// Depending on g++ settings, the C stdlib defines a minor() macro that breaks +// CORBA::SystemException's minor() methods +#undef minor +#endif + static CORBA::ORB_var orb = CORBA::ORB::_nil(); static PortableServer::POA_var root_poa = PortableServer::POA::_nil(); static CosNaming::NamingContext_var inc = CosNaming::NamingContext::_nil(); @@ -185,6 +193,15 @@ bool isPersistenceEnabled () return persistenceEnabled; } +CORBA::TypeCode_ptr unalias(CORBA::TypeCode_ptr type) +{ + if (type->kind() == CORBA::tk_alias) { + return type->content_type(); + } else { + return CORBA::TypeCode::_duplicate(type); + } +} + bool isValidType (const CORBA::Any& lhs, const CORBA::Any& rhs) { CORBA::TypeCode_var tc1 = lhs.type(); @@ -394,6 +411,62 @@ void setObjectCommFailureRetries (CORBA::Object_ptr obj, int numRetries) omniORB::installCommFailureExceptionHandler(obj, 
reinterpret_cast(numRetries), handleCommFailure); } +PortableServer::ServantBase* internal::getLocalServant(CORBA::Object_ptr object) +{ + // Find the identity using internal omniORB interfaces, and then check + // whether the object is local to this address space; reading through the + // omniORB library code, it does not appear that this operation requires + // holding the internal omniORB lock + omniIdentity* identity = object->_PR_getobj()->_identity(); + if (identity->inThisAddressSpace()) { + // Given that it's in the same address space, one would assume that + // casting to omniLocalIdentity should always succeed, but since this + // is using undocumented internal omniORB interfaces, use dynamic_cast + // defensively just in case + omniLocalIdentity* local_identity = dynamic_cast(identity); + if (local_identity) { + omniServant* servant = local_identity->servant(); + // Instead of returning omniServant, which is omniORB-specific, + // use its _downcast() method to get a pointer to the servant as + // the standard PortableServer::ServantBase class; while not + // strictly necessary in this case, multiple inheritance can make + // casts between related types tricky (note that it returns a void + // pointer, so it still requires a cast, just not dynamic_cast) + return reinterpret_cast(servant->_downcast()); + } + } + return 0; +} + +std::string describeException(const CORBA::SystemException& exc) +{ + std::ostringstream out; + out << "CORBA::" << exc._name() << " (minor: " << exc.minor() << " completed: "; + switch (exc.completed()) { + case CORBA::COMPLETED_YES: + out << "YES"; + break; + case CORBA::COMPLETED_NO: + out << "NO"; + break; + case CORBA::COMPLETED_MAYBE: + out << "MAYBE"; + break; + } + out << ")"; + return out.str(); +} + +std::string describeException(const CORBA::Exception& exc) { + const CORBA::SystemException* sys = dynamic_cast(&exc); + if (sys) { + return describeException(*sys); + } + std::ostringstream out; + out << exc._name() << " (" 
<< exc._rep_id() << ")"; + return out.str(); +} + #define LNTRACE( lname, expression ) RH_TRACE( rh_logger::Logger::getLogger(lname), expression ) #define LNDEBUG( lname, expression ) RH_DEBUG( rh_logger::Logger::getLogger(lname), expression ) #define LNINFO( lname, expression ) RH_INFO( rh_logger::Logger::getLogger(lname), expression ) diff --git a/redhawk/src/base/framework/Device_impl.cpp b/redhawk/src/base/framework/Device_impl.cpp index bcc96f948..6396c5de7 100644 --- a/redhawk/src/base/framework/Device_impl.cpp +++ b/redhawk/src/base/framework/Device_impl.cpp @@ -27,9 +27,6 @@ #include "ossie/CorbaUtils.h" #include "ossie/Events.h" - -PREPARE_CF_LOGGING(Device_impl) - // // Helper class for performing cleanup when an allocation partially succeeds // @@ -72,6 +69,7 @@ class DeallocationHelper { CF::Properties capacities; }; +PREPARE_CF_LOGGING(Device_impl) void Device_impl::initResources (char* devMgr_ior, char* _id, char* lbl, char* sftwrPrfl) @@ -90,61 +88,58 @@ void Device_impl::initResources (char* devMgr_ior, char* _id, useNewAllocation = false; this->_devMgr = NULL; -} + setLogger(this->_baseLog->getChildLogger("Device", "system")); +} -Device_impl::Device_impl (char* devMgr_ior, char* _id, char* lbl, char* sftwrPrfl) : Resource_impl(_id) +Device_impl::Device_impl (char* devMgr_ior, char* _id, char* lbl, char* sftwrPrfl) : Resource_impl(_id, lbl) { - LOG_TRACE(Device_impl, "Constructing Device") initResources(devMgr_ior, _id, lbl, sftwrPrfl); - LOG_TRACE(Device_impl, "Done Constructing Device") + RH_TRACE(_deviceLog, "Done Constructing Device") } Device_impl::Device_impl (char* devMgr_ior, char* _id, char* lbl, char* sftwrPrfl, - CF::Properties& capacities) : Resource_impl(_id) + CF::Properties& capacities) : Resource_impl(_id, lbl) { - LOG_TRACE(Device_impl, "Constructing Device") initResources(devMgr_ior, _id, lbl, sftwrPrfl); configure (capacities); - LOG_TRACE(Device_impl, "Done Constructing Device") + RH_TRACE(_deviceLog, "Done Constructing Device") 
} Device_impl::Device_impl (char* devMgr_ior, char* _id, char* lbl, char* sftwrPrfl, - CF::Properties& capacities, char* compositeDev_ior) : Resource_impl(_id) + CF::Properties& capacities, char* compositeDev_ior) : Resource_impl(_id, lbl) { - LOG_TRACE(Device_impl, "Constructing Device") initResources(devMgr_ior, _id, lbl, sftwrPrfl); _compositeDev_ior = compositeDev_ior; CORBA::Object_var _aggDev_obj = ossie::corba::Orb()->string_to_object(_compositeDev_ior.c_str()); if (CORBA::is_nil(_aggDev_obj)) { - LOG_ERROR(Device_impl, "Invalid composite device IOR: " << _compositeDev_ior); + RH_ERROR(_deviceLog, "Invalid composite device IOR: " << _compositeDev_ior); } else { _aggregateDevice = CF::AggregateDevice::_narrow(_aggDev_obj); _aggregateDevice->addDevice(this->_this()); } configure (capacities); - LOG_TRACE(Device_impl, "Done Constructing Device") + RH_TRACE(_deviceLog, "Done Constructing Device") } Device_impl::Device_impl (char* devMgr_ior, char* _id, char* lbl, char* sftwrPrfl, - char* compositeDev_ior) : Resource_impl(_id) + char* compositeDev_ior) : Resource_impl(_id, lbl) { - LOG_TRACE(Device_impl, "Constructing Device") initResources(devMgr_ior, _id, lbl, sftwrPrfl); _compositeDev_ior = compositeDev_ior; CORBA::Object_var _aggDev_obj = ossie::corba::Orb()->string_to_object(_compositeDev_ior.c_str()); if (CORBA::is_nil(_aggDev_obj)) { - LOG_ERROR(Device_impl, "Invalid composite device IOR: " << _compositeDev_ior); + RH_ERROR(_deviceLog, "Invalid composite device IOR: " << _compositeDev_ior); } else { _aggregateDevice = CF::AggregateDevice::_narrow(_aggDev_obj); _aggregateDevice->addDevice(this->_this()); } - LOG_TRACE(Device_impl, "Done Constructing Device") + RH_TRACE(_deviceLog, "Done Constructing Device") } const CF::DeviceManager_ptr Device_impl::getDeviceManager() const { @@ -152,6 +147,11 @@ const CF::DeviceManager_ptr Device_impl::getDeviceManager() const { return CF::DeviceManager::_nil(); } +void Device_impl::setLogger(rh_logger::LoggerPtr logptr) 
+{ + _deviceLog = logptr; +} + void Device_impl::postConstruction (std::string &profile, std::string ®istrar_ior, @@ -173,7 +173,7 @@ void Device_impl::postConstruction (std::string &profile, _deviceManager->registerDevice(this->_this()); // setup original capacity values cache - LOG_TRACE(Device_impl, "postConstructor: Saving original capacities... "); + RH_TRACE(_deviceLog, "postConstructor: Saving original capacities... "); PropertySet_impl::PropertyMap::iterator pi = propTable.begin(); for( ; pi != propTable.end(); pi++ ) { PropertyInterface *p = pi->second; @@ -181,11 +181,11 @@ void Device_impl::postConstruction (std::string &profile, CF::DataType res; res.id = p->id.c_str(); p->getValue(res.value); - LOG_TRACE(Device_impl, "postConstructor: Saving allocation ID: " << p->id); + RH_TRACE(_deviceLog, "postConstructor: Saving allocation ID: " << p->id); bool found = false; for ( unsigned int j=0; j < originalCap.length(); j++) { if ( strcmp(p->id.c_str(), originalCap[j].id) == 0 ) { - LOG_TRACE(Device_impl, "Override value for allocation ID: " << p->id); + RH_TRACE(_deviceLog, "Override value for allocation ID: " << p->id); originalCap[j].value = res.value; found = true; } @@ -205,24 +205,24 @@ void Device_impl::setAdditionalParameters ( std::string &profile, std::string ®istrar_ior, const std::string &nic ) { - // set parent's domain context - std::string tnic(nic); - Resource_impl::setAdditionalParameters(profile,registrar_ior, tnic); - _devMgr_ior = registrar_ior; _deviceManager = CF::DeviceManager::_nil(); CORBA::Object_var obj = ossie::corba::Orb()->string_to_object(_devMgr_ior.c_str()); if (CORBA::is_nil(obj)) { - LOG_ERROR(Device_impl, "Invalid device manager IOR"); + RH_ERROR(_deviceLog, "Invalid device manager IOR"); exit(-1); } _deviceManager = CF::DeviceManager::_narrow(obj); if (CORBA::is_nil(_deviceManager)) { - LOG_ERROR(Device_impl, "Could not narrow device manager IOR"); + RH_ERROR(_deviceLog, "Could not narrow device manager IOR"); exit(-1); } 
this->_devMgr = new redhawk::DeviceManagerContainer(_deviceManager); + + // Set up domain awareness + CF::DomainManager_var domainManager = _deviceManager->domMgr(); + setDomainManager(domainManager); } @@ -233,7 +233,7 @@ void Device_impl::run () void Device_impl::halt () { - LOG_DEBUG(Device_impl, "Halting Device") + RH_DEBUG(_deviceLog, "Halting Device") Resource_impl::halt(); } @@ -242,9 +242,9 @@ Device_impl::releaseObject () throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) { // SR:419 - LOG_DEBUG(Device_impl, "Receive releaseObject call"); + RH_DEBUG(_deviceLog, "Receive releaseObject call"); if (_adminState == CF::Device::UNLOCKED) { - LOG_DEBUG(Device_impl, "Releasing Device") + RH_DEBUG(_deviceLog, "Releasing Device") setAdminState(CF::Device::SHUTTING_DOWN); // SR:418 @@ -259,16 +259,16 @@ throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) setAdminState(CF::Device::LOCKED); try { // SR:422 - LOG_DEBUG(Device_impl, "Unregistering Device") + RH_DEBUG(_deviceLog, "Unregistering Device") _deviceManager->unregisterDevice(this->_this()); } catch (...) { // SR:423 throw CF::LifeCycle::ReleaseError(); } - LOG_DEBUG(Device_impl, "Done Releasing Device") + RH_DEBUG(_deviceLog, "Done Releasing Device") } - RH_NL_DEBUG("Device", "Clean up IDM_CHANNEL. DEV-ID:" << _identifier ); + RH_DEBUG(_deviceLog, "Clean up IDM_CHANNEL. 
DEV-ID:" << _identifier ); if ( idm_publisher ) idm_publisher.reset(); delete this->_devMgr; this->_devMgr=NULL; @@ -278,16 +278,16 @@ throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) Device_impl::~Device_impl () { - RH_NL_TRACE("Device", "DTOR START DEV-ID:" << _identifier ); + RH_TRACE(_deviceLog, "DTOR START DEV-ID:" << _identifier ); - RH_NL_DEBUG("Device", "Clean up event channel allocations"); + RH_DEBUG(_deviceLog, "Clean up event channel allocations"); if ( idm_publisher ) idm_publisher.reset(); if (this->_devMgr != NULL) { delete this->_devMgr; } - RH_NL_TRACE("Device", "DTOR END DEV-ID:" << _identifier ); + RH_TRACE(_deviceLog, "DTOR END DEV-ID:" << _identifier ); } @@ -295,11 +295,11 @@ Device_impl::~Device_impl () CORBA::Boolean Device_impl::allocateCapacity (const CF::Properties& capacities) throw (CORBA::SystemException, CF::Device::InvalidCapacity, CF::Device::InvalidState, CF::Device::InsufficientCapacity) { - LOG_TRACE(Device_impl, "in allocateCapacity"); + RH_TRACE(_deviceLog, "in allocateCapacity"); if (capacities.length() == 0) { // Nothing to do, return - LOG_TRACE(Device_impl, "no capacities to configure."); + RH_TRACE(_deviceLog, "no capacities to configure."); return true; } @@ -313,7 +313,7 @@ throw (CORBA::SystemException, CF::Device::InvalidCapacity, CF::Device::InvalidS } else { invalidState = "SHUTTING_DOWN"; } - LOG_DEBUG(Device_impl, "Cannot allocate capacity: System is " << invalidState); + RH_DEBUG(_deviceLog, "Cannot allocate capacity: System is " << invalidState); throw CF::Device::InvalidState(invalidState); } @@ -348,7 +348,7 @@ void Device_impl::validateCapacities (const CF::Properties& capacities) bool Device_impl::allocateCapacityLegacy (const CF::Properties& capacities) { - LOG_TRACE(Device_impl, "Using legacy capacity allocation"); + RH_TRACE(_deviceLog, "Using legacy capacity allocation"); typedef std::pair< CF::DataType, CF::DataType > Allocation; std::vector< Allocation > allocations; @@ -376,21 +376,21 @@ 
bool Device_impl::allocateCapacityLegacy (const CF::Properties& capacities) foundProperty = false; for (unsigned j = 0; j < currentCapacities.length (); j++) { - LOG_TRACE(Device_impl, "Comparing IDs: " << capacities[i].id << ", " << currentCapacities[j].id ); + RH_TRACE(_deviceLog, "Comparing IDs: " << capacities[i].id << ", " << currentCapacities[j].id ); if (strcmp (capacities[i].id, currentCapacities[j].id) == 0) { // Verify that both values have the same type if (!ossie::corba::isValidType (currentCapacities[j].value, capacities[i].value)) { - LOG_ERROR(Device_impl, "Cannot allocate capacity: Incorrect data type."); + RH_ERROR(_deviceLog, "Cannot allocate capacity: Incorrect data type."); throw (CF::Device::InvalidCapacity("Cannot allocate capacity. Incorrect Data Type.", capacities)); } else { // Check for sufficient capacity and allocate it if (!allocate (currentCapacities[j].value, capacities[i].value)) { - LOG_ERROR(Device_impl, "Cannot allocate capacity: Insufficient capacity."); + RH_ERROR(_deviceLog, "Cannot allocate capacity: Insufficient capacity."); return false; } Allocation a( capacities[i], currentCapacities[j] ); allocations.push_back( a ); - LOG_TRACE(Device_impl, "Device Allocation Capacity against, ID: " << capacities[i].id ); + RH_TRACE(_deviceLog, "Device Allocation Capacity against, ID: " << capacities[i].id ); } foundProperty = true; // Report that the requested property was found @@ -399,7 +399,7 @@ bool Device_impl::allocateCapacityLegacy (const CF::Properties& capacities) } if (!foundProperty) { - LOG_ERROR(Device_impl, "Cannot allocate capacity: Invalid property ID: " << capacities[i].id); + RH_ERROR(_deviceLog, "Cannot allocate capacity: Invalid property ID: " << capacities[i].id); throw (CF::Device::InvalidCapacity("Cannot allocate capacity. 
Invalid property ID", capacities)); } } @@ -422,7 +422,7 @@ bool Device_impl::allocateCapacityLegacy (const CF::Properties& capacities) CF::DataType request = allocations[ii].first; CF::DataType new_alloc = allocations[ii].second; PropertyInterface* property = getPropertyFromId((const char*)request.id); - LOG_TRACE(Device_impl, "Allocatable property: " << property->id); + RH_TRACE(_deviceLog, "Allocatable property: " << property->id); try { std::vector::iterator kind = property->kinds.begin(); bool sendEvent = false; @@ -454,10 +454,10 @@ bool Device_impl::allocateCapacityLegacy (const CF::Properties& capacities) // just in case.. should not happen cleanup.add(request); } catch (std::exception& e) { - LOG_ERROR(Device_impl, "Setting property " << property->id << ", " << property->name << " failed. Cause: " << e.what()); + RH_ERROR(_deviceLog, "Setting property " << property->id << ", " << property->name << " failed. Cause: " << e.what()); ossie::corba::push_back(invalidProperties,request); } catch (CORBA::Exception& e) { - LOG_ERROR(Device_impl, "Setting property " << property->id << " failed. Cause: " << e._name()); + RH_ERROR(_deviceLog, "Setting property " << property->id << " failed. 
Cause: " << e._name()); ossie::corba::push_back(invalidProperties,request); } } @@ -478,14 +478,14 @@ bool Device_impl::allocateCapacityLegacy (const CF::Properties& capacities) return true; } else { /* Not sure */ - LOG_WARN(Device_impl, "Cannot allocate capacity: System is BUSY"); + RH_WARN(_deviceLog, "Cannot allocate capacity: System is BUSY"); return false; } } bool Device_impl::allocateCapacityNew (const CF::Properties& capacities) { - LOG_TRACE(Device_impl, "Using callback-based capacity allocation"); + RH_TRACE(_deviceLog, "Using callback-based capacity allocation"); validateCapacities(capacities); @@ -495,16 +495,16 @@ bool Device_impl::allocateCapacityNew (const CF::Properties& capacities) const CF::DataType& capacity = capacities[ii]; const std::string id = static_cast(capacity.id); PropertyInterface* property = getPropertyFromId(id); - LOG_TRACE(Device_impl, "Allocating property '" << id << "'"); + RH_TRACE(_deviceLog, "Allocating property '" << id << "'"); try { if (property->allocate(capacity.value)) { allocations.add(capacity); } else { - LOG_DEBUG(Device_impl, "Cannot allocate capacity. Insufficent capacity for property '" << id << "'"); + RH_DEBUG(_deviceLog, "Cannot allocate capacity. 
Insufficent capacity for property '" << id << "'"); return false; } } catch (const ossie::not_implemented_error& ex) { - LOG_WARN(Device_impl, "No allocation implementation for property '" << id << "'"); + RH_WARN(_deviceLog, "No allocation implementation for property '" << id << "'"); return false; } } @@ -527,7 +527,7 @@ throw (CORBA::SystemException, CF::Device::InvalidCapacity, CF::Device::InvalidS } else { invalidState = "DISABLED"; } - LOG_DEBUG(Device_impl, "Cannot deallocate capacity: System is " << invalidState); + RH_DEBUG(_deviceLog, "Cannot deallocate capacity: System is " << invalidState); throw CF::Device::InvalidState(invalidState); } @@ -540,7 +540,7 @@ throw (CORBA::SystemException, CF::Device::InvalidCapacity, CF::Device::InvalidS void Device_impl::deallocateCapacityLegacy (const CF::Properties& capacities) { - LOG_TRACE(Device_impl, "Using legacy capacity deallocation"); + RH_TRACE(_deviceLog, "Using legacy capacity deallocation"); typedef std::pair< CF::DataType, PropertyInterface * > Allocation; std::vector< Allocation > deallocations; @@ -566,25 +566,25 @@ void Device_impl::deallocateCapacityLegacy (const CF::Properties& capacities) CORBA::Any new_value; property->getValue(new_value); if (!ossie::corba::isValidType (new_value, capacities[i].value)) { - LOG_WARN(Device_impl, "Cannot deallocate capacity. Incorrect Data Type."); + RH_WARN(_deviceLog, "Cannot deallocate capacity. Incorrect Data Type."); throw (CF::Device::InvalidCapacity("Cannot deallocate capacity. 
Incorrect Data Type.", capacities)); } else { deallocate (new_value, capacities[i].value); // check that we can stay within original bounds bool _apply=true; for (unsigned ii = 0; ii < originalCap.length (); ii++) { - LOG_TRACE(Device_impl, "Testing max value for allocation ID: " << originalCap[ii].id); + RH_TRACE(_deviceLog, "Testing max value for allocation ID: " << originalCap[ii].id); if (strcmp (pid.c_str(), originalCap[ii].id) == 0) { compResult = compareAnys (new_value, originalCap[ii].value); if (compResult == FIRST_BIGGER) { - LOG_WARN(Device_impl, "Cannot deallocate capacity, allocation ID: " << pid << ", New capacity would exceed original bound."); + RH_WARN(_deviceLog, "Cannot deallocate capacity, allocation ID: " << pid << ", New capacity would exceed original bound."); ossie::corba::push_back(overCaps, capacities[i]); _apply = false; } } } if (_apply) { - LOG_TRACE(Device_impl, "(deallocation) Allocatable property : " << property->id); + RH_TRACE(_deviceLog, "(deallocation) Allocatable property : " << property->id); try { std::vector::iterator kind = property->kinds.begin(); bool sendEvent = false; @@ -614,10 +614,10 @@ void Device_impl::deallocateCapacityLegacy (const CF::Properties& capacities) } } catch (std::exception& e) { - LOG_ERROR(Device_impl, "Setting property " << property->id << ", " << property->name << " failed. Cause: " << e.what()); + RH_ERROR(_deviceLog, "Setting property " << property->id << ", " << property->name << " failed. Cause: " << e.what()); ossie::corba::push_back(invalidProps,request); } catch (CORBA::Exception& e) { - LOG_ERROR(Device_impl, "Setting property " << property->id << " failed. Cause: " << e._name()); + RH_ERROR(_deviceLog, "Setting property " << property->id << " failed. 
Cause: " << e._name()); ossie::corba::push_back(invalidProps,request); } @@ -669,7 +669,7 @@ void Device_impl::deallocateCapacityLegacy (const CF::Properties& capacities) void Device_impl::deallocateCapacityNew (const CF::Properties& capacities) { - LOG_TRACE(Device_impl, "Using callback-based capacity deallocation"); + RH_TRACE(_deviceLog, "Using callback-based capacity deallocation"); validateCapacities(capacities); @@ -680,11 +680,11 @@ void Device_impl::deallocateCapacityNew (const CF::Properties& capacities) const CF::DataType& capacity = capacities[ii]; const std::string id = static_cast(capacity.id); PropertyInterface* property = getPropertyFromId(id); - LOG_TRACE(Device_impl, "Deallocating property (new method) '" << id << "'"); + RH_TRACE(_deviceLog, "Deallocating property (new method) '" << id << "'"); try { property->deallocate(capacity.value); } catch (const ossie::not_implemented_error& ex) { - LOG_WARN(Device_impl, "No deallocation implementation for property '" << id << "'"); + RH_WARN(_deviceLog, "No deallocation implementation for property '" << id << "'"); } catch (const std::exception& ex) { ossie::corba::push_back(invalidProps, capacity); } @@ -933,7 +933,7 @@ void Device_impl::sendStateChange( StandardEvent::StateChangeType &stateChangeFr idm_publisher ); } else { - RH_NL_WARN("Device", "Unable to publish state change, DEV-ID:" << _identifier ); + RH_WARN(_deviceLog, "Unable to publish state change, DEV-ID:" << _identifier ); } } @@ -1110,11 +1110,11 @@ throw (CF::PropertySet::PartialConfiguration, CF::PropertySet:: CF::DataType res; res.id = p->id.c_str(); p->getValue(res.value); - LOG_TRACE(Device_impl, "Saving value for allocation ID: " << p->id); + RH_TRACE(_deviceLog, "Saving value for allocation ID: " << p->id); bool found = false; for ( unsigned int j=0; j < originalCap.length(); j++) { if ( strcmp(p->id.c_str(), originalCap[j].id) == 0 ) { - LOG_TRACE(Device_impl, "Override value for allocation ID: " << p->id); + RH_TRACE(_deviceLog, 
"Override value for allocation ID: " << p->id); originalCap[j].value = res.value; found = true; } @@ -1132,14 +1132,14 @@ throw (CF::PropertySet::PartialConfiguration, CF::PropertySet:: unsigned int j=0; for ( ; j < originalCap.length(); j++ ) { if ( strcmp(capacities[i].id, originalCap[j].id) == 0 ) { - LOG_TRACE(Device_impl, "Override original value for allocation ID: " <string_to_object(idm_channel_ior.c_str()); if (CORBA::is_nil(IDM_channel_obj)) { - LOG_ERROR(Device_impl, "Invalid IDM channel IOR: " << idm_channel_ior << " DEV-ID:" << _identifier ); + RH_ERROR(_deviceLog, "Invalid IDM channel IOR: " << idm_channel_ior << " DEV-ID:" << _identifier ); } else { ossie::events::EventChannel_var idm_channel = ossie::events::EventChannel::_narrow(IDM_channel_obj); idm_publisher = redhawk::events::PublisherPtr(new redhawk::events::Publisher( idm_channel )); } } - CATCH_LOG_WARN(Device_impl, "Unable to connect to IDM channel"); + CATCH_RH_WARN(_deviceLog, "Unable to connect to IDM channel"); } else { try { - RH_NL_DEBUG("Device", "Getting EventManager.... DEV-ID:" << _identifier ); + RH_DEBUG(_deviceLog, "Getting EventManager.... DEV-ID:" << _identifier ); redhawk::events::ManagerPtr evt_mgr = redhawk::events::Manager::GetManager( this ); if ( evt_mgr ) { - RH_NL_INFO("Device", "DEV-ID:" << _identifier << " Requesting IDM CHANNEL " << redhawk::events::IDM_Channel_Spec ); + RH_INFO(_deviceLog, "DEV-ID:" << _identifier << " Requesting IDM CHANNEL " << redhawk::events::IDM_Channel_Spec ); idm_publisher = evt_mgr->Publisher( redhawk::events::IDM_Channel_Spec ); if (idm_publisher == NULL ) throw -1; } } catch(...) 
{ - LOG_WARN(Device_impl, "Unable to connect to Domain's IDM Channel, DEV-ID:" << _identifier ); + RH_WARN(_deviceLog, "Unable to connect to Domain's IDM Channel, DEV-ID:" << _identifier ); } } @@ -1216,6 +1216,13 @@ void Device_impl::start_device(Device_impl::ctor_type ctor, struct sigaction sa, bool skip_run = false; bool enablesigfd=false; + for (int index = 1; index < argc; ++index) { + if (std::string(argv[index]) == std::string("-i")) { + std::cout<<"Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain"< execparams; for (int i = 0; i < argc; i++) { @@ -1251,6 +1258,7 @@ void Device_impl::start_device(Device_impl::ctor_type ctor, struct sigaction sa, } } + std::string logname = log_label+".system.Device"; // signal assist with processing SIGCHLD events for executable devices.. int sig_fd=-1; if ( enablesigfd ){ @@ -1261,13 +1269,13 @@ void Device_impl::start_device(Device_impl::ctor_type ctor, struct sigaction sa, /* We must block the signals in order for signalfd to receive them */ err = sigprocmask(SIG_BLOCK, &sigset, NULL); if ( err != 0 ) { - LOG_FATAL(Device_impl, "Failed to create signalfd for SIGCHLD"); + RH_NL_FATAL(logname, "Failed to create signalfd for SIGCHLD"); exit(EXIT_FAILURE); } /* Create the signalfd */ sig_fd = signalfd(-1, &sigset,0); if ( sig_fd == -1 ) { - LOG_FATAL(Device_impl, "Failed to create signalfd for SIGCHLD"); + RH_NL_FATAL(logname, "Failed to create signalfd for SIGCHLD"); exit(EXIT_FAILURE); } } @@ -1289,27 +1297,27 @@ void Device_impl::start_device(Device_impl::ctor_type ctor, struct sigaction sa, } if ((devMgr_ior == 0) || (id == 0) || (profile == 0) || (label == 0)) { - LOG_FATAL(Device_impl, "Per SCA specification SR:478, DEVICE_MGR_IOR, PROFILE_NAME, DEVICE_ID, and DEVICE_LABEL must be provided"); + RH_NL_FATAL(logname, "Per SCA specification SR:478, DEVICE_MGR_IOR, PROFILE_NAME, DEVICE_ID, and DEVICE_LABEL must be provided"); exit(-1); } - 
LOG_DEBUG(Device_impl, "Identifier = " << id << "Label = " << label << " Profile = " << profile << " IOR = " << devMgr_ior); + RH_NL_DEBUG(logname, "Identifier = " << id << "Label = " << label << " Profile = " << profile << " IOR = " << devMgr_ior); // Associate SIGINT to signal_catcher interrupt handler if( sigaction( SIGINT, &sa, NULL ) == -1 ) { - LOG_FATAL(Device_impl, "SIGINT association failed"); + RH_NL_FATAL(logname, "SIGINT association failed"); exit(EXIT_FAILURE); } // Associate SIGQUIT to signal_catcher interrupt handler if( sigaction( SIGQUIT, &sa, NULL ) == -1 ) { - LOG_FATAL(Device_impl, "SIGQUIT association failed"); + RH_NL_FATAL(logname, "SIGQUIT association failed"); exit(EXIT_FAILURE); } // Associate SIGTERM to signal_catcher interrupt handler if( sigaction( SIGTERM, &sa, NULL ) == -1 ) { - LOG_FATAL(Device_impl, "SIGTERM association failed"); + RH_NL_FATAL(logname, "SIGTERM association failed"); exit(EXIT_FAILURE); } @@ -1336,7 +1344,7 @@ void Device_impl::start_device(Device_impl::ctor_type ctor, struct sigaction sa, device->postConstruction( tmp_profile, tmp_devMgr_ior, idm_channel_ior, nic, sig_fd); } catch( CF::InvalidObjectReference &ex ) { - LOG_FATAL(Device_impl, "Device " << label << ", Failed initialization and registration, terminating execution"); + RH_NL_FATAL(logname, "Device " << label << ", Failed initialization and registration, terminating execution"); if ( device ) device->_remove_ref(); ossie::logging::Terminate(); ossie::corba::OrbShutdown(true); @@ -1362,13 +1370,13 @@ void Device_impl::start_device(Device_impl::ctor_type ctor, struct sigaction sa, eout << "COMPLETED_MAYBE"; } eout << ")"; - LOG_FATAL(Device_impl, "Unable to complete Device construction: "<_remove_ref(); ossie::logging::Terminate(); ossie::corba::OrbShutdown(true); exit(EXIT_FAILURE); } catch ( ... 
) { - LOG_FATAL(Device_impl, "device fatal failure"); + RH_NL_FATAL(logname, "device fatal failure"); if ( device ) device->_remove_ref(); ossie::logging::Terminate(); ossie::corba::OrbShutdown(true); @@ -1379,7 +1387,7 @@ void Device_impl::start_device(Device_impl::ctor_type ctor, struct sigaction sa, return; } device->run(); - LOG_DEBUG(Device_impl, "Goodbye!"); + RH_NL_DEBUG(logname, "Goodbye!"); device->_remove_ref(); ossie::logging::Terminate(); ossie::corba::OrbShutdown(true); diff --git a/redhawk/src/base/framework/EventChannelSupport.cpp b/redhawk/src/base/framework/EventChannelSupport.cpp index b5dfe20d8..1215538bb 100644 --- a/redhawk/src/base/framework/EventChannelSupport.cpp +++ b/redhawk/src/base/framework/EventChannelSupport.cpp @@ -32,6 +32,7 @@ #include "ossie/debug.h" #include "ossie/CorbaUtils.h" #include "ossie/EventChannelSupport.h" +#include namespace ossie { @@ -744,6 +745,28 @@ namespace events { return; } + CF::EventChannelManager_var _ecm = CF::EventChannelManager::_nil(); + try { + std::string dommgr_name = nc_name+"/"+nc_name; + CORBA::Object_var dom_obj = ossie::corba::objectFromName(dommgr_name); + CF::DomainManager_ptr dom_ptr = CF::DomainManager::_narrow(dom_obj); + _ecm = dom_ptr->eventChannelMgr(); + } catch (...) { + } + + ossie::events::EventChannelReg_var registration; + try { + if (not CORBA::is_nil(_ecm)) { + ossie::events::EventRegistration ereg; + std::string registrationId(""); + ereg.channel_name = CORBA::string_dup(name.c_str()); + ereg.reg_id = CORBA::string_dup(registrationId.c_str()); + registration = _ecm->registerResource( ereg ); + nc_name = nc_name + "^" + ossie::corba::returnString(registration->reg.reg_id); + } + } catch (...) 
{ + } + CosEventChannelAdmin::SupplierAdmin_var supplier_admin; int tries=retries; do @@ -824,6 +847,28 @@ namespace events { PushEventSupplier::~PushEventSupplier( ) { + CF::EventChannelManager_var _ecm = CF::EventChannelManager::_nil(); + std::string::size_type reg_id_idx = nc_name.find("^"); + std::string tmp_nc_name = nc_name.substr(0, reg_id_idx); + try { + std::string dommgr_name = tmp_nc_name+"/"+tmp_nc_name; + CORBA::Object_var dom_obj = ossie::corba::objectFromName(dommgr_name); + CF::DomainManager_ptr dom_ptr = CF::DomainManager::_narrow(dom_obj); + _ecm = dom_ptr->eventChannelMgr(); + } catch (...) { + } + + try { + if (not CORBA::is_nil(_ecm)) { + std::string reg_id = nc_name.substr(reg_id_idx+1); + ossie::events::EventRegistration ereg; + ereg.channel_name = CORBA::string_dup(name.c_str()); + ereg.reg_id = CORBA::string_dup(reg_id.c_str()); + _ecm->unregister(ereg); + } + } catch (...) { + } + RH_NL_DEBUG("PushEventSupplier", "DTOR - START." ); CORBA::ORB_ptr orb = ossie::corba::Orb(); int tries = retries; diff --git a/redhawk/src/base/framework/Events.cpp b/redhawk/src/base/framework/Events.cpp index 4c6c1a863..f10a3247d 100644 --- a/redhawk/src/base/framework/Events.cpp +++ b/redhawk/src/base/framework/Events.cpp @@ -490,7 +490,7 @@ namespace events { typedef boost::shared_ptr< EM_Subscriber > EM_SubscriberPtr; // Class singleton for this library - ManagerPtr Manager::_Manager; + std::map Manager::_managers; ManagerPtr Manager::GetManager( Resource_impl *obj ) { @@ -500,19 +500,12 @@ namespace events { oid = ossie::corba::returnString(obj->identifier()); } - if ( !Manager::_Manager ) { - try { - Manager::_Manager = boost::shared_ptr(new Manager( obj )); + if (Manager::_managers.find(oid) == Manager::_managers.end()) { RH_NL_DEBUG("redhawk::events::Manager", "Created EventManager for Resource: " << oid ); - } - catch(...){ - RH_NL_WARN("redhawk::events::Manager", "Resource (" <(new Manager( obj )); } - return Manager::_Manager; - + + return 
Manager::_managers[oid]; } @@ -520,7 +513,9 @@ namespace events { // release all Publishers and Subscribers RH_NL_TRACE("redhawk::events::Manager", "Terminate all EventChannels"); - if ( Manager::_Manager ) _Manager->_terminate(); + for (std::map::iterator it=Manager::_managers.begin(); it!=Manager::_managers.end(); it++) { + it->second->_terminate(); + } } @@ -532,20 +527,24 @@ namespace events { if ( obj ){ _obj = obj; + _resourceLog = _obj->getBaseLogger(); + _eventManagerLog = _resourceLog->getChildLogger("EventManager", "system"); _obj_id = ossie::corba::returnString(obj->identifier()); - RH_NL_DEBUG("redhawk::events::Manager", "Resolve Device and Domain Managers..."); + RH_DEBUG(_eventManagerLog, "Resolve Device and Domain Managers..."); // setup create publisher as internal methods redhawk::DomainManagerContainer *dm = obj->getDomainManager(); if ( dm == NULL ) throw -1; CF::DomainManager_ptr domMgr = dm->getRef(); if ( !ossie::corba::objectExists( domMgr ) ) throw 1; - RH_NL_DEBUG("redhawk::events::Manager", "Resolved Domain Manager..."); + RH_DEBUG(_eventManagerLog, "Resolved Domain Manager..."); CF::EventChannelManager_ptr ecm = domMgr->eventChannelMgr(); - RH_NL_DEBUG("redhawk::events::Manager", "Getting Event Channel Manager..."); + RH_DEBUG(_eventManagerLog, "Getting Event Channel Manager..."); if ( !ossie::corba::objectExists( ecm ) ) throw 1; _ecm = ecm; + } else { + _eventManagerLog = rh_logger::Logger::getLogger("EventManager"); } }; @@ -570,7 +569,7 @@ namespace events { throw (RegistrationExists, RegistrationFailed) { SCOPED_LOCK(_mgr_lock); - RH_NL_DEBUG("redhawk::events::Manager", "Requesting Publisher for Channel:" << channel_name << " resource:" << _obj_id ); + RH_DEBUG(_eventManagerLog, "Requesting Publisher for Channel:" << channel_name << " resource:" << _obj_id ); EM_PublisherPtr pub; @@ -583,39 +582,40 @@ namespace events { ereg.channel_name = CORBA::string_dup(channel_name.c_str()); ereg.reg_id = 
CORBA::string_dup(registrationId.c_str()); - RH_NL_DEBUG("redhawk::events::Manager", "Requesting Channel:" << channel_name << " from Domain's EventChannelManager " ); + RH_DEBUG(_eventManagerLog, "Requesting Channel:" << channel_name << " from Domain's EventChannelManager " ); registration = _ecm->registerResource( ereg ); reg = registration.in(); pub = EM_PublisherPtr( new EM_Publisher( *this, reg ) ); + pub->setLogger(this->_resourceLog->getChildLogger(channel_name, "events")); _publishers.push_back(pub.get()); - RH_NL_INFO("redhawk::events::Manager", "PUBLISHER - Channel:" << channel_name << " Reg-Id" << registration->reg.reg_id << " RESOURCE:" << _obj_id ); + RH_INFO(_eventManagerLog, "PUBLISHER - Channel:" << channel_name << " Reg-Id" << registration->reg.reg_id << " RESOURCE:" << _obj_id ); _registrations.push_back( reg ); } } catch( CF::EventChannelManager::RegistrationAlreadyExists e) { - RH_NL_ERROR("EventChannelManager", "Unable to create Publisher for Channel:" << channel_name << ", REASON: Registration already exists."); + RH_ERROR(_eventManagerLog, "Unable to create Publisher for Channel:" << channel_name << ", REASON: Registration already exists."); throw RegistrationExists(); } catch( CF::EventChannelManager::InvalidChannelName e) { - RH_NL_ERROR("EventChannelManager", "Unable to create Publisher for Channel:" << channel_name << ", REASON: Invalid channel name."); + RH_ERROR(_eventManagerLog, "Unable to create Publisher for Channel:" << channel_name << ", REASON: Invalid channel name."); throw RegistrationFailed(); } catch( CF::EventChannelManager::OperationFailed e) { - RH_NL_ERROR("EventChannelManager", "Unable to create Publisher for Channel:" << channel_name << ", REASON: Operation failed."); + RH_ERROR(_eventManagerLog, "Unable to create Publisher for Channel:" << channel_name << ", REASON: Operation failed."); throw RegistrationFailed(); } catch( CF::EventChannelManager::OperationNotAllowed e) { - RH_NL_ERROR("EventChannelManager", "Unable to 
create Publisher for Channel:" << channel_name << ", REASON: Operation failed."); + RH_ERROR(_eventManagerLog, "Unable to create Publisher for Channel:" << channel_name << ", REASON: Operation failed."); throw RegistrationFailed(); } catch( CF::EventChannelManager::ServiceUnavailable e) { - RH_NL_ERROR("EventChannelManager", "Unable to create Publisher for Channel:" << channel_name << ", REASON: Service unavailable."); + RH_ERROR(_eventManagerLog, "Unable to create Publisher for Channel:" << channel_name << ", REASON: Service unavailable."); throw RegistrationFailed(); } catch(...) { - RH_NL_ERROR("EventChannelManager", "Unable to create Publisher for Channel:" << channel_name << ", REASON: Unknown exception occurred."); + RH_ERROR(_eventManagerLog, "Unable to create Publisher for Channel:" << channel_name << ", REASON: Unknown exception occurred."); throw RegistrationFailed(); } @@ -629,7 +629,7 @@ namespace events { SCOPED_LOCK(_mgr_lock); EM_SubscriberPtr sub; - RH_NL_DEBUG("redhawk::events::Manager", "Requesting Subscriber, for Channel:" << channel_name << " resource:" << _obj_id ); + RH_DEBUG(_eventManagerLog, "Requesting Subscriber, for Channel:" << channel_name << " resource:" << _obj_id ); try { @@ -640,39 +640,40 @@ namespace events { ereg.channel_name = CORBA::string_dup(channel_name.c_str()); ereg.reg_id = CORBA::string_dup(registrationId.c_str()); - RH_NL_DEBUG("redhawk::events::Manager", "Requesting Channel:" << channel_name << " from Domain's EventChannelManager " ); + RH_DEBUG(_eventManagerLog, "Requesting Channel:" << channel_name << " from Domain's EventChannelManager " ); registration = _ecm->registerResource( ereg ); reg = registration.in(); sub = EM_SubscriberPtr( new EM_Subscriber( *this, reg ) ); + sub->setLogger(this->_resourceLog->getChildLogger(channel_name, "events")); _subscribers.push_back(sub.get()); - RH_NL_INFO("redhawk::events::Manager", "SUBSCRIBER - Channel:" << channel_name << " Reg-Id" << registration->reg.reg_id << " resource:" 
<< _obj_id ); + RH_INFO(_eventManagerLog, "SUBSCRIBER - Channel:" << channel_name << " Reg-Id" << registration->reg.reg_id << " resource:" << _obj_id ); _registrations.push_back( reg ); } } catch( CF::EventChannelManager::RegistrationAlreadyExists e ) { - RH_NL_ERROR("EventChannelManager", "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Registration already exists."); + RH_ERROR(_eventManagerLog, "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Registration already exists."); throw RegistrationExists(); } catch( CF::EventChannelManager::InvalidChannelName e) { - RH_NL_ERROR("EventChannelManager", "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Invalid channel name."); + RH_ERROR(_eventManagerLog, "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Invalid channel name."); throw RegistrationFailed(); } catch( CF::EventChannelManager::OperationFailed e) { - RH_NL_ERROR("EventChannelManager", "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Operation failed."); + RH_ERROR(_eventManagerLog, "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Operation failed."); throw RegistrationFailed(); } catch( CF::EventChannelManager::OperationNotAllowed e) { - RH_NL_ERROR("EventChannelManager", "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Operation failed."); + RH_ERROR(_eventManagerLog, "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Operation failed."); throw RegistrationFailed(); } catch( CF::EventChannelManager::ServiceUnavailable e) { - RH_NL_ERROR("EventChannelManager", "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Service unavailable."); + RH_ERROR(_eventManagerLog, "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Service unavailable."); throw RegistrationFailed(); } catch( ... 
) { - RH_NL_ERROR("EventChannelManager", "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Unknown exception occurred."); + RH_ERROR(_eventManagerLog, "Unable to create Subscriber for Channel:" << channel_name << ", REASON: Unknown exception occurred."); throw RegistrationFailed(); } @@ -685,7 +686,7 @@ namespace events { SCOPED_LOCK(_mgr_lock); _allow = false; - RH_NL_DEBUG("redhawk::events::Manager", " Resource: " << _obj_id << ", Terminate All Registrations.: " << _registrations.size() ); + RH_DEBUG(_eventManagerLog, " Resource: " << _obj_id << ", Terminate All Registrations.: " << _registrations.size() ); Registrations::iterator iter = _registrations.begin(); for ( ; iter != _registrations.end(); iter++ ) { @@ -694,12 +695,12 @@ namespace events { if ( ossie::corba::objectExists(_ecm) ) { try { // unregister from the Domain - RH_NL_DEBUG("redhawk::events::Manager", "Unregister REG=ID:" << creg.reg.reg_id ); + RH_DEBUG(_eventManagerLog, "Unregister REG=ID:" << creg.reg.reg_id ); _ecm->unregister( creg.reg ); } catch(...) 
{ // log error - RH_NL_ERROR("redhawk::events::Manager", "Unregister ERROR REG=ID:" << creg.reg.reg_id ); + RH_ERROR(_eventManagerLog, "Unregister ERROR REG=ID:" << creg.reg.reg_id ); } } } @@ -707,7 +708,7 @@ namespace events { // need to cleanup Publisher memory _registrations.clear(); - RH_NL_DEBUG("redhawk::events::Manager", "Delete subscribers....."); + RH_DEBUG(_eventManagerLog, "Delete subscribers....."); Subscribers::iterator siter = _subscribers.begin(); for ( ; siter != _subscribers.end(); siter++ ) { if ( *siter ) delete *siter; @@ -715,18 +716,18 @@ namespace events { _subscribers.clear(); - RH_NL_DEBUG("redhawk::events::Manager", "Delete publishers....."); + RH_DEBUG(_eventManagerLog, "Delete publishers....."); Publishers::iterator piter = _publishers.begin(); for ( ; piter != _publishers.end(); piter++ ) { if ( *piter ) delete *piter; } _publishers.clear(); - RH_NL_DEBUG("redhawk::events::Manager", "Deleted.... publishers....."); + RH_DEBUG(_eventManagerLog, "Deleted.... publishers....."); // if we have any subscribers or publishers left then disconnect and delete those instances _ecm = CF::EventChannelManager::_nil(); - RH_NL_DEBUG("redhawk::events::Manager", "Terminate Completed."); + RH_DEBUG(_eventManagerLog, "Terminate Completed."); } @@ -786,23 +787,23 @@ namespace events { SCOPED_LOCK(_mgr_lock); std::string regid( reg.reg.reg_id.in() ); - RH_NL_DEBUG("redhawk::events::Manager", "Unregister request .... reg-id:" << regid ); + RH_DEBUG(_eventManagerLog, "Unregister request .... 
reg-id:" << regid ); Registrations::iterator iter = _registrations.begin(); for ( ; iter != _registrations.end(); iter++ ) { - RH_NL_DEBUG("redhawk::events::Manager", "TBL REG-ID:" << iter->reg.reg_id << " REQ:" << reg.reg.channel_name ); + RH_DEBUG(_eventManagerLog, "TBL REG-ID:" << iter->reg.reg_id << " REQ:" << reg.reg.channel_name ); if ( regid.compare(iter->reg.reg_id) == 0 ){ - RH_NL_DEBUG("redhawk::events::Manager", "FOUND REG-ID:" << regid << " CHANNEL:" << reg.reg.channel_name ); + RH_DEBUG(_eventManagerLog, "FOUND REG-ID:" << regid << " CHANNEL:" << reg.reg.channel_name ); if ( ossie::corba::objectExists(_ecm) ) { try { // unregister from the Domain - RH_NL_INFO("redhawk::events::Manager", "UNREGISTER REG-ID:" << regid << " CHANNEL:" << reg.reg.channel_name ); + RH_INFO(_eventManagerLog, "UNREGISTER REG-ID:" << regid << " CHANNEL:" << reg.reg.channel_name ); _ecm->unregister( reg.reg ); } catch(...) { // log error - RH_NL_ERROR("redhawk::events::Manager", "UNREGISTER FAILED, REG-ID:" << regid << " CHANNEL:" << reg.reg.channel_name ); + RH_ERROR(_eventManagerLog, "UNREGISTER FAILED, REG-ID:" << regid << " CHANNEL:" << reg.reg.channel_name ); } _registrations.erase(iter); diff --git a/redhawk/src/base/framework/ExecutableDevice_impl.cpp b/redhawk/src/base/framework/ExecutableDevice_impl.cpp index 46ff68166..cb2a03836 100644 --- a/redhawk/src/base/framework/ExecutableDevice_impl.cpp +++ b/redhawk/src/base/framework/ExecutableDevice_impl.cpp @@ -61,6 +61,7 @@ PREPARE_CF_LOGGING(ExecutableDevice_impl) ExecutableDevice_impl::ExecutableDevice_impl (char* devMgr_ior, char* id, char* lbl, char* sftwrPrfl): LoadableDevice_impl (devMgr_ior, id, lbl, sftwrPrfl) { + _init(); } @@ -72,6 +73,7 @@ ExecutableDevice_impl::ExecutableDevice_impl (char* devMgr_ior, char* id, char* CF::Properties capacities): LoadableDevice_impl (devMgr_ior, id, lbl, sftwrPrfl, capacities) { + _init(); } /* ExecutableDevice_impl **************************************** @@ -81,6 +83,7 @@ 
ExecutableDevice_impl::ExecutableDevice_impl (char* devMgr_ior, char* id, char* char* composite_ior): LoadableDevice_impl (devMgr_ior, id, lbl, sftwrPrfl, composite_ior) { + _init(); } @@ -91,6 +94,17 @@ ExecutableDevice_impl::ExecutableDevice_impl (char* devMgr_ior, char* id, char* CF::Properties capacities, char* composite_ior): LoadableDevice_impl (devMgr_ior, id, lbl, sftwrPrfl, capacities, composite_ior) { + _init(); +} + +void ExecutableDevice_impl::_init() +{ + setLogger(this->_baseLog->getChildLogger("ExecutableDevice", "system")); +} + +void ExecutableDevice_impl::setLogger(rh_logger::LoggerPtr logptr) +{ + _executabledeviceLog = logptr; } std::string ExecutableDevice_impl::component_name_from_profile_name(const std::string& profile_name) @@ -120,8 +134,6 @@ std::string ExecutableDevice_impl::get_component_name_from_exec_params(const CF: if (ossie::corba::returnString(parameters[i].id) == std::string("PROFILE_NAME")) return component_name_from_profile_name( ossie::any_to_string(parameters[i].value) ); } - - LOG_ERROR(ExecutableDevice_impl, __FUNCTION__ << ": Could not extract component name from exec params" ); throw CF::ExecutableDevice::InvalidParameters(parameters); } @@ -136,7 +148,7 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::executeLinked (const { std::stringstream errstr; errstr << "Error acquiring lock (errno=" << e.native_error() << " msg=\"" << e.what() << "\")"; - LOG_ERROR(ExecutableDevice_impl, __FUNCTION__ << ": " << errstr.str() ); + RH_ERROR(_executabledeviceLog, __FUNCTION__ << ": " << errstr.str() ); throw CF::Device::InvalidState(errstr.str().c_str()); } @@ -154,9 +166,11 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::executeLinked (const } update_selected_paths(selected_paths); - std::vector prepend_args; - CF::ExecutableDevice::ProcessID_Type pid = execute(name, options, parameters); - return pid; + // Add the system-defined DEPLOYMENT_ROOT property to the command line + redhawk::PropertyMap 
arguments(parameters); + arguments["RH::DEPLOYMENT_ROOT"] = getCacheDirectory(); + + return execute(name, options, arguments); } CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::execute (const char* name, const CF::Properties& options, const CF::Properties& parameters) throw (CORBA::SystemException, CF::Device::InvalidState, CF::ExecutableDevice::InvalidFunction, CF::ExecutableDevice::InvalidParameters, CF::ExecutableDevice::InvalidOptions, CF::InvalidFileName, CF::ExecutableDevice::ExecuteFail) @@ -170,7 +184,7 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::execute (const char* { std::stringstream errstr; errstr << "Error acquiring lock (errno=" << e.native_error() << " msg=\"" << e.what() << "\")"; - LOG_ERROR(ExecutableDevice_impl, __FUNCTION__ << ": " << errstr.str() ); + RH_ERROR(_executabledeviceLog, __FUNCTION__ << ": " << errstr.str() ); throw CF::Device::InvalidState(errstr.str().c_str()); } @@ -188,23 +202,23 @@ void ExecutableDevice_impl::set_resource_affinity( const CF::Properties& options // try { if ( redhawk::affinity::has_affinity( options ) ) { - LOG_DEBUG(ExecutableDevice_impl, "Has Affinity....ExecDevice/Resource:" << label() << "/" << rsc_name); + RH_DEBUG(_executabledeviceLog, "Has Affinity....ExecDevice/Resource:" << label() << "/" << rsc_name); if ( redhawk::affinity::is_disabled() ) { - LOG_WARN(ExecutableDevice_impl, "Resource has affinity directives but processing disabled, ExecDevice/Resource:" << + RH_WARN(_executabledeviceLog, "Resource has affinity directives but processing disabled, ExecDevice/Resource:" << label() << "/" << rsc_name); } else { - LOG_DEBUG(ExecutableDevice_impl, "Calling set resource affinity....ExecDevice/Resource:" << + RH_DEBUG(_executabledeviceLog, "Calling set resource affinity....ExecDevice/Resource:" << label() << "/" << rsc_name); redhawk::affinity::set_affinity( options, rsc_pid, blacklist ); } } else { - LOG_TRACE(ExecutableDevice_impl, "No Affinity Found....ExecDevice/Resource:" 
<< label() << "/" << rsc_name); + RH_TRACE(_executabledeviceLog, "No Affinity Found....ExecDevice/Resource:" << label() << "/" << rsc_name); } } catch( redhawk::affinity::AffinityFailed &e) { - LOG_WARN(ExecutableDevice_impl, "AFFINITY REQUEST FAILED: " << e.what() ); + RH_WARN(_executabledeviceLog, "AFFINITY REQUEST FAILED: " << e.what() ); throw; } @@ -219,6 +233,8 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::do_execute (const ch CF::Properties invalidOptions; std::string path; char* tmp; + + std::string mod_localPath = prependCacheIfAvailable(name); // throw and error if name does not begin with a / if (strncmp(name, "/", 1) != 0) @@ -238,7 +254,7 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::do_execute (const ch invalidOptions[invalidOptions.length() - 1].value = options[i].value; } else - LOG_WARN(ExecutableDevice_impl, "Received a PRIORITY_ID execute option...ignoring.") + RH_WARN(_executabledeviceLog, "Received a PRIORITY_ID execute option...ignoring.") } if (options[i].id == CF::ExecutableDevice::STACK_SIZE_ID) { CORBA::TypeCode_var atype = options[i].value.type(); @@ -248,7 +264,7 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::do_execute (const ch invalidOptions[invalidOptions.length() - 1].value = options[i].value; } else - LOG_WARN(ExecutableDevice_impl, "Received a STACK_SIZE_ID execute option...ignoring.") + RH_WARN(_executabledeviceLog, "Received a STACK_SIZE_ID execute option...ignoring.") } } @@ -275,7 +291,7 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::do_execute (const ch // change permissions to 7-- if (chmod(path.c_str(), S_IRWXU) != 0) { - LOG_ERROR(ExecutableDevice_impl, "Unable to change permission on executable"); + RH_ERROR(_executabledeviceLog, "Unable to change permission on executable"); throw CF::ExecutableDevice::ExecuteFail(CF::CF_EACCES, "Unable to change permission on executable"); } @@ -302,15 +318,15 @@ CF::ExecutableDevice::ProcessID_Type 
ExecutableDevice_impl::do_execute (const ch } args.push_back(path); - LOG_DEBUG(ExecutableDevice_impl, "Building param list for process " << path); + RH_DEBUG(_executabledeviceLog, "Building param list for process " << path); for (CORBA::ULong i = 0; i < parameters.length(); ++i) { - LOG_DEBUG(ExecutableDevice_impl, "id=" << ossie::corba::returnString(parameters[i].id) << " value=" << ossie::any_to_string(parameters[i].value)); + RH_DEBUG(_executabledeviceLog, "id=" << ossie::corba::returnString(parameters[i].id) << " value=" << ossie::any_to_string(parameters[i].value)); CORBA::TypeCode_var atype = parameters[i].value.type(); args.push_back(ossie::corba::returnString(parameters[i].id)); args.push_back(ossie::any_to_string(parameters[i].value)); } - LOG_DEBUG(ExecutableDevice_impl, "Forking process " << path); + RH_DEBUG(_executabledeviceLog, "Forking process " << path); std::vector argv(args.size() + 1, NULL); for (std::size_t i = 0; i < args.size(); ++i) { @@ -337,22 +353,22 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::do_execute (const ch ExecutableDevice_impl::__logger->setLevel(lvl); // set affinity logger method so we do not use log4cxx during affinity processing routine redhawk::affinity::set_affinity_logger( ExecutableDevice_impl::__logger ) ; - LOG_DEBUG(ExecutableDevice_impl, " Calling set resource affinity....exec:" << name << " options=" << options.length()); + RH_DEBUG(_executabledeviceLog, " Calling set resource affinity....exec:" << name << " options=" << options.length()); // set affinity preference before exec try { - LOG_DEBUG(ExecutableDevice_impl, " Calling set resource affinity....exec:" << name << " options=" << options.length()); + RH_DEBUG(_executabledeviceLog, " Calling set resource affinity....exec:" << name << " options=" << options.length()); set_resource_affinity( options, getpid(), name ); } catch( redhawk::affinity::AffinityFailed &ex ) { - LOG_WARN(ExecutableDevice_impl, "Unable to satisfy affinity request for: " << 
name << " Reason: " << ex.what() ); + RH_WARN(_executabledeviceLog, "Unable to satisfy affinity request for: " << name << " Reason: " << ex.what() ); errno=EPERM<<2; returnval=-1; ossie::corba::OrbShutdown(true); exit(returnval); } catch( ... ) { - LOG_WARN(ExecutableDevice_impl, "Unhandled exception during affinity processing for resource: " << name ); + RH_WARN(_executabledeviceLog, "Unhandled exception during affinity processing for resource: " << name ); ossie::corba::OrbShutdown(true); exit(returnval); } @@ -378,20 +394,20 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::do_execute (const ch break; // Only retry on "text file busy" error - LOG_WARN(ExecutableDevice_impl, "execv() failed, retrying... (cmd=" << path << " msg=\"" << strerror(errno) << "\" retries=" << num_retries << ")"); + RH_WARN(_executabledeviceLog, "execv() failed, retrying... (cmd=" << path << " msg=\"" << strerror(errno) << "\" retries=" << num_retries << ")"); usleep(100000); } if( returnval ) { - LOG_ERROR(ExecutableDevice_impl, "Error when calling execv() (cmd=" << path << " errno=" << errno << " msg=\"" << strerror(errno) << "\")"); + RH_ERROR(_executabledeviceLog, "Error when calling execv() (cmd=" << path << " errno=" << errno << " msg=\"" << strerror(errno) << "\")"); ossie::corba::OrbShutdown(true); } - LOG_DEBUG(ExecutableDevice_impl, "Exiting FAILED subprocess:" << returnval ); + RH_DEBUG(_executabledeviceLog, "Exiting FAILED subprocess:" << returnval ); exit(returnval); } else if (pid < 0 ){ - LOG_ERROR(ExecutableDevice_impl, "Error forking child process (errno: " << errno << " msg=\"" << strerror(errno) << "\")" ); + RH_ERROR(_executabledeviceLog, "Error forking child process (errno: " << errno << " msg=\"" << strerror(errno) << "\")" ); switch (errno) { case E2BIG: throw CF::ExecutableDevice::ExecuteFail(CF::CF_E2BIG, @@ -423,7 +439,7 @@ CF::ExecutableDevice::ProcessID_Type ExecutableDevice_impl::do_execute (const ch } } - LOG_DEBUG(ExecutableDevice_impl, 
"Execute success: name:" << name << " : "<< path); + RH_DEBUG(_executabledeviceLog, "Execute success: name:" << name << " : "<< path); return pid; } @@ -452,13 +468,13 @@ ExecutableDevice_impl::terminate (CF::ExecutableDevice::ProcessID_Type processId bool processes_dead = false; for (std::vector< std::pair< int, float > >::iterator _signal=_signals.begin();!processes_dead &&_signal!=_signals.end();_signal++) { int retval = killpg(pgroup, _signal->first); - LOG_TRACE(ExecutableDevice_impl,"Intitial Process Termination pid/group " << processId << "/" << pgroup << " RET= " << retval); + RH_TRACE(_executabledeviceLog,"Intitial Process Termination pid/group " << processId << "/" << pgroup << " RET= " << retval); if ( retval == -1 && errno == EPERM ) { - LOG_ERROR(ExecutableDevice_impl,"Error sending pid/group " << processId << "/" << pgroup); + RH_ERROR(_executabledeviceLog,"Error sending pid/group " << processId << "/" << pgroup); continue; } if ( retval == -1 && errno == ESRCH ) { - LOG_TRACE(ExecutableDevice_impl,"Process group is dead " << processId << "/" << pgroup); + RH_TRACE(_executabledeviceLog,"Process group is dead " << processId << "/" << pgroup); processes_dead = true; continue; } @@ -469,9 +485,9 @@ ExecutableDevice_impl::terminate (CF::ExecutableDevice::ProcessID_Type processId int cnt=0; while (!processes_dead && (retval != -1) and ( now < end_time )) { retval = killpg(pgroup, 0); - LOG_TRACE(ExecutableDevice_impl,"Terminating Process.... (loop:" << cnt++ << " signal:" << _signal->first << ") pid/group " << processId << "/" << pgroup << " RET= " << retval); + RH_TRACE(_executabledeviceLog,"Terminating Process.... 
(loop:" << cnt++ << " signal:" << _signal->first << ") pid/group " << processId << "/" << pgroup << " RET= " << retval); if (retval == -1 and (errno == ESRCH)) { - LOG_TRACE(ExecutableDevice_impl,"Process group terminated " << processId << "/" << pgroup); + RH_TRACE(_executabledeviceLog,"Process group terminated " << processId << "/" << pgroup); processes_dead = true; continue; } diff --git a/redhawk/src/base/framework/ExecutorService.cpp b/redhawk/src/base/framework/ExecutorService.cpp new file mode 100644 index 000000000..1e32ca73c --- /dev/null +++ b/redhawk/src/base/framework/ExecutorService.cpp @@ -0,0 +1,111 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include + +using namespace redhawk; + +ExecutorService::ExecutorService() : + _thread(0), + _running(false) +{ +} + +ExecutorService::~ExecutorService() +{ + stop(); +} + +void ExecutorService::start () +{ + boost::mutex::scoped_lock lock(_mutex); + if (_running) { + return; + } + + _running = true; + _thread = new boost::thread(&ExecutorService::_run, this); +} + +void ExecutorService::stop () +{ + boost::thread* old_thread = 0; + { + boost::mutex::scoped_lock lock(_mutex); + _running = false; + old_thread = _thread; + _thread = 0; + _cond.notify_all(); + } + if (old_thread) { + old_thread->join(); + delete old_thread; + } +} + +void ExecutorService::clear () +{ + boost::mutex::scoped_lock lock(_mutex); + _queue.clear(); + _cond.notify_all(); +} + +size_t ExecutorService::pending () +{ + boost::mutex::scoped_lock lock(_mutex); + return _queue.size(); +} + +void ExecutorService::_run () +{ + boost::mutex::scoped_lock lock(_mutex); + while (_running) { + if (_queue.empty()) { + _cond.wait(lock); + } else { + task_queue::iterator task = _queue.begin(); + if (task->first > boost::get_system_time()) { + // Head of queue is scheduled in the future + boost::system_time when = task->first; + _cond.timed_wait(lock, when); + } else { + // Copy the task's function and remove it from the queue + func_type func = task->second; + _queue.erase(task); + + // Run task with the lock released + lock.unlock(); + func(); + lock.lock(); + } + } + } +} + +void ExecutorService::_insertSorted (func_type func, boost::system_time when) +{ + boost::mutex::scoped_lock lock(_mutex); + task_queue::iterator pos = _queue.begin(); + while ((pos != _queue.end()) && (when > pos->first)) { + ++pos; + } + _queue.insert(pos, std::make_pair(when, func)); + _cond.notify_all(); +} diff --git a/redhawk/src/base/framework/FileStream.cpp b/redhawk/src/base/framework/FileStream.cpp index 8027ae415..399d5a45a 100644 --- a/redhawk/src/base/framework/FileStream.cpp +++ 
b/redhawk/src/base/framework/FileStream.cpp @@ -84,9 +84,37 @@ void File_buffer::close() throw(std::ios_base::failure) } } catch (CORBA::SystemException& se) { - throw std::ios_base::failure("CORBA SystemException while opening file"); + throw std::ios_base::failure("CORBA SystemException while closing file"); } catch (...) { - throw std::ios_base::failure("Unexpected error while opening file"); + throw std::ios_base::failure("Unexpected error while closing file"); + } +} + +File_stream::File_stream(CF::FileSystem_ptr fsysptr, const char* path) throw(std::ios_base::failure) : + std::ios(0), + needsClose(true) +{ + try { + sb = new File_buffer((CF::File_var)fsysptr->open(path, true)); + this->init(sb); + } catch (const CF::InvalidFileName& ifn) { + throw std::ios_base::failure(std::string(ifn.msg)); + } catch (const CF::FileException& fe) { + throw std::ios_base::failure(std::string(fe.msg)); + } catch( ... ) { + throw std::ios_base::failure("exception while opening file"); + } +} + +File_stream::File_stream(CF::File_ptr fptr) : + std::ios(0), + needsClose(false) +{ + try { + sb = new File_buffer(fptr); + this->init(sb); + } catch( ... 
) { + throw std::ios_base::failure("exception while opening file"); } } diff --git a/redhawk/src/base/framework/LoadableDevice_impl.cpp b/redhawk/src/base/framework/LoadableDevice_impl.cpp index b37b3944e..c99927185 100644 --- a/redhawk/src/base/framework/LoadableDevice_impl.cpp +++ b/redhawk/src/base/framework/LoadableDevice_impl.cpp @@ -112,10 +112,6 @@ std::string EnvironmentPathParser::to_string() const return ret_str; } - -PREPARE_CF_LOGGING(LoadableDevice_impl) - - /* LoadableDevice_impl **************************************************************************** - constructor 1: no capacities defined ************************************************************************************************ */ @@ -174,9 +170,11 @@ void LoadableDevice_impl::_init () { "bytes", "external", "configure"); -} - + // Default to the current working directory + cacheDirectory = ossie::getCurrentDirName(); + setLogger(this->_baseLog->getChildLogger("LoadableDevice", "system")); +} /* LoadableDevice_impl **************************************************************************** - destructor @@ -192,6 +190,11 @@ LoadableDevice_impl::~LoadableDevice_impl () #endif } +void LoadableDevice_impl::setLogger(rh_logger::LoggerPtr logptr) +{ + _loadabledeviceLog = logptr; +} + void LoadableDevice_impl::update_ld_library_path (CF::FileSystem_ptr fs, const char* fileName, CF::LoadableDevice::LoadType loadKind) throw (CORBA::SystemException, CF::Device::InvalidState, CF::LoadableDevice::InvalidLoadKind, CF::InvalidFileName, CF::LoadableDevice::LoadFail) { // Update environment to use newly-loaded library @@ -215,7 +218,7 @@ void LoadableDevice_impl::merge_front_environment_path( const char* environment_ EnvironmentPathParser parser( getenv(environment_variable) ); parser.merge_front( path ); setenv(environment_variable, parser.to_string().c_str(), 1); - LOG_DEBUG(LoadableDevice_impl, "Updated environment path " << environment_variable << ": " << parser.to_string() ); + 
RH_DEBUG(_loadabledeviceLog, "Updated environment path " << environment_variable << ": " << parser.to_string() ); } void @@ -235,20 +238,20 @@ throw (CORBA::SystemException, CF::Device::InvalidState, catch( CF::File::IOException & e ) { std::stringstream errstr; errstr << "IO Exception occurred, file: " << fileName; - LOG_ERROR(LoadableDevice_impl, __FUNCTION__ << ": " << errstr.str() ); + RH_ERROR(_loadabledeviceLog, __FUNCTION__ << ": " << errstr.str() ); throw CF::LoadableDevice::LoadFail(e.errorNumber, e.msg); } catch( CF::FileException & e ) { std::stringstream errstr; errstr << "File Exception occurred, file: " << fileName; - LOG_ERROR(LoadableDevice_impl, __FUNCTION__ << ": " << errstr.str() ); + RH_ERROR(_loadabledeviceLog, __FUNCTION__ << ": " << errstr.str() ); throw CF::LoadableDevice::LoadFail(e.errorNumber, e.msg); } catch( const boost::thread_resource_error& e ) { std::stringstream errstr; errstr << "Error acquiring lock (errno=" << e.native_error() << " msg=\"" << e.what() << "\")"; - LOG_ERROR(LoadableDevice_impl, __FUNCTION__ << ": " << errstr.str() ); + RH_ERROR(_loadabledeviceLog, __FUNCTION__ << ": " << errstr.str() ); throw CF::Device::InvalidState(errstr.str().c_str()); } } @@ -263,23 +266,23 @@ throw (CORBA::SystemException, CF::Device::InvalidState, CF::LoadableDevice::InvalidLoadKind, CF::InvalidFileName, CF::LoadableDevice::LoadFail, CF::FileException ) { - LOG_DEBUG(LoadableDevice_impl, "load " << fileName) + RH_DEBUG(_loadabledeviceLog, "load " << fileName << " kind:" << loadKind) // verify that the device is in a valid state for loading if (!isUnlocked () || isDisabled ()) { - LOG_ERROR(LoadableDevice_impl, "Cannot load. System is either LOCKED, SHUTTING DOWN or DISABLED.") - LOG_DEBUG(LoadableDevice_impl, "Unlocked: " << isUnlocked ()) - LOG_DEBUG(LoadableDevice_impl, "isDisabled: " << isDisabled ()) + RH_ERROR(_loadabledeviceLog, "Cannot load. 
System is either LOCKED, SHUTTING DOWN or DISABLED.") + RH_DEBUG(_loadabledeviceLog, "Unlocked: " << isUnlocked ()) + RH_DEBUG(_loadabledeviceLog, "isDisabled: " << isDisabled ()) throw (CF::Device:: InvalidState ("Cannot load. System is either LOCKED, SHUTTING DOWN or DISABLED.")); } - LOG_DEBUG(LoadableDevice_impl, "It's not locked and not disabled") + RH_DEBUG(_loadabledeviceLog, "It's not locked and not disabled") // verify that the loadKind is supported (only executable is supported by this version) if ((loadKind != CF::LoadableDevice::EXECUTABLE) && (loadKind != CF::LoadableDevice::SHARED_LIBRARY)) { - LOG_ERROR(LoadableDevice_impl, "It's not CF::LoadableDevice::EXECUTABLE or CF::LoadableDevice::SHARED_LIBRARY") + RH_ERROR(_loadabledeviceLog, "It's not CF::LoadableDevice::EXECUTABLE or CF::LoadableDevice::SHARED_LIBRARY") throw CF::LoadableDevice::InvalidLoadKind (); } @@ -290,15 +293,15 @@ throw (CORBA::SystemException, CF::Device::InvalidState, // already performs this existence check try { if (!fs->exists (workingFileName.c_str())) { - LOG_ERROR(LoadableDevice_impl, "File " << workingFileName << " does not exist") + RH_ERROR(_loadabledeviceLog, "File " << workingFileName << " does not exist") throw (CF::InvalidFileName (CF::CF_ENOENT, "Cannot load. File name is invalid.")); } } catch ( ... 
) { - LOG_ERROR(LoadableDevice_impl, "Exception raised when calling the file system: " << workingFileName ); + RH_ERROR(_loadabledeviceLog, "Exception raised when calling the file system: " << workingFileName ); throw; } - LOG_DEBUG(LoadableDevice_impl, "Cleaning name " << fileName) + RH_DEBUG(_loadabledeviceLog, "Cleaning name " << fileName) // Get rid of all the directories in the given name (if any) CF::FileSystem::FileInformationSequence_var contents = fs->list(workingFileName.c_str()); std::string simpleName; @@ -309,17 +312,17 @@ throw (CORBA::SystemException, CF::Device::InvalidState, simpleName = workingFileName.substr(pos + 1); } - LOG_DEBUG(LoadableDevice_impl, "Is " << fileName << " a directory?") + RH_DEBUG(_loadabledeviceLog, "Is " << fileName << " a directory?") CF::FileSystem::FileInformationType* fileInfo = 0; for (unsigned int i = 0; i < contents->length(); i++) { - LOG_DEBUG(LoadableDevice_impl, "comparing " << simpleName << " and " << contents[i].name) + RH_DEBUG(_loadabledeviceLog, "comparing " << simpleName << " and " << contents[i].name) if (!simpleName.compare(contents[i].name)) { fileInfo = &contents[i]; break; } } if (!fileInfo) { - LOG_ERROR(LoadableDevice_impl, "The file system couldn't find " << fileName) + RH_ERROR(_loadabledeviceLog, "The file system couldn't find " << fileName) throw (CF::InvalidFileName (CF::CF_ENOENT, "Cannot load. File name is invalid.")); } @@ -346,11 +349,11 @@ throw (CORBA::SystemException, CF::Device::InvalidState, // in the cache. No consideration is given to clock sync differences between systems. 
time_t remoteModifiedTime = getModTime(fileInfo->fileProperties); time_t cacheModifiedTime = cacheTimestamps[workingFileName]; - LOG_TRACE(LoadableDevice_impl, "Remote modified: " << remoteModifiedTime << " Local modified: " << cacheModifiedTime); + RH_TRACE(_loadabledeviceLog, "Remote modified: " << remoteModifiedTime << " Local modified: " << cacheModifiedTime); if (remoteModifiedTime > cacheModifiedTime) { - LOG_DEBUG(LoadableDevice_impl, "Remote file is newer than local file"); + RH_DEBUG(_loadabledeviceLog, "Remote file is newer than local file"); } else { - LOG_DEBUG(LoadableDevice_impl, "File exists in cache"); + RH_DEBUG(_loadabledeviceLog, "File exists in cache"); incrementFile(workingFileName); return; } @@ -360,35 +363,42 @@ throw (CORBA::SystemException, CF::Device::InvalidState, if (fileInfo->kind != CF::FileSystem::DIRECTORY) { // The target file is a file - LOG_DEBUG(LoadableDevice_impl, "Loading the file " << fileName); + RH_DEBUG(_loadabledeviceLog, "Loading the file " << fileName); + std::fstream fileStream; + std::ios_base::openmode mode; + mode = std::ios::out; + std::string _relativeFileName = workingFileName; + if (workingFileName[0] == '/') { + _relativeFileName = workingFileName.substr(1); + } + relativeFileName = prependCacheIfAvailable(_relativeFileName); + // Create a local directory to copy the file to - fs::path parentDir = fs::path(workingFileName).parent_path().relative_path(); + fs::path parentDir; + if (relativeFileName == _relativeFileName) { + parentDir = fs::path(relativeFileName).parent_path().relative_path(); + } else { + parentDir = fs::path(relativeFileName).parent_path(); + } try { if ( !parentDir.string().empty() && fs::create_directories(parentDir)) { - LOG_DEBUG(LoadableDevice_impl, "Created parent directory " << parentDir.string()); + RH_DEBUG(_loadabledeviceLog, "Created parent directory " << parentDir.string()); } } catch (const fs::filesystem_error& ex) { - LOG_ERROR(LoadableDevice_impl, "Unable to create parent 
directory " << parentDir.string() << ": " << ex.what()); + RH_ERROR(_loadabledeviceLog, "Unable to create parent directory " << parentDir.string() << ": " << ex.what()); throw CF::LoadableDevice::LoadFail(CF::CF_NOTSET, "Device SDR cache write error"); } // copy the file - LOG_DEBUG(LoadableDevice_impl, "Copying " << workingFileName << " to the device's cache") - std::fstream fileStream; - std::ios_base::openmode mode; - mode = std::ios::out; - relativeFileName = workingFileName; - if (workingFileName[0] == '/') { - relativeFileName = workingFileName.substr(1); - } + RH_DEBUG(_loadabledeviceLog, "Copying " << workingFileName << " to the device's cache") fileStream.open(relativeFileName.c_str(), mode); bool text_file_busy = false; if (!fileStream.is_open()) { if (errno == ETXTBSY) { text_file_busy = true; } else { - LOG_ERROR(LoadableDevice_impl, "Could not create file " << relativeFileName.c_str()); + RH_ERROR(_loadabledeviceLog, "Could not create file " << relativeFileName.c_str()); throw CF::LoadableDevice::LoadFail(CF::CF_NOTSET, "Device SDR cache write error"); } } @@ -400,7 +410,7 @@ throw (CORBA::SystemException, CF::Device::InvalidState, cacheTimestamps[workingFileName] = getModTime(fileInfo->fileProperties); fileStream.open(relativeFileName.c_str(), mode); if (!fileStream.is_open()) { - LOG_ERROR(LoadableDevice_impl, "Could not create file " << relativeFileName.c_str()); + RH_ERROR(_loadabledeviceLog, "Could not create file " << relativeFileName.c_str()); throw CF::LoadableDevice::LoadFail(CF::CF_NOTSET, "Device SDR cache write error"); } } @@ -410,7 +420,7 @@ throw (CORBA::SystemException, CF::Device::InvalidState, fileTypeTable[workingFileName] = CF::FileSystem::PLAIN; } else { // The target file is a directory - LOG_DEBUG(LoadableDevice_impl, "Copying the file " << fileName << " as a directory to the cache as " << workingFileName) + RH_DEBUG(_loadabledeviceLog, "Copying the file " << fileName << " as a directory to the cache as " << workingFileName) 
fileTypeTable[workingFileName] = CF::FileSystem::DIRECTORY; fs::path localPath = fs::path(workingFileName).branch_path().relative_path(); copiedFiles.insert(copiedFiles_type::value_type(workingFileName, localPath.string())); @@ -422,7 +432,7 @@ throw (CORBA::SystemException, CF::Device::InvalidState, } // add filename to loadedfiles. If it's been already loaded, then increment its counter - LOG_DEBUG(LoadableDevice_impl, "Incrementing " << workingFileName << " vs " << fileName) + RH_DEBUG(_loadabledeviceLog, "Incrementing " << workingFileName << " vs " << fileName) incrementFile (workingFileName); if (cacheTimestamps.count(workingFileName) == 0) { cacheTimestamps[workingFileName] = getModTime(fileInfo->fileProperties); @@ -474,14 +484,23 @@ throw (CORBA::SystemException, CF::Device::InvalidState, FILE *fileCheck = popen(command.c_str(), "r"); int status = pclose(fileCheck); if (!status) { + RH_DEBUG(_loadabledeviceLog, "cmd= " << command << + " relativeFileName: " << relativeFileName << + " relativePath: " << relativePath); + // The import worked std::string additionalPath = ""; + // Prepend the current path if the cache is empty, then check the filename + if (cacheDirectory.empty()) { + additionalPath = currentPath + std::string("/"); + } if (fileInfo->kind == CF::FileSystem::DIRECTORY) { - additionalPath = currentPath+std::string("/")+relativeFileName; + additionalPath = additionalPath + relativeFileName; } else { - additionalPath = currentPath+std::string("/")+relativePath; + additionalPath = additionalPath + relativePath; } env_changes.addModification("PYTHONPATH", additionalPath); + RH_DEBUG(_loadabledeviceLog, "Adding " << additionalPath << " to PYTHONPATH"); PythonPackage = true; } chdir(currentPath.c_str()); @@ -496,8 +515,15 @@ throw (CORBA::SystemException, CF::Device::InvalidState, if ((extension == ".py") || (extension == ".pyc")) { fileOrDirectoryName.erase(iext); } + relativePath.assign(relativeFileName, 0, lastSlash); + } else { + relativePath = 
relativeFileName; + if (true) { // if __init__.py exists + relativePath.assign(relativeFileName, 0, lastSlash); + } + relativePath = prependCacheIfAvailable(relativePath); } - relativePath.assign(relativeFileName, 0, lastSlash); + if (chdir(relativePath.c_str())) { // this is an invalid path } else { @@ -507,19 +533,21 @@ throw (CORBA::SystemException, CF::Device::InvalidState, FILE *fileCheck = popen(command.c_str(), "r"); int status = pclose(fileCheck); if (!status) { - LOG_DEBUG(LoadableDevice_impl, "cmd= " << command << + RH_DEBUG(_loadabledeviceLog, "cmd= " << command << " relativeFileName: " << relativeFileName << " relativePath: " << relativePath); // The import worked - std::string additionalPath = ""; - if (fileInfo->kind == CF::FileSystem::DIRECTORY) { - additionalPath = currentPath+std::string("/")+relativePath; - } else { - additionalPath = currentPath+std::string("/")+relativePath; + std::string additionalPath = relativePath; + if (cacheDirectory.empty()) { + if (fileInfo->kind == CF::FileSystem::DIRECTORY) { + additionalPath = currentPath+std::string("/")+relativePath; + } else { + additionalPath = currentPath+std::string("/")+relativePath; + } } env_changes.addModification("PYTHONPATH", additionalPath); - LOG_DEBUG(LoadableDevice_impl, "Adding " << additionalPath << " to PYTHONPATH"); + RH_DEBUG(_loadabledeviceLog, "Adding " << additionalPath << " to PYTHONPATH"); PythonPackage = true; } } @@ -532,8 +560,12 @@ throw (CORBA::SystemException, CF::Device::InvalidState, int retval = ossie::helpers::is_jarfile( relativeFileName ); if ( retval == 0 ) { currentPath = ossie::getCurrentDirName(); - std::string additionalPath = currentPath+std::string("/")+relativeFileName; + std::string additionalPath = relativeFileName; + if (cacheDirectory.empty()) { + additionalPath = currentPath+std::string("/")+relativeFileName; + } env_changes.addModification("CLASSPATH", additionalPath); + RH_DEBUG(_loadabledeviceLog, "Adding " << additionalPath << " to CLASSPATH"); 
JavaJar = true; } } @@ -579,30 +611,29 @@ void LoadableDevice_impl::update_path(sharedLibraryStorage &packageDescription) void LoadableDevice_impl::_loadTree(CF::FileSystem_ptr fs, std::string remotePath, fs::path& localPath, std::string fileKey) { - LOG_DEBUG(LoadableDevice_impl, "_loadTree " << remotePath << " " << localPath) + RH_DEBUG(_loadabledeviceLog, "_loadTree " << remotePath << " " << localPath) + fs::path mod_localPath = prependCacheIfAvailable(localPath.string()); CF::FileSystem::FileInformationSequence_var fis = fs->list(remotePath.c_str()); for (unsigned int i = 0; i < fis->length(); i++) { if (fis[i].kind == CF::FileSystem::PLAIN) { std::string fileName(fis[i].name); - fs::path localFile(localPath / fileName); + fs::path localFile(mod_localPath / fileName); if (*(remotePath.end() - 1) == '/') { - LOG_DEBUG(LoadableDevice_impl, "_copyFile " << remotePath + fileName << " " << localFile) + RH_DEBUG(_loadabledeviceLog, "_copyFile " << remotePath + fileName << " " << localFile) _copyFile(fs, remotePath + fileName, localFile.string(), fileKey); } else { - LOG_DEBUG(LoadableDevice_impl, "_copyFile " << remotePath << " " << localFile) + RH_DEBUG(_loadabledeviceLog, "_copyFile " << remotePath << " " << localFile) _copyFile(fs, remotePath, localFile.string(), fileKey); } const redhawk::PropertyMap& fileprops = redhawk::PropertyMap::cast(fis[i].fileProperties); - redhawk::PropertyMap::const_iterator iter_fileprops = fileprops.find("EXECUTABLE"); - if (iter_fileprops != fileprops.end()) { - if (fileprops["EXECUTABLE"].toBoolean()) - chmod(localFile.string().c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); + if (fileprops.get("EXECUTABLE", false).toBoolean()) { + chmod(localFile.string().c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); } } else if (fis[i].kind == CF::FileSystem::DIRECTORY) { std::string directoryName(fis[i].name); - fs::path localDirectory(localPath / directoryName); - LOG_DEBUG(LoadableDevice_impl, "Making directory " << directoryName << " in " 
<< localPath) + fs::path localDirectory(mod_localPath / directoryName); + RH_DEBUG(_loadabledeviceLog, "Making directory " << directoryName << " in " << mod_localPath) copiedFiles.insert(copiedFiles_type::value_type(fileKey, localDirectory.string())); bool dexists = false; try { @@ -616,10 +647,10 @@ void LoadableDevice_impl::_loadTree(CF::FileSystem_ptr fs, std::string remotePat fs::create_directories(localDirectory); } if (*(remotePath.end() - 1) == '/') { - LOG_DEBUG(LoadableDevice_impl, "There") - _loadTree(fs, remotePath + std::string("/") + directoryName, localPath, fileKey); + RH_DEBUG(_loadabledeviceLog, "There") + _loadTree(fs, remotePath + std::string("/") + directoryName, mod_localPath, fileKey); } else { - LOG_DEBUG(LoadableDevice_impl, "Here") + RH_DEBUG(_loadabledeviceLog, "Here") _loadTree(fs, remotePath + std::string("/"), localDirectory, fileKey); } } else { @@ -631,7 +662,7 @@ void LoadableDevice_impl::_loadTree(CF::FileSystem_ptr fs, std::string remotePat void LoadableDevice_impl::_deleteTree(const std::string &fileKey) { - LOG_DEBUG(LoadableDevice_impl, "_deleteTree " << fileKey) + RH_DEBUG(_loadabledeviceLog, "_deleteTree " << fileKey) std::pair p = copiedFiles.equal_range(fileKey); // perform the search backwards (so that directories are emptied before they're deleted) @@ -639,11 +670,11 @@ void LoadableDevice_impl::_deleteTree(const std::string &fileKey) --p.second; if (fs::is_directory(((*p.second).second).c_str())) { if (!fs::is_empty(((*p.second).second).c_str())) { - LOG_TRACE(LoadableDevice_impl, "Not removing " << ((*p.second).second).c_str() << " - not empty!") + RH_TRACE(_loadabledeviceLog, "Not removing " << ((*p.second).second).c_str() << " - not empty!") continue; } } - LOG_TRACE(LoadableDevice_impl, "removing " << ((*p.second).second).c_str()) + RH_TRACE(_loadabledeviceLog, "removing " << ((*p.second).second).c_str()) fs::remove(((*p.second).second).c_str()); } @@ -653,7 +684,7 @@ void LoadableDevice_impl::_deleteTree(const 
std::string &fileKey) bool LoadableDevice_impl::_treeIntact(const std::string &fileKey) { - LOG_DEBUG(LoadableDevice_impl, "_treeIntact " << fileKey) + RH_DEBUG(_loadabledeviceLog, "_treeIntact " << fileKey) std::pair p = copiedFiles.equal_range(fileKey); for ( ; p.first != p.second; ) { @@ -670,15 +701,33 @@ bool LoadableDevice_impl::_treeIntact(const std::string &fileKey) return true; } +std::string LoadableDevice_impl::prependCacheIfAvailable(const std::string &localPath) { + std::string mod_localPath = localPath; + if (this->getPropertyFromId("cacheDirectory")) { + std::string cache_dir = ((StringProperty*)this->getPropertyFromId("cacheDirectory"))->getValue(); + if (!cache_dir.empty()) { + if (localPath.find(cache_dir) == std::string::npos) { + if (!cache_dir.compare(cache_dir.length()-1, 1, "/")) { + mod_localPath = cache_dir + mod_localPath; + } else { + mod_localPath = cache_dir + std::string("/") + mod_localPath; + } + } + } + } + return mod_localPath; +} + void LoadableDevice_impl::_copyFile(CF::FileSystem_ptr fs, const std::string &remotePath, const std::string &localPath, const std::string &fileKey) { + std::string mod_localPath(prependCacheIfAvailable(localPath)); CF::File_var fileToLoad = CF::File::_nil(); try { fileToLoad= fs->open(remotePath.c_str(), true); if ( CORBA::is_nil(fileToLoad) ) { std::string msg("Unable to open remote file: "); msg += remotePath; - LOG_ERROR(LoadableDevice_impl, msg); + RH_ERROR(_loadabledeviceLog, msg); throw CF::LoadableDevice::LoadFail( CF::CF_NOTSET, msg.c_str()); } } @@ -700,14 +749,14 @@ void LoadableDevice_impl::_copyFile(CF::FileSystem_ptr fs, const std::string &re std::fstream fileStream; std::ios_base::openmode mode; mode = std::ios::out | std::ios::trunc; - fileStream.open(localPath.c_str(), mode); + fileStream.open(mod_localPath.c_str(), mode); if (!fileStream.is_open()) { - LOG_ERROR(LoadableDevice_impl, "Local file " << localPath << " did not open succesfully.") + RH_ERROR(_loadabledeviceLog, "Local file " 
<< mod_localPath << " did not open succesfully.") } else { - LOG_DEBUG(LoadableDevice_impl, "Local file " << localPath << " opened succesfully.") + RH_DEBUG(_loadabledeviceLog, "Local file " << mod_localPath << " opened succesfully.") } - copiedFiles.insert(copiedFiles_type::value_type(fileKey, localPath)); + copiedFiles.insert(copiedFiles_type::value_type(fileKey, mod_localPath)); std::size_t fileSize = fileToLoad->sizeOf(); bool fe=false; @@ -717,13 +766,13 @@ void LoadableDevice_impl::_copyFile(CF::FileSystem_ptr fs, const std::string &re toRead = std::min(fileSize, blockTransferSize); fileSize -= toRead; - //LOG_TRACE(LoadableDevice_impl, "READ Local file " << localPath << " length:" << toRead << " filesize/bts " << fileSize << "/" << blockTransferSize ); + //RH_TRACE(_loadabledeviceLog, "READ Local file " << mod_localPath << " length:" << toRead << " filesize/bts " << fileSize << "/" << blockTransferSize ); try { fileToLoad->read(data, toRead); fileStream.write((const char*)data->get_buffer(), data->length()); } catch ( CF::File::IOException &e ) { - LOG_WARN(LoadableDevice_impl, "READ Local file exception, " << ossie::corba::returnString(e.msg) ); + RH_WARN(_loadabledeviceLog, "READ Local file exception, " << ossie::corba::returnString(e.msg) ); throw; } @@ -737,7 +786,7 @@ void LoadableDevice_impl::_copyFile(CF::FileSystem_ptr fs, const std::string &re fileToLoad->close(); } catch(...) 
{ - LOG_ERROR(LoadableDevice_impl, "Closing remote file encountered exception, file:" << remotePath ); + RH_ERROR(_loadabledeviceLog, "Closing remote file encountered exception, file:" << remotePath ); fe=true; } @@ -772,7 +821,7 @@ throw (CORBA::SystemException, CF::Device::InvalidState, CF::InvalidFileName) { std::stringstream errstr; errstr << "Error acquiring lock (errno=" << e.native_error() << " msg=\"" << e.what() << "\")"; - LOG_ERROR(LoadableDevice_impl, __FUNCTION__ << ": " << errstr.str() ); + RH_ERROR(_loadabledeviceLog, __FUNCTION__ << ": " << errstr.str() ); throw CF::Device::InvalidState(errstr.str().c_str()); } } @@ -784,7 +833,7 @@ LoadableDevice_impl::do_unload (const char* fileName) throw (CORBA::SystemException, CF::Device::InvalidState, CF::InvalidFileName) { - LOG_DEBUG(LoadableDevice_impl, "Unload called for " << fileName) + RH_DEBUG(_loadabledeviceLog, "Unload called for " << fileName) // verify that the device is in a valid state for loading if (isLocked () || isDisabled ()) { @@ -802,7 +851,7 @@ throw (CORBA::SystemException, CF::Device::InvalidState, CF::InvalidFileName) // delete the file if (fileTypeTable.count(workingFileName) == 0) { // no record as to what the file is (yet still clearly valid) - LOG_WARN(LoadableDevice_impl, "Unload called on a file that does not exist (" << fileName << ")") + RH_WARN(_loadabledeviceLog, "Unload called on a file that does not exist (" << fileName << ")") return; } if (fileTypeTable[workingFileName] == CF::FileSystem::PLAIN) { @@ -812,7 +861,7 @@ throw (CORBA::SystemException, CF::Device::InvalidState, CF::InvalidFileName) relativeFileName = workingFileName.substr(1); } remove(relativeFileName.c_str()); - LOG_DEBUG(LoadableDevice_impl, "Unload ############## (" << fileName << ")") + RH_DEBUG(_loadabledeviceLog, "Unload ############## (" << fileName << ")") } else if (fileTypeTable[workingFileName] == CF::FileSystem::DIRECTORY) { _deleteTree(std::string(fileName)); } else if 
(fileTypeTable[workingFileName] == CF::FileSystem::FILE_SYSTEM) { @@ -881,9 +930,15 @@ bool LoadableDevice_impl::isFileLoaded (const char* fileName) } -void LoadableDevice_impl ::configure (const CF::Properties& capacities) -throw (CF::PropertySet::PartialConfiguration, CF::PropertySet:: - InvalidConfiguration, CORBA::SystemException) +const std::string& LoadableDevice_impl::getCacheDirectory() { - Device_impl::configure(capacities); + if (this->getPropertyFromId("cacheDirectory")) { + std::string cache_dir = ((StringProperty*)this->getPropertyFromId("cacheDirectory"))->getValue(); + if (!cache_dir.empty()) { + if (cacheDirectory != cache_dir) { + cacheDirectory = cache_dir; + } + } + } + return cacheDirectory; } diff --git a/redhawk/src/base/framework/Logging_impl.cpp b/redhawk/src/base/framework/Logging_impl.cpp index 435455e91..e79269f3c 100644 --- a/redhawk/src/base/framework/Logging_impl.cpp +++ b/redhawk/src/base/framework/Logging_impl.cpp @@ -37,14 +37,39 @@ struct null_deleter }; -Logging_impl::Logging_impl() : +Logging_impl::Logging_impl(std::string logger_name) : _logName(""), _logLevel(CF::LogLevels::INFO), _logCfgContents(), _logCfgURL(""), - _loggingCtx() + _origLogCfgURL(""), + _loggingCtx(), + _origLevelSet(false) { + _baseLog = rh_logger::Logger::getNewHierarchy(logger_name); + // get default set of macros to fill in + _loggingMacros = ossie::logging::GetDefaultMacros(); + + ossie::logging::ResolveHostInfo( _loggingMacros ); + + _logCfgContents = ossie::logging::GetDefaultConfig(); + + // setup logger to be root by default and assign logging level INFO` + getLogger( _logName, true ); +}; + +Logging_impl::Logging_impl(rh_logger::LoggerPtr parent_logger) : + _logName(""), + _logLevel(CF::LogLevels::INFO), + _logCfgContents(), + _logCfgURL(""), + _origLogCfgURL(""), + _loggingCtx(), + _origLevelSet(false) +{ + + _baseLog = parent_logger; // get default set of macros to fill in _loggingMacros = ossie::logging::GetDefaultMacros(); @@ -132,10 +157,25 @@ 
void Logging_impl::setLoggingContext( const std::string &logcfg_url, int logLeve STDOUT_DEBUG("Logging_impl setLoggingContext END" ); } +std::string Logging_impl::getExpandedLogConfig(const std::string &logcfg_url) { + std::string _logCfgContents; -void Logging_impl::saveLoggingContext( const std::string &logcfg_url, int logLevel, ossie::logging::ResourceCtxPtr ctx ) { + if ( logcfg_url == "" ) { + STDOUT_DEBUG( "Logging_impl saveLoggingContext Default Configuration."); + _logCfgContents=ossie::logging::GetDefaultConfig(); + } else { + // grab contents of URL and save + _logCfgContents = ""; + std::string config_contents = ossie::logging::GetConfigFileContents(logcfg_url); + if ( config_contents.size() > 0 ){ + _logCfgContents= ossie::logging::ExpandMacros(config_contents, _loggingMacros); + } + } + return _logCfgContents; +} +void Logging_impl::saveLoggingContext( const std::string &logcfg_url, int logLevel, ossie::logging::ResourceCtxPtr ctx ) { STDOUT_DEBUG("Logging_impl saveLoggingContext START:"); if ( ctx ) { STDOUT_DEBUG( "Logging_impl saveLoggingContext Apply Macro Context:"); @@ -146,22 +186,8 @@ void Logging_impl::saveLoggingContext( const std::string &logcfg_url, int logLev try { // save off logging config url _logCfgURL = logcfg_url; - - // test we have a logging URI - if ( logcfg_url == "" ) { - STDOUT_DEBUG( "Logging_impl saveLoggingContext Default Configuration."); - _logCfgContents=ossie::logging::GetDefaultConfig(); - } - else{ - // grab contents of URL and save - _logCfgContents = ""; - std::string config_contents = ossie::logging::GetConfigFileContents(logcfg_url); - if ( config_contents.size() > 0 ){ - _logCfgContents= ossie::logging::ExpandMacros(config_contents, _loggingMacros); - } - } - } - catch( std::exception &e ) { + _logCfgContents = getExpandedLogConfig(logcfg_url); + } catch( std::exception &e ) { } bool set_level=false; @@ -191,6 +217,16 @@ void Logging_impl::saveLoggingContext( const std::string &logcfg_url, int logLev } } + // 
set_level means that a debug level was passed to the component/device. If that's the case, tie the log level to the global root logger + this->_baseLog->configureLogger(_logCfgContents, set_level, logLevel); + + if (not _origLevelSet) { + _origLevelSet = true; + _origLogCfgURL = _logCfgURL; + _origLogLevel = _logLevel; + _origCtx = ctx; + } + STDOUT_DEBUG("Logging_impl setLoggingContext END" ); } @@ -224,8 +260,8 @@ char *Logging_impl::getLogConfig () { void Logging_impl::setLogConfig( const char *config_contents ) { + std::string lcfg = ossie::logging::ExpandMacros( config_contents, _loggingMacros ); if ( logConfigCallback) { - std::string lcfg = ossie::logging::ExpandMacros( config_contents, _loggingMacros ); (*logConfigCallback)( lcfg.c_str() ); _logCfgContents = lcfg; } @@ -234,7 +270,7 @@ void Logging_impl::setLogConfig( const char *config_contents ) { // check if my level has changed for the logger log_level(); } - + this->_baseLog->configureLogger(lcfg); } void Logging_impl::setLogConfigURL( const char *in_url ) { @@ -267,6 +303,8 @@ void Logging_impl::setLogConfigURL( const char *in_url ) { void Logging_impl::setLogLevel( const char *logger_id, const CF::LogLevel newLevel ) throw (CF::UnknownIdentifier) { + if (not haveLoggerHierarchy(logger_id)) + throw (CF::UnknownIdentifier()); _logLevel = newLevel; if ( logLevelCallback ) { (*logLevelCallback)(logger_id, newLevel); @@ -274,19 +312,73 @@ void Logging_impl::setLogLevel( const char *logger_id, const CF::LogLevel newLev else { std::string logid(""); if ( logger_id ) logid=logger_id; - ossie::logging::SetLogLevel( logid, newLevel ); + rh_logger::LoggerPtr tmp_logger(this->_baseLog->getInstanceLogger(logger_id)); + rh_logger::LevelPtr level = ossie::logging::ConvertCFLevelToRHLevel( newLevel); + tmp_logger->setLevel(level); if ( _logger && logid == _logger->getName() ) { _logLevel = newLevel; } } } -CF::LogLevel Logging_impl::log_level() { - if ( _logger ) { - CF::LogLevel level = 
ossie::logging::ConvertRHLevelToCFLevel( _logger->getLevel() ); - if ( level != _logLevel ) { - _logLevel = level; +bool Logging_impl::haveLogger(const std::string &name) +{ + std::vector loggers = this->_baseLog->getNamedLoggers(); + std::string _logger_id(name); + bool found_logger = false; + for (std::vector::iterator it=loggers.begin(); it!=loggers.end(); it++) { + if (*it==_logger_id) { + found_logger = true; + break; + } + } + return found_logger; +} + +bool Logging_impl::haveLoggerHierarchy(const std::string &name) +{ + return this->_baseLog->isLoggerInHierarchy(name); +} + +CF::LogLevel Logging_impl::getLogLevel( const char *logger_id ) + throw (CF::UnknownIdentifier) +{ + if (not haveLoggerHierarchy(logger_id)) + throw (CF::UnknownIdentifier()); + rh_logger::LoggerPtr tmp_logger(this->_baseLog->getInstanceLogger(logger_id)); + int _level = tmp_logger->getLevel()->toInt(); + if (_level == rh_logger::Level::OFF_INT) + _level = CF::LogLevels::OFF; + else if (_level == rh_logger::Level::ALL_INT) + _level = CF::LogLevels::ALL; + return _level; +} + +void Logging_impl::resetLog() { + if (_origLevelSet) { + std::vector loggers = this->_baseLog->getNamedLoggers(); + for (std::vector::iterator it=loggers.begin(); it!=loggers.end(); it++) { + rh_logger::LoggerPtr _tmplog(this->_baseLog->getInstanceLogger(*it)); + _tmplog->setLevel(rh_logger::LevelPtr()); } + this->setLoggingContext(_origLogCfgURL, ossie::logging::ConvertRHLevelToDebug(ossie::logging::ConvertCFLevelToRHLevel(_origLogLevel)), _origCtx); + } +} + +CF::StringSequence* Logging_impl::getNamedLoggers() { + CF::StringSequence_var retval = new CF::StringSequence(); + std::vector loggers = this->_baseLog->getNamedLoggers(); + for (unsigned int i=0; i_baseLog->getLevel() ); + if ( level != _logLevel ) { + _logLevel = level; } return _logLevel; } @@ -294,18 +386,19 @@ CF::LogLevel Logging_impl::log_level() { void Logging_impl::log_level( const CF::LogLevel newLevel ) { + rh_logger::LevelPtr level = 
ossie::logging::ConvertCFLevelToRHLevel( newLevel ); if ( logLevelCallback ) { _logLevel = newLevel; (*logLevelCallback)( "", newLevel); } else { _logLevel = newLevel; - rh_logger::LevelPtr level = ossie::logging::ConvertCFLevelToRHLevel( newLevel ); // apply new level to resource logger if ( _logger ) { _logger->setLevel( level ); } - } + } + this->_baseLog->setLevel(level); } diff --git a/redhawk/src/base/framework/Makefile.am b/redhawk/src/base/framework/Makefile.am index 1d0f8d3f2..216e78c6e 100644 --- a/redhawk/src/base/framework/Makefile.am +++ b/redhawk/src/base/framework/Makefile.am @@ -41,6 +41,7 @@ libossiecf_la_SOURCES = AggregateDevice_impl.cpp \ CorbaUtils.cpp \ prop_helpers.cpp \ MessageInterface.cpp \ + MessageSupplier.cpp \ PropertyInterface.cpp \ Service_impl.cpp \ type_traits.cpp \ @@ -49,15 +50,32 @@ libossiecf_la_SOURCES = AggregateDevice_impl.cpp \ logging/rh_logger.cpp \ logging/StringInputStream.cpp \ logging/RH_LogEventAppender.cpp \ + logging/RH_SyncRollingAppender.cpp \ EventChannelSupport.cpp \ Events.cpp \ Component.cpp \ Value.cpp \ PropertyType.cpp \ PropertyMap.cpp \ - Versions.cpp + Versions.cpp \ + ExecutorService.cpp \ + UsesPort.cpp \ + ProvidesPort.cpp \ + Transport.cpp \ + BufferManager.cpp \ + inplace_list.h \ + bitops.cpp \ + bitbuffer.cpp \ + shm/Allocator.cpp \ + shm/Heap.cpp \ + shm/HeapClient.cpp \ + shm/MappedFile.cpp \ + shm/Superblock.cpp \ + shm/SuperblockFile.cpp \ + shm/System.cpp libossiecf_la_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) $(OMNICOS_CFLAGS) $(OMNIORB_CFLAGS) $(LOG4CXX_FLAGS) -libossiecf_la_LIBADD = $(BOOST_LDFLAGS) $(BOOST_FILESYSTEM_LIB) $(BOOST_SERIALIZATION_LIB) $(BOOST_THREAD_LIB) $(BOOST_SYSTEM_LIB) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(LOG4CXX_LIBS) -ldl +# Include the omniORB internal directory, otherwise CorbaUtils will not build +libossiecf_la_CXXFLAGS +=-I$(OMNIORB_INCLUDEDIR)/omniORB4/internal +libossiecf_la_LIBADD = $(BOOST_LDFLAGS) $(BOOST_FILESYSTEM_LIB) $(BOOST_SERIALIZATION_LIB) $(BOOST_THREAD_LIB) 
$(BOOST_SYSTEM_LIB) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(LOG4CXX_LIBS) -ldl -lrt libossiecf_la_LDFLAGS = -Wall -version-info $(LIBOSSIECF_VERSION_INFO) - diff --git a/redhawk/src/base/framework/MessageInterface.cpp b/redhawk/src/base/framework/MessageInterface.cpp index 0629fd1a0..020dabb9e 100644 --- a/redhawk/src/base/framework/MessageInterface.cpp +++ b/redhawk/src/base/framework/MessageInterface.cpp @@ -18,8 +18,8 @@ * along with this program. If not, see http://www.gnu.org/licenses/. */ -#include "ossie/MessageInterface.h" -#include +#include +#include PREPARE_CF_LOGGING(MessageConsumerPort) @@ -69,11 +69,28 @@ CosEventChannelAdmin::ProxyPullConsumer_ptr SupplierAdmin_i::obtain_pull_consume return CosEventChannelAdmin::ProxyPullConsumer::_nil(); }; -MessageConsumerPort::MessageConsumerPort(std::string port_name) : Port_Provides_base_impl(port_name) { - supplier_admin = new SupplierAdmin_i(this); +MessageConsumerPort::MessageConsumerPort(std::string port_name) : + Port_Provides_base_impl(port_name), + supplier_admin(0) +{ +} + +MessageConsumerPort::~MessageConsumerPort() +{ + // If a SupplierAdmin was created, deactivate and delete it + if (supplier_admin) { + PortableServer::POA_var poa = supplier_admin->_default_POA(); + PortableServer::ObjectId_var oid = poa->servant_to_id(supplier_admin); + poa->deactivate_object(oid); + supplier_admin->_remove_ref(); + } + + for (CallbackTable::iterator callback = callbacks_.begin(); callback != callbacks_.end(); ++callback) { + delete callback->second; + } } - // CF::Port methods +// CF::Port methods void MessageConsumerPort::connectPort(CORBA::Object_ptr connection, const char* connectionId) { CosEventChannelAdmin::EventChannel_var channel = ossie::corba::_narrowSafe(connection); if (CORBA::is_nil(channel)) { @@ -101,6 +118,12 @@ CosEventChannelAdmin::ConsumerAdmin_ptr MessageConsumerPort::for_consumers() { }; CosEventChannelAdmin::SupplierAdmin_ptr MessageConsumerPort::for_suppliers() { + boost::mutex::scoped_lock 
lock(portInterfaceAccess); + if (!supplier_admin) { + supplier_admin = new SupplierAdmin_i(this); + PortableServer::POA_var poa = supplier_admin->_default_POA(); + PortableServer::ObjectId_var oid = poa->activate_object(supplier_admin); + } return supplier_admin->_this(); }; @@ -144,9 +167,9 @@ CosEventComm::PushSupplier_ptr MessageConsumerPort::removeSupplier (const std::s }; void MessageConsumerPort::fireCallback (const std::string& id, const CORBA::Any& data) { - CallbackTable::iterator callback = callbacks_.find(id); - if (callback != callbacks_.end()) { - (*callback->second)(id, data); + MessageCallback* callback = getMessageCallback(id); + if (callback) { + callback->dispatch(id, data); } else { if (generic_callbacks_.empty()) { std::string warning = "no callbacks registered for messages with id: "+id+"."; @@ -157,8 +180,8 @@ void MessageConsumerPort::fireCallback (const std::string& id, const CORBA::Any& warning += " The only registered callback is for message with id: "+callbacks_.begin()->first; } else { warning += " The available message callbacks are for messages with any of the following id: "; - for (callback = callbacks_.begin();callback != callbacks_.end(); callback++) { - warning += callback->first+" "; + for (CallbackTable::iterator cb = callbacks_.begin(); cb != callbacks_.end(); ++cb) { + warning += cb->first+" "; } } LOG_WARN(MessageConsumerPort,warning); @@ -166,87 +189,34 @@ void MessageConsumerPort::fireCallback (const std::string& id, const CORBA::Any& } // Invoke the callback for those messages that are generic - generic_callbacks_(id, data); + dispatchGeneric(id, data); }; -std::string MessageConsumerPort::getRepid() const -{ - return "IDL:ExtendedEvent/MessageEvent:1.0"; -} - -std::string MessageConsumerPort::getDirection() const -{ - return "Bidir"; -} - -MessageSupplierPort::MessageSupplierPort (std::string port_name) : - Port_Uses_base_impl(port_name) -{ -} - -MessageSupplierPort::~MessageSupplierPort (void) 
+MessageConsumerPort::MessageCallback* MessageConsumerPort::getMessageCallback(const std::string& id) { -} - -void MessageSupplierPort::connectPort(CORBA::Object_ptr connection, const char* connectionId) -{ - boost::mutex::scoped_lock lock(portInterfaceAccess); - this->active = true; - CosEventChannelAdmin::EventChannel_var channel = ossie::corba::_narrowSafe(connection); - if (CORBA::is_nil(channel)) { - throw CF::Port::InvalidPort(0, "The object provided did not narrow to a CosEventChannelAdmin::EventChannel type"); - } - CosEventChannelAdmin::SupplierAdmin_var supplier_admin = channel->for_suppliers(); - CosEventChannelAdmin::ProxyPushConsumer_ptr proxy_consumer = supplier_admin->obtain_push_consumer(); - proxy_consumer->connect_push_supplier(CosEventComm::PushSupplier::_nil()); - extendConsumers(connectionId, proxy_consumer); -} - -void MessageSupplierPort::disconnectPort(const char* connectionId) -{ - boost::mutex::scoped_lock lock(portInterfaceAccess); - CosEventChannelAdmin::ProxyPushConsumer_var consumer = removeConsumer(connectionId); - if (CORBA::is_nil(consumer)) { - return; - } - consumer->disconnect_push_consumer(); - if (this->consumers.empty()) { - this->active = false; + CallbackTable::iterator callback = callbacks_.find(id); + if (callback != callbacks_.end()) { + return callback->second; } + return 0; } -void MessageSupplierPort::push(const CORBA::Any& data) +bool MessageConsumerPort::hasGenericCallbacks() { - boost::mutex::scoped_lock lock(portInterfaceAccess); - std::map::iterator connection = consumers.begin(); - while (connection != consumers.end()) { - try { - (connection->second)->push(data); - } catch ( const CORBA::MARSHAL& ex ) { - RH_NL_WARN("MessageSupplierPort","Could not deliver the message. Maximum message size exceeded"); - } catch ( ... 
) { - } - connection++; - } + return !generic_callbacks_.empty(); } -CosEventChannelAdmin::ProxyPushConsumer_ptr MessageSupplierPort::removeConsumer(std::string consumer_id) +void MessageConsumerPort::dispatchGeneric(const std::string& id, const CORBA::Any& data) { - std::map::iterator connection = consumers.find(consumer_id); - if (connection == consumers.end()) { - return CosEventChannelAdmin::ProxyPushConsumer::_nil(); - } - CosEventChannelAdmin::ProxyPushConsumer_var consumer = connection->second; - consumers.erase(connection); - return consumer._retn(); + generic_callbacks_(id, data); } -void MessageSupplierPort::extendConsumers(std::string consumer_id, CosEventChannelAdmin::ProxyPushConsumer_ptr proxy_consumer) +std::string MessageConsumerPort::getRepid() const { - consumers[std::string(consumer_id)] = proxy_consumer; + return ExtendedEvent::MessageEvent::_PD_repoId; } -std::string MessageSupplierPort::getRepid() const +std::string MessageConsumerPort::getDirection() const { - return "IDL:ExtendedEvent/MessageEvent:1.0"; + return CF::PortSet::DIRECTION_BIDIR; } diff --git a/redhawk/src/base/framework/MessageSupplier.cpp b/redhawk/src/base/framework/MessageSupplier.cpp new file mode 100644 index 000000000..0d0b8dbba --- /dev/null +++ b/redhawk/src/base/framework/MessageSupplier.cpp @@ -0,0 +1,357 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include +#include + +class MessageSupplierPort::MessageTransport : public redhawk::UsesTransport +{ +public: + MessageTransport(MessageSupplierPort* port) : + redhawk::UsesTransport(port) + { + } + + virtual ~MessageTransport() + { + } + + virtual void push(const CORBA::Any& data) = 0; + + virtual void beginQueue(size_t count) = 0; + virtual void queueMessage(const std::string& msgId, const char* format, const void* msgData, MessageSupplierPort::SerializerFunc serializer) = 0; + virtual void sendMessages() = 0; + +private: + CosEventChannelAdmin::EventChannel_var _channel; +}; + +class MessageSupplierPort::CorbaTransport : public MessageSupplierPort::MessageTransport +{ +public: + CorbaTransport(MessageSupplierPort* port, CosEventChannelAdmin::EventChannel_ptr channel) : + MessageTransport(port) + { + CosEventChannelAdmin::SupplierAdmin_var supplier_admin = channel->for_suppliers(); + _consumer = supplier_admin->obtain_push_consumer(); + _consumer->connect_push_supplier(CosEventComm::PushSupplier::_nil()); + } + + virtual std::string transportType() const + { + return "CORBA"; + } + + virtual CF::Properties transportInfo() const + { + return CF::Properties(); + } + + void push(const CORBA::Any& data) + { + try { + _consumer->push(data); + } catch (const CORBA::MARSHAL&) { + throw redhawk::TransportError("Maximum message size exceeded"); + } + } + + void beginQueue(size_t count) + { + // Pre-allocate enough space to hold the entire queue + if (_queue.maximum() < count) { + _queue.replace(count, 0, CF::Properties::allocbuf(count), true); + } else { + _queue.length(0); + } + } + + void queueMessage(const std::string& msgId, const char* /*unused*/, const void* msgData, MessageSupplierPort::SerializerFunc serializer) + { + CORBA::ULong index = 
_queue.length(); + _queue.length(index+1); + CF::DataType& message = _queue[index]; + message.id = msgId.c_str(); + serializer(message.value, msgData); + } + + void sendMessages() + { + try { + CORBA::Any data; + data <<= _queue; + push(data); + } + catch(const redhawk::TransportError &ex ){ + if ( _queue.length() == 1 ){ + throw; + } + + CF::Properties _smsg; + _smsg.length(1); + int mcnt=0; + for (CORBA::ULong ii = 0; ii < _queue.length(); ++ii) { + _smsg[0] = _queue[ii]; + CORBA::Any d; + d <<= _smsg; + try { + push(d); + } + catch( const redhawk::TransportError &ex ){ + std::ostringstream os; + os << "Maximum message size exceeded, sent " << mcnt << " of " << _queue.length() <<"."; + throw redhawk::TransportError(os.str()); + } + } + } + } + + void disconnect() + { + try { + _consumer->disconnect_push_consumer(); + } catch (...) { + // Ignore errors on disconnect + } + } + +private: + CosEventChannelAdmin::ProxyPushConsumer_var _consumer; + CF::Properties _queue; +}; + +class MessageSupplierPort::LocalTransport : public MessageSupplierPort::MessageTransport +{ +public: + LocalTransport(MessageSupplierPort* port, MessageConsumerPort* consumer) : + MessageTransport(port), + _consumer(consumer) + { + } + + virtual std::string transportType() const + { + return "local"; + } + + virtual CF::Properties transportInfo() const + { + return CF::Properties(); + } + + void push(const CORBA::Any& data) + { + CF::Properties* temp; + if (!(data >>= temp)) { + return; + } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + for (redhawk::PropertyMap::const_iterator msg = props.begin(); msg != props.end(); ++msg) { + _consumer->fireCallback(msg->getId(), msg->getValue()); + } + } + + void beginQueue(size_t /*unused*/) + { + } + + void queueMessage(const std::string& msgId, const char* format, const void* msgData, MessageSupplierPort::SerializerFunc serializer) + { + CallbackEntry* entry = getCallback(msgId, format); + if (entry) { + // There is a 
message-specific callback registered; use direct + // dispatch if available, otherwise fall back to CORBA Any + if (entry->direct) { + entry->callback->dispatch(msgId, msgData); + } else { + CORBA::Any data; + serializer(data, msgData); + entry->callback->dispatch(msgId, data); + } + } + + // If the receiver has any generic callbacks registered, serialize the + // message to a CORBA Any (which, technically speaking, may have also + // been done above if the message format differed) and send it along. + // By serializing only when it's required, the best case of direct + // message dispatch runs significantly faster. + if (_consumer->hasGenericCallbacks()) { + CORBA::Any data; + serializer(data, msgData); + _consumer->dispatchGeneric(msgId, data); + } + } + + void sendMessages() + { + } + +private: + struct CallbackEntry { + MessageConsumerPort::MessageCallback* callback; + bool direct; + }; + + typedef std::map CallbackTable; + + CallbackEntry* getCallback(const std::string& msgId, const char* format) + { + CallbackTable::iterator callback = _callbacks.find(msgId); + if (callback != _callbacks.end()) { + // The callback has already been found and negotiated + return &(callback->second); + } + + // No callback has been found yet; ask the consumer for its callback, + // and if it has one, negotiate whether we can use direct dispatch via + // void* + CallbackEntry entry; + entry.callback = _consumer->getMessageCallback(msgId); + if (entry.callback) { + entry.direct = entry.callback->isCompatible(format); + callback = _callbacks.insert(std::make_pair(msgId, entry)).first; + return &(callback->second); + } + + // There is no callback registered for the given message + return 0; + } + + MessageConsumerPort* _consumer; + CallbackTable _callbacks; +}; + +MessageSupplierPort::MessageSupplierPort (const std::string& name) : + UsesPort(name) +{ +} + +MessageSupplierPort::~MessageSupplierPort (void) +{ +} + +void MessageSupplierPort::_validatePort(CORBA::Object_ptr object) +{ 
+ const std::string rep_id(CosEventChannelAdmin::EventChannel::_PD_repoId); + bool valid; + try { + valid = object->_is_a(rep_id.c_str()); + } catch (...) { + // If _is_a throws an exception, assume the remote object is + // unreachable (e.g., dead) + throw CF::Port::InvalidPort(1, "Object unreachable"); + } + + if (!valid) { + std::string message = "Object does not support " + rep_id; + throw CF::Port::InvalidPort(1, message.c_str()); + } +} + +redhawk::UsesTransport* MessageSupplierPort::_createTransport(CORBA::Object_ptr object, const std::string& connectionId) +{ + CosEventChannelAdmin::EventChannel_var channel = ossie::corba::_narrowSafe(object); + if (CORBA::is_nil(channel)) { + throw CF::Port::InvalidPort(0, "The object provided did not narrow to a CosEventChannelAdmin::EventChannel type"); + } + + MessageConsumerPort* local_port = ossie::corba::getLocalServant(channel); + if (local_port) { + return new LocalTransport(this, local_port); + } else { + return new CorbaTransport(this, channel); + } +} + +void MessageSupplierPort::push(const CORBA::Any& data, const std::string& connectionId) +{ + boost::mutex::scoped_lock lock(updatingPortsLock); + _checkConnectionId(connectionId); + for (TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + if (!_isConnectionSelected(connection.connectionId(), connectionId)) { + continue; + } + try { + connection.transport()->push(data); + } catch (const redhawk::TransportError& exc) { + RH_NL_WARN("MessageSupplierPort", "Could not deliver the message. " << exc.what()); + } catch (...) 
{ + } + } +} + +std::string MessageSupplierPort::getRepid() const +{ + return ExtendedEvent::MessageEvent::_PD_repoId; +} + +void MessageSupplierPort::_beginMessageQueue(size_t count, const std::string& connectionId) +{ + for (TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + if (!_isConnectionSelected(connection.connectionId(), connectionId)) { + continue; + } + connection.transport()->beginQueue(count); + } +} + +void MessageSupplierPort::_queueMessage(const std::string& msgId, const char* format, const void* msgData, + SerializerFunc serializer, const std::string& connectionId) +{ + for (TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + if (!_isConnectionSelected(connection.connectionId(), connectionId)) { + continue; + } + try { + connection.transport()->queueMessage(msgId, format, msgData, serializer); + } catch ( ... ) { + } + } +} + +void MessageSupplierPort::_sendMessageQueue(const std::string& connectionId) +{ + for (TransportIterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + if (!_isConnectionSelected(connection.connectionId(), connectionId)) { + continue; + } + try { + connection.transport()->sendMessages(); + } catch (const redhawk::TransportError& exc) { + RH_NL_WARN("MessageSupplierPort", "Could not deliver the message. " << exc.what()); + } catch (...) 
{ + } + } +} + +bool MessageSupplierPort::_isConnectionSelected(const std::string& connectionId, const std::string& targetId) +{ + if (targetId.empty()) { + return true; + } + return (connectionId == targetId); +} + +void MessageSupplierPort::_checkConnectionId(const std::string& connectionId) +{ + if (!connectionId.empty() && !_hasConnection(connectionId)) { + throw std::invalid_argument("invalid connection '" + connectionId + "'"); + } +} diff --git a/redhawk/src/base/framework/POACreator.cpp b/redhawk/src/base/framework/POACreator.cpp index 7556545e4..f35805b58 100644 --- a/redhawk/src/base/framework/POACreator.cpp +++ b/redhawk/src/base/framework/POACreator.cpp @@ -24,8 +24,6 @@ using namespace ossie::corba; -PREPARE_CF_LOGGING(POACreator); - CORBA::Boolean POACreator::unknown_adapter (PortableServer::POA_ptr parent, const char* name) throw (CORBA::SystemException) { @@ -116,7 +114,6 @@ CORBA::Boolean POACreator::unknown_adapter (PortableServer::POA_ptr parent, cons PortableServer::POAManager_var poa_mgr = parent->the_POAManager(); try { - LOG_TRACE(POACreator, "Creating POA " << name); PortableServer::POA_var child = parent->create_POA(name, poa_mgr, policy_list); if (install_adapter_activator) { PortableServer::AdapterActivator_var tmpObj = this->_this(); diff --git a/redhawk/src/base/framework/PortSet_impl.cpp b/redhawk/src/base/framework/PortSet_impl.cpp index b27b0d46d..0cb17c76f 100644 --- a/redhawk/src/base/framework/PortSet_impl.cpp +++ b/redhawk/src/base/framework/PortSet_impl.cpp @@ -21,8 +21,6 @@ #include #include -PREPARE_CF_LOGGING(PortSet_impl); - PortSet_impl::PortSet_impl () { } diff --git a/redhawk/src/base/framework/PortSupplier_impl.cpp b/redhawk/src/base/framework/PortSupplier_impl.cpp index 1d9f12db7..8d411d860 100644 --- a/redhawk/src/base/framework/PortSupplier_impl.cpp +++ b/redhawk/src/base/framework/PortSupplier_impl.cpp @@ -20,8 +20,6 @@ #include -PREPARE_CF_LOGGING(PortSupplier_impl); - PortSupplier_impl::PortSupplier_impl () { } @@ 
-39,18 +37,27 @@ CORBA::Object* PortSupplier_impl::getPort (const char* name) throw (CORBA::Syste void PortSupplier_impl::addPort (const std::string& name, PortBase* servant) { - LOG_TRACE(PortSupplier_impl, "Adding port '" << name << "'"); + RH_TRACE(_portsupplierLog, "Adding port '" << name << "'"); insertPort(name, servant); // Activate the port in its default POA (usually, the root) - LOG_TRACE(PortSupplier_impl, "Activating port '" << name << "'"); + RH_TRACE(_portsupplierLog, "Activating port '" << name << "'"); PortableServer::POA_var poa = servant->_default_POA(); PortableServer::ObjectId_var oid = poa->activate_object(servant); + + // Allow additional post-activation initialization + RH_TRACE(_portsupplierLog, "Initializing port '" << name << "'"); + servant->initializePort(); +} + +void PortSupplier_impl::setLogger(rh_logger::LoggerPtr logptr) +{ + _portsupplierLog = logptr; } void PortSupplier_impl::addPort (const std::string& name, const std::string& description, PortBase* servant) { - LOG_TRACE(PortSupplier_impl, "Adding port '" << name << "': " << description); + RH_TRACE(_portsupplierLog, "Adding port '" << name << "': " << description); addPort(name, servant); servant->setDescription(description); } @@ -86,7 +93,7 @@ void PortSupplier_impl::releasePorts () void PortSupplier_impl::deactivatePort (PortBase* servant) { - LOG_TRACE(PortSupplier_impl, "Deactivating port '" << servant->getName() << "'"); + RH_TRACE(_portsupplierLog, "Deactivating port '" << servant->getName() << "'"); PortableServer::POA_var poa = servant->_default_POA(); PortableServer::ObjectId_var oid = poa->servant_to_id(servant); poa->deactivate_object(oid); @@ -98,7 +105,7 @@ void PortSupplier_impl::insertPort (const std::string& name, PortBase* servant) if (existing != _portServants.end()) { // A port is already registered with the given name, assume that the // new one must replace the old one - LOG_DEBUG(PortSupplier_impl, "Replacing existing port '" << name << "'"); + 
RH_DEBUG(_portsupplierLog, "Replacing existing port '" << name << "'"); deactivatePort(existing->second); } _portServants[name] = servant; diff --git a/redhawk/src/base/framework/Port_impl.cpp b/redhawk/src/base/framework/Port_impl.cpp index 54201eb94..81990e1dc 100644 --- a/redhawk/src/base/framework/Port_impl.cpp +++ b/redhawk/src/base/framework/Port_impl.cpp @@ -36,3 +36,36 @@ void Port_impl::connectPort(CORBA::Object_ptr connection, const char* connection void Port_impl::disconnectPort(const char* connectionId) { } + +LOGGER PortBase::getLogger() +{ + return _portLog; +} + +void PortBase::setLogger(LOGGER newLogger) +{ + _portLog = newLogger; +} + +namespace redhawk { + + PortCallError::PortCallError( const std::string &msg, const std::vector &connectionids ) : + std::runtime_error(PortCallError::makeMessage(msg, connectionids)) {} + + PortCallError::~PortCallError() throw () {} + + std::string PortCallError::makeMessage(const std::string& msg, const std::vector& connectionids) { + std::ostringstream cnvt; + cnvt.str(""); + cnvt << msg; + if (not connectionids.empty()) { + cnvt << "Connections available: "; + for (std::vector::const_iterator connectionid=connectionids.begin(); connectionid!=connectionids.end(); connectionid++) { + cnvt << *connectionid; + if (connectionid!=connectionids.end()-1) + cnvt << ", "; + } + } + return cnvt.str(); + } +} diff --git a/redhawk/src/base/framework/PropertyInterface.cpp b/redhawk/src/base/framework/PropertyInterface.cpp index 46950f7e3..8353e2fce 100644 --- a/redhawk/src/base/framework/PropertyInterface.cpp +++ b/redhawk/src/base/framework/PropertyInterface.cpp @@ -20,6 +20,122 @@ #include "ossie/PropertyInterface.h" +namespace CF { + + CF::UTCTime operator+(const CF::UTCTime& lhs, double seconds) + { + CF::UTCTime result = lhs; + result += seconds; + return result; + } + + CF::UTCTime& operator+=(CF::UTCTime& lhs, double seconds) + { + // Split fractional and whole seconds to preserve precision + lhs.tfsec += 
std::modf(seconds, &seconds); + lhs.twsec += seconds; + redhawk::time::utils::normalize(lhs); + return lhs; + } + + CF::UTCTime operator-(const CF::UTCTime& lhs, double seconds) + { + CF::UTCTime result = lhs; + result -= seconds; + return result; + } + + double operator-(const CF::UTCTime& lhs, const CF::UTCTime& rhs) + { + return (lhs.twsec - rhs.twsec) + (lhs.tfsec - rhs.tfsec); + } + + CF::UTCTime& operator-=(CF::UTCTime& lhs, double seconds) + { + // Split fractional and whole seconds to preserve precision + lhs.tfsec -= std::modf(seconds, &seconds); + lhs.twsec -= seconds; + redhawk::time::utils::normalize(lhs); + return lhs; + } + + bool operator==(const CF::UTCTime& lhs, const CF::UTCTime& rhs) + { + if (lhs.tcstatus != rhs.tcstatus) { + return false; + } else if (lhs.twsec != rhs.twsec) { + return false; + } else if (lhs.tfsec != rhs.tfsec) { + return false; + } + return true; + } + + bool operator!=(const CF::UTCTime& lhs, const CF::UTCTime& rhs) + { + if (lhs.tcstatus != rhs.tcstatus) { + return true; + } else if (lhs.twsec != rhs.twsec) { + return true; + } else if (lhs.tfsec != rhs.tfsec) { + return true; + } + return false; + } + + bool operator<(const CF::UTCTime& lhs, const CF::UTCTime& rhs) + { + if (lhs.twsec == rhs.twsec) { + return lhs.tfsec < rhs.tfsec; + } else { + return lhs.twsec < rhs.twsec; + } + } + + bool operator<=(const CF::UTCTime& lhs, const CF::UTCTime& rhs) + { + if (lhs.twsec == rhs.twsec) { + return lhs.tfsec <= rhs.tfsec; + } else { + return lhs.twsec <= rhs.twsec; + } + } + + bool operator>(const CF::UTCTime& lhs, const CF::UTCTime& rhs) + { + if (lhs.twsec == rhs.twsec) { + return lhs.tfsec > rhs.tfsec; + } else { + return lhs.twsec > rhs.twsec; + } + } + + bool operator>=(const CF::UTCTime& lhs, const CF::UTCTime& rhs) + { + if (lhs.twsec == rhs.twsec) { + return lhs.tfsec >= rhs.tfsec; + } else { + return lhs.twsec >= rhs.twsec; + } + } + + std::ostream& operator<<(std::ostream& stream, const CF::UTCTime& utc) + { + struct 
tm time; + time_t seconds = utc.twsec; + gmtime_r(&seconds, &time); + stream << (1900+time.tm_year) << ':'; + stream << std::setw(2) << std::setfill('0') << (time.tm_mon+1) << ':'; + stream << std::setw(2) << time.tm_mday << "::"; + stream << std::setw(2) << time.tm_hour << ":"; + stream << std::setw(2) << time.tm_min << ":"; + stream << std::setw(2) << time.tm_sec; + int usec = round(utc.tfsec * 1000000.0); + stream << "." << std::setw(6) << usec; + return stream; + } + +} PropertyInterface::PropertyInterface (CORBA::TypeCode_ptr _type) : id(), @@ -68,13 +184,7 @@ bool PropertyInterface::isQueryable () const bool PropertyInterface::isProperty () const { - std::vector::const_iterator p = kinds.begin(); - while (p != kinds.end()) { - if ((*p) == std::string("property")) - return true; - p++; - } - return false; + return (std::find(kinds.begin(), kinds.end(), "property") != kinds.end()); } bool PropertyInterface::isConfigurable () const @@ -119,10 +229,10 @@ void PropertyInterface::configure(const std::string& _id, const std::string& _na std::string::size_type istart = 0; while (istart < _kinds.size()) { std::string::size_type iend = _kinds.find(',', istart); - kinds.push_back(_kinds.substr(istart, iend)); if (iend == std::string::npos) { - break; + iend = _kinds.size(); } + kinds.push_back(_kinds.substr(istart, iend-istart)); istart = iend + 1; } } @@ -178,6 +288,16 @@ inline void SimplePropertyWrapper::toAny (const char& v, CORBA::Any& a) } +template <> +inline bool SimplePropertyWrapper::fromAny (const CORBA::Any& a, CF::UTCTime& v) +{ + CF::UTCTime *tmp; + if (not (a >>= tmp)) + return false; + v = *tmp; + return true; +} + template <> inline bool SimplePropertyWrapper::fromAny (const CORBA::Any& a, CORBA::Octet& v) { @@ -325,7 +445,14 @@ N##Property* PropertyWrapperFactory::Create (T& value) \ return new SimplePropertyWrapper< T >(value); \ } +#define SIMPLE_STRUCT_FACTORY_CREATE(N,T) \ +N##Property* PropertyWrapperFactory::Create (T& value) \ +{ \ + return new 
PropertyWrapper< T >(value); \ +} + SIMPLE_FACTORY_CREATE(String, std::string); +SIMPLE_FACTORY_CREATE(UTCTime, CF::UTCTime); SIMPLE_FACTORY_CREATE(Boolean, bool); SIMPLE_FACTORY_CREATE(Char, char); @@ -367,6 +494,7 @@ N##SeqProperty* PropertyWrapperFactory::Create (std::vector& value) \ } SIMPLE_SEQUENCE_FACTORY_CREATE(String, std::string); +SIMPLE_SEQUENCE_FACTORY_CREATE(UTCTime, CF::UTCTime); SIMPLE_SEQUENCE_FACTORY_CREATE(Boolean, bool); SIMPLE_SEQUENCE_FACTORY_CREATE(Char, char); SIMPLE_SEQUENCE_FACTORY_CREATE(Octet, CORBA::Octet); @@ -404,6 +532,7 @@ N##Property* MonitorFactory::Create (T& value) \ } SIMPLEMONITOR_FACTORY_CREATE(String, std::string); +SIMPLEMONITOR_FACTORY_CREATE(UTCTime, CF::UTCTime); SIMPLEMONITOR_FACTORY_CREATE(Boolean, bool); SIMPLEMONITOR_FACTORY_CREATE(Char, char); @@ -439,6 +568,7 @@ N##SeqProperty* MonitorFactory::Create (std::vector& value) \ } SIMPLEMONITOR_SEQUENCE_FACTORY_CREATE(String, std::string); +SIMPLEMONITOR_SEQUENCE_FACTORY_CREATE(UTCTime, CF::UTCTime); SIMPLEMONITOR_SEQUENCE_FACTORY_CREATE(Boolean, bool); SIMPLEMONITOR_SEQUENCE_FACTORY_CREATE(Char, char); SIMPLEMONITOR_SEQUENCE_FACTORY_CREATE(Octet, CORBA::Octet); diff --git a/redhawk/src/base/framework/PropertyMap.cpp b/redhawk/src/base/framework/PropertyMap.cpp index 09d71a373..3e1f63dac 100644 --- a/redhawk/src/base/framework/PropertyMap.cpp +++ b/redhawk/src/base/framework/PropertyMap.cpp @@ -107,11 +107,69 @@ const Value& PropertyMap::operator[] (const std::string& id) const return dt->getValue(); } +const Value& PropertyMap::get(const std::string& id, const Value& def) const +{ + const_iterator dt = find(id); + if (dt != end()) { + return dt->getValue(); + } else { + return def; + } +} + +bool PropertyMap::operator==( const redhawk::PropertyMap &other ) const +{ + // + // perform simple matching of a property map against another map + // + if ( size() != other.size() ) { + return false; + } + + if ( size() == 0 ) { + return true; + } + + for ( const_iterator iter = 
begin(); iter != end(); ++iter) { + std::string pid(iter->getId()); + const_iterator other_prop = other.find( pid ); + if ( other_prop == other.end() ) { + return false; + } + // perform equal match values + std::string action("eq"); + if ( !ossie::compare_anys(iter->getValue(), other_prop->getValue(), action) ) { + return false; + } + } + + return true; +} + + +bool PropertyMap::operator!=( const redhawk::PropertyMap &other ) const +{ + return !(*this == other); +} + +void PropertyMap::update(const CF::Properties& properties) +{ + const PropertyMap& other = cast(properties); + for (const_iterator prop = other.begin(); prop != other.end(); ++prop) { + (*this)[prop->getId()] = prop->getValue(); + } +} + void PropertyMap::push_back(const CF::DataType& property) { ossie::corba::push_back(*this, property); } +void PropertyMap::extend(const CF::Properties& properties) +{ + ossie::corba::extend(*this, properties); +} + PropertyMap::iterator PropertyMap::begin() { return static_cast(this->get_buffer()); @@ -163,3 +221,25 @@ void PropertyMap::erase(iterator first, iterator last) // Resize to remove deleted items length(length()-(last-first)); } + +std::string PropertyMap::toString() const +{ + std::ostringstream out; + out << *this; + return out.str(); +} + +std::ostream& redhawk::operator<<(std::ostream& out, const redhawk::PropertyMap& properties) +{ + out << "{"; + bool first = true; + for (PropertyMap::const_iterator prop = properties.begin(); prop != properties.end(); ++prop) { + if (!first) { + out << ", "; + } + first = false; + out << prop->getId() << "=" << prop->getValue().toString(); + } + out << "}"; + return out; +} diff --git a/redhawk/src/base/framework/PropertySet_impl.cpp b/redhawk/src/base/framework/PropertySet_impl.cpp index e09585629..717d42b99 100644 --- a/redhawk/src/base/framework/PropertySet_impl.cpp +++ b/redhawk/src/base/framework/PropertySet_impl.cpp @@ -28,6 +28,7 @@ #include "ossie/concurrent.h" #include "ossie/Events.h" #include 
"ossie/ossieSupport.h" +#include "ossie/PropertyMap.h" // @@ -92,6 +93,7 @@ PREPARE_CF_LOGGING(PropertySet_impl); PropertySet_impl::PropertySet_impl (): propertyChangePort(0), + _propertyQueryTimestamp("QUERY_TIMESTAMP"), _propChangeThread( new PropertyChangeThread(*this), 0.1 ), _propertiesInitialized(false) { @@ -122,11 +124,11 @@ PropertySet_impl::~PropertySet_impl () void PropertySet_impl::setExecparamProperties(std::map& execparams) { - LOG_TRACE(PropertySet_impl, "Setting " << execparams.size() << " exec parameters"); + RH_TRACE(_propertysetLog, "Setting " << execparams.size() << " exec parameters"); std::map::iterator iter; for (iter = execparams.begin(); iter != execparams.end(); iter++) { - LOG_TRACE(PropertySet_impl, "Property: " << iter->first << " = " + RH_TRACE(_propertysetLog, "Property: " << iter->first << " = " << iter->second); const std::string id = iter->first; PropertyInterface* property = getPropertyFromId(id); @@ -137,10 +139,29 @@ void PropertySet_impl::setExecparamProperties(std::map& exec CORBA::Any val = ossie::string_to_any(iter->second, property->type); property->setValue(val); } else { - LOG_WARN(PropertySet_impl, "Property: " << id << " is not defined, ignoring it!!"); + RH_WARN(_propertysetLog, "Property: " << id << " is not defined, ignoring it!!"); } } - LOG_TRACE(PropertySet_impl, "Done setting exec parameters"); + RH_TRACE(_propertysetLog, "Done setting exec parameters"); +} + +void PropertySet_impl::setLogger(rh_logger::LoggerPtr logptr) +{ + _propertysetLog = logptr; +} + +void PropertySet_impl::setCommandLineProperty(const std::string& id, const redhawk::Value& value) +{ + RH_TRACE(_propertysetLog, "Property: " << id << " = " << value.toString()); + PropertyInterface* property = getPropertyFromId(id); + // the property can belong to a resource, device, or Device/Domain + // Manager. 
If the property is not found, then it might be a resource + // property passed through the nodeBooter to the DeviceManager + if (property) { + property->setValue(value, false); + } else { + RH_WARN(_propertysetLog, "Property: " << id << " is not defined, ignoring it!!"); + } } void @@ -161,14 +182,14 @@ throw (CF::PropertyEmitter::AlreadyInitialized, CF::PropertySet::PartialConfigur for (CORBA::ULong ii = 0; ii < ctorProps.length(); ++ii) { PropertyInterface* property = getPropertyFromId((const char*)ctorProps[ii].id); if (property && property->isProperty()) { - LOG_TRACE(PropertySet_impl, "Constructor property: " << property->id); + RH_TRACE(_propertysetLog, "Constructor property: " << property->id); try { property->setValue(ctorProps[ii].value, false); } catch (std::exception& e) { - LOG_ERROR(PropertySet_impl, "Setting property " << property->id << ", " << property->name << " failed. Cause: " << e.what()); + RH_ERROR(_propertysetLog, "Setting property " << property->id << ", " << property->name << " failed. Cause: " << e.what()); ossie::corba::push_back(invalidProperties, ctorProps[ii]); } catch (CORBA::Exception& e) { - LOG_ERROR(PropertySet_impl, "Setting property " << property->id << " failed. Cause: " << e._name()); + RH_ERROR(_propertysetLog, "Setting property " << property->id << " failed. 
Cause: " << e._name()); ossie::corba::push_back(invalidProperties, ctorProps[ii]); } } else { @@ -204,7 +225,7 @@ throw (CORBA::SystemException, CF::PropertySet::InvalidConfiguration, for (CORBA::ULong ii = 0; ii < configProperties.length(); ++ii) { PropertyInterface* property = getPropertyFromId((const char*)configProperties[ii].id); if (property && property->isConfigurable()) { - LOG_TRACE(PropertySet_impl, "Configure property: " << property->id); + RH_TRACE(_propertysetLog, "Configure property: " << property->id); try { std::vector::iterator kind = property->kinds.begin(); bool sendEvent = false; @@ -233,7 +254,7 @@ throw (CORBA::SystemException, CF::PropertySet::InvalidConfiguration, property->getValue(after_value); std::string comparator("eq"); if (ossie::compare_anys(before_value, after_value, comparator)) { - LOG_TRACE(PropertySet_impl, "Value has not changed on configure for property " << property->id << ". Not triggering callback"); + RH_TRACE(_propertysetLog, "Value has not changed on configure for property " << property->id << ". Not triggering callback"); } executePropertyCallback(property->id); if (sendEvent) { @@ -242,13 +263,15 @@ throw (CORBA::SystemException, CF::PropertySet::InvalidConfiguration, } ++validProperties; } catch (std::exception& e) { - LOG_ERROR(PropertySet_impl, "Setting property " << property->id << ", " << property->name << " failed. Cause: " << e.what()); + RH_ERROR(_propertysetLog, "Setting property " << property->id << ", " << property->name << " failed. Cause: " << e.what()); CORBA::ULong count = invalidProperties.length(); invalidProperties.length(count + 1); invalidProperties[count].id = CORBA::string_dup(configProperties[ii].id); invalidProperties[count].value = configProperties[ii].value; + } catch (CF::PropertySet::InvalidConfiguration& e) { + throw; } catch (CORBA::Exception& e) { - LOG_ERROR(PropertySet_impl, "Setting property " << property->id << " failed. 
Cause: " << e._name()); + RH_ERROR(_propertysetLog, "Setting property " << property->id << " failed. Cause: " << e._name()); CORBA::ULong count = invalidProperties.length(); invalidProperties.length(count + 1); invalidProperties[count].id = CORBA::string_dup(configProperties[ii].id); @@ -283,7 +306,7 @@ throw (CORBA::SystemException, CF::UnknownProperties) // For queries of zero length, return all id/value pairs in propertySet. if (configProperties.length () == 0) { - LOG_TRACE(PropertySet_impl, "Query all properties"); + RH_TRACE(_propertysetLog, "Query all properties"); PropertyMap::iterator jj = propTable.begin(); for (CORBA::ULong ii = 0; ii < propTable.size(); ++ii) { if (jj->second->isQueryable()) { @@ -301,6 +324,9 @@ throw (CORBA::SystemException, CF::UnknownProperties) } ++jj; } + /*configProperties.length(configProperties.length() + 1); + configProperties[configProperties.length()-1].id = CORBA::string_dup(_propertyQueryTimestamp.c_str()); + configProperties[configProperties.length()-1].value <<= _makeTime(-1,0,0);*/ } else { // For queries of length > 0, return all requested pairs in propertySet CF::Properties invalidProperties; @@ -308,7 +334,11 @@ throw (CORBA::SystemException, CF::UnknownProperties) // Returns values for valid queries in the same order as requested for (CORBA::ULong ii = 0; ii < configProperties.length (); ++ii) { const std::string id = (const char*)configProperties[ii].id; - LOG_TRACE(PropertySet_impl, "Query property " << id); + RH_TRACE(_propertysetLog, "Query property " << id); + if (id == _propertyQueryTimestamp) { + configProperties[ii].value <<= _makeTime(-1,0,0); + continue; + } PropertyInterface* property = getPropertyFromId(id); if (property && property->isQueryable()) { if (property->isNilEnabled()) { @@ -333,7 +363,7 @@ throw (CORBA::SystemException, CF::UnknownProperties) } } - LOG_TRACE(PropertySet_impl, "Query returning " << configProperties.length() << " properties"); + RH_TRACE(_propertysetLog, "Query returning " << 
configProperties.length() << " properties"); TRACE_EXIT(PropertySet_impl); } @@ -342,7 +372,7 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co throw(CF::UnknownProperties, CF::InvalidObjectReference) { - LOG_TRACE(PropertySet_impl, "Start RegisterListener"); + RH_TRACE(_propertysetLog, "Start RegisterListener"); CF::Properties invalidProperties; int ii; @@ -357,7 +387,7 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co PropertyMap::iterator jj = propTable.begin(); for (CORBA::ULong ii = 0; ii < propTable.size(); ++ii) { if (jj->second->isQueryable()) { - LOG_DEBUG(PropertySet_impl, "RegisterListener: registering property id: " << jj->second->id); + RH_DEBUG(_propertysetLog, "RegisterListener: registering property id: " << jj->second->id); // Add callback to monitor changes to specified properties, use smart pointers for clean up props.insert( std::pair< std::string, PCL_CallbackPtr >( jj->second->id, PCL_CallbackPtr( new PCL_Callback() ) ) ); @@ -370,7 +400,7 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co // check for matching propids.. PropertyInterface* property = getPropertyFromId((const char*)prop_ids[ii]); if (property && property->isQueryable()) { - LOG_DEBUG(PropertySet_impl, "RegisterListener: registering property id: " << property->id); + RH_DEBUG(_propertysetLog, "RegisterListener: registering property id: " << property->id); // Add callback to monitor changes to specified properties, use smart pointers for clean up props.insert( std::pair< std::string, PCL_CallbackPtr >(property->id, PCL_CallbackPtr( new PCL_Callback() ) ) ); @@ -387,12 +417,12 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co throw CF::UnknownProperties(invalidProperties); } - LOG_DEBUG(PropertySet_impl, "RegisterListener: Determine listener type... "); + RH_DEBUG(_propertysetLog, "RegisterListener: Determine listener type... 
"); // listener can be either an EventChannel or PropertyChangeListener PropertyChangeListener *pcl=NULL; bool is_ec = false; try { - LOG_DEBUG(PropertySet_impl, "RegisterListener: Checking for event channel....." ); + RH_DEBUG(_propertysetLog, "RegisterListener: Checking for event channel....." ); ossie::events::EventChannel_ptr ec = ossie::events::EventChannel::_narrow(listener); if ( !CORBA::is_nil(ec) ) { pcl = new EC_PropertyChangeListener(listener); @@ -401,13 +431,13 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co } catch(...) { if ( pcl ) delete pcl; - LOG_DEBUG(PropertySet_impl, "RegisterListener: Registrant not an event channel....." ); + RH_DEBUG(_propertysetLog, "RegisterListener: Registrant not an event channel....." ); // this ok... need to check additional types } if ( !is_ec ) { try { - LOG_DEBUG(PropertySet_impl, "RegisterListener: Trying for PropertyChangeListener......." ); + RH_DEBUG(_propertysetLog, "RegisterListener: Trying for PropertyChangeListener......." ); pcl = new INF_PropertyChangeListener(listener); } catch(...) { @@ -433,7 +463,7 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co rec.pcl.reset(pcl); PropertyReportTable::iterator p = rec.props.begin(); for ( ; p != rec.props.end(); p++ ) { - LOG_DEBUG(PropertySet_impl, "RegisterListener: Setting Callback.... REG-ID:" << p->first << " FUNC:" << p->second ); + RH_DEBUG(_propertysetLog, "RegisterListener: Setting Callback.... REG-ID:" << p->first << " FUNC:" << p->second ); PropertyInterface *prop = getPropertyFromId(p->first); if ( prop ) { // check for matching propids.. @@ -441,11 +471,11 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co } } - LOG_DEBUG(PropertySet_impl, "RegisterListener: adding record.. "); - LOG_DEBUG(PropertySet_impl, "RegisterListener ..... reg:" << rec.regId ); - LOG_DEBUG(PropertySet_impl, "RegisterListener ..... 
sec:" << sec ); - LOG_DEBUG(PropertySet_impl, "RegisterListener ..... fsec:" << fsec ); - LOG_DEBUG(PropertySet_impl, "RegisterListener ..... dur:" << rec.reportInterval.total_milliseconds() ); + RH_DEBUG(_propertysetLog, "RegisterListener: adding record.. "); + RH_DEBUG(_propertysetLog, "RegisterListener ..... reg:" << rec.regId ); + RH_DEBUG(_propertysetLog, "RegisterListener ..... sec:" << sec ); + RH_DEBUG(_propertysetLog, "RegisterListener ..... fsec:" << fsec ); + RH_DEBUG(_propertysetLog, "RegisterListener ..... dur:" << rec.reportInterval.total_milliseconds() ); // add the registration record to our registry _propChangeRegistry.insert( std::pair< std::string, PropertyChangeRec >( reg_id, rec ) ); @@ -453,7 +483,7 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co // enable monitoring thread... if ( !_propChangeThread.threadRunning() ) _propChangeThread.start(); - LOG_TRACE(PropertySet_impl, "RegisterListener: End Registration"); + RH_TRACE(_propertysetLog, "RegisterListener: End Registration"); return CORBA::string_dup(reg_id.c_str() ); } @@ -461,32 +491,31 @@ char *PropertySet_impl::registerPropertyListener( CORBA::Object_ptr listener, co void PropertySet_impl::unregisterPropertyListener( const char *reg_id ) throw(CF::InvalidIdentifier) { - SCOPED_LOCK(propertySetAccess); - PropertyChangeRegistry::iterator reg = _propChangeRegistry.find(reg_id); - if ( reg != _propChangeRegistry.end() ) { - - PropertyChangeRec *rec = &(reg->second); - // need to unregister callback with property - PropertyReportTable::iterator p = rec->props.begin(); - for ( ; p != rec->props.end(); p++ ) { - LOG_DEBUG(PropertySet_impl, "RegisterListener: Unregister callback...:" << p->first << " FUNC:" << p->second ); - PropertyInterface *prop = getPropertyFromId(p->first); - if ( prop ) { - // check for matching propids.. 
- prop->removeChangeListener( p->second, &PCL_Callback::recordChanged ); - } + { + SCOPED_LOCK(propertySetAccess); + PropertyChangeRegistry::iterator reg = _propChangeRegistry.find(reg_id); + if ( reg != _propChangeRegistry.end() ) { + PropertyChangeRec *rec = &(reg->second); + // need to unregister callback with property + PropertyReportTable::iterator p = rec->props.begin(); + for ( ; p != rec->props.end(); p++ ) { + RH_DEBUG(_propertysetLog, "RegisterListener: Unregister callback...:" << p->first << " FUNC:" << p->second ); + PropertyInterface *prop = getPropertyFromId(p->first); + if ( prop ) { + // check for matching propids.. + prop->removeChangeListener( p->second, &PCL_Callback::recordChanged ); + } + } + } else { + throw CF::InvalidIdentifier(); } - // remove registration record _propChangeRegistry.erase(reg); - - if( _propChangeRegistry.size() == 0 ){ - _propChangeThread.stop(); - _propChangeThread.release(); - } } - else { - throw CF::InvalidIdentifier(); + + if( _propChangeRegistry.size() == 0 ){ + _propChangeThread.stop(); + _propChangeThread.release(); } } @@ -587,7 +616,7 @@ void PropertySet_impl::setPropertyCallback (const std::string& id, PropertyCallb } else { // Check if property exists if (!getPropertyFromId(id)){ - LOG_WARN(PropertySet_impl, "Setting listener for property " << id << " that does not exist"); + RH_WARN(_propertysetLog, "Setting listener for property " << id << " that does not exist"); } propId = id; } @@ -614,7 +643,7 @@ void PropertySet_impl::stopPropertyChangeMonitor() int PropertySet_impl::_propertyChangeServiceFunction() { - LOG_TRACE(PropertySet_impl, "Starting property change service function."); + RH_TRACE(_propertysetLog, "Starting property change service function."); time_t delay = 0; { SCOPED_LOCK(propertySetAccess); @@ -627,17 +656,17 @@ int PropertySet_impl::_propertyChangeServiceFunction() for( ; iter != _propChangeRegistry.end() && _propChangeThread.threadRunning(); iter++) { PropertyChangeRec *rec = 
&(iter->second); - LOG_DEBUG(PropertySet_impl, "Change Listener ... reg_id/interval :" << rec->regId << "/" << rec->reportInterval.total_milliseconds()); + RH_DEBUG(_propertysetLog, "Change Listener ... reg_id/interval :" << rec->regId << "/" << rec->reportInterval.total_milliseconds()); PropertyReportTable::iterator rpt_iter = rec->props.begin(); // check all registered properties for changes for( ; rpt_iter != rec->props.end() && _propChangeThread.threadRunning(); rpt_iter++) { // check if property changed - LOG_DEBUG(PropertySet_impl, " Property/set :" << rpt_iter->first << "/" << rpt_iter->second->isSet()); + RH_DEBUG(_propertysetLog, " Property/set :" << rpt_iter->first << "/" << rpt_iter->second->isSet()); try{ if ( _propMonitors[rpt_iter->first]->isChanged() ) { rpt_iter->second->recordChanged(); - LOG_DEBUG(PropertySet_impl, " Recording Change Property/set :" << rpt_iter->first << "/" << rpt_iter->second->isChanged()); + RH_DEBUG(_propertysetLog, " Recording Change Property/set :" << rpt_iter->first << "/" << rpt_iter->second->isChanged()); } } catch(...) 
{} @@ -645,24 +674,24 @@ int PropertySet_impl::_propertyChangeServiceFunction() // determine if time has expired boost::posix_time::time_duration dur = rec->expiration - now; - LOG_DEBUG(PropertySet_impl, " Check for expiration, dur=" << dur.total_milliseconds() ); + RH_DEBUG(_propertysetLog, " Check for expiration, dur=" << dur.total_milliseconds() ); if ( dur.total_milliseconds() <= 0 ) { CF::Properties rpt_props; CORBA::ULong idx = 0; PropertyReportTable::iterator rpt_iter = rec->props.begin(); // check all registered properties for changes for( ; rpt_iter != rec->props.end() && _propChangeThread.threadRunning(); rpt_iter++) { - LOG_DEBUG(PropertySet_impl, " Sending Change Property/set :" << rpt_iter->first << "/" << rpt_iter->second->isChanged()); + RH_DEBUG(_propertysetLog, " Sending Change Property/set :" << rpt_iter->first << "/" << rpt_iter->second->isChanged()); if (rpt_iter->second->isChanged() ) { // add to reporting change list idx = rpt_props.length(); rpt_props.length( idx+1 ); rpt_props[idx].id = CORBA::string_dup(rpt_iter->first.c_str()); - LOG_DEBUG(PropertySet_impl, " Getting getValue from property....prop: " << rpt_iter->first << " reg_id:" << rec->regId ); + RH_DEBUG(_propertysetLog, " Getting getValue from property....prop: " << rpt_iter->first << " reg_id:" << rec->regId ); PropertyInterface *property = getPropertyFromId(rpt_iter->first); if ( property ) { - LOG_DEBUG(PropertySet_impl, " Getting getValue from property....prop: " << rpt_iter->first << " reg_id:" << rec->regId ); + RH_DEBUG(_propertysetLog, " Getting getValue from property....prop: " << rpt_iter->first << " reg_id:" << rec->regId ); property->getValue( rpt_props[idx].value ); } @@ -675,9 +704,9 @@ int PropertySet_impl::_propertyChangeServiceFunction() // publish changes to listener if ( rec->pcl && rpt_props.length() > 0 ) { - LOG_DEBUG(PropertySet_impl, " Calling notifier....size :" << rpt_props.length()); + RH_DEBUG(_propertysetLog, " Calling notifier....size :" << 
rpt_props.length()); if ( rec->pcl->notify( rec, rpt_props ) != 0 ) { - LOG_ERROR(PropertySet_impl, "Publishing changes to PropertyChangeListener FAILED, reg_id:" << rec->regId ); + RH_ERROR(_propertysetLog, "Publishing changes to PropertyChangeListener FAILED, reg_id:" << rec->regId ); } } @@ -692,13 +721,13 @@ int PropertySet_impl::_propertyChangeServiceFunction() // determine delay interval based on shortest remaining duration interval if ( delay == 0 ) delay=dur.total_milliseconds(); - LOG_DEBUG(PropertySet_impl, " Test for delay/duration (millisecs) ... :" << delay << "/" << dur.total_milliseconds()); + RH_DEBUG(_propertysetLog, " Test for delay/duration (millisecs) ... :" << delay << "/" << dur.total_milliseconds()); if ( dur.total_milliseconds() > 0 ) delay = std::min( delay, (time_t)dur.total_milliseconds() ); - LOG_DEBUG(PropertySet_impl, " Minimum delay (millisecs) ... :" << delay ); + RH_DEBUG(_propertysetLog, " Minimum delay (millisecs) ... :" << delay ); } } - LOG_DEBUG(PropertySet_impl, "Request sleep delay........(millisecs) :" << delay); + RH_DEBUG(_propertysetLog, "Request sleep delay........(millisecs) :" << delay); // figure out how long to wait till next iteration if ( delay > 0 ) _propChangeThread.updateDelay( delay/1000.0 ); return NOOP; @@ -733,6 +762,7 @@ int PropertySet_impl::EC_PropertyChangeListener::notify( PropertyChangeRec *rec evt.reg_id = CORBA::string_dup( rec->regId.c_str()); evt.resource_id = CORBA::string_dup( rec->rscId.c_str() ); evt.properties = changes; + evt.timestamp = _makeTime(-1,0,0); try { RH_NL_DEBUG("EC_PropertyChangeListener", "Send change event reg/id:" << rec->regId << "/" << uuid ); pub->push( evt ); @@ -766,6 +796,7 @@ int PropertySet_impl::INF_PropertyChangeListener::notify( PropertyChangeRec *rec evt.reg_id = CORBA::string_dup( rec->regId.c_str()); evt.resource_id = CORBA::string_dup( rec->rscId.c_str() ); evt.properties = changes; + evt.timestamp = _makeTime(-1,0,0); try { 
RH_NL_DEBUG("INF_PropertyChangeListener", "Send change event reg/id:" << rec->regId << "/" << uuid ); listener->propertyChange( evt ); diff --git a/redhawk/src/base/framework/PropertyType.cpp b/redhawk/src/base/framework/PropertyType.cpp index f6858cf0b..7c84fbfb7 100644 --- a/redhawk/src/base/framework/PropertyType.cpp +++ b/redhawk/src/base/framework/PropertyType.cpp @@ -32,6 +32,20 @@ PropertyType::PropertyType(const CF::DataType& dt) : { } +PropertyType::PropertyType(const std::string& identifier, const CORBA::Any& value) : + CF::DataType() +{ + setId(identifier); + this->value = value; +} + +PropertyType::PropertyType(const std::string& identifier, const Value& value) : + CF::DataType() +{ + setId(identifier); + this->value = value; +} + PropertyType& PropertyType::operator=(const CF::DataType& dt) { CF::DataType::operator=(dt); diff --git a/redhawk/src/base/framework/ProvidesPort.cpp b/redhawk/src/base/framework/ProvidesPort.cpp new file mode 100644 index 000000000..e4f0a645d --- /dev/null +++ b/redhawk/src/base/framework/ProvidesPort.cpp @@ -0,0 +1,180 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include +#include +#include + +namespace redhawk { + + ProvidesTransport::ProvidesTransport(NegotiableProvidesPortBase* port, const std::string& transportId) : + _port(port), + _transportId(transportId) + { + } + + const std::string& ProvidesTransport::transportId() const + { + return _transportId; + } + + + NegotiableProvidesPortBase::NegotiableProvidesPortBase(const std::string& name) : + Port_Provides_base_impl(name) + { + } + + NegotiableProvidesPortBase::~NegotiableProvidesPortBase() + { + for (TransportMap::iterator transport = _transports.begin(); transport != _transports.end(); ++transport) { + delete transport->second; + } + + for (TransportManagerList::iterator manager = _transportManagers.begin(); manager != _transportManagers.end(); ++manager) { + delete (*manager); + } + } + + void NegotiableProvidesPortBase::initializePort() + { + const std::string repo_id = getRepid(); + TransportStack transports = TransportRegistry::GetTransports(repo_id); + for (TransportStack::iterator iter = transports.begin(); iter != transports.end(); ++iter) { + TransportFactory* transport = *iter; + RH_DEBUG(_portLog, "Adding provides transport '" << transport->transportType() + << "' for '" << repo_id << "'"); + _transportManagers.push_back(transport->createProvidesManager(this)); + } + } + + void NegotiableProvidesPortBase::releasePort() + { + for (TransportMap::iterator transport = _transports.begin(); transport != _transports.end(); ++transport) { + transport->second->stopTransport(); + } + } + + ExtendedCF::TransportInfoSequence* NegotiableProvidesPortBase::supportedTransports() + { + ExtendedCF::TransportInfoSequence_var transports = new ExtendedCF::TransportInfoSequence; + for (TransportManagerList::iterator manager = _transportManagers.begin(); manager != _transportManagers.end(); ++manager) { + ExtendedCF::TransportInfo transport; + transport.transportType = (*manager)->transportType().c_str(); + transport.transportProperties = 
(*manager)->transportProperties(); + ossie::corba::push_back(transports, transport); + } + return transports._retn(); + } + + ExtendedCF::NegotiationResult* + NegotiableProvidesPortBase::negotiateTransport(const char* transportType, + const CF::Properties& transportProperties) + { + boost::mutex::scoped_lock lock(_transportMutex); + + // Find the appropriate transport manager; the caller should have + // checked this port's supported transport types already, but it's + // still technically possible to give us a bad type. + ProvidesTransportManager* manager = _getTransportManager(transportType); + if (!manager) { + std::string message = "cannot negotiate transport type '" + std::string(transportType) + "'"; + throw ExtendedCF::NegotiationError(message.c_str()); + } + + // Create a unique identifier for this transport instance, that should + // be used later for disconnect + std::string transport_id = ossie::generateUUID(); + const redhawk::PropertyMap& transport_props = redhawk::PropertyMap::cast(transportProperties); + + // Ask the manager to create a transport instance; if for any reason it + // doesn't want to (e.g., bad properties), it should throw an exception + // (preferably some form of TransportError). + ProvidesTransport* transport; + try { + transport = manager->createProvidesTransport(transport_id, transport_props); + } catch (const std::exception& exc) { + throw ExtendedCF::NegotiationError(exc.what()); + } + + // An exception is preferred, but handle null as well. + if (!transport) { + std::string message = "cannot create provides transport type '" + std::string(transportType) + "'"; + throw ExtendedCF::NegotiationError(message.c_str()); + } + + // Attempt to start the transport instance. This should be unlikely to + // fail, but the transport still has the option to throw an exception; + // if it does, make sure to delete the transport. 
+ try { + transport->startTransport(); + } catch (const std::exception& exc) { + delete transport; + throw ExtendedCF::NegotiationError(exc.what()); + } catch (...) { + RH_ERROR(_portLog, "Unexpected error starting transport type '" << transportType << "'"); + delete transport; + throw ExtendedCF::NegotiationError("failed to start transport"); + } + + // Take ownership of the transport instance, and return the results of + // negotiation back to the caller. If the uses side rejects the results + // (again, should be unlikely at this point), it's responsible for + // breaking the connection by calling disconnectTransport(). + _transports[transport_id] = transport; + ExtendedCF::NegotiationResult_var result = new ExtendedCF::NegotiationResult; + result->transportId = transport_id.c_str(); + result->properties = manager->getNegotiationProperties(transport); + return result._retn(); + } + + void NegotiableProvidesPortBase::disconnectTransport(const char* transportId) + { + boost::mutex::scoped_lock lock(_transportMutex); + + // Make sure it's a valid transport ID + TransportMap::iterator transport = _transports.find(transportId); + if (transport == _transports.end()) { + throw ExtendedCF::NegotiationError("invalid transport identifier"); + } + + // Stop the transport, logging exceptions but continuing on + try { + transport->second->stopTransport(); + } catch (const std::exception& exc) { + RH_ERROR(_portLog, "Error stopping transport '" << transportId << "': " << exc.what()); + } catch (...) 
{ + RH_ERROR(_portLog, "Unknown error stopping transport '" << transportId << "'"); + } + + // Finish cleaning up + delete transport->second; + _transports.erase(transport); + } + + ProvidesTransportManager* NegotiableProvidesPortBase::_getTransportManager(const std::string& transportType) + { + for (TransportManagerList::iterator manager = _transportManagers.begin(); manager != _transportManagers.end(); ++manager) { + if ((*manager)->transportType() == transportType) { + return (*manager); + } + } + return 0; + } +} diff --git a/redhawk/src/base/framework/Resource_impl.cpp b/redhawk/src/base/framework/Resource_impl.cpp index 6e383d643..294e40359 100644 --- a/redhawk/src/base/framework/Resource_impl.cpp +++ b/redhawk/src/base/framework/Resource_impl.cpp @@ -22,85 +22,100 @@ #include "ossie/Resource_impl.h" #include "ossie/Events.h" - -PREPARE_CF_LOGGING(Resource_impl) +#include "ossie/Component.h" +#include Resource_impl::Resource_impl (const char* _uuid) : + Logging_impl(_uuid), _identifier(_uuid), _started(false), component_running_mutex(), component_running(&component_running_mutex), - _domMgr(NULL), + _domMgr(new redhawk::DomainManagerContainer()), _initialized(false) { + this->setLogger(this->_baseLog->getChildLogger("Resource", "system")); } Resource_impl::Resource_impl (const char* _uuid, const char *label) : + Logging_impl(label), _identifier(_uuid), naming_service_name(label), _started(false), component_running_mutex(), component_running(&component_running_mutex), - _domMgr(NULL), + _domMgr(new redhawk::DomainManagerContainer()), _initialized(false) { + this->setLogger(this->_baseLog->getChildLogger("Resource", "system")); } -Resource_impl::~Resource_impl () { - if (this->_domMgr != NULL) - delete this->_domMgr; - - -}; - +Resource_impl::~Resource_impl () +{ +} -void Resource_impl::setAdditionalParameters(std::string &softwareProfile, std::string &application_registrar_ior, std::string &nic) +void Resource_impl::setAdditionalParameters(std::string& 
softwareProfile, std::string &application_registrar_ior, std::string &nic) { _softwareProfile = softwareProfile; CORBA::ORB_ptr orb = ossie::corba::Orb(); CORBA::Object_var applicationRegistrarObject = CORBA::Object::_nil(); try { - RH_NL_TRACE("Resource", "narrow to Registrar object:" << application_registrar_ior ); + RH_TRACE(_resourceLog, "narrow to Registrar object:" << application_registrar_ior ); applicationRegistrarObject = orb->string_to_object(application_registrar_ior.c_str()); } catch ( ... ) { - RH_NL_WARN("Resource", "No Registrar... create empty container"); - this->_domMgr = new redhawk::DomainManagerContainer(); + RH_WARN(_resourceLog, "No Registrar... create empty container"); + setDomainManager(CF::DomainManager::_nil()); return; } CF::ApplicationRegistrar_var applicationRegistrar = ossie::corba::_narrowSafe(applicationRegistrarObject); if (!CORBA::is_nil(applicationRegistrar)) { try { - RH_NL_TRACE("Resource", "Get DomainManager from Registrar object:" << application_registrar_ior ); + RH_TRACE(_resourceLog, "Get DomainManager from Registrar object:" << application_registrar_ior ); CF::DomainManager_var dm=applicationRegistrar->domMgr(); - this->_domMgr = new redhawk::DomainManagerContainer(dm); + setDomainManager(dm); return; } catch(...){ - RH_NL_WARN("Resource", "ApplicationRegistrar Failure to get DomainManager container"); + RH_WARN(_resourceLog, "ApplicationRegistrar Failure to get DomainManager container"); } } - RH_NL_TRACE("Resource", "Resolve DeviceManager..."); + RH_TRACE(_resourceLog, "Resolve DeviceManager..."); CF::DeviceManager_var devMgr = ossie::corba::_narrowSafe(applicationRegistrarObject); if (!CORBA::is_nil(devMgr)) { try { - RH_NL_TRACE("Resource", "Resolving DomainManager from DeviceManager..."); + RH_TRACE(_resourceLog, "Resolving DomainManager from DeviceManager..."); CF::DomainManager_var dm=devMgr->domMgr(); - this->_domMgr = new redhawk::DomainManagerContainer(dm); + setDomainManager(dm); return; } catch(...){ - 
RH_NL_WARN("Resource", "DeviceManager... Failure to get DomainManager container"); + RH_WARN(_resourceLog, "DeviceManager... Failure to get DomainManager container"); } - } - RH_NL_DEBUG("Resource", "All else failed.... use empty container"); - this->_domMgr = new redhawk::DomainManagerContainer(); + RH_DEBUG(_resourceLog, "All else failed.... use empty container"); + setDomainManager(CF::DomainManager::_nil()); } +void Resource_impl::setLogger(rh_logger::LoggerPtr logptr) +{ + _resourceLog = logptr; + PropertySet_impl::setLogger(this->_baseLog->getChildLogger("PropertySet", "system")); + PortSupplier_impl::setLogger(this->_baseLog->getChildLogger("PortSupplier", "system")); +} + +redhawk::DomainManagerContainer* Resource_impl::getDomainManager() +{ + return _domMgr.get(); +} + +void Resource_impl::setDomainManager(CF::DomainManager_ptr domainManager) +{ + _domMgr.reset(new redhawk::DomainManagerContainer(domainManager)); +} void Resource_impl::constructor () { @@ -131,6 +146,16 @@ throw (CORBA::SystemException) return CORBA::string_dup(_softwareProfile.c_str()); } +CF::StringSequence* Resource_impl::getNamedLoggers() { + CF::StringSequence_var retval = new CF::StringSequence(); + std::vector _loggers = this->_baseLog->getNamedLoggers(); + retval->length(_loggers.size()); + for (unsigned int i=0; i<_loggers.size(); i++) { + retval[i] = CORBA::string_dup(_loggers[i].c_str()); + } + return retval._retn(); +} + CORBA::Boolean Resource_impl::started () throw (CORBA::SystemException) { return _started; @@ -146,7 +171,7 @@ void Resource_impl::initialize () throw (CF::LifeCycle::InitializeError, CORBA:: try { constructor(); } catch (const std::exception& exc) { - LOG_ERROR(Resource_impl, "initialize(): " << exc.what()); + RH_ERROR(_resourceLog, "initialize(): " << exc.what()); CF::StringSequence messages; ossie::corba::push_back(messages, exc.what()); throw CF::LifeCycle::InitializeError(messages); @@ -158,27 +183,33 @@ void Resource_impl::releaseObject() throw 
(CORBA::SystemException, CF::LifeCycle { releasePorts(); stopPropertyChangeMonitor(); - redhawk::events::Manager::Terminate(); PortableServer::POA_ptr root_poa = ossie::corba::RootPOA(); PortableServer::ObjectId_var oid = root_poa->servant_to_id(this); root_poa->deactivate_object(oid); component_running.signal(); + + _resourceReleased(this); } void Resource_impl::run() { // Start handling CORBA requests - LOG_TRACE(Resource_impl, "handling CORBA requests"); + RH_TRACE(_resourceLog, "handling CORBA requests"); component_running.wait(); - LOG_TRACE(Resource_impl, "leaving run()"); + RH_TRACE(_resourceLog, "leaving run()"); } void Resource_impl::halt() { - LOG_DEBUG(Resource_impl, "Halting component") + RH_DEBUG(_resourceLog, "Halting component") - LOG_TRACE(Resource_impl, "Sending device running signal"); + RH_TRACE(_resourceLog, "Sending device running signal"); component_running.signal(); - LOG_TRACE(Resource_impl, "Done sending device running signal"); + RH_TRACE(_resourceLog, "Done sending device running signal"); +} + +const std::string& Resource_impl::getIdentifier() const +{ + return _identifier; } void Resource_impl::setCurrentWorkingDirectory(std::string& cwd) { @@ -188,6 +219,139 @@ void Resource_impl::setCurrentWorkingDirectory(std::string& cwd) { std::string& Resource_impl::getCurrentWorkingDirectory() { return this->currentWorkingDirectory; } + +const std::string& Resource_impl::getDeploymentRoot() const +{ + return _deploymentRoot; +} + +void Resource_impl::setCommandLineProperty(const std::string& id, const redhawk::Value& value) +{ + if (id == "PROFILE_NAME") { + _softwareProfile = value.toString(); + } else if (id == "RH::DEPLOYMENT_ROOT") { + _deploymentRoot = value.toString(); + } else { + PropertySet_impl::setCommandLineProperty(id, value); + } +} + +Resource_impl* Resource_impl::create_component(Resource_impl::ctor_type ctor, const CF::Properties& properties) +{ + const redhawk::PropertyMap& parameters = redhawk::PropertyMap::cast(properties); + 
+ std::string identifier; + std::string name_binding; + std::string application_registrar_ior; + std::string logging_config_uri; + std::string dpath; + int debug_level = -1; + redhawk::PropertyMap cmdlineProps; + for (redhawk::PropertyMap::const_iterator prop = parameters.begin(); prop != parameters.end(); ++prop) { + const std::string id = prop->getId(); + if (id == "COMPONENT_IDENTIFIER") { + identifier = prop->getValue().toString(); + } else if (id == "NAME_BINDING") { + name_binding = prop->getValue().toString(); + } else if (id == "NAMING_CONTEXT_IOR") { + application_registrar_ior = prop->getValue().toString(); + } else if (id == "DEBUG_LEVEL") { + debug_level = atoi(prop->getValue().toString().c_str()); + } else if (id == "LOGGING_CONFIG_URI") { + logging_config_uri = prop->getValue().toString(); + } else if (id == "DOM_PATH") { + dpath = prop->getValue().toString(); + } else { + cmdlineProps.push_back(*prop); + } + } + + try { + if (!application_registrar_ior.empty()) { + CORBA::Object_var applicationRegistrarObject = ossie::corba::stringToObject(application_registrar_ior); + CF::ApplicationRegistrar_var applicationRegistrar = ossie::corba::_narrowSafe(applicationRegistrarObject); + if (not CORBA::is_nil(applicationRegistrar)) { + CF::Application_var app = applicationRegistrar->app(); + if (not CORBA::is_nil(app)) { + std::string name = ossie::corba::returnString(app->name()); + std::string tpath=dpath; + if ( dpath[0] == '/' ) + tpath=dpath.substr(1); + std::vector< std::string > t; + // path should be /domain/ + boost::algorithm::split( t, tpath, boost::is_any_of("/") ); + dpath = t[0]+"/"+name; + } + } + } + } catch ( ... 
) { + } + + ossie::logging::ResourceCtxPtr ctx( new ossie::logging::ComponentCtx(name_binding, identifier, dpath ) ); + ossie::logging::Configure(logging_config_uri, debug_level, ctx); + + std::string logname = name_binding+".startup"; + RH_NL_TRACE(logname, "Creating component with identifier '" << identifier << "'"); + Resource_impl* resource = ctor(identifier, name_binding); + + resource->saveLoggingContext( logging_config_uri, debug_level, ctx ); + + // Initialize command line properties, which can include special properties + // like PROFILE_NAME. + for (redhawk::PropertyMap::const_iterator prop = cmdlineProps.begin(); prop != cmdlineProps.end(); ++prop) { + resource->setCommandLineProperty(prop->getId(), prop->getValue()); + } + + // Activate the component servant. + RH_NL_TRACE(logname, "Activating component object"); + PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->activate_object(resource); + CF::Resource_var resource_obj = resource->_this(); + + // Get the application naming context and bind the component into it. 
+ if (!application_registrar_ior.empty()) { + CORBA::Object_var applicationRegistrarObject = ossie::corba::stringToObject(application_registrar_ior); + CF::ApplicationRegistrar_var applicationRegistrar = ossie::corba::_narrowSafe(applicationRegistrarObject); + + if (!CORBA::is_nil(applicationRegistrar)) { + try { + // Set up the DomainManager container + CF::DomainManager_var domainManager = applicationRegistrar->domMgr(); + resource->setDomainManager(domainManager); + + // If it inherits from the Component class, set up the Application + // container as well + Component* component = dynamic_cast(resource); + if (component) { + CF::Application_var application = applicationRegistrar->app(); + component->setApplication(application); + } + + // Register with the application + RH_NL_TRACE(logname, "Registering with application using name '" << name_binding << "'"); + applicationRegistrar->registerComponent(name_binding.c_str(), resource_obj); + } + catch( CF::InvalidObjectReference &e ) { + RH_NL_ERROR(logname, "Exception registering with registrar, comp: " << name_binding << " exception: InvalidObjectReference"); + } + catch( CF::DuplicateName &e ){ + RH_NL_ERROR(logname, "Exception registering with registrar, comp: " << name_binding << " exception: DuplicateName"); + } + catch(CORBA::SystemException &ex){ + RH_NL_ERROR(logname, "Exception registering with registrar, comp: " << name_binding << " exception: CORBA System Exception, terminating application"); + throw; + } + + } else { + RH_NL_TRACE(logname, "Binding component to naming context with name '" << name_binding << "'"); + // the registrar is not available (because the invoking infrastructure only uses the name service) + CosNaming::NamingContext_var applicationContext = ossie::corba::_narrowSafe(applicationRegistrarObject); + ossie::corba::bindObjectToContext(resource_obj, applicationContext, name_binding); + } + } + + return resource; +} + static Resource_impl* main_component = 0; static void 
sigint_handler(int signum) { @@ -196,74 +360,73 @@ static void sigint_handler(int signum) void Resource_impl::start_component(Resource_impl::ctor_type ctor, int argc, char* argv[]) { - std::string application_registrar_ior; - std::string component_identifier; - std::string name_binding; - std::string profile = ""; - std::string nic = ""; - const char* logging_config_uri = 0; + for (int index = 1; index < argc; ++index) { + if (std::string(argv[index]) == std::string("-i")) { + std::cout<<"Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain"< execparams; - std::string logcfg_uri(""); + std::string logcfg_uri; std::string dpath(""); bool skip_run = false; - - // Parse execparams. - for (int i=0; i < argc; i++) { - if (strcmp("NAMING_CONTEXT_IOR", argv[i]) == 0) { - application_registrar_ior = argv[++i]; - } else if (strcmp("NIC", argv[i]) == 0) { - nic = argv[++i]; - } else if (strcmp("PROFILE_NAME", argv[i]) == 0) { - profile = argv[++i]; - } else if (strcmp("COMPONENT_IDENTIFIER", argv[i]) == 0) { - component_identifier = argv[++i]; - } else if (strcmp("NAME_BINDING", argv[i]) == 0) { - name_binding = argv[++i]; - } else if (strcmp("LOGGING_CONFIG_URI", argv[i]) == 0) { - logging_config_uri = argv[++i]; - } else if (strcmp("DEBUG_LEVEL", argv[i]) == 0) { - debug_level = atoi(argv[++i]); - } else if (strcmp("DOM_PATH", argv[i]) == 0) { - dpath = argv[++i]; - } else if (strcmp("-i", argv[i]) == 0) { - standAlone = true; - } else if (strcmp("SKIP_RUN", argv[i]) == 0) { + redhawk::PropertyMap cmdlineProps; + for (int i = 1; i < argc; i++) { + if (strcmp("SKIP_RUN", argv[i]) == 0) { skip_run = true; - } else if (i > 0) { // any other argument besides the first one is part of the execparams - std::string paramName = argv[i]; - execparams[paramName] = argv[++i]; + } else { + const std::string name = argv[i++]; + std::string value; + if (i < argc) { + value = argv[i]; + } else { + std::cerr << 
"No value given for " << name << std::endl; + } + if (name == "LOGGING_CONFIG_URI") { + logcfg_uri = value; + cmdlineProps[name] = value; + } else if (name == "DEBUG_LEVEL") { + debug_level = atoi(value.c_str()); + cmdlineProps[name] = value; + } else if (name == "DOM_PATH") { + dpath = value; + } else { // any other argument is part of the cmdlineProps + cmdlineProps[name] = value; + } } } - if (standAlone) { - if (component_identifier.empty()) { - component_identifier = ossie::generateUUID(); - } - if (name_binding.empty()) { - name_binding = ""; - } - } else { - if (application_registrar_ior.empty()) { - std::cout<setAdditionalParameters(profile, application_registrar_ior, nic); - - if ( !skip_run ) { - // assign the logging context to the resource to support logging interface - resource->saveLoggingContext( logcfg_uri, debug_level, ctx ); - } - - // setting all the execparams passed as argument, this method resides in the Resource_impl class - resource->setExecparamProperties(execparams); + Resource_impl* resource = create_component(ctor, cmdlineProps); std::string pathAndFile = argv[0]; unsigned lastSlash = pathAndFile.find_last_of("/"); std::string cwd = pathAndFile.substr(0, lastSlash); resource->setCurrentWorkingDirectory(cwd); - // Activate the component servant. - LOG_TRACE(Resource_impl, "Activating component object"); - PortableServer::ObjectId_var oid = ossie::corba::RootPOA()->activate_object(resource); - CF::Resource_var resource_obj = resource->_this(); - - // Get the application naming context and bind the component into it. 
- if (!application_registrar_ior.empty()) { - LOG_TRACE(Resource_impl, "Binding component to application context with name '" << name_binding << "'"); - CORBA::Object_var applicationRegistrarObject = orb->string_to_object(application_registrar_ior.c_str()); - CF::ApplicationRegistrar_var applicationRegistrar = ossie::corba::_narrowSafe(applicationRegistrarObject); - if (!CORBA::is_nil(applicationRegistrar)) { - try { - applicationRegistrar->registerComponent(name_binding.c_str(), resource_obj); - } - catch( CF::InvalidObjectReference &e ) { - LOG_ERROR(Resource_impl, "Exception registering with registrar, comp: " << name_binding << " exception: InvalidObjectReference"); - } - catch( CF::DuplicateName &e ){ - LOG_ERROR(Resource_impl, "Exception registering with registrar, comp: " << name_binding << " exception: DuplicateName"); - } - catch(CORBA::SystemException &ex){ - LOG_ERROR(Resource_impl, "Exception registering with registrar, comp: " << name_binding << " exception: CORBA System Exception, terminating application"); - try { - ossie::logging::Terminate(); - ossie::corba::OrbShutdown(true); - } - catch(...){ - } - exit(-1); - } - - } else { - // the registrar is not available (because the invoking infrastructure only uses the name service) - CosNaming::NamingContext_var applicationContext = ossie::corba::_narrowSafe(applicationRegistrarObject); - ossie::corba::bindObjectToContext(resource_obj, applicationContext, name_binding); - } - } else { - if (standAlone) { - std::cout<object_to_string(resource_obj)<run(); - LOG_TRACE(Resource_impl, "Component run loop terminated"); - - // Ignore SIGINT from here on out to ensure that the ORB gets shut down - // properly - sa.sa_handler = SIG_IGN; - sigemptyset(&sa.sa_mask); - sigaction(SIGINT, &sa, NULL); - main_component = 0; - - LOG_TRACE(Resource_impl, "Deleting component"); - resource->_remove_ref(); - LOG_TRACE(Resource_impl, "Shutting down ORB"); + // Store away a reference to the main component and establish a 
handler for + // SIGINT that will break out of run() + main_component = resource; + struct sigaction sa; + sa.sa_handler = &sigint_handler; + sigemptyset(&sa.sa_mask); + sa.sa_flags = 0; + sigaction(SIGINT, &sa, NULL); - try { - ossie::logging::Terminate(); - } catch ( ... ) {} + resource->run(); - ossie::corba::OrbShutdown(true); - } + redhawk::events::Manager::Terminate(); + + // Ignore SIGINT from here on out to ensure that the ORB gets shut down + // properly + sa.sa_handler = SIG_IGN; + sigemptyset(&sa.sa_mask); + sigaction(SIGINT, &sa, NULL); + main_component = 0; + + resource->_remove_ref(); + + try { + ossie::logging::Terminate(); + } catch ( ... ) {} + + ossie::corba::OrbShutdown(true); } catch( CORBA::SystemException &e ){ std::cerr << "Resource_impl: Unhandled CORBA exception, exiting comp: " << component_identifier << "/" << name_binding << std::endl; @@ -387,4 +496,5 @@ void Resource_impl::start_component(Resource_impl::ctor_type ctor, int argc, cha } + } diff --git a/redhawk/src/base/framework/Service_impl.cpp b/redhawk/src/base/framework/Service_impl.cpp index c6259d63e..47bfd0c74 100644 --- a/redhawk/src/base/framework/Service_impl.cpp +++ b/redhawk/src/base/framework/Service_impl.cpp @@ -31,29 +31,31 @@ void Service_impl::initResources (char* devMgr_ior, char* name) _name = name; _devMgr_ior = devMgr_ior; initialConfiguration = true; -} + _baseLog = rh_logger::Logger::getNewHierarchy(_name); + _serviceLog = _baseLog->getChildLogger("Service", "system"); +} Service_impl::Service_impl (char* devMgr_ior, char* _name) : component_running_mutex(), component_running(&component_running_mutex) { - LOG_TRACE(Service_impl, "Constructing Device") + RH_TRACE(_serviceLog, "Constructing Device") initResources(devMgr_ior, _name); - LOG_TRACE(Service_impl, "Done Constructing Device") + RH_TRACE(_serviceLog, "Done Constructing Device") } void Service_impl::resolveDeviceManager () { - LOG_TRACE(Service_impl, "entering resolveDeviceManager()"); + 
RH_TRACE(_serviceLog, "entering resolveDeviceManager()"); _deviceManager = CF::DeviceManager::_nil(); CORBA::Object_var obj = ossie::corba::Orb()->string_to_object(_devMgr_ior.c_str()); if (CORBA::is_nil(obj)) { - LOG_ERROR(Service_impl, "Invalid device manager IOR"); + RH_ERROR(_serviceLog, "Invalid device manager IOR"); exit(-1); } _deviceManager = CF::DeviceManager::_narrow(obj); if (CORBA::is_nil(_deviceManager)) { - LOG_ERROR(Service_impl, "Could not narrow device manager IOR"); + RH_ERROR(_serviceLog, "Could not narrow device manager IOR"); exit(-1); } this->_devMgr = new redhawk::DeviceManagerContainer(_deviceManager); @@ -61,7 +63,7 @@ void Service_impl::resolveDeviceManager () this->_domMgr = new redhawk::DomainManagerContainer(_deviceManager->domMgr()); return; } - LOG_TRACE(Service_impl, "leaving resolveDeviceManager()"); + RH_TRACE(_serviceLog, "leaving resolveDeviceManager()"); } void Service_impl::registerServiceWithDevMgr () @@ -71,16 +73,16 @@ void Service_impl::registerServiceWithDevMgr () void Service_impl::run () { - LOG_TRACE(Service_impl, "handling CORBA requests"); + RH_TRACE(_serviceLog, "handling CORBA requests"); component_running.wait(); - LOG_TRACE(Service_impl, "leaving run()"); + RH_TRACE(_serviceLog, "leaving run()"); } void Service_impl::halt () { - LOG_DEBUG(Service_impl, "Halting Service") + RH_DEBUG(_serviceLog, "Halting Service") component_running.signal(); - LOG_TRACE(Service_impl, "Done sending service running signal"); + RH_TRACE(_serviceLog, "Done sending service running signal"); } void Service_impl::terminateService () diff --git a/redhawk/src/base/framework/ThreadedComponent.cpp b/redhawk/src/base/framework/ThreadedComponent.cpp index 40af49513..4c761117c 100644 --- a/redhawk/src/base/framework/ThreadedComponent.cpp +++ b/redhawk/src/base/framework/ThreadedComponent.cpp @@ -19,13 +19,17 @@ */ #include +#include namespace ossie { -ProcessThread::ProcessThread(ThreadedComponent *target, float delay) : 
+PREPARE_CF_LOGGING(ProcessThread); + +ProcessThread::ProcessThread(ThreadedComponent *target, float delay, const std::string& name) : _thread(0), _running(false), _target(target), + _name(name), _mythread(_thread) { updateDelay(delay); @@ -41,12 +45,42 @@ void ProcessThread::start() void ProcessThread::run() { + // If a name was given, set it on the current thread + // NB: On RHEL/CentOS 6, the name is limited to 15 characters, and the call + // fails if the name exceeds that limit + if (!_name.empty()) { + std::string name = _name.substr(0, 15); + pthread_setname_np(pthread_self(), name.c_str()); + } + + boost::posix_time::time_duration boost_delay = boost::posix_time::microseconds(_delay.tv_sec*1e6 + _delay.tv_nsec*1e-3); while (_running) { - int state = _target->serviceFunction(); + int state; + try { + state = _target->serviceFunction(); + } catch (const std::exception& exc) { + LOG_FATAL(ProcessThread, "Unhandled exception in service function: " << exc.what()); + exit(-1); + } catch (const CORBA::Exception& exc) { + LOG_FATAL(ProcessThread, "Unhandled exception in service function: " + << ossie::corba::describeException(exc)); + exit(-1); + } catch (boost::thread_interrupted &) { + break; + } catch (...) { + LOG_FATAL(ProcessThread, "Unhandled exception in service function"); + exit(-1); + } if (state == FINISH) { return; } else if (state == NOOP) { - nanosleep(&_delay, NULL); + try { + boost::this_thread::sleep(boost_delay); + } catch (boost::thread_interrupted &) { + break; + } catch (...) 
{ + throw; + } } else { boost::this_thread::yield(); @@ -57,6 +91,7 @@ void ProcessThread::run() bool ProcessThread::release(unsigned long secs, unsigned long usecs) { _running = false; + this->stop(); if (_thread) { if ((secs == 0) && (usecs == 0)){ _thread->join(); @@ -102,6 +137,7 @@ bool ProcessThread::threadRunning() ThreadedComponent::ThreadedComponent() : serviceThread(0), serviceThreadLock(), + _threadName(), _defaultDelay(0.1) { } @@ -114,8 +150,8 @@ void ThreadedComponent::startThread () { boost::mutex::scoped_lock lock(serviceThreadLock); if (!serviceThread) { - serviceThread = new ossie::ProcessThread(this, _defaultDelay); - serviceThread->start(); + serviceThread = new ossie::ProcessThread(this, _defaultDelay, _threadName); + serviceThread->start(); } } @@ -145,3 +181,9 @@ void ThreadedComponent::setThreadDelay (float delay) serviceThread->updateDelay(delay); } } + +void ThreadedComponent::setThreadName (const std::string& name) +{ + boost::mutex::scoped_lock lock(serviceThreadLock); + _threadName = name; +} diff --git a/redhawk/src/base/framework/Transport.cpp b/redhawk/src/base/framework/Transport.cpp new file mode 100644 index 000000000..c15c71a94 --- /dev/null +++ b/redhawk/src/base/framework/Transport.cpp @@ -0,0 +1,96 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include +#include + +namespace redhawk { + + class TransportRegistry::Impl { + public: + Impl() + { + } + + void registerTransport(TransportFactory* transport) + { + const std::string repo_id = transport->repoId(); + int priority = getPriority(transport); + + TransportList& list = _registry[repo_id]; + TransportList::iterator pos = list.begin(); + while ((pos != list.end()) && (pos->priority <= priority)) { + ++pos; + } + list.insert(pos, Entry(priority, transport)); + } + + TransportStack getTransports(const std::string& repoId) + { + TransportStack stack; + TransportMap::iterator transport = _registry.find(repoId); + if (transport != _registry.end()) { + TransportList& list = transport->second; + for (TransportList::iterator iter = list.begin(); iter != list.end(); ++iter) { + stack.push_back(iter->transport); + } + } + return stack; + } + + int getPriority(TransportFactory* transport) + { + return transport->defaultPriority(); + } + + private: + struct Entry { + public: + Entry(int priority, TransportFactory* transport) : + priority(priority), + transport(transport) + { + } + + int priority; + TransportFactory* transport; + }; + + typedef std::vector TransportList; + typedef std::map TransportMap; + TransportMap _registry; + }; + + void TransportRegistry::RegisterTransport(TransportFactory* transport) + { + Instance().registerTransport(transport); + } + + TransportStack TransportRegistry::GetTransports(const std::string& repoId) + { + return Instance().getTransports(repoId); + } + + TransportRegistry::Impl& TransportRegistry::Instance() + { + static TransportRegistry::Impl instance; + return instance; + } +} diff --git a/redhawk/src/base/framework/UsesPort.cpp b/redhawk/src/base/framework/UsesPort.cpp new file mode 100644 index 000000000..3ff1a9f05 --- /dev/null +++ 
b/redhawk/src/base/framework/UsesPort.cpp @@ -0,0 +1,435 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include +#include +#include +#include + +namespace redhawk { + + namespace { + static const redhawk::PropertyMap* + findTransportProperties(const ExtendedCF::TransportInfoSequence& transports, + const std::string& transportType) + { + for (CORBA::ULong index = 0; index < transports.length(); ++index) { + if (transportType == static_cast(transports[index].transportType)) { + return &redhawk::PropertyMap::cast(transports[index].transportProperties); + } + } + return 0; + } + } + + + UsesTransport::UsesTransport(UsesPort* port) : + _port(port), + _alive(true) + { + } + + bool UsesTransport::isAlive() const + { + return _alive; + } + + void UsesTransport::setAlive(bool alive) + { + _alive = alive; + } + + + UsesPort::Connection::Connection(const std::string& connectionId, CORBA::Object_ptr objref, + UsesTransport* transport): + connectionId(connectionId), + objref(CORBA::Object::_duplicate(objref)), + transport(transport) + { + } + + UsesPort::Connection::~Connection() + { + delete transport; + } + + void UsesPort::Connection::disconnected() + { + 
transport->disconnect(); + } + + + UsesPort::UsesPort(const std::string& name) : + Port_Uses_base_impl(name) + { + } + + UsesPort::~UsesPort() + { + for (ConnectionList::iterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + delete *connection; + } + } + + void UsesPort::connectPort(CORBA::Object_ptr object, const char* connectionId) + { + RH_TRACE_ENTER(_portLog); + + // Give a specific exception message for nil + if (CORBA::is_nil(object)) { + throw CF::Port::InvalidPort(1, "Nil object reference"); + } + + // Attempt to check the type of the remote object to reject invalid + // types; note this does not require the lock + _validatePort(object); + + const std::string connection_id(connectionId); + { + // Acquire the state lock before modifying the container + boost::mutex::scoped_lock lock(updatingPortsLock); + + // Prevent duplicate connection IDs + if (_findConnection(connection_id) != _connections.end()) { + throw CF::Port::OccupiedPort(); + } + + Connection* connection = _createConnection(object, connection_id); + _connections.push_back(connection); + + RH_DEBUG(_portLog, "Using transport '" << connection->transport->transportType() + << "' for connection '" << connection_id << "'"); + + active = true; + } + + _portConnected(connectionId); + RH_TRACE_EXIT(_portLog); + } + + void UsesPort::disconnectPort(const char* connectionId) + { + RH_TRACE_ENTER(_portLog); + { + boost::mutex::scoped_lock lock(updatingPortsLock); + + ConnectionList::iterator connection = _findConnection(connectionId); + if (connection == _connections.end()) { + std::string message = std::string("No connection ") + connectionId; + throw CF::Port::InvalidPort(2, message.c_str()); + } + + RH_DEBUG(_portLog, "Disconnecting connection '" << connectionId << "'"); + UsesTransport* transport = (*connection)->transport; + try { + (*connection)->disconnected(); + } catch (const std::exception& exc) { + if (transport->isAlive()) { + RH_WARN(_portLog, "Exception 
disconnecting '" << connectionId << "': " + << exc.what()); + } + } catch (const CORBA::Exception& exc) { + if (transport->isAlive()) { + RH_WARN(_portLog, "Exception disconnecting '" << connectionId << "': " + << ossie::corba::describeException(exc)); + } + } + + delete (*connection); + _connections.erase(connection); + + if (_connections.empty()) { + active = false; + } + } + + _portDisconnected(connectionId); + RH_TRACE_EXIT(_portLog); + } + + ExtendedCF::UsesConnectionSequence* UsesPort::connections() + { + boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in + ExtendedCF::UsesConnectionSequence_var retVal = new ExtendedCF::UsesConnectionSequence(); + for (ConnectionList::iterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + ExtendedCF::UsesConnection conn; + conn.connectionId = (*connection)->connectionId.c_str(); + conn.port = CORBA::Object::_duplicate((*connection)->objref); + ossie::corba::push_back(retVal, conn); + } + return retVal._retn(); + } + + void UsesPort::_validatePort(CORBA::Object_ptr object) + { + const std::string rep_id = getRepid(); + bool valid; + try { + valid = object->_is_a(rep_id.c_str()); + } catch (...) 
{ + // If _is_a throws an exception, assume the remote object is + // unreachable (e.g., dead) + throw CF::Port::InvalidPort(1, "Object unreachable"); + } + + if (!valid) { + std::string message = "Object does not support " + rep_id; + throw CF::Port::InvalidPort(1, message.c_str()); + } + } + + UsesPort::ConnectionList::iterator UsesPort::_findConnection(const std::string& connectionId) + { + ConnectionList::iterator entry = _connections.begin(); + for (; entry != _connections.end(); ++entry) { + if ((*entry)->connectionId == connectionId) { + return entry; + } + } + return entry; + } + + UsesPort::Connection* UsesPort::_createConnection(CORBA::Object_ptr object, const std::string& connectionId) + { + UsesTransport* transport = _createTransport(object, connectionId); + return new Connection(connectionId, object, transport); + } + + bool UsesPort::_hasConnection(const std::string& connectionId) + { + return _findConnection(connectionId) != _connections.end(); + } + + NegotiableUsesPort::NegotiableUsesPort(const std::string& name) : + UsesPort(name) + { + } + + NegotiableUsesPort::~NegotiableUsesPort() + { + for (TransportManagerList::iterator manager = _transportManagers.begin(); manager != _transportManagers.end(); ++manager) { + delete *manager; + } + } + + void NegotiableUsesPort::initializePort() + { + const std::string repo_id = getRepid(); + TransportStack transports = TransportRegistry::GetTransports(repo_id); + for (TransportStack::iterator iter = transports.begin(); iter != transports.end(); ++iter) { + TransportFactory* transport = *iter; + RH_DEBUG(_portLog, "Adding uses transport '" << transport->transportType() + << "' for '" << repo_id << "'"); + _transportManagers.push_back(transport->createUsesManager(this)); + } + } + + ExtendedCF::TransportInfoSequence* NegotiableUsesPort::supportedTransports() + { + ExtendedCF::TransportInfoSequence_var transports = new ExtendedCF::TransportInfoSequence; + for (TransportManagerList::iterator manager = 
_transportManagers.begin(); manager != _transportManagers.end(); ++manager) { + ExtendedCF::TransportInfo info; + info.transportType = (*manager)->transportType().c_str(); + info.transportProperties = (*manager)->transportProperties(); + ossie::corba::push_back(transports, info); + } + return transports._retn(); + } + + ExtendedCF::ConnectionStatusSequence* NegotiableUsesPort::connectionStatus() + { + boost::mutex::scoped_lock lock(updatingPortsLock); // don't want to process while command information is coming in + ExtendedCF::ConnectionStatusSequence_var retVal = new ExtendedCF::ConnectionStatusSequence(); + for (ConnectionList::iterator connection = _connections.begin(); connection != _connections.end(); ++connection) { + ExtendedCF::ConnectionStatus status; + status.connectionId = (*connection)->connectionId.c_str(); + status.port = CORBA::Object::_duplicate((*connection)->objref); + UsesTransport* transport = (*connection)->transport; + status.alive = transport->isAlive(); + status.transportType = transport->transportType().c_str(); + status.transportInfo = transport->transportInfo(); + ossie::corba::push_back(retVal, status); + } + return retVal._retn(); + } + + class NegotiableUsesPort::NegotiatedConnection : public UsesPort::Connection + { + public: + NegotiatedConnection(const std::string& connectionId, ExtendedCF::NegotiableProvidesPort_ptr negotiablePort, + const std::string& transportId, UsesTransport* transport) : + Connection(connectionId, negotiablePort, transport), + negotiablePort(ExtendedCF::NegotiableProvidesPort::_duplicate(negotiablePort)), + transportId(transportId) + { + } + + virtual void disconnected() + { + Connection::disconnected(); + + if (!CORBA::is_nil(negotiablePort) && !transportId.empty()) { + try { + negotiablePort->disconnectTransport(transportId.c_str()); + } catch (const CORBA::Exception& exc) { + // Ignore + } + } + } + + ExtendedCF::NegotiableProvidesPort_var negotiablePort; + std::string transportId; + }; + + 
UsesPort::Connection* NegotiableUsesPort::_createConnection(CORBA::Object_ptr object, + const std::string& connectionId) + { + PortBase* local_port = ossie::corba::getLocalServant(object); + if (local_port) { + UsesTransport* transport = _createLocalTransport(local_port, object, connectionId); + if (transport) { + return new Connection(connectionId, object, transport); + } + } + + ExtendedCF::NegotiableProvidesPort_var negotiable_port = ossie::corba::_narrowSafe(object); + if (!CORBA::is_nil(negotiable_port)) { + Connection* connection = _negotiateConnection(negotiable_port, connectionId); + if (connection) { + return connection; + } + } + + UsesTransport* transport = _createTransport(object, connectionId); + return new Connection(connectionId, object, transport); + } + + UsesTransport* NegotiableUsesPort::_createLocalTransport(PortBase*, CORBA::Object_ptr, const std::string&) + { + return 0; + } + + NegotiableUsesPort::NegotiatedConnection* + NegotiableUsesPort::_negotiateConnection(ExtendedCF::NegotiableProvidesPort_ptr negotiablePort, + const std::string& connectionId) + { + RH_DEBUG(_portLog, "Trying to negotiate transport for connection '" << connectionId << "'"); + + // Check the remote side's supported transport list first, to determine + // which transports to try and their properties (e.g., hostname) + ExtendedCF::TransportInfoSequence_var supported_transports; + try { + supported_transports = negotiablePort->supportedTransports(); + } catch (const CORBA::Exception& exc) { + // Can't negotiate with an inaccessible object + RH_WARN(_portLog, "Unable to negotiate connection '" << connectionId << "': " + << ossie::corba::describeException(exc)); + return 0; + } + + // Try possible transports until one succeeds; the managers are created + // in priority order, so the highest priority is tried first + for (TransportManagerList::iterator manager = _transportManagers.begin(); manager != _transportManagers.end(); ++manager) { + // Search the supported transports 
for a match based on the type, + // getting back the remote side's properties (or null if not found) + const std::string transport_type = (*manager)->transportType(); + const redhawk::PropertyMap* transport_props = findTransportProperties(supported_transports, transport_type); + if (transport_props) { + NegotiatedConnection* connection = _negotiateTransport(negotiablePort, connectionId, *manager, *transport_props); + if (connection) { + return connection; + } + } else { + RH_DEBUG(_portLog, "Provides side for connection '" << connectionId + << "' does not support transport '" << transport_type << "'"); + } + } + return 0; + } + + NegotiableUsesPort::NegotiatedConnection* + NegotiableUsesPort::_negotiateTransport(ExtendedCF::NegotiableProvidesPort_ptr negotiablePort, + const std::string& connectionId, + UsesTransportManager* manager, + const redhawk::PropertyMap& properties) + { + const std::string transport_type = manager->transportType(); + RH_DEBUG(_portLog, "Trying to negotiate transport '" << transport_type + << "' for connection '" << connectionId << "'"); + + // Ask the transport manager to create a uses transport based on the + // remote side's transport properties. This is allowed to fail, if the + // manager doesn't think it can connect to the remote side, with a + // null return. + UsesTransport* transport = manager->createUsesTransport(negotiablePort, connectionId, properties); + if (!transport) { + return 0; + } + + // Now that the uses side transport endpoint is established, ask the + // manager for any properties it wants to pass to the remote object to + // establish the provides side transport endpoint. + // NB: This is done via the manager instead of the transport because + // local and CORBA implementations of the transport are not + // negotiated. + redhawk::PropertyMap negotiation_props = manager->getNegotiationProperties(transport); + + // Attempt to negotiate with the remote side. 
Exceptions are considered + // a soft failure here, so log the message, clean up, and try another + // transport type. + ExtendedCF::NegotiationResult_var result; + try { + result = negotiablePort->negotiateTransport(transport_type.c_str(), negotiation_props); + } catch (const ExtendedCF::NegotiationError& exc) { + RH_ERROR(_portLog, "Error negotiating transport '" << transport_type << "': " << exc.msg); + delete transport; + return 0; + } + + // Perform any final negotiation steps based on the results. This is + // the transport layer's last chance to reject the connection if there + // is some aspect of the provides side's properties it doesn't like. + const std::string transport_id(result->transportId); + try { + manager->setNegotiationResult(transport, redhawk::PropertyMap::cast(result->properties)); + + // On success, it's safe to return now + return new NegotiatedConnection(connectionId, negotiablePort, transport_id, transport); + } catch (const std::exception& exc) { + RH_ERROR(_portLog, "Error completing transport '" << transport_type << "' connection: " + << exc.what()); + } catch (...) { + RH_ERROR(_portLog, "Unknown error completing transport '" << transport_type << "' connection"); + } + + // Clean up the failed transport negotiation, which is still registered + // on the provides end. 
+ delete transport; + try { + RH_DEBUG(_portLog, "Undoing failed negotiation for transport '" << transport_type << "'"); + negotiablePort->disconnectTransport(transport_id.c_str()); + } catch (const CORBA::Exception& exc) { + RH_ERROR(_portLog, "Error undoing failed negotiation for transport '" << transport_type << "': " + << ossie::corba::describeException(exc)); + } + return 0; + } + +} diff --git a/redhawk/src/base/framework/Value.cpp b/redhawk/src/base/framework/Value.cpp index 8769c2e5a..f4e4e636a 100644 --- a/redhawk/src/base/framework/Value.cpp +++ b/redhawk/src/base/framework/Value.cpp @@ -50,6 +50,87 @@ Value& Value::operator=(const Value& any) return operator=(static_cast(any)); } +Value::Type Value::GetType(CORBA::TypeCode_ptr typecode) +{ + if (CF::_tc_Properties->equivalent(typecode)) { + return TYPE_PROPERTIES; + } else if (CF::_tc_DataType->equivalent(typecode)) { + return TYPE_DATATYPE; + } else if (CORBA::_tc_AnySeq->equivalent(typecode)) { + return TYPE_VALUE_SEQUENCE; + } + + // Remove any aliases + CORBA::TypeCode_var base_type = ossie::corba::unalias(typecode); + switch (base_type->kind()) { + case CORBA::tk_null: return TYPE_NONE; + case CORBA::tk_boolean: return TYPE_BOOLEAN; + case CORBA::tk_octet: return TYPE_OCTET; + case CORBA::tk_short: return TYPE_SHORT; + case CORBA::tk_ushort: return TYPE_USHORT; + case CORBA::tk_long: return TYPE_LONG; + case CORBA::tk_ulong: return TYPE_ULONG; + case CORBA::tk_longlong: return TYPE_LONGLONG; + case CORBA::tk_ulonglong: return TYPE_ULONGLONG; + case CORBA::tk_float: return TYPE_FLOAT; + case CORBA::tk_double: return TYPE_DOUBLE; + case CORBA::tk_string: return TYPE_STRING; + case CORBA::tk_sequence: return TYPE_SEQUENCE; + case CORBA::tk_any: return TYPE_VALUE; + default: + return TYPE_OTHER; + } +} + +bool Value::IsNumeric(Type type) +{ + switch (type) { + case TYPE_BOOLEAN: + case TYPE_OCTET: + case TYPE_SHORT: + case TYPE_USHORT: + case TYPE_LONG: + case TYPE_ULONG: + case TYPE_LONGLONG: + case 
TYPE_ULONGLONG: + case TYPE_FLOAT: + case TYPE_DOUBLE: + return true; + default: + return false; + } +} + +Value::Type Value::getType() const +{ + CORBA::TypeCode_var any_type = type(); + return Value::GetType(any_type); +} + +bool Value::isNumeric() const +{ + return Value::IsNumeric(getType()); +} + +bool Value::isSequence() const +{ + CORBA::TypeCode_var any_type = type(); + any_type = ossie::corba::unalias(any_type); + return (any_type->kind() == CORBA::tk_sequence); +} + +Value::Type Value::getElementType() const +{ + CORBA::TypeCode_var any_type = type(); + any_type = ossie::corba::unalias(any_type); + if (any_type->kind() != CORBA::tk_sequence) { + return TYPE_NONE; + } + + CORBA::TypeCode_var element_type = any_type->content_type(); + return Value::GetType(element_type); +} + std::string Value::toString() const { return ossie::any_to_string(*this); @@ -138,6 +219,11 @@ const ValueSequence& Value::asSequence() const return ValueSequence::cast(*prop_seq); } +std::ostream& redhawk::operator<<(std::ostream& out, const CORBA::Any& value) +{ + out << Value::cast(value).toString(); + return out; +} ValueSequence::ValueSequence() : CORBA::AnySeq() diff --git a/redhawk/src/base/framework/Versions.cpp b/redhawk/src/base/framework/Versions.cpp index 0153125e1..2794b3d92 100644 --- a/redhawk/src/base/framework/Versions.cpp +++ b/redhawk/src/base/framework/Versions.cpp @@ -21,22 +21,20 @@ #include namespace redhawk { - int compareVersions(std::string &a, std::string &b) { + int compareVersions(const std::string& a, const std::string& b) { - std::string token; - - if (!a.compare("sca_compliant") and !b.compare("sca_compliant")) + if (a == b) { return 0; - - if (!a.compare("sca_compliant")) + } else if (a == "sca_compliant") { return 1; - - if (!b.compare("sca_compliant")) + } else if (b == "sca_compliant") { return -1; + } std::vector first_tokens; std::vector second_tokens; try { + std::string token; std::istringstream first(a); while (std::getline(first, token, '.')) { 
if (!token.empty()) diff --git a/redhawk/src/base/framework/affinity.cpp b/redhawk/src/base/framework/affinity.cpp index ab5154b41..48a916436 100644 --- a/redhawk/src/base/framework/affinity.cpp +++ b/redhawk/src/base/framework/affinity.cpp @@ -78,6 +78,15 @@ namespace redhawk { namespace affinity { + bool check_numa() { +#ifdef HAVE_LIBNUMA + return (numa_available() != -1); +#else + return false; +#endif + } + + // // promote nic affinity to a socket if all associated cpus for the interface are blacklisted // @@ -222,35 +231,32 @@ namespace redhawk { int find_socket_for_interface ( const std::string &iface , const bool findFirst, const CpuList &bl ){ int retval=-1; - bool check_numa=false; -#ifdef HAVE_LIBNUMA - check_numa=(numa_available() != -1); -#endif // Determine cpu list by interrupts assigned for the specified NIC redhawk::affinity::CpuList cpulist = identify_cpus(iface); if ( cpulist.size() > 0 ) { - if ( check_numa ) { - int psoc=-1; -#ifdef HAVE_LIBNUMA - int soc=-1; - for( int i=0; i < (int)cpulist.size();i++ ) { - RH_NL_DEBUG("gpp::affinity", "Finding (processor socket) for NIC:" << iface << " socket :" << numa_node_of_cpu(cpulist[i]) ); - if ( std::count( bl.begin(), bl.end(), cpulist[i] ) != 0 ) continue; - soc = numa_node_of_cpu(cpulist[i]); - if ( soc != psoc && psoc != -1 && !findFirst ) { - RH_NL_WARN("gpp::affinity", "More than 1 socket servicing NIC:" << iface); - psoc=-1; - break; + if ( check_numa() ) { + int psoc=-1; +#if HAVE_LIBNUMA + int soc=-1; + for( int i=0; i < (int)cpulist.size();i++ ) { + RH_DEBUG(_affinity_logger, "Finding (processor socket) for NIC:" << iface << " socket :" << numa_node_of_cpu(cpulist[i]) ); + if ( std::count( bl.begin(), bl.end(), cpulist[i] ) != 0 ) continue; + + soc = numa_node_of_cpu(cpulist[i]); + if ( soc != psoc && psoc != -1 && !findFirst ) { + RH_WARN(_affinity_logger, "More than 1 socket servicing NIC:" << iface); + psoc=-1; + break; + } + psoc=soc; + if( findFirst ) break; } - psoc=soc; - if( 
findFirst ) break; - } #endif - retval=psoc; + retval=psoc; } else { - retval=0; + retval=0; } } @@ -271,7 +277,7 @@ namespace redhawk { } #ifdef HAVE_LIBNUMA - if ( numa_available() == -1 ) { + if ( check_numa() == false ) { return cpu_list; } @@ -507,7 +513,7 @@ namespace redhawk { RH_DEBUG(_affinity_logger, " cnt:" << cnt << " Processing Affinity pid: " << pid << " " << affinity_spec.first << ":" << affinity_spec.second ); #ifdef HAVE_LIBNUMA - if ( numa_available() == -1 ) { + if ( check_numa() == false ) { RH_WARN(_affinity_logger, "Missing affinity support from Redhawk libraries, ... ignoring numa affinity based requests "); } else { diff --git a/redhawk/src/base/framework/bitbuffer.cpp b/redhawk/src/base/framework/bitbuffer.cpp new file mode 100644 index 000000000..904481d0a --- /dev/null +++ b/redhawk/src/base/framework/bitbuffer.cpp @@ -0,0 +1,294 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include + +#include +#include + +using redhawk::shared_bitbuffer; +using redhawk::bitbuffer; + +// +// shared_bitbuffer implementation +// + +// Declare npos here so that storage is allocated for it, even though it most +// cases it is used directly as a constant; otherwise, some uses may fail to +// link. +const size_t shared_bitbuffer::npos; + +shared_bitbuffer::shared_bitbuffer() : + _M_memory(), + _M_base(0), + _M_offset(0), + _M_size(0) +{ +} + +shared_bitbuffer::shared_bitbuffer(data_type* ptr, size_t bits) : + _M_memory(ptr, _M_bits_to_bytes(bits)), + _M_base(ptr), + _M_offset(0), + _M_size(bits) +{ +} + +bool shared_bitbuffer::empty() const +{ + return (_M_size == 0); +} + +size_t shared_bitbuffer::size() const +{ + return _M_size; +} + +const shared_bitbuffer::data_type* shared_bitbuffer::data() const +{ + return _M_base; +} + +size_t shared_bitbuffer::offset() const +{ + return _M_offset; +} + +int shared_bitbuffer::operator[] (size_t pos) const +{ + return bitops::getbit(data(), offset() + pos); +} + +uint64_t shared_bitbuffer::getint(size_t pos, size_t bits) const +{ + _M_check_pos(pos + bits, size(), "redhawk::shared_bitbuffer::getint()"); + return bitops::getint(data(), offset() + pos, bits); +} + +void shared_bitbuffer::trim(size_t start, size_t end) +{ + // Check indices for range, which may update end if it was not given, or + // larger than the source size. 
+ _M_check_range(start, end, size(), "redhawk::shared_bitbuffer::trim"); + _M_offset += start; + _M_size = (end - start); + + // Normalize base pointer and offset, so that offset is always in the range + // [0, 8) + _M_base += (_M_offset / 8); + _M_offset &= 7; +} + +shared_bitbuffer shared_bitbuffer::slice(size_t start, size_t end) const +{ + shared_bitbuffer result(*this); + result.trim(start, end); + return result; +} + +void shared_bitbuffer::swap(shared_bitbuffer& other) +{ + _M_memory.swap(other._M_memory); + std::swap(_M_base, other._M_base); + std::swap(_M_offset, other._M_offset); + std::swap(_M_size, other._M_size); +} + +int shared_bitbuffer::popcount() const +{ + return bitops::popcount(data(), offset(), size()); +} + +int shared_bitbuffer::distance(const shared_bitbuffer& other) const +{ + return bitops::hammingDistance(data(), offset(), other.data(), other.offset(), other.size()); +} + +size_t shared_bitbuffer::find(size_t start, const shared_bitbuffer& pattern, int maxDistance) const +{ + return bitops::find(data(), offset() + start, size(), pattern.data(), pattern.offset(), pattern.size(), maxDistance); +} + +shared_bitbuffer shared_bitbuffer::make_transient(const data_type* data, size_t start, size_t bits) +{ + shared_bitbuffer result; + result._M_base = const_cast(data); + result._M_offset = start; + result._M_size = bits; + return result; +} + +void shared_bitbuffer::_M_check_pos(size_t pos, size_t size, const char* name) +{ + if (pos > size) { + throw std::out_of_range(name); + } +} + +void shared_bitbuffer::_M_check_range(size_t start, size_t& end, size_t size, const char* name) +{ + _M_check_pos(start, size, name); + if (end < start) { + throw std::invalid_argument(std::string(name) + " end is before start"); + } + end = std::min(end, size); +} + +size_t shared_bitbuffer::_M_takeskip_size(size_t size, size_t take, size_t skip) +{ + size_t pass = take + skip; + size_t iterations = size / pass; + size_t remain = std::min(take, size % pass); + 
return (iterations * take) + remain; +} + +// +// bitbuffer::reference implementation +// +bitbuffer::reference::reference(data_type* data, size_t pos) : + _M_data(data), + _M_pos(pos) +{ +} + +bitbuffer::reference::operator int () const +{ + return bitops::getbit(_M_data, _M_pos); +} + +bitbuffer::reference& bitbuffer::reference::operator=(bool value) +{ + bitops::setbit(_M_data, _M_pos, value); + return *this; +} + +bitbuffer::reference& bitbuffer::reference::operator=(const bitbuffer::reference& other) +{ + return *this = int(other); +} + +// +// bitbuffer implementation +// +bitbuffer::bitbuffer() : + shared_bitbuffer() +{ +} + +bitbuffer::data_type* bitbuffer::data() +{ + return const_cast(shared_bitbuffer::data()); +} + +void bitbuffer::fill(size_t start, size_t end, bool value) +{ + size_t bits = end - start; + bitops::fill(data(), offset() + start, bits, value); +} + +bitbuffer::reference bitbuffer::operator[] (size_t pos) +{ + return reference(data(), offset() + pos); +} + +void bitbuffer::setint(size_t pos, uint64_t value, size_t bits) +{ + _M_check_pos(pos + bits, size(), "redhawk::bitbuffer::setint()"); + bitops::setint(data(), offset() + pos, value, bits); +} + +bitbuffer bitbuffer::slice(size_t start, size_t end) +{ + _M_check_pos(start, size(), "redhawk::bitbuffer::slice()"); + bitbuffer temp(*this); + temp.trim(start, end); + return temp; +} + +void bitbuffer::replace(size_t pos, size_t bits, const shared_bitbuffer& src, size_t srcpos) +{ + redhawk::bitops::copy(data(), offset() + pos, src.data(), src.offset() + srcpos, bits); +} + +size_t bitbuffer::takeskip(size_t pos, const shared_bitbuffer& src, size_t take, size_t skip, size_t start, size_t end) +{ + // Check indices for range, which may update end if it was not given, or + // larger than the source size. 
+ _M_check_range(start, end, src.size(), "redhawk::bitbuffer::takeskip"); + // Check size of destination to ensure it can hold enough bits + size_t count = end - start; + size_t required = _M_takeskip_size(count, take, skip); + if ((size() - pos) < required) { + throw std::length_error("redhawk::bitbuffer::takeskip"); + } + // Account for internal bit offsets + pos += offset(); + start += src.offset(); + return bitops::takeskip(data(), pos, src.data(), start, count, take, skip); +} + +void bitbuffer::swap(bitbuffer& other) +{ + // Use base class swap, with the caveat that we have to do a cast so that + // it can complile (the base class explicitly disallows swapping with a + // mutable buffer to prevent accidental end-runs around const protection) + shared_bitbuffer::swap(static_cast(other)); +} + +void bitbuffer::_M_parse(const std::string& str) +{ + int count = bitops::parseString(data(), offset(), str.c_str(), str.size()); + if (count < (int) str.size()) { + std::string message = "invalid character '"; + message += str[count]; + message += '\''; + throw std::invalid_argument(message); + } +} + +void bitbuffer::_M_resize(bitbuffer& dest) +{ + size_t bits = std::min(size(), dest.size()); + redhawk::bitops::copy(dest.data(), dest.offset(), data(), offset(), bits); + this->swap(dest); +} + +// +// global operator implementations +// +bool redhawk::operator==(const shared_bitbuffer& lhs, const shared_bitbuffer& rhs) +{ + if (lhs.size() != rhs.size()) { + // Different sizes always compare unequal + return false; + } else if ((lhs.data() == rhs.data()) && (lhs.offset() == rhs.offset())) { + // If the data pointer and offsets are the same (the size is already + // known to be the same), no further comparison is required + return true; + } else { + // Perform element-wise comparison + return bitops::compare(lhs.data(), lhs.offset(), rhs.data(), rhs.offset(), lhs.size()) == 0; + } +} + +bool redhawk::operator!=(const shared_bitbuffer& lhs, const shared_bitbuffer& rhs) +{ 
+ return !(lhs == rhs); +} diff --git a/redhawk/src/base/framework/bitops.cpp b/redhawk/src/base/framework/bitops.cpp new file mode 100644 index 000000000..6695c368e --- /dev/null +++ b/redhawk/src/base/framework/bitops.cpp @@ -0,0 +1,972 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include +#include +#include +#include +#include + +namespace redhawk { +namespace bitops { + + namespace { + // Normalizes a byte pointer and bit offset so that the offset is + // always in the range [0,8). The data type is templatized to support + // const and non-const pointers. 
+ template + static inline size_t adjust_buffer(T*& buffer, size_t offset) + { + buffer += (offset / 8); + return offset & 0x7; + } + + // Generates a bitmask with the least-significant N bits set + static inline byte bitmask(size_t nbits) + { + return (1 << nbits) - 1; + } + + // Lookup table of Hamming weights by byte value + static const int hammingWeights[] = { + 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, + 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8 + }; + + // Tags for describing how a unary or binary operation accesses the bit + // arrays, allowing the apply functions to support get/set/modify with + // the same code + + // Only read from the operand (e.g., population count) + struct read_tag {}; + + // Only write to the operand (e.g., fill) + struct write_tag {}; + + // Read and modify the operation (e.g., negate) + struct readwrite_tag {}; + + // Tags for bit operation function bodies with exact alignment for all + // bit arrays (e.g., in a binary operation, both bit arrays have the + // same alignment) + + // Use the normal operator() to process element-by-element + struct element_tag {}; + + // Use array-based operator() to process all at once (e.g., in copy, + // using memcpy to transfer) + struct array_tag {}; + + // Handle reading of partial and split bytes 
+ struct bit_reader { + static inline byte read(const byte* src, size_t offset, size_t bits) + { + const size_t shift = 8 - (offset + bits); + return ((*src) >> shift) & bitmask(bits); + } + + static inline byte read(const byte* src, size_t bits) + { + // Get the leftmost bits from src; masking is unnecessary, as + // unsigned values are zero-filled on right shift + const size_t shift = 8 - bits; + return ((*src) >> shift); + } + + static inline byte read_split(const byte* src, size_t offset) + { + // The byte value is formed by combining two adjacent bytes + // from src and then shifting down. This is a pretty efficient + // way to handle unaligned values. Although doing this in a + // loop requires accessing each byte twice, the CPU cache + // should mitigate that cost. + size_t shift = 8 - offset; + return ((src[0] << 8) | src[1]) >> shift; + } + + static inline byte read_split(const byte* src, size_t offset, size_t bits) + { + // Like full-byte read_split, but it is not known whether the + // second byte at src is required (or even accessible). The + // first byte is loaded into the high 8 bits of at 16-bit + // value, and the low 8 bits are set from the byte that + // contains the last bit, which may still be the first byte. + // Doing it this way avoids a conditional, the performance of + // which is less predictable. 
+ uint16_t value = src[0] << 8; + size_t last = offset + bits - 1; // index of last bit + value |= src[last/8]; + // Shift to put the last bit at the LSB, and mask to get only + // the requested number of bits + size_t shift = 15 - last; + return (value >> shift) & bitmask(bits); + } + }; + + // Handle writing of partial and split bytes + struct bit_writer { + static inline void write(byte value, byte* dest, size_t offset, size_t bits) + { + // Preserve the leftmost bits, and if necessary, the rightmost + // bits as well + const size_t shift = 8 - (offset + bits); + const byte mask = bitmask(bits) << shift; + *dest = ((*dest) & ~mask) | ((value << shift) & mask); + } + + static inline void write(byte value, byte* dest, size_t bits) + { + // Sets leftmost bits in dest from value + const size_t shift = 8 - bits; + const byte mask = bitmask(shift); + *dest = ((*dest) & mask) | (value << shift); + } + + static inline void write_split(byte value, byte* dest, size_t offset) + { + // 8-bit split write is equivalent to two partial writes, one + // to each byte + write(value >> offset, dest, offset, 8 - offset); + write(value, dest + 1, offset); + } + + static inline void write_split(byte value, byte* dest, size_t offset, size_t bits) + { + // Fewer than 8 bits may or may not be split into two bytes; + // write as many bits as possible to the first byte, and then + // if necessary, write the remainder to the second byte + size_t left = std::min(8 - offset, bits); + write(value >> (bits - left), dest, offset, left); + if (bits > left) { + write(value, dest + 1, bits - left); + } + } + }; + + // Traits-like class to take a read/write tag and provide an interface + // to read and write bits to the operands as necessary. For example, if + // an operation reads from but does not write to a bit array, the read + // functions return meaningful values but the write functions are no- + // ops, which can be eliminated by the compiler. 
+ template + struct bit_handler; + + // Read-only: add no-op write interface to bit reader + template <> + struct bit_handler : public bit_reader + { + static inline void write(byte, const byte*, size_t, size_t) + { + } + + static inline void write(byte, const byte*, size_t) + { + } + + static inline void write_split(byte, const byte*, size_t) + { + } + + static inline void write_split(byte, const byte*, size_t, size_t) + { + } + }; + + // Write-only: add no-op read interface to bit writer + template <> + struct bit_handler : public bit_writer + { + static inline byte read(byte*, size_t, size_t) + { + return byte(); + } + + static inline byte read(byte*, size_t) + { + return byte(); + } + + static inline byte read_split(byte*, size_t) + { + return byte(); + } + + static inline byte read_split(byte*, size_t, size_t) + { + return byte(); + } + }; + + // Read/write: compose bit reader and bit writer + template <> + struct bit_handler : public bit_reader, public bit_writer + { + }; + + // Unary function body for aligned, full-byte operations, performed + // element-by-element + template + inline void unary_body(T*& data, size_t bytes, Func& func, element_tag) + { + for (size_t ii = 0; ii < bytes; ++ii) { + func(*data++, 8); + } + } + + // Unary function body for aligned, full-byte operations, where the + // function supports array-based operation + template + inline void unary_body(T*& data, size_t bytes, Func& func, array_tag) + { + func(data, bytes); + data += bytes; + } + + // Applies a unary function across an array of bits. + // The data type, T, is templatized to support const/non-const byte + // data (inner functions eventually specify "const byte*" or "byte*", + // so the set of supported types is bounded). + // The functor type, Func, must be a unary operator, preferably a + // subclass of UnaryGetter/UnarySetter. 
It must have nested typedefs + // for dispatching to the correct functions for reading/writing and the + // main function body: + // mode_tag - one of read_tag, write_tag or readwrite_tag + // body_tag - element_tag for element-by-element processing of + // aligned, full-byte data, or array_tag if Func has an + // array-based operator() that can be applied to the + // whole data set + // + // This template function is designed to support all types of read/ + // write/update methods across a bit array as efficiently as possible, + // while ensuring correctness irrespective of alignment or bit count. + // As much as possible, conditionals are avoided, and the compiler will + // eliminate code that has no effect, such as no-op reads and writes, + // or checking for function completion when the functor's complete() + // method always returns false. + template + inline typename Func::return_type apply_unary(T* data, size_t offset, size_t count, Func func) + { + typedef bit_handler handler; + + // Adjust packed bit pointer and offset so offset is less then 8 + offset = adjust_buffer(data, offset); + + // First, account for an unaligned starting bit + if (offset) { + size_t nbits = std::min(8 - offset, count); + // Fetch a subset of the first byte (a no-op if the array is + // only written) + byte value = handler::read(data, offset, nbits); + + // Apply the function to the value, which may be modified if + // func takes it by reference + func(value, nbits); + + // Write out the modified value (a no-op if the array is only + // read) + handler::write(value, data, offset, nbits); + if (func.complete()) { + return func.returns(); + } + ++data; + count -= nbits; + } + + // Apply function to aligned, full-byte values + const size_t bytes = count / 8; + unary_body(data, bytes, func, typename Func::body_tag()); + if (func.complete()) { + return func.returns(); + } + + // If less than a full byte remains, process it + size_t remain = count & 7; + if (remain) { + // Fetch, apply 
and write (as needed) from the leftmost N bits + byte value = handler::read(data, remain); + func(value, remain); + handler::write(value, data, remain); + } + + return func.returns(); + } + + // Binary function body for exact alignment between left and right + // sides, performing the operation element-by-element + template + void binop_body_aligned(T1*& lhs, T2*& rhs, size_t bytes, Func& func, element_tag) + { + for (size_t ii = 0; ii < bytes; ++ii) { + func(*lhs, *rhs, 8); + if (func.complete()) { + return; + } + ++lhs; + ++rhs; + } + } + + // Binary function body for exact alignment between left and right + // sides, where the functor supports array-based operation + template + void binop_body_aligned(T1*& lhs, T2*& rhs, size_t bytes, Func& func, array_tag) + { + func(lhs, rhs, bytes); + lhs += bytes; + rhs += bytes; + } + + // Applies a binary function across two equal-length arrays of bits. + // The data types, T1 and T2, are templatized to support const/non- + // const byte data (inner functions eventually specify "const byte*" + // or "byte*", so the set of supported types is bounded). + // The functor type, Func, must be a binary operator, preferably a + // subclass of BinaryGetter/BinarySetter. It must have nested typedefs + // for dispatching to the correct functions for reading/writing from/to + // the bit arrays, and the main function body: + // left_mode_tag - how lhs is accessed (one of read_tag, write_tag + // or readwrite_tag) + // right_mode_tag - how rhs is accessed (one of read_tag, write_tag + // or readwrite_tag) + // body_tag - element_tag for element-by-element processing of + // exact aligned, full-byte data, or array_tag if + // Func has an array-based operator() that can be + // applied to a whole data set + // + // This template function is designed to support all types of read/ + // write/update methods across two bit arrays as efficiently as + // possible, while ensuring correctness irrespective of alignment or + // bit count. 
As much as possible, conditionals are avoided, and the + // compiler will eliminate code that has no effect, such as no-op reads + // and writes, or checking for function completion when the functor's + // complete() method always returns false. + template + typename Func::return_type apply_binop(T1* lhs, size_t lhs_offset, + T2* rhs, size_t rhs_offset, + size_t count, Func func) + { + typedef bit_handler lhs_handler; + typedef bit_handler rhs_handler; + + // Adjust pointers and offsets so offsets are less then 8 + lhs_offset = adjust_buffer(lhs, lhs_offset); + rhs_offset = adjust_buffer(rhs, rhs_offset); + + // If the left hand side is not byte-aligned, apply the operation + // to a sub-byte number of bits so that remaining iterations are + // aligned + if (lhs_offset) { + const size_t nbits = std::min(8 - lhs_offset, count); + + // Get the left and right hand values for the operand (which + // may be no-ops in the case of an array that is only written, + // not read); the right hand side may be split between two + // bytes + byte lhs_value = lhs_handler::read(lhs, lhs_offset, nbits); + byte rhs_value = rhs_handler::read_split(rhs, rhs_offset, nbits); + + // Apply the function to the values, which may be modified if + // func takes one or both by reference + func(lhs_value, rhs_value, nbits); + + // Write back the updated values (again, no-ops if the arrays + // are only read) + lhs_handler::write(lhs_value, lhs, lhs_offset, nbits); + rhs_handler::write_split(rhs_value, rhs, rhs_offset, nbits); + if (func.complete()) { + return func.returns(); + } + + // Advance to the next byte for the left hand side, and adjust + // the offset for the right hand side (which may advance to the + // next byte as well) + ++lhs; + rhs_offset = adjust_buffer(rhs, rhs_offset + nbits); + count -= nbits; + } + + // Left offset is now guaranteed to be 0; if the right offset is + // also 0, then both sides are exactly byte-aligned + const size_t bytes = count / 8; + if (rhs_offset == 0) { 
+ binop_body_aligned(lhs, rhs, bytes, func, typename Func::body_tag()); + if (func.complete()) { + return func.returns(); + } + } else { + // The two bit arrays are not exactly aligned; iterate through + // each byte from the left-hand side + for (size_t ii = 0; ii < bytes; ++ii) { + // Fetch the right-hand value from two adjacent bytes if + // needed + byte rhs_value = rhs_handler::read_split(rhs, rhs_offset); + // Apply the function; it is not necessary to read or write + // via a bit handler because we are using the real byte + // address + func(*lhs, rhs_value, 8); + // Write back the right hand side (if needed) + rhs_handler::write_split(rhs_value, rhs, rhs_offset); + if (func.complete()) { + return func.returns(); + } + ++lhs; + ++rhs; + } + } + + // If less than a full byte remains, process it + const size_t remain = count & 7; + if (remain) { + // The left hand side is byte aligned, read the left-justified + // bits (if needed); the right hand side may or may not be split + // across two bytes, depending on the alignment and number of + // bits + byte lhs_value = lhs_handler::read(lhs, remain); + byte rhs_value = rhs_handler::read_split(rhs, rhs_offset, remain); + func(lhs_value, rhs_value, remain); + // Write results if needed (see above) + lhs_handler::write(lhs_value, lhs, remain); + rhs_handler::write_split(rhs_value, rhs, rhs_offset, remain); + } + return func.returns(); + } + } // end anonymous namespace + + // Base class for operations, supporting a return value and completion + // status. By default, an operation is never "complete" in the sense that + // it does not need to return early, but subclasses may override complete() + // in order to stop processing and return immediately (e.g., compare). 
+ template + class Operation { + public: + typedef R return_type; + + Operation(return_type rv=0) : + result(rv) + { + } + + return_type returns() + { + return result; + } + + bool complete () + { + return false; + } + + R result; + }; + + // Template specialization for void return, which cannot have a result + // member variable + template <> + class Operation { + public: + typedef void return_type; + + void returns() + { + } + + bool complete () + { + return false; + } + }; + + // Base class for unary operations to ensure they define the required tags. + // In most cases, new operations should extend UnarySetter or UnaryGetter. + template + class UnaryOp : public Operation { + public: + typedef Mode mode_tag; + typedef Body body_tag; + }; + + // Base class for unary getters; that is, functions that read from a bit + // array. Inheriting from this class defines the required function dispatch + // tags to ensure that apply_unary only reads from the input array. + template + class UnaryGetter : public UnaryOp + { + public: + // void operator() (byte value, size_t bits); + }; + + // Base class for unary setters; that is, functions that write to a bit + // array. Inheriting from this class defines the required function dispatch + // tags to ensure that apply_unary only writes to input array. + template + class UnarySetter : public UnaryOp + { + public: + // void operator() (byte& value, size_t bits); + }; + + // Base class for binary operations to ensure they define the required + // tags. In most cases, new operations should extend BinarySetter or + // BinaryGetter. + template + class BinaryOp : public Operation { + public: + typedef LeftMode left_mode_tag; + typedef RightMode right_mode_tag; + typedef Body body_tag; + }; + + // Base class for binary getters; that is, functions that read values from + // two bit arrays. Inheriting from this class defines the required function + // dispatch tags to ensure that apply_binop only reads from both arrays. 
+ template + class BinaryGetter : public BinaryOp + { + // void operator() (byte lhs, byte rhs, size_t bits); + }; + + // Base class for binary setters; that is, functions that read values from + // one bit array and write them to another. Inheriting from this class + // defines the required function dispatch tags to ensure that apply_binop + // only reads from the right array and writes to the left array. + template + class BinarySetter : public BinaryOp + { + // void operator() (byte& lhs, byte rhs, size_t bits); + }; + + + // + // Public function implementations + // + bool getbit(const byte* str, size_t pos) + { + const size_t bit_offset = adjust_buffer(str, pos); + return ((*str) >> (7 - bit_offset)) & 1; + } + + void setbit(byte* str, size_t pos, bool value) + { + const size_t bit_offset = adjust_buffer(str, pos); + const size_t shift = (7 - bit_offset); + const byte mask = ~(1 << shift); + *str = ((*str) & mask) | (value << shift); + } + + // Unary getter functor that accumulates an integer value of up to 64 bits + // by shifting and or-ing successive bits from an array of bits. The first + // bit is always in the MSB. + class GetInteger : public UnaryGetter { + public: + inline void operator() (byte value, size_t bits) + { + // Shift the existing value over to accomodate the new bits, + // which maintains the first bit in the MSB + result = (result << bits) | value; + } + }; + + uint64_t getint(const byte* str, size_t start, size_t bits) + { + if (bits > 64) { + throw std::length_error("redhawk::bitops::getint()"); + } + return apply_unary(str, start, bits, GetInteger()); + } + + // Unary setter functor that takes an integer value of up to 64 bits and + // returns successive bits from that value starting with the MSB. 
+ class SetInteger : public UnarySetter { + public: + SetInteger(uint64_t value, int bits) : + // Shift the value up to the MSB of the 64-bit integer + value(value << (64 - bits)) + { + } + + inline void operator() (byte& dest, size_t bits) + { + // The current value is in the MSB, shift the requested number of + // bits down to the LSB + dest = (value >> (64-bits)); + // Shift the next bits up to the MSB + value <<= bits; + } + + uint64_t value; + }; + + void setint(byte* str, size_t start, uint64_t value, size_t bits) + { + if (bits > 64) { + throw std::length_error("redhawk::bitops::setint()"); + } + apply_unary(str, start, bits, SetInteger(value, bits)); + } + + // Unary setter functor that fills a bit array with either 0's or 1's. For + // the aligned full-byte case, a second operator() is defined that uses + // memset, which is faster than setting each byte individually. + class Fill : public UnarySetter { + public: + Fill(bool value) : + value(-1 * value) // all bits 1 if true, 0 if false + { + } + + inline void operator() (byte& dest, size_t /*unused*/) + { + dest = value; + } + + inline void operator() (byte* dest, size_t bytes) + { + memset(dest, value, bytes); + } + + const byte value; + }; + + void fill(byte* str, size_t start, size_t length, bool value) + { + apply_unary(str, start, length, Fill(value)); + } + + // Unary setter functor that takes a source array of byte values and packs + // each byte value into a bit, where a zero byte results in a 0 bit and any + // non-zero value results in a 1 bit. 
+ class Pack : public UnarySetter { + public: + Pack(const byte* src) : + src(src) + { + } + + inline void operator() (byte& dest, size_t bits) + { + // NB: Accumulate the packed value in a temporary variable so the + // compiler knows it doesn't have to write back to dest on each + // iteration + byte value = 0; + for (size_t ii = 0; ii < bits; ++ii) { + value = (value << 1) | ((*src++)?1:0); + } + dest = value; + } + + const byte* src; + }; + + void pack(byte* dest, size_t dstart, const byte* src, size_t length) + { + apply_unary(dest, dstart, length, Pack(src)); + } + + // Unary getter functor that takes a destination array of byte values and + // unpacks each bit value into a byte, where a bit value of 0 expands to a + // byte value of 0, and a bit value of 1 expands to a byte value of 1. + class Unpack : public UnaryGetter { + public: + Unpack(byte* dest) : + dest(dest) + { + } + + inline void operator() (byte value, size_t bits) + { + // When bits is known at compile time (i.e., in the aligned full- + // byte case), the compiler will usually unroll this loop to remove + // the conditional check + for (int pos = (bits-1); pos >= 0; --pos) { + *dest++ = (value >> pos) & 1; + } + } + + byte* dest; + }; + + void unpack(byte* dest, const byte* src, size_t sstart, size_t length) + { + apply_unary(src, sstart, length, Unpack(dest)); + } + + // Binary setter to copy bits, using memcpy for accelerated copies when + // both arrays are exactly aligned. 
+ class Copy : public BinarySetter + { + public: + inline void operator() (byte& dest, byte src, size_t /*unused*/) + { + dest = src; + } + + inline void operator() (byte* dest, const byte* src, size_t bytes) + { + // Copy aligned bytes directly + std::memcpy(dest, src, bytes); + } + }; + + void copy(byte* dest, size_t dstart, const byte* src, size_t sstart, size_t length) + { + apply_binop(dest, dstart, src, sstart, length, Copy()); + } + + // Comparison functor, returning: + // * A positive integer if the left operand is greater than the right + // * Zero if the left operand is equal to the right + // * A negative integer if the left operand is less than the right + // The complete() method is overridden to return early once a difference is + // found. + class Compare : public BinaryGetter { + public: + inline void operator() (byte lhs, byte rhs, size_t /*unused*/) { + if (lhs == rhs) { + result = 0; + } else if (lhs > rhs) { + result = 1; + } else { + result = -1; + } + } + + inline void operator() (const byte* lhs, const byte* rhs, size_t bytes) + { + result = memcmp(lhs, rhs, bytes); + } + + bool complete() + { + return (result != 0); + } + }; + + int compare(const byte* s1, size_t start1, const byte* s2, size_t start2, size_t length) + { + return apply_binop(s1, start1, s2, start2, length, Compare()); + } + + // Unary getter functor that returns the population count (Hamming weight) + // of the input bit array. + class Popcount : public UnaryGetter { + public: + inline void operator() (byte value, size_t /*unused*/) + { + result += hammingWeights[value]; + } + }; + + int popcount(const byte* str, size_t offset, size_t count) + { + return apply_unary(str, offset, count, Popcount()); + } + + // Unary getter functor that turns a bit string into a character string. 
+ class ToString : public UnaryGetter { + public: + ToString(char* dest) : + dest(dest) + { + } + + inline void operator() (byte value, size_t bits) + { + // When bits is known at compile time (i.e., in the aligned full- + // byte case), the compiler will usually unroll this loop to remove + // the conditional check + for (int pos = (bits-1); pos >= 0; --pos) { + // Simple optimization: the value has to be 0 or 1, and the + // ASCII characters for 0 and 1 are adjacent, so adding the bit + // value to '0' gives the right value, as long as you cast back + // to a char (addition promotes to int here). + *dest++ = '0' + ((value >> pos) & 1); + } + } + + char* dest; + }; + + void toString(char* str, const byte* ptr, size_t start, size_t length) + { + apply_unary(ptr, start, length, ToString(str)); + } + + // Unary functor that takes a character string and parses into a sequence + // of bits, returning the number of characters parsed. + // The complete() method is overridden to return early if an invalid + // character (not '0' or '1') is encountered, and the return value will be + // less than the requested number of characters. + // In order to handle the possibility of a partial write, this functor does + // not inherit from the expected UnarySetter, but instead uses read/write + // functionality to avoid overwriting exisiting bits. + class FromString : public UnaryOp { + public: + FromString(const char* src) : + src(src), + valid(true) + { + } + + inline void operator() (byte& dest, size_t bits) + { + byte value = 0; + for (size_t ii = 0; ii < bits; ++ii) { + byte bit = 0; + switch (*src) { + case '0': bit = 0; break; + case '1': bit = 1; break; + default: + // Invalid character: stop parsing, write the valid bits + // (which are in the least-significant bits, and the count + // is equal to the loop index) and return. 
+ valid = false; + const size_t offset = 8 - bits; + bit_writer::write(value, &dest, offset, ii); + return; + } + value = (value << 1) | bit; + ++src; + ++result; + } + // Everything worked as planned, assign the value + dest = value; + } + + bool complete() + { + return !valid; + } + + inline byte parse(size_t bits) + { + byte value = 0; + for (size_t ii = 0; ii < bits; ++ii) { + byte bit = 0; + switch (*src) { + case '0': bit = 0; break; + case '1': bit = 1; break; + default: + // Stop parsing and return immediately + valid = false; + return value; + } + value = (value << 1) | bit; + ++src; + ++result; + } + return value; + } + + const char* src; + bool valid; + }; + + int parseString(byte* dest, size_t dstart, const char* str, size_t length) + { + return apply_unary(dest, dstart, length, FromString(str)); + } + + // Hamming distance functor that accumulates the number of bit positions + // that differ between two bit arrays. + class HammingDist : public BinaryGetter { + public: + inline void operator() (byte lhs, byte rhs, size_t /*unused*/) { + result += hammingWeights[lhs ^ rhs]; + } + }; + + int hammingDistance(const byte* s1, size_t start1, const byte* s2, size_t start2, size_t length) + { + return apply_binop(s1, start1, s2, start2, length, HammingDist()); + } + + + // Hamming distance-based comparsion, for inexact search up to a maximum + // number of bit differences. The complete() method is overridden to return + // early once a the Hamming distance exceeds the threshold. 
+ class HammingCompare : public HammingDist { + public: + HammingCompare(int maxDistance) : + _maxDistance(maxDistance) + { + } + + bool complete() + { + return result > _maxDistance; + } + + private: + int _maxDistance; + }; + + int find(const byte* str, size_t sstart, size_t slen, + const byte* patt, size_t pstart, size_t plen, + int maxdist) + { + // Basic validity checks + if (slen < plen) { + throw std::logic_error("pattern is longer than string"); + } + + const size_t end = slen - plen; + for (size_t index = sstart; index < end; ++index) { + // Use a Hamming calculation that short-circuits if the maximum + // distance is exceeded + int dist = apply_binop(str, index, patt, pstart, plen, HammingCompare(maxdist)); + if (dist <= maxdist) { + return index; + } + } + return -1; + } + + size_t takeskip(byte* dest, size_t dstart, + const byte* src, size_t sstart, size_t slen, + size_t take, size_t skip) + { + size_t dest_pos = dstart; + size_t end = sstart + slen; + for (; sstart < end; sstart += (take+skip)) { + size_t pass = std::min(take, end-sstart); + copy(dest, dest_pos, src, sstart, pass); + dest_pos += pass; + } + return dest_pos - dstart; + } + +} // namespace bitops +} // namespace redhawk diff --git a/redhawk/src/base/framework/idl/Makefile.am b/redhawk/src/base/framework/idl/Makefile.am index d5153119d..927919a03 100644 --- a/redhawk/src/base/framework/idl/Makefile.am +++ b/redhawk/src/base/framework/idl/Makefile.am @@ -50,6 +50,8 @@ BUILT_SOURCES = cfSK.cpp \ ExtendedEventDynSK.cpp \ QueryablePortSK.cpp \ QueryablePortDynSK.cpp \ + NegotiablePortSK.cpp \ + NegotiablePortDynSK.cpp \ WellKnownPropertiesSK.cpp \ WellKnownPropertiesDynSK.cpp \ sandboxSK.cpp \ @@ -63,7 +65,7 @@ CLEANFILES = $(BUILT_SOURCES) cfheaderdir = $(pkgincludedir)/CF -dist_cfheader_HEADERS = LogInterfaces.h EventChannelManager.h cf.h DataType.h Port.h PortTypes.h StandardEvent.h AggregateDevices.h ExtendedEvent.h QueryablePort.h WellKnownProperties.h sandbox.h +dist_cfheader_HEADERS = 
LogInterfaces.h EventChannelManager.h cf.h DataType.h Port.h PortTypes.h StandardEvent.h AggregateDevices.h ExtendedEvent.h QueryablePort.h NegotiablePort.h WellKnownProperties.h sandbox.h #nodist_pkginclude_HEADERS = cf.h PortTypes.h StandardEvent.h lib_LTLIBRARIES = libossieidl.la diff --git a/redhawk/src/base/framework/inplace_list.h b/redhawk/src/base/framework/inplace_list.h new file mode 100644 index 000000000..9a1eea623 --- /dev/null +++ b/redhawk/src/base/framework/inplace_list.h @@ -0,0 +1,239 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +namespace redhawk { + + template + struct list_node_traits { + typedef Node node_type; + + static inline void set_prev(node_type& node, node_type* prev) + { + node.*Prev = prev; + } + + static inline node_type* get_prev(const node_type& node) + { + return node.*Prev; + } + + static inline void set_next(node_type& node, node_type* next) + { + node.*Next = next; + } + + static inline node_type* get_next(const node_type& node) + { + return node.*Next; + } + }; + + template + struct list_iterator { + public: + typedef NodeTraits node_traits; + typedef std::bidirectional_iterator_tag iterator_category; + typedef typename NodeTraits::node_type value_type; + typedef ptrdiff_t difference_type; + typedef Node* pointer; + typedef Node& reference; + + list_iterator(pointer node=0) : + _M_node(node) + { + } + + list_iterator(const list_iterator& other) : + _M_node(other.get_node()) + { + } + + list_iterator& operator++() + { + _M_node = node_traits::get_next(*_M_node); + return *this; + } + + list_iterator operator++(int) + { + list_iterator result(*this); + return ++result; + } + + list_iterator& operator--() + { + _M_node = node_traits::get_prev(*_M_node); + return *this; + } + + list_iterator operator--(int) + { + list_iterator result(*this); + return --result; + } + + reference operator*() const + { + return *get_node(); + } + + pointer operator->() const + { + return get_node(); + } + + pointer get_node() const + { + return _M_node; + } + + bool operator==(const list_iterator& other) const + { + return (_M_node == other._M_node); + } + + bool operator!=(const list_iterator& other) const + { + return !(*this == other); + } + + private: + pointer _M_node; + }; + + template > + class inplace_list { + public: + typedef Node node_type; + typedef NodeTraits node_traits; + + typedef list_iterator iterator; + typedef list_iterator const_iterator; + + inplace_list() : + _M_head(0), + _M_tail(0), + _M_size(0) + { + } + + size_t size() const + { + return _M_size; + } + + 
bool empty() const + { + return (_M_head == 0); + } + + iterator begin() + { + return iterator(_M_head); + } + + iterator end() + { + return iterator(); + } + + const_iterator begin() const + { + return const_iterator(_M_head); + } + + const_iterator end() const + { + return const_iterator(); + } + + void insert(iterator pos, node_type& node) + { + node_type* prev = 0; + node_type* next = 0; + if (pos == begin()) { + next = _M_head; + _M_head = &node; + if (!_M_tail) { + _M_tail = _M_head; + } + } else if (pos == end()) { + prev = _M_tail; + _M_tail = &node; + } else { + prev = node_traits::get_prev(*pos); + next = &(*pos); + } + + if (prev) { + node_traits::set_next(*prev, &node); + } + node_traits::set_prev(node, prev); + node_traits::set_next(node, next); + if (next) { + node_traits::set_prev(*next, &node); + } + ++_M_size; + } + + void erase(iterator iter) + { + node_type* prev = node_traits::get_prev(*iter); + node_type* next = node_traits::get_next(*iter); + if (!prev) { + _M_head = next; + } else { + node_traits::set_next(*prev, next); + } + if (next) { + node_traits::set_prev(*next, prev); + } else { + _M_tail = prev; + } + --_M_size; + } + + void push_front(node_type& node) + { + insert(begin(), node); + } + + node_type& front() + { + return *_M_head; + } + + void pop_back() + { + erase(_M_tail); + } + + node_type& back() + { + return *_M_tail; + } + + private: + node_type* _M_head; + node_type* _M_tail; + size_t _M_size; + }; + +} diff --git a/redhawk/src/base/framework/java/Makefile.am b/redhawk/src/base/framework/java/Makefile.am index 2e9a7a97e..83b02b787 100644 --- a/redhawk/src/base/framework/java/Makefile.am +++ b/redhawk/src/base/framework/java/Makefile.am @@ -47,7 +47,7 @@ vpath Cos%.idl $(OMNICOS_IDLDIR) idlj_IDLSRC = CosEventComm.idl CosEventChannelAdmin.idl \ LogInterfaces.idl EventChannelManager.idl \ cf.idl DataType.idl Port.idl PortTypes.idl StandardEvent.idl AggregateDevices.idl \ - ExtendedEvent.idl QueryablePort.idl 
WellKnownProperties.idl sandbox.idl + ExtendedEvent.idl QueryablePort.idl NegotiablePort.idl WellKnownProperties.idl sandbox.idl # CosNaming is included with the JRE, so only build JNI bindings. idljni_IDLSRC = CosNaming.idl $(idlj_IDLSRC) diff --git a/redhawk/src/base/framework/java/jni/Makefile.am b/redhawk/src/base/framework/java/jni/Makefile.am index 609adf5b9..e68a6433a 100644 --- a/redhawk/src/base/framework/java/jni/Makefile.am +++ b/redhawk/src/base/framework/java/jni/Makefile.am @@ -31,6 +31,7 @@ nodist_libossiecfjni_la_SOURCES = ossie/CF/jni_LogInterfaces.cpp \ ossie/CF/jni_Port.cpp \ ossie/CF/jni_AggregateDevices.cpp \ ossie/CF/jni_QueryablePort.cpp \ + ossie/CF/jni_NegotiablePort.cpp \ ossie/CF/jni_StandardEvent.cpp \ ossie/CF/jni_WellKnownProperties.cpp \ ossie/CF/jni_PortTypes.cpp \ @@ -47,6 +48,7 @@ nobase_nodist_include_HEADERS = ossie/CF/jni_LogInterfaces.h \ ossie/CF/jni_cf.h \ ossie/CF/jni_AggregateDevices.h \ ossie/CF/jni_QueryablePort.h \ + ossie/CF/jni_NegotiablePort.h \ ossie/CF/jni_StandardEvent.h \ ossie/CF/jni_WellKnownProperties.h \ ossie/CF/jni_PortTypes.h \ diff --git a/redhawk/src/base/framework/java/ossie/Makefile.am b/redhawk/src/base/framework/java/ossie/Makefile.am index 29446e916..82b0aacfa 100644 --- a/redhawk/src/base/framework/java/ossie/Makefile.am +++ b/redhawk/src/base/framework/java/ossie/Makefile.am @@ -102,13 +102,15 @@ ossie_jar_SOURCE = src/org/ossie/component/AllocCapacity.java \ src/org/ossie/component/QueryableUsesPort.java \ src/org/ossie/component/PropertyChangeRec.java \ src/org/ossie/component/Resource.java \ + src/org/ossie/component/RHLogger.java \ src/org/ossie/component/Component.java \ src/org/ossie/component/ThreadedComponent.java \ src/org/ossie/component/ThreadedDevice.java \ src/org/ossie/component/ThreadedResource.java \ src/org/ossie/component/Service.java \ src/org/ossie/component/UsesPort.java \ - src/org/ossie/component/PortBase.java \ + src/org/ossie/component/PortBase.java \ + 
src/org/ossie/component/StartablePort.java \ src/org/ossie/logging/logging.java \ src/org/ossie/logging/RH_LogEventAppender.java \ src/org/ossie/events/Consumer_i.java \ @@ -143,16 +145,23 @@ ossie_jar_SOURCE = src/org/ossie/component/AllocCapacity.java \ src/org/ossie/properties/PrimitiveArrayUtils.java \ src/org/ossie/properties/Property.java \ src/org/ossie/properties/PropertyListener.java \ + src/org/ossie/properties/UTCTimeProperty.java \ + src/org/ossie/properties/UTCTimeSequenceProperty.java \ src/org/ossie/properties/StringProperty.java \ src/org/ossie/properties/StringSequenceProperty.java \ src/org/ossie/properties/StructDef.java \ src/org/ossie/properties/StructProperty.java \ src/org/ossie/properties/StructSequenceProperty.java \ src/org/ossie/properties/UnsignedUtils.java \ + src/org/ossie/redhawk/PortCallError.java \ src/org/ossie/redhawk/ApplicationContainer.java \ src/org/ossie/redhawk/DomainManagerContainer.java \ src/org/ossie/redhawk/NetworkContainer.java \ - src/org/ossie/redhawk/DeviceManagerContainer.java + src/org/ossie/redhawk/DeviceManagerContainer.java \ + src/org/ossie/redhawk/NetworkContainer.java \ + src/org/ossie/redhawk/time/DefaultComparator.java \ + src/org/ossie/redhawk/time/Comparator.java \ + src/org/ossie/redhawk/time/utils.java # Deprecated generic property classes, replaced by strongly-typed classes but # maintained for source compatibility. 
diff --git a/redhawk/src/base/framework/java/ossie/pom.xml b/redhawk/src/base/framework/java/ossie/pom.xml deleted file mode 100644 index 7c4f58eaf..000000000 --- a/redhawk/src/base/framework/java/ossie/pom.xml +++ /dev/null @@ -1,101 +0,0 @@ - - 4.0.0 - - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../../../../../../pom.xml - - ossie - bundle - - - - log4j - log4j - 1.2.15 - - - com.sun.jmx - jmxri - - - com.sun.jdmk - jmxtools - - - javax.jms - jms - - - - - ${project.groupId} - cf-interfaces - ${project.version} - - - commons-lang - commons-lang - 2.4 - - - - src - - - org.codehaus.gmaven - gmaven-plugin - 1.3 - - - set-main-artifact - package - - execute - - - - project.artifact.setFile(new - File("${project.basedir}/ossie.jar")) - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifacts - package - - attach-artifact - - - - - ${project.build.directory}/${project.artifactId}-${project.version}.jar - beta - jar - - - - - - - - - - diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Device.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Device.java index 389a420de..31d533740 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Device.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Device.java @@ -30,6 +30,7 @@ import java.util.Properties; import org.apache.log4j.Logger; +import org.apache.log4j.LogManager; import org.omg.CORBA.Any; import org.omg.CORBA.ORB; import org.omg.CORBA.Object; @@ -217,13 +218,13 @@ public void connectIDMChannel(final String idm_channel_ior){ EventChannel idm_channel=null; // Get DomainManager incoming event channel and connect the device to it, where applicable try { - logger.debug("connectIDMChannel: idm_channel_ior:" + idm_channel_ior); + this._deviceLog.debug("connectIDMChannel: idm_channel_ior:" + idm_channel_ior); Object idm_channel_obj = orb.string_to_object(idm_channel_ior); idm_channel = 
org.omg.CosEventChannelAdmin.EventChannelHelper.narrow(idm_channel_obj); idm_publisher = new org.ossie.events.Publisher(idm_channel); } catch (Exception e){ - logger.warn("Error connecting to IDM channel."); + this._deviceLog.warn("Error connecting to IDM channel."); } } @@ -234,17 +235,19 @@ public void connectIDMChannel(final String idm_channel_ior){ idm_publisher = evt_mgr.Publisher( Manager.IDM_CHANNEL_SPEC ); } catch( Manager.OperationFailed e) { - logger.warn("Failed to connect to IDM channel."); + this._deviceLog.warn("Failed to connect to IDM channel."); } catch( RegistrationExists e) { - logger.warn("Failed to connect to IDM channel."); + this._deviceLog.warn("Failed to connect to IDM channel."); } catch( RegistrationFailed e) { - logger.warn("Failed to connect to IDM channel."); + this._deviceLog.warn("Failed to connect to IDM channel."); } } } + protected RHLogger _deviceLog; + /** * The setup() function exists to make it easy for start_device to invoke the no-arg constructor. * @@ -268,6 +271,7 @@ protected CF.Device setup(final DeviceManager devMgr, final POA poa) throws InvalidObjectReference, ServantNotActive, WrongPolicy { super.setup(compId, label, softwareProfile, orb, poa); this.label = label; + this._deviceLog = this._baseLog.getChildLogger("Device", "system"); DevicePOATie tie = new DevicePOATie(this, poa); tie._this(orb); @@ -294,7 +298,7 @@ protected CF.Device setup(final DeviceManager devMgr, try { this._ecm = org.ossie.events.Manager.GetManager(this); }catch( Manager.OperationFailed e){ - logger.warn("Unable to resolve EventChannelManager"); + this._deviceLog.warn("Unable to resolve EventChannelManager"); } } @@ -358,6 +362,13 @@ public static void start_device(final Class clazz, final Stri final POA rootpoa = org.ossie.corba.utils.RootPOA(); + if (args.length == 1) { + if (args[0].equals("-i")) { + System.out.println("Interactive mode (-i) no longer supported. 
Please use the sandbox to run Components/Devices/Services outside the scope of a Domain"); + System.exit(-1); + } + } + Map execparams = parseArgs(args); DeviceManager devMgr = null; @@ -445,6 +456,7 @@ public static void start_device(final Class clazz, final Stri Thread shutdownWatcher = new Thread(new Runnable() { public void run() { device_i.waitDisposed(); + LogManager.shutdown(); shutdownORB(orb); } }); @@ -462,9 +474,6 @@ public void run() { } catch (InterruptedException e) { // PASS } - - // Shut down native ORB, if it's running - omnijni.ORB.shutdown(); } @@ -493,7 +502,7 @@ public void initializeProperties(final DataType[] ctorProperties) throws Already * * @throws InvalidCapacity */ - private void validateAllocProps(final DataType[] capacities) throws InvalidCapacity { + protected void validateAllocProps(final DataType[] capacities) throws InvalidCapacity { final ArrayList invalidProperties = new ArrayList(); // throw new InvalidCapacity("Error configuring component", @@ -524,11 +533,11 @@ private void validateAllocProps(final DataType[] capacities) throws InvalidCapac */ public boolean allocateCapacity(DataType[] capacities) throws InvalidCapacity, InvalidState { - logger.debug("allocateCapacity : " + capacities.toString()); + this._deviceLog.debug("allocateCapacity : " + capacities.toString()); // Checks for empty if (capacities.length == 0){ - logger.trace("No capacities to allocate."); + this._deviceLog.trace("No capacities to allocate."); return true; } @@ -542,7 +551,7 @@ public boolean allocateCapacity(DataType[] capacities) throws InvalidCapacity, I } else { invalidState = "SHUTTING_DOWN"; } - logger.debug("Cannot allocate capacity: System is " + invalidState); + this._deviceLog.debug("Cannot allocate capacity: System is " + invalidState); throw new InvalidState(invalidState); } @@ -559,7 +568,7 @@ public boolean allocateCapacity(DataType[] capacities) throws InvalidCapacity, I // Checks to see if the device has a call back function registered if 
(callbacks.containsKey(cap.id) && callbacks.get(cap.id).allocate(cap)){ // If it does, use it - logger.trace("Capacity allocated by user-defined function."); + this._deviceLog.trace("Capacity allocated by user-defined function."); allocations.add(cap); } else { // Otherwise defer to the property's allocator. @@ -568,7 +577,7 @@ public boolean allocateCapacity(DataType[] capacities) throws InvalidCapacity, I if (property.allocate(cap.value)) { allocations.add(cap); } else { - logger.debug("Cannot allocate capacity. Insufficient capacity for property '" + cap.id + "'"); + this._deviceLog.debug("Cannot allocate capacity. Insufficient capacity for property '" + cap.id + "'"); return false; } } catch (final RuntimeException ex) { @@ -602,7 +611,7 @@ public boolean allocateCapacity(DataType[] capacities) throws InvalidCapacity, I public void deallocateCapacity(DataType[] capacities) throws InvalidCapacity, InvalidState { /* Verify that the device is in a valid state */ if (adminState == AdminType.LOCKED || operationState == OperationalType.DISABLED){ - logger.warn("Cannot deallocate capacity. System is either LOCKED or DISABLED."); + this._deviceLog.warn("Cannot deallocate capacity. System is either LOCKED or DISABLED."); throw new InvalidState("Cannot deallocate capacity. System is either LOCKED or DISABLED."); } @@ -615,7 +624,7 @@ public void deallocateCapacity(DataType[] capacities) throws InvalidCapacity, In // Checks to see if the device has a callback function registered if (callbacks.containsKey(cap.id) && callbacks.get(cap.id).deallocate(cap)){ // If it does, use it - logger.trace("Capacity allocated by user-defined function."); + this._deviceLog.trace("Capacity allocated by user-defined function."); } else { // Otherwise defer to the property's deallocator. 
final IProperty property = this.propSet.get(cap.id); @@ -624,7 +633,7 @@ public void deallocateCapacity(DataType[] capacities) throws InvalidCapacity, In for( DataType ov : originalCap ) { if ( ov.id.equals(property.getId()) ) { if ( AnyUtils.compareAnys(property.toAny(),ov.value,"gt") ) { - logger.debug("deallocation exceeds bounds for " + property ); + this._deviceLog.debug("deallocation exceeds bounds for " + property ); overCaps.add(cap); property.allocate(cap.value); break; @@ -633,7 +642,7 @@ public void deallocateCapacity(DataType[] capacities) throws InvalidCapacity, In } } catch (final RuntimeException ex) { - logger.debug("Exception during dealloaction...property: " + property ); + this._deviceLog.debug("Exception during dealloaction...property: " + property ); invalidProps.add(cap); } @@ -843,11 +852,11 @@ protected void setUsageState(UsageType newUsageState){ try { if ( idm_publisher != null ) { idm_publisher.push(AnyUtils.toAny(event, TCKind.tk_objref) ); - logger.debug("Sent device StateChangeEvent - USAGE "); + this._deviceLog.debug("Sent device StateChangeEvent - USAGE "); } } catch (Exception e) { - logger.warn("Error sending event."); + this._deviceLog.warn("Error sending event."); } usageState = newUsageState; @@ -951,11 +960,11 @@ protected void setAdminState(AdminType newAdminState){ try { if ( idm_publisher != null ) { idm_publisher.push(AnyUtils.toAny(event, TCKind.tk_objref) ); - logger.debug("Sent device StateChangeEvent - ADMIN "); + this._deviceLog.debug("Sent device StateChangeEvent - ADMIN "); } } catch (Exception e) { - logger.warn("Error sending event."); + this._deviceLog.warn("Error sending event."); } adminState = newAdminState; @@ -998,11 +1007,11 @@ protected void setOperationState(OperationalType newOperationState){ try { if ( idm_publisher != null ) { idm_publisher.push(AnyUtils.toAny(event, TCKind.tk_objref) ); - logger.debug("Sent device StateChangeEvent - OPERATIONAL "); + this._deviceLog.debug("Sent device StateChangeEvent 
- OPERATIONAL "); } } catch (Exception e) { - logger.warn("Error sending event."); + this._deviceLog.warn("Error sending event."); } operationState = newOperationState; diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Logging.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Logging.java index 7f0a369fd..42c21574e 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Logging.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Logging.java @@ -82,6 +82,8 @@ public interface ConfigurationChangeListener { /** internal log4j logger object */ protected Logger _logger; + public RHLogger _baseLog; + /** log identifier, by default uses root logger or "" **/ protected String logName; @@ -103,6 +105,13 @@ public interface ConfigurationChangeListener { /** holds the url for the logging configuration */ protected String loggingURL; + /** hold initial values to restore logging after reset */ + protected boolean _origLevelSet; + protected String _origLogCfgURL; + protected int _origLogLevel; + protected logging.ResourceCtx _origCtx; + protected int defaultLogLevel = CF.LogLevels.INFO; + /** Constructor that sets the base logging context for a resource. 
The logger that is passed in is established by Domain base classes: Resource, Device, and Service, to maintain backwards @@ -122,12 +131,17 @@ public Logging ( Logger establishedLogger, String logName ) { _logger = Logger.getLogger(logName); } + this._baseLog=null; this.logName=logName; - this.logLevel=CF.LogLevels.INFO; + this.logLevel=this.defaultLogLevel; this.logConfig =""; this.logListener=null; this.loggingCtx = null; this.loggingURL = null; + this._origLevelSet = false; + this._origLogCfgURL = ""; + this._origLogLevel = -1; + this._origCtx = null; this.loggingMacros=logging.GetDefaultMacros(); logging.ResolveHostInfo( this.loggingMacros ); } @@ -288,7 +302,6 @@ public void saveLoggingContext( String logcfg_url, ctx.apply( this.loggingMacros ); this.loggingCtx = ctx; } - // save off configuration that we are given try{ this.loggingURL = logcfg_url; @@ -335,7 +348,13 @@ public void saveLoggingContext( String logcfg_url, // for event channel appenders... needs to occur after // Domain awareness is established logging.SetEventChannelManager( ECM ); - + + if (!_origLevelSet) { + _origLevelSet = true; + _origLogCfgURL = logcfg_url; + _origLogLevel = oldstyle_loglevel; + _origCtx = ctx; + } } public void setEventChannelManager( org.ossie.events.Manager ECM ) { @@ -354,36 +373,32 @@ public void setEventChannelManager( org.ossie.events.Manager ECM ) { * @param logging.Resource a content class from the logging.ResourceCtx tree */ public void setLoggingContext( String logcfg_url, int oldstyle_loglevel, logging.ResourceCtx ctx ) { + // test we have a logging URI + if ( logcfg_url == null || logcfg_url == "" ) { + logging.ConfigureDefault(); + } + else { + // apply any context data + if ( ctx != null ) { + ctx.apply( this.loggingMacros ); + this.loggingCtx = ctx; + } - // test we have a logging URI - if ( logcfg_url == null || logcfg_url == "" ) { - logging.ConfigureDefault(); - } - else { - // apply any context data - if ( ctx != null ) { - ctx.apply( this.loggingMacros 
); - this.loggingCtx = ctx; - } - - // call setLogConfigURL to load configuration and set log4j - if ( logcfg_url != null ) { - setLogConfigURL( logcfg_url ); - } - } + // call setLogConfigURL to load configuration and set log4j + if ( logcfg_url != null ) { + setLogConfigURL( logcfg_url ); + } + } - try { - if ( oldstyle_loglevel > -1 ) { - // set log level for this logger - setLogLevel( logName, logging.ConvertLogLevel(oldstyle_loglevel) ); - } - else { - // grab root logger's level - logLevel = logging.ConvertLog4ToCFLevel( Logger.getRootLogger().getLevel() ); - } - } - catch( Exception e ){ - } + try { + if ( oldstyle_loglevel > -1 ) { + // set log level for this logger + setLogLevel( logName, logging.ConvertLogLevel(oldstyle_loglevel) ); + } else { + // grab root logger's level + logLevel = logging.ConvertLog4ToCFLevel( Logger.getRootLogger().getLevel() ); + } + } catch( Exception e ) {} } ////////////////////////////////////////////////////////////////////////////// @@ -400,12 +415,9 @@ public void setLoggingContext( String logcfg_url, int oldstyle_loglevel, logging * @return int value of a CF::LogLevels enumeration */ public int log_level() { - if ( _logger != null ) { - Level logger_level = _logger.getLevel(); - Level cur_loglevel= logging.ConvertToLog4Level(logLevel); - if ( logger_level != null && logger_level != cur_loglevel ) { - logLevel = logging.ConvertLog4ToCFLevel(logger_level); - } + if ( _baseLog != null ) { + Level logger_level = _baseLog.getLevel(); + logLevel = logging.ConvertLog4ToCFLevel(logger_level); } return logLevel; @@ -420,21 +432,21 @@ public int log_level() { * @param int value of a CF::LogLevels enumeration */ public void log_level( int newLogLevel ) { - if ( this.logListener != null ) { - logLevel = newLogLevel; - this.logListener.logLevelChanged( logName, newLogLevel ); - } - else { - logLevel = newLogLevel; - Level tlevel= logging.ConvertToLog4Level(newLogLevel); - if ( _logger != null ) { - _logger.setLevel(tlevel); - } - else { 
- Logger.getRootLogger().setLevel(tlevel); - } - } - + Level tlevel= logging.ConvertToLog4Level(newLogLevel); + if ( this.logListener != null ) { + logLevel = newLogLevel; + this.logListener.logLevelChanged( logName, newLogLevel ); + } else { + logLevel = newLogLevel; + if ( _logger != null ) { + _logger.setLevel(tlevel); + } else { + Logger.getRootLogger().setLevel(tlevel); + } + } + if ( _baseLog != null ) { + _baseLog.setLevel(tlevel); + } } @@ -447,32 +459,89 @@ public void log_level( int newLogLevel ) { * @param int value of a CF::LogLevels enumeration */ public void setLogLevel( String logger_id, int newLogLevel ) throws UnknownIdentifier { - - if ( this.logListener != null ) { - if ( logger_id == logName ){ - this.logLevel = newLogLevel; - } - - this.logListener.logLevelChanged( logger_id, newLogLevel ); - } - else { - Level tlevel=Level.INFO; - tlevel = logging.ConvertToLog4Level(newLogLevel); - - if ( logger_id != null ){ - Logger logger = Logger.getLogger( logger_id ); - if ( logger != null ) { - logger.setLevel( tlevel ); + if (!haveLoggerHierarchy(logger_id)) + throw new CF.UnknownIdentifier(); + if ( this.logListener != null ) { + if ( logger_id == logName ){ + this.logLevel = newLogLevel; + } + this.logListener.logLevelChanged( logger_id, newLogLevel ); + } else { + Level tlevel=Level.INFO; + tlevel = logging.ConvertToLog4Level(newLogLevel); + + if ( logger_id != null ) { + RHLogger logger = this._baseLog.getLogger( logger_id ); + if ( logger != null ) { + logger.setLevel( tlevel ); if ( logger_id == logName ) { logLevel=newLogLevel; } - } - } - else { - Logger.getRootLogger().setLevel(tlevel); - } + } + } else { + this._baseLog.getRootLogger().setLevel(tlevel); + } + } + } - } + /** + * haveLoggerHierarchy + * + * Determine whether or not the log name is in this component's hierarchy + * + * @returns boolean value + */ + protected boolean haveLoggerHierarchy(String name) + { + return this._baseLog.isLoggerInHierarchy(name); + } + + /** + * 
getLogLevel + * + * Get the logging level for a named logger associated with this resource + * + * @returns int value of a CF::LogLevels enumeration + */ + public int getLogLevel( String logger_id ) throws UnknownIdentifier { + if (!haveLoggerHierarchy(logger_id)) + throw new CF.UnknownIdentifier(); + RHLogger tmp_logger = this._baseLog.getLogger(logger_id); + Level _level = tmp_logger.getLevel(); + return logging.ConvertLog4ToCFLevel(_level); + } + + /** + * getNamedLoggers + * + * Get a list of the named loggers in this resource + * + * @returns array of strings with the logger names + */ + public String[] getNamedLoggers() { + String[] retval = new String[0]; + if (this._baseLog != null) { + retval = this._baseLog.getNamedLoggers(); + } + return retval; + } + + /** + * resetLog + * + * Reset the logger to its initial state + * + */ + public void resetLog() { + if (_origLevelSet) { + String[] loggers = this._baseLog.getNamedLoggers(); + for (String logger: loggers) { + RHLogger _tmplog = this._baseLog.getLogger(logger); + _tmplog.setLevel(null); + } + this.logLevel=this.defaultLogLevel; + this.setLoggingContext(this._origLogCfgURL, this._origLogLevel, this._origCtx); + } } /** @@ -528,28 +597,22 @@ public void setLogConfig( String config_contents ) { * @param String URL of file to load */ public void setLogConfigURL( String config_url ) { - - // - // Get File contents.... - // - try{ - String config_contents=""; - - config_contents = logging.GetConfigFileContents(config_url); - - if ( config_contents.length() > 0 ){ - this.loggingURL = config_url; - // apply contents of file to configuration - this.setLogConfig( config_contents ); - } - else { - _logger.warn( "URL contents could not be resolved, url: " + config_url ); - } - - } - catch( Exception e ){ - _logger.warn( "Exception caught during logging configuration using URL, url: "+ config_url ); - } + // + // Get File contents.... 
+ // + try { + String config_contents=""; + config_contents = logging.GetConfigFileContents(config_url); + if ( config_contents.length() > 0 ) { + this.loggingURL = config_url; + this.logConfig = logging.ExpandMacros(config_contents, loggingMacros ); + } + else { + _logger.warn( "URL contents could not be resolved, url: " + config_url ); + } + } catch( Exception e ) { + _logger.warn( "Exception caught during logging configuration using URL, url: "+ config_url ); + } } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/PortBase.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/PortBase.java index 7bf5d0911..c87c4680c 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/PortBase.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/PortBase.java @@ -20,8 +20,9 @@ package org.ossie.component; -public interface PortBase +public interface PortBase { String getRepid(); String getDirection(); + void setLogger(RHLogger logger); } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/ProcessThread.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/ProcessThread.java index e235ea6eb..0c81a2a22 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/ProcessThread.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/ProcessThread.java @@ -20,17 +20,35 @@ package org.ossie.component; +import org.apache.log4j.Logger; + class ProcessThread implements Runnable { public ProcessThread (ThreadedComponent target) { this.target = target; + // Try to use the most relevant logger for the target + if (target instanceof Device) { + logger = Device.logger; + } else if (target instanceof Resource) { + logger = Resource.logger; + } else { + logger = Logger.getLogger(ProcessThread.class.getName()); + } } public void run () { while (this.isRunning()) { - int state = this.target.process(); + int state = ThreadedComponent.NORMAL; + try { 
+ state = this.target.process(); + } catch (Throwable exc) { + logger.fatal("Unhandled exception in service function: " + exc.getMessage()); + exc.printStackTrace(); + // Terminate the process on unhandled exceptions + System.exit(-1); + } if (state == ThreadedComponent.FINISH) { return; } else if (state == ThreadedComponent.NOOP) { @@ -71,4 +89,5 @@ public void setDelay (float delay) private ThreadedComponent target; private boolean running = true; private long delay = 125; + private Logger logger; } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/PropertyChangeRec.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/PropertyChangeRec.java index 1eda05a1b..915f57c59 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/PropertyChangeRec.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/PropertyChangeRec.java @@ -102,10 +102,16 @@ public EC_PropertyChangeListener( org.omg.CORBA.Object obj ) public int notify( PropertyChangeRec prec, DataType[] props) { String uuid = UUID.randomUUID().toString(); + long tmp_time = System.currentTimeMillis(); + CF.UTCTime _time = new CF.UTCTime(); + _time.tcstatus = 1; + _time.twsec = tmp_time /1000; + _time.tfsec = (tmp_time % 1000)/1000.0; PropertyChangeEvent evt = new PropertyChangeEvent( uuid, prec.regId, prec.rscId, - props); + props, + _time); final Any any = ORB.init().create_any(); PropertyChangeEventHelper.insert( any, evt); @@ -142,10 +148,16 @@ public INF_PropertyChangeListener( org.omg.CORBA.Object obj ) public int notify( PropertyChangeRec prec, DataType[] props) { String uuid = UUID.randomUUID().toString(); + long tmp_time = System.currentTimeMillis(); + CF.UTCTime _time = new CF.UTCTime(); + _time.tcstatus = 1; + _time.twsec = tmp_time /1000; + _time.tfsec = (tmp_time % 1000)/1000.0; PropertyChangeEvent evt = new PropertyChangeEvent( uuid, prec.regId, prec.rscId, - props); + props, + _time); int retval=0; try { diff --git 
a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/QueryableUsesPort.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/QueryableUsesPort.java index 71806f485..c3189e557 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/QueryableUsesPort.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/QueryableUsesPort.java @@ -24,11 +24,16 @@ */ package org.ossie.component; +import java.util.Map; import java.util.HashMap; import org.ossie.component.UsesPort; -//import ExtendedCF.QueryablePortPOA; import ExtendedCF.*; +import org.ossie.redhawk.PortCallError; +import org.ossie.component.RHLogger; +import java.util.List; +import java.util.ArrayList; +import java.util.ListIterator; public abstract class QueryableUsesPort< E > extends QueryablePortPOA { // SUPPRESS CHECKSTYLE Name @@ -44,6 +49,42 @@ public QueryableUsesPort(final String portName) { this.updatingPortsLock = new Object(); } + public RHLogger _portLog = null; + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + + public List getConnectionIds() { + List retval = new ArrayList(); + for (String key : outPorts.keySet()) { + retval.add(key); + } + return retval; + } + + public void __evaluateRequestBasedOnConnections(String __connection_id__, boolean returnValue, boolean inOut, boolean out) throws PortCallError { + if (__connection_id__.isEmpty() && (outPorts.size() > 1)) { + if (out || inOut || returnValue) { + throw new PortCallError("Returned parameters require either a single connection or a populated __connection_id__ to disambiguate the call.", this.getConnectionIds()); + } + } + if (outPorts.isEmpty()) { + if (out || inOut || returnValue) { + throw new PortCallError("No connections available.", this.getConnectionIds()); + } else { + if (!__connection_id__.isEmpty()) { + throw new PortCallError("The requested connection id ("+__connection_id__+") does not exist.", this.getConnectionIds()); + } + } + } + if 
((!__connection_id__.isEmpty()) && (!outPorts.isEmpty())) { + if (!outPorts.containsKey(__connection_id__)) { + throw new PortCallError("The requested connection id ("+__connection_id__+") does not exist.", this.getConnectionIds()); + } + } + } + public boolean isActive() { return this.active; } @@ -93,7 +134,14 @@ public void disconnectPort(final String connectionId) { protected abstract E narrow(org.omg.CORBA.Object connection); public UsesConnection[] connections() { - final UsesConnection[] connList = new UsesConnection[0]; - return connList; + synchronized (this.updatingPortsLock) { + final UsesConnection[] connList = new UsesConnection[this.outPorts.size()]; + int index = 0; + for (Map.Entry entry : this.outPorts.entrySet()) { + org.omg.CORBA.Object obj = (org.omg.CORBA.Object)entry.getValue(); + connList[index++] = new UsesConnection(entry.getKey(), obj); + } + return connList; + } } } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/RHLogger.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/RHLogger.java new file mode 100644 index 000000000..aaf418180 --- /dev/null +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/RHLogger.java @@ -0,0 +1,204 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package org.ossie.component; + +import org.apache.log4j.Logger; +import org.apache.log4j.Level; +import org.apache.log4j.spi.LoggerRepository; +import java.util.Enumeration; +import java.util.ArrayList; +import java.util.List; + +import org.ossie.logging.logging; + +/** + RHLogger + + Class extends the log4j logger with some utility functions + + */ + +public class RHLogger { + + private Logger l4logger; + private String logname; + static public String USER_LOGS = "user"; + static public RHLogger _rootLogger; + + public RHLogger ( String logName ) { + this.l4logger = Logger.getLogger(logName); + this.logname = logName; + this._rootLogger = null; + } + + public Logger getL4Logger() { + return this.l4logger; + } + + static public RHLogger getRootLogger() { + if ( _rootLogger == null ) { + _rootLogger = new RHLogger(""); + } + return _rootLogger; + } + + static public RHLogger getLogger( String name ) { + RHLogger ret; + if ( !name.isEmpty() ) { + ret = new RHLogger( name ); + } else { + ret = RHLogger.getRootLogger(); + } + return ret; + } + + public RHLogger getResourceLogger( String name ) { + return this.getLogger( name ); + } + + public RHLogger getChildLogger( String name ) { + String ns = "user"; + String _ns = ns; + if (logname.contains(".")) { + _ns = ""; + } + String _full_name; + if (!_ns.isEmpty() && !name.contains("."+USER_LOGS+".")) + _full_name = logname+"."+_ns+"."+name; + else + _full_name = logname+"."+name; + return getResourceLogger(_full_name); + } + + public RHLogger getChildLogger( String name, String ns ) { + String _full_name; + String _ns = ns; + if (_ns.equals("user")) { + if (logname.contains(".")) { + _ns = ""; + } + } + if (!_ns.isEmpty() && ((!_ns.equals(USER_LOGS)) || ((_ns.equals(USER_LOGS)) && (!name.contains("."+USER_LOGS+"."))))) + _full_name = logname+"."+_ns+"."+name; + 
else + _full_name = logname+"."+name; + return getResourceLogger(_full_name); + } + + public boolean isLoggerInHierarchy(String search_name) { + LoggerRepository repo = Logger.getRootLogger().getLoggerRepository(); + Enumeration list = repo.getCurrentLoggers(); + for (Enumeration loggerEnumeration = repo.getCurrentLoggers() ; loggerEnumeration.hasMoreElements() ; ) { + Logger logger = (Logger)loggerEnumeration.nextElement(); + String _name = logger.getName(); + if (_name.startsWith(logname)) { + if (_name.length() > logname.length()) { + if (_name.charAt(logname.length()) != '.') { + continue; + } + } + if (!_name.startsWith(search_name)) { + continue; + } + if (_name.length() > search_name.length()) { + if ((!search_name.isEmpty()) && (_name.charAt(search_name.length()) != '.')) { + continue; + } + } + return true; + } + } + return false; + } + + public String[] getNamedLoggers() { + List ret = new ArrayList(); + LoggerRepository repo = Logger.getRootLogger().getLoggerRepository(); + Enumeration list = repo.getCurrentLoggers(); + for (Enumeration loggerEnumeration = repo.getCurrentLoggers() ; loggerEnumeration.hasMoreElements() ; ) { + Logger logger = (Logger)loggerEnumeration.nextElement(); + String _name = logger.getName(); + if (_name.startsWith(logname)) { + if (_name.length() > logname.length()) { + if (_name.charAt(logname.length()) != '.') { + continue; + } + } + ret.add(_name); + } + } + return ret.toArray(new String[ret.size()]); + } + + public void setLevel(Level level) { + this.l4logger.setLevel(level); + } + + public Level getEffectiveLevel() { + return this.l4logger.getEffectiveLevel(); + } + + public Level getLevel() { + Level retval = this.l4logger.getLevel(); + if (retval == null) { + retval = this.getEffectiveLevel(); + } + return retval; + } + + public void trace(Object message) { + this.l4logger.trace(message); + } + public void debug(Object message) { + this.l4logger.debug(message); + } + public void info(Object message) { + 
this.l4logger.info(message); + } + public void warn(Object message) { + this.l4logger.warn(message); + } + public void error(Object message) { + this.l4logger.error(message); + } + public void fatal(Object message) { + this.l4logger.fatal(message); + } + + public void trace(Object message, Throwable t) { + this.l4logger.trace(message, t); + } + public void debug(Object message, Throwable t) { + this.l4logger.debug(message, t); + } + public void info(Object message, Throwable t) { + this.l4logger.info(message, t); + } + public void warn(Object message, Throwable t) { + this.l4logger.warn(message, t); + } + public void error(Object message, Throwable t) { + this.l4logger.error(message, t); + } + public void fatal(Object message, Throwable t) { + this.l4logger.fatal(message, t); + } +} diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Resource.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Resource.java index 491a77761..f8eee8f64 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Resource.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Resource.java @@ -29,6 +29,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Hashtable; +import java.util.List; import java.util.Map; import java.util.Properties; import java.io.FileOutputStream; @@ -257,6 +258,44 @@ protected void addPort(String name, String description, omnijni.Servant servant) addPort(name, servant); } + /** + * Start processing for any ports that support it. + */ + protected void startPorts() { + for (StartablePort port : getStartablePorts()) { + port.startPort(); + } + } + + /** + * Stop processing for any ports that support it. If there are any calls on + * the port that are blocking, this should cause them to return immediately. 
+ */ + protected void stopPorts() { + for (StartablePort port : getStartablePorts()) { + port.stopPort(); + } + } + + /** + * Returns a list of the registered ports that support the StartablePort + * interface. + */ + private List getStartablePorts() { + List startablePorts = new ArrayList(); + for (Servant servant : this.portServants.values()) { + if (servant instanceof StartablePort) { + startablePorts.add((StartablePort) servant); + } + } + for (omnijni.Servant servant : this.nativePorts.values()) { + if (servant instanceof StartablePort) { + startablePorts.add((StartablePort) servant); + } + } + return startablePorts; + } + /** * Default Constructor that automatically sets parameters for the Sun ORB * and the JacORB ORB. @@ -309,7 +348,7 @@ public void constructor() * {@inheritDoc} */ public void initialize() throws InitializeError { - logger.trace("initialize()"); + this._resourceLog.trace("initialize()"); if (!initialized) { this.ports.clear(); for (Map.Entry me : this.portServants.entrySet()) { @@ -322,7 +361,7 @@ public void initialize() throws InitializeError { this.constructor(); } catch (final Throwable exc) { final String message = exc.getMessage(); - logger.error("initialize(): " + message); + this._resourceLog.error("initialize(): " + message); throw new InitializeError(new String[]{message}); } } @@ -333,8 +372,9 @@ public void initialize() throws InitializeError { */ public void start() throws StartError { // While we are starting or stopping don't let anything else occur - logger.trace("start()"); + this._resourceLog.trace("start()"); synchronized (this) { + startPorts(); this._started = true; if (processingThread == null) { processingThread = new Thread(this); @@ -348,21 +388,22 @@ public void start() throws StartError { * {@inheritDoc} */ public void stop() throws StopError { - logger.trace("stop()"); + this._resourceLog.trace("stop()"); synchronized (this) { if (processingThread != null) { + stopPorts(); this._started = false; try { 
processingThread.interrupt(); processingThread.join(1000); if (processingThread.isAlive()) { - logger.error("Error stopping processing thread"); + this._resourceLog.error("Error stopping processing thread"); throw new StopError(CF.ErrorNumberType.CF_NOTSET, "Error stopping processing thread"); } else { processingThread = null; } } catch (InterruptedException e) { - logger.error("Error stopping processing thread", e); + this._resourceLog.error("Error stopping processing thread", e); throw new StopError(CF.ErrorNumberType.CF_NOTSET, "Error stopping processing thread due to: " + e.toString()); } @@ -376,18 +417,18 @@ public void stop() throws StopError { * {@inheritDoc} */ public void runTest(final int testid, final PropertiesHolder testValues) throws UnknownTest, UnknownProperties { - logger.trace("runTest()"); + this._resourceLog.trace("runTest()"); } /* BASE CLASS METHODS */ public void releaseObject() throws ReleaseError { - logger.trace("releaseObject()"); + this._resourceLog.trace("releaseObject()"); try { this.stopPropertyChangeMonitor(); this.stop(); } catch (StopError e1) { - logger.error("Failed to stop during release", e1); + this._resourceLog.error("Failed to stop during release", e1); } // These loops deactivate the port objects so that they can be destroyed without incident @@ -435,7 +476,7 @@ public org.omg.CORBA.Object getPort(final String name) throws UnknownPort { // the Ports_var maps are kept different (they could be made into one) // because it's less confusing this way - logger.trace("getPort(" + name + ")"); + this._portsupplierLog.trace("getPort(" + name + ")"); if (this.nativePorts.containsKey(name)) { return this.nativePorts.get(name)._this_object(getOrb()); } @@ -449,7 +490,7 @@ public org.omg.CORBA.Object getPort(final String name) throws UnknownPort { } public void initializeProperties(final DataType[] ctorProperties) throws AlreadyInitialized, InvalidConfiguration, PartialConfiguration { - logger.trace("initializeProperties() - star "); + 
this._propertysetLog.trace("initializeProperties() - star "); // Disallow multiple calls if (this._propertiesInitialized) { @@ -478,9 +519,9 @@ public void initializeProperties(final DataType[] ctorProperties) throws Already // callback. prop.configureNoCallbacks(dt.value); } - logger.trace("Construct property: " + prop); + this._propertysetLog.trace("Construct property: " + prop); } catch (Throwable t) { - logger.error("Unable to construct property " + dt.id + ": " + t.getMessage()); + this._propertysetLog.error("Unable to construct property " + dt.id + ": " + t.getMessage()); invalidProperties.add(dt); } } @@ -491,7 +532,7 @@ public void initializeProperties(final DataType[] ctorProperties) throws Already throw new PartialConfiguration(invalidProperties.toArray(new DataType[0])); } - logger.trace("initializeProperties() - end"); + this._propertysetLog.trace("initializeProperties() - end"); } public PortInfoType[] getPortSet () { @@ -553,7 +594,7 @@ public PortInfoType[] getPortSet () { * {@inheritDoc} */ public void configure(final DataType[] configProperties) throws InvalidConfiguration, PartialConfiguration { - logger.trace("configure()"); + this._propertysetLog.trace("configure()"); // Ensure there's something to do if (configProperties.length == 0) { @@ -575,7 +616,7 @@ public void configure(final DataType[] configProperties) throws InvalidConfigura // callback. prop.configure(dt.value); if (AnyUtils.compareAnys(value_before, prop.toAny(), "eq")) { - logger.debug("Value has not changed on configure for property " + dt.id + ". Not triggering callback"); + this._propertysetLog.debug("Value has not changed on configure for property " + dt.id + ". Not triggering callback"); } else { // The property value changed. 
// Check to see if this property should issue property change @@ -585,9 +626,9 @@ public void configure(final DataType[] configProperties) throws InvalidConfigura } } - logger.trace("Configured property: " + prop); + this._propertysetLog.trace("Configured property: " + prop); } catch (Throwable t) { - logger.error("Unable to configure property " + dt.id + ": " + t.getMessage()); + this._propertysetLog.error("Unable to configure property " + dt.id + ": " + t.getMessage()); invalidProperties.add(dt); } } @@ -599,16 +640,29 @@ public void configure(final DataType[] configProperties) throws InvalidConfigura } } + String _propertyQueryTimestamp = "QUERY_TIMESTAMP"; + + public CF.UTCTime _makeTime(final short status, final double wsec, final double fsec) { + CF.UTCTime _time = new CF.UTCTime(status, wsec, fsec); + if (status == -1) { + long tmp_time = System.currentTimeMillis(); + _time.tcstatus = 1; + _time.twsec = tmp_time /1000; + _time.tfsec = (tmp_time % 1000)/1000.0; + } + return _time; + } + /** * {@inheritDoc} */ public void query(final PropertiesHolder configProperties) throws UnknownProperties { - logger.trace("query()"); + this._propertysetLog.trace("query()"); // For queries of zero length, return all id/value pairs in propertySet if (configProperties.value.length == 0) { final ArrayList props = new ArrayList(this.propSet.size()); for (final IProperty prop : this.propSet.values()) { - logger.trace("Querying property: " + prop); + this._propertysetLog.trace("Querying property: " + prop); if (prop.isQueryable()) { if (prop instanceof StructProperty) { Any structAny = ORB.init().create_any(); @@ -626,6 +680,10 @@ public void query(final PropertiesHolder configProperties) throws UnknownPropert } } } + + /*final Any anytime = ORB.init().create_any(); + CF.UTCTimeHelper.insert(anytime, this._makeTime((short)-1,0,0)); + props.add(new DataType(_propertyQueryTimestamp, anytime));*/ configProperties.value = props.toArray(new DataType[props.size()]); return; @@ -638,6 
+696,12 @@ public void query(final PropertiesHolder configProperties) throws UnknownPropert // Return values for valid queries in the same order as requested for (final DataType dt : configProperties.value) { // Look up the property and ensure it is queryable + if (dt.id.equals(_propertyQueryTimestamp)) { + final Any anytime = ORB.init().create_any(); + CF.UTCTimeHelper.insert(anytime, this._makeTime((short)-1,0,0)); + validProperties.add(new DataType(_propertyQueryTimestamp, anytime)); + continue; + } final IProperty prop = this.propSet.get(dt.id); if ((prop != null) && prop.isQueryable()) { if (prop instanceof StructProperty) { @@ -673,30 +737,30 @@ public void query(final PropertiesHolder configProperties) throws UnknownPropert public String registerPropertyListener(final org.omg.CORBA.Object listener, String[] prop_ids, float interval) throws CF.UnknownProperties, CF.InvalidObjectReference { - logger.trace("registerPropertyListener - start "); + this._propertysetLog.trace("registerPropertyListener - start "); ArrayList pids = new ArrayList(); final ArrayList invalidProperties = new ArrayList(); String reg_id; synchronized(this) { // For queries of zero length, return all id/value pairs in propertySet if (prop_ids.length == 0) { - logger.trace("registering all properties..."); + this._propertysetLog.trace("registering all properties..."); for (final IProperty prop : this.propSet.values()) { if (prop.isQueryable()) { - logger.debug("..... property:" + prop.getId()); + this._propertysetLog.debug("..... 
property:" + prop.getId()); pids.add(prop.getId()); } } } else { // For queries of length > 0, return all requested pairs in propertySet - logger.trace("registering fixed property: N:" + prop_ids.length); + this._propertysetLog.trace("registering fixed property: N:" + prop_ids.length); // Return values for valid queries in the same order as requested for (final String id : prop_ids) { // Look up the property and ensure it is queryable final IProperty prop = this.propSet.get(id); if ((prop != null) && prop.isQueryable()) { - logger.debug("..... property:" + id); + this._propertysetLog.debug("..... property:" + id); pids.add(prop.getId()); } else { DataType dt = new DataType(id, null); @@ -709,7 +773,7 @@ public String registerPropertyListener(final org.omg.CORBA.Object listener, Stri throw new UnknownProperties(invalidProperties.toArray(new DataType[invalidProperties.size()])); } - logger.trace("PropertyChangeListener: register N properties: " + pids.size()); + this._propertysetLog.trace("PropertyChangeListener: register N properties: " + pids.size()); PropertyChangeRec prec = new PropertyChangeRec( listener, compId, interval, @@ -717,25 +781,25 @@ public String registerPropertyListener(final org.omg.CORBA.Object listener, Stri this.propSet ); // check if our listener is valid if ( prec.pcl == null ) { - logger.error("PropertyChangeListener: caller provided invalid listener interface "); + this._propertysetLog.error("PropertyChangeListener: caller provided invalid listener interface "); prec = null; throw new CF.InvalidObjectReference(); } // Add the registry record to our map - logger.debug("registerPropertyListener REGISTERING id-s/regid: " + pids.size() + "/" + prec.regId ); + this._propertysetLog.debug("registerPropertyListener REGISTERING id-s/regid: " + pids.size() + "/" + prec.regId ); this._propChangeRegistry.put( prec.regId, prec ); reg_id = prec.regId; // start monitoring thread if not started if ( this._propChangeThread == null ) { - 
logger.debug("registerPropertyListener - First registration ... starting monitoring thread "); + this._propertysetLog.debug("registerPropertyListener - First registration ... starting monitoring thread "); this._propChangeProcessor.start(); this._propChangeThread = new Thread( this._propChangeProcessor ); this._propChangeThread.start(); } } - logger.trace("registerPropertyListener - end"); + this._propertysetLog.trace("registerPropertyListener - end"); return reg_id; } @@ -743,11 +807,11 @@ public String registerPropertyListener(final org.omg.CORBA.Object listener, Stri public void unregisterPropertyListener(final String reg_id) throws CF.InvalidIdentifier { - logger.trace("unregisterPropertyListener - start "); + this._propertysetLog.trace("unregisterPropertyListener - start "); synchronized(this) { PropertyChangeRec prec = this._propChangeRegistry.get(reg_id); if ( prec != null ) { - logger.trace("unregisterPropertyListener - Remove registration " +reg_id ); + this._propertysetLog.trace("unregisterPropertyListener - Remove registration " +reg_id ); for ( String id : prec.props.keySet() ) { final IProperty prop = this.propSet.get(id); if ( prop != null ) { @@ -760,9 +824,9 @@ public void unregisterPropertyListener(final String reg_id) } this._propChangeRegistry.remove(reg_id); - logger.debug("unregisterPropertyListener - UNREGISTER REG-ID:" + reg_id ); + this._propertysetLog.debug("unregisterPropertyListener - UNREGISTER REG-ID:" + reg_id ); if ( this._propChangeRegistry.size() == 0 ) { - logger.debug("unregisterPropertyListener - No more registrants... stopping thread "); + this._propertysetLog.debug("unregisterPropertyListener - No more registrants... stopping thread "); this.stopPropertyChangeMonitor(); this._propChangeThread=null; } @@ -798,7 +862,7 @@ public void stopPropertyChangeMonitor() { */ protected int _propertyChangeServiceFunction() { - logger.trace("_propertyChangeServiceFunction ... 
start "); + this._propertysetLog.trace("_propertyChangeServiceFunction ... start "); long delay=0; synchronized(this) { // for each registration record @@ -808,7 +872,7 @@ protected int _propertyChangeServiceFunction() { long now = System.currentTimeMillis(); long dur = prec.expiration - now; - logger.debug( "Resource::_propertyChangeServiceFunction ... reg_id/interval :" + prec.regId + "/" + prec.reportInterval + " expiration=" + dur ); + this._propertysetLog.debug( "Resource::_propertyChangeServiceFunction ... reg_id/interval :" + prec.regId + "/" + prec.reportInterval + " expiration=" + dur ); // determine if time has expired if ( dur <= 0 ) { @@ -819,7 +883,7 @@ protected int _propertyChangeServiceFunction() { String pid = iter.getKey(); PCL_Callback pcb = iter.getValue(); - logger.trace(" Check Property/Set " + pid + "/" + pcb.isSet() ); + this._propertysetLog.trace(" Check Property/Set " + pid + "/" + pcb.isSet() ); // check if property changed if ( pcb.isSet() == true ) { final IProperty prop = this.propSet.get(pid); @@ -834,10 +898,10 @@ protected int _propertyChangeServiceFunction() { // publish changes to listener if ( rpt_props.size() > 0 && prec.pcl != null ) { - logger.debug(" Notify PropertyChangeListener ...size/reg :" + rpt_props.size() + "/" + prec.regId ); + this._propertysetLog.debug(" Notify PropertyChangeListener ...size/reg :" + rpt_props.size() + "/" + prec.regId ); DataType [] rprops = rpt_props.toArray( new DataType[ rpt_props.size() ] ); if ( prec.pcl.notify( prec, rprops ) != 0 ) { - logger.error("Publishing changes to PropertyChangeListener FAILED, reg_id:" + prec.regId ); + this._propertysetLog.error("Publishing changes to PropertyChangeListener FAILED, reg_id:" + prec.regId ); // probably should mark for removal... if last one then stop the thread... 
} } @@ -850,19 +914,19 @@ protected int _propertyChangeServiceFunction() { // find smallest increment of time to wait if ( delay == 0 ) { delay=dur; } - logger.trace( " Test for delay/duration (millisecs) ... :" + delay + "/" + dur ); + this._propertysetLog.trace( " Test for delay/duration (millisecs) ... :" + delay + "/" + dur ); if ( dur > 0 ) { delay = Math.min( delay, dur ); } - logger.trace( " Minimum delay (millisecs) ... :" + delay ); + this._propertysetLog.trace( " Minimum delay (millisecs) ... :" + delay ); } // end synchronized if ( delay > 0 ) { - logger.debug( "....Set monitoring thread delay (millisecs) ... :" + delay ); + this._propertysetLog.debug( "....Set monitoring thread delay (millisecs) ... :" + delay ); _propChangeProcessor.setThreadDelay( delay/1000.0f ); } } - logger.trace("_propertyChangeServiceFunction ... end "); + this._propertysetLog.trace("_propertyChangeServiceFunction ... end "); return ThreadedComponent.NOOP; } @@ -924,14 +988,14 @@ public void setAdditionalParameters(final String ApplicationRegistrarIOR, String if (appReg != null) { if (appReg.domMgr()!=null) { this._domMgr = new DomainManagerContainer(appReg.domMgr()); - this.logger.info("setAdditionalParameters domain: " + this._domMgr.getRef().name() ); + this._resourceLog.info("setAdditionalParameters domain: " + this._domMgr.getRef().name() ); } if ( this._domMgr != null ) { try { this._ecm = org.ossie.events.Manager.GetManager(this); }catch( org.ossie.events.Manager.OperationFailed e){ - logger.warn("Unable to resolve EventChannelManager"); + this._resourceLog.warn("Unable to resolve EventChannelManager"); } } } @@ -992,6 +1056,10 @@ protected void initializeProperties(Map execparams) { } } + protected RHLogger _resourceLog; + protected RHLogger _propertysetLog; + protected RHLogger _portsupplierLog; + /** * Protected initialize intended only to be used by start_component. 
* @@ -1006,6 +1074,12 @@ protected CF.Resource setup(final String compId, final String compName, final OR this.compName = compName; this.orb = orb; this.poa = poa; + this._baseLog = RHLogger.getLogger(this.compName); + this._resourceLog = this._baseLog.getChildLogger("Resource", "system"); + this._propertysetLog = this._baseLog.getChildLogger("PropertySet", "system"); + this._portsupplierLog = this._baseLog.getChildLogger("PortSupplier", "system"); + + this.setupPortLoggers(); ResourcePOATie tie = new ResourcePOATie(this, poa); tie._this(orb); @@ -1030,6 +1104,9 @@ protected CF.Resource setup(final String compId, final String compName, final St return result; } + protected void setupPortLoggers() { + } + /** * Parse the set of SCA execparam arguments into a Map * @@ -1100,6 +1177,12 @@ public static void start_component(final Class clazz, final public static void start_component(final Class clazz, final String[] args, final Properties props) throws InstantiationException, IllegalAccessException, InvalidObjectReference, NotFound, CannotProceed, org.omg.CosNaming.NamingContextPackage.InvalidName, ServantNotActive, WrongPolicy { + if (args.length == 1) { + if (args[0].equals("-i")) { + System.out.println("Interactive mode (-i) no longer supported. 
Please use the sandbox to run Components/Devices/Services outside the scope of a Domain"); + System.exit(-1); + } + } // initialize library's ORB reference final org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init( args, props ); @@ -1170,15 +1253,24 @@ public static void start_component(final Class clazz, final } if ((nameContext == null) || (nameBinding == null)) { - if ((!Arrays.toString(args).contains("-i")) && (!Arrays.toString(args).contains("--interactive"))) { - System.out.println("usage: "+clazz+" [options] [execparams]\n"); - System.out.println("The set of execparams is defined in the .prf for the component"); - System.out.println("They are provided as arguments pairs ID VALUE, for example:"); - System.out.println(" "+clazz+" INT_PARAM 5 STR_PARAM ABCDED\n"); - System.out.println("Options:"); - System.out.println(" -i,--interactive Run the component in interactive test mode\n"); - System.exit(-1); + System.out.println("usage: "+clazz+" [options] [execparams]\n"); + System.out.println("The set of execparams is defined in the .prf for the component"); + System.out.println("They are provided as arguments pairs ID VALUE, for example:"); + System.out.println(" "+clazz+" INT_PARAM 5 STR_PARAM ABCDED\n"); + System.exit(-1); + } + + try { + if (applicationRegistrar != null) { + String name = applicationRegistrar.app().name(); + String tpath = dom_path; + String[] t = dom_path.split("/"); + if ( dom_path.charAt(0) == '/') { + tpath=dom_path.substring(1, dom_path.length()-1); + } + dom_path = t[0]+"/"+name; } + } catch (Exception e) { } logging.ComponentCtx ctx = new logging.ComponentCtx( nameBinding, identifier, dom_path ); @@ -1223,6 +1315,7 @@ public static void start_component(final Class clazz, final Thread shutdownWatcher = new Thread(new Runnable() { public void run() { resource_i.waitDisposed(); + LogManager.shutdown(); // On slow VMs, shutting down the ORB immediately after // releaseObject() sometimes leads to a CORBA.COMM_FAILURE // exception being thrown 
to the caller, presumably because @@ -1252,9 +1345,6 @@ public void run() { // long time (~300ms). orb.destroy(); - // Shut down native ORB, if it's running - omnijni.ORB.shutdown(); - logger.debug("Goodbye!"); } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Service.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Service.java index e9e50036e..969497a57 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Service.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/Service.java @@ -204,6 +204,13 @@ public static void start_service(final Class clazz, final Str final POA rootpoa = org.ossie.corba.utils.RootPOA(); + if (args.length == 1) { + if (args[0].equals("-i")) { + System.out.println("Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain"); + System.exit(-1); + } + } + Map execparams = parseArgs(args); DeviceManager deviceMgr = null; diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/component/StartablePort.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/StartablePort.java new file mode 100644 index 000000000..5c5096e45 --- /dev/null +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/component/StartablePort.java @@ -0,0 +1,26 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package org.ossie.component; + +public interface StartablePort extends PortBase { + public void startPort(); + public void stopPort(); +} diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/Manager.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/Manager.java index 0d92a3ad9..857905e3c 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/Manager.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/Manager.java @@ -122,6 +122,11 @@ public void unregister( ) { _ecm=null; } + public int disconnect() { + unregister(); + return super.disconnect(); + } + private Manager _ecm; private EventChannelReg _creg; diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageAdapter.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageAdapter.java index 663dc067e..8dccbb94e 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageAdapter.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageAdapter.java @@ -27,7 +27,7 @@ * dispatch to a listener */ class MessageAdapter implements EventCallback { - public MessageAdapter(Class structDef, MessageListener listener) + public MessageAdapter(Class structDef, MessageListener listener) { this.structDef = structDef; this.listener = listener; @@ -52,5 +52,5 @@ public void messageReceived(String messageId, E messageData) } private Class structDef; - private MessageListener listener; + private MessageListener listener; } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageConsumerPort.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageConsumerPort.java index d714262c4..468000194 100644 --- 
a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageConsumerPort.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageConsumerPort.java @@ -20,6 +20,7 @@ package org.ossie.events; +import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -36,6 +37,7 @@ import org.apache.log4j.Logger; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; import org.ossie.properties.StructDef; /** @@ -45,8 +47,11 @@ @SuppressWarnings("deprecation") public class MessageConsumerPort extends ExtendedEvent.MessageEventPOA implements PortBase { public Object updatingPortsLock = new Object(); + public RHLogger _portLog; protected HashMap callbacks = new HashMap(); + private List> genericCallbacks = new ArrayList<>(); + protected boolean active = false; protected String name; @@ -131,6 +136,12 @@ public MessageConsumerPort(String portName, Logger logger) this.logger = logger; } + public MessageConsumerPort(String portName, RHLogger logger) + { + this(portName); + this._portLog = logger; + } + public boolean isActive() { return this.active; } @@ -148,15 +159,20 @@ public void setLogger(Logger logger) this.logger = logger; } - public String getRepid() - { - return "IDL:ExtendedEvent/MessageEvent:1.0"; - } + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + + public String getRepid() + { + return ExtendedEvent.MessageEventHelper.id(); + } - public String getDirection() - { - return "Bidir"; - } + public String getDirection() + { + return CF.PortSet.DIRECTION_BIDIR; + } /** * Register a listener for a message. 
@@ -168,13 +184,20 @@ public String getDirection() * * @since 2.0 */ - public void registerMessage(String messageId, Class clazz, MessageListener listener) + public void registerMessage(String messageId, Class clazz, MessageListener listener) { synchronized (this.callbacks) { this.callbacks.put(messageId, new MessageAdapter(clazz, listener)); } } + public void registerMessage(MessageListener listener) + { + synchronized (this.callbacks) { + this.genericCallbacks.add(listener); + } + } + public void connectPort(final org.omg.CORBA.Object connection, final String connectionId) throws CF.PortPackage.InvalidPort, CF.PortPackage.OccupiedPort { // Give a specific exception message for nil if (connection == null) { @@ -361,7 +384,7 @@ private void dispatchMessage(final String messageId, final org.omg.CORBA.Any mes message_callback = this.callbacks.get(messageId); // If no callback is registered, present a meaningful warning - if (message_callback == null) { + if ((message_callback == null) && this.genericCallbacks.isEmpty()) { String warning = "No callbacks registered for messages with id '"+messageId+"'."; if (this.callbacks.isEmpty()) { warning += " No callbacks are registered"; @@ -378,7 +401,13 @@ private void dispatchMessage(final String messageId, final org.omg.CORBA.Any mes } } - message_callback.message(messageId, messageData); + if (message_callback != null) { + message_callback.message(messageId, messageData); + } + + for (MessageListener listener : this.genericCallbacks) { + listener.messageReceived(messageId, messageData); + } } /** diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageListener.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageListener.java index 68582e37c..fa2811c14 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageListener.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageListener.java @@ -20,8 +20,6 @@ package org.ossie.events; -import 
org.ossie.properties.StructDef; - -public interface MessageListener { +public interface MessageListener { public void messageReceived(String messageId, E messageData); } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageSupplierPort.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageSupplierPort.java index f48255547..74f891e58 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageSupplierPort.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/MessageSupplierPort.java @@ -39,11 +39,12 @@ import org.apache.log4j.Logger; -import org.ossie.component.UsesPort; +import org.ossie.component.QueryableUsesPort; import org.ossie.component.PortBase; +import org.ossie.component.RHLogger; import org.ossie.properties.StructDef; -public class MessageSupplierPort extends UsesPort implements EventChannelOperations, PortBase { +public class MessageSupplierPort extends QueryableUsesPort implements EventChannelOperations, PortBase { /** * Internal class to adapt PushSupplier CORBA interface for disconnection @@ -63,6 +64,7 @@ public void disconnect_push_supplier() final String connectionId; } + public RHLogger _portLog; private Logger logger = Logger.getLogger(MessageSupplierPort.class.getName()); private Map consumers = new HashMap(); private Map suppliers = new HashMap(); @@ -71,18 +73,29 @@ public MessageSupplierPort(String portName) { super(portName); } - + public MessageSupplierPort(String portName, Logger logger) { this(portName); this.logger = logger; } + public MessageSupplierPort(String portName, RHLogger logger) + { + this(portName); + this._portLog = logger; + } + public void setLogger(Logger logger) { this.logger = logger; } - + + public void setLogger(RHLogger logger) + { + this._portLog = logger; + } + public void connectPort(final org.omg.CORBA.Object connection, final String connectionId) throws CF.PortPackage.InvalidPort, CF.PortPackage.OccupiedPort { // Give a specific 
exception message for nil if (connection == null) { @@ -126,6 +139,8 @@ public void connectPort(final org.omg.CORBA.Object connection, final String conn } synchronized (this.updatingPortsLock) { + // Store the original channel in the parent object's container + this.outPorts.put(connectionId, channel); this.suppliers.put(connectionId, supplier); this.consumers.put(connectionId, proxy_consumer); this.active = true; @@ -136,15 +151,58 @@ public void disconnectPort(final String connectionId) { this.removeConnection(connectionId, true); } - - public void push(final Any data) + + /** + * Sends pre-serialized messages to all connections. + * + * @param data messages serialized to a CORBA Any + */ + public void push(final Any data) + { + try { + this._push(data,""); + } catch( final org.omg.CORBA.MARSHAL ex ) { + this.logger.warn("Could not deliver the message. Maximum message size exceeded"); + } + } + + /** + * Sends pre-serialized messages to a specific connection. + * + * @param data messages serialized to a CORBA Any + * @param connectionId target connection + * @throws IllegalArgumentException If connectionId does not match any + * connection + * @since 2.2 + */ + public void push(final Any data, String connectionId) + { + try { + this._push(data, connectionId); + } catch( final org.omg.CORBA.MARSHAL ex ) { + this.logger.warn("Could not deliver the message. 
Maximum message size exceeded"); + } + } + + public void _push(final Any data, String connectionId) { synchronized(this.updatingPortsLock) { if (!this.active) { return; } - for (PushConsumer consumer : this.consumers.values()) { + if (!connectionId.isEmpty()) { + if (!_hasConnection(connectionId)) { + throw new IllegalArgumentException("invalid connection: '"+connectionId+"'"); + } + } + + for (Map.Entry entry : consumers.entrySet()) { + if (!_isConnectionSelected(entry.getKey(), connectionId)) { + continue; + } + + PushConsumer consumer = entry.getValue(); try { consumer.push(data); } catch (final org.omg.CosEventComm.Disconnected ex) { @@ -157,21 +215,57 @@ public void push(final Any data) removeConnection( consumer ); continue; } catch( final org.omg.CORBA.MARSHAL ex ) { - this.logger.warn("Could not deliver the message. Maximum message size exceeded"); - continue; + throw ex; } catch (final Exception e) { continue; } } } } - + + /** + * Sends a single message to all connections. + * + * @param message message structure to send + */ public void sendMessage(final StructDef message) { - this.sendMessages(Arrays.asList(message)); + this.sendMessage(message, ""); + } + + /** + * Sends a single message to a specific connection. + * + * @param message message structure to send + * @param connectionId target connection + * @throws IllegalArgumentException If connectionId does not match any + * connection + */ + public void sendMessage(StructDef message, String connectionId) + { + this.sendMessages(Arrays.asList(message), connectionId); } - public void sendMessages(final Collection messages) { + /** + * Sends a collection of messages to all connections. + * + * @param messages collection of message structures to send + */ + public void sendMessages(final Collection messages) + { + this.sendMessages(messages, ""); + } + + /** + * Sends a collection of messages to a specific connection. 
+ * + * @param messages collection of message structures to send + * @param connectionId target connection + * @throws IllegalArgumentException If connectionId does not match any + * connection + */ + public void sendMessages(Collection messages, String connectionId) + { final CF.DataType[] properties = new CF.DataType[messages.size()]; int index = 0; for (StructDef message : messages) { @@ -179,7 +273,22 @@ public void sendMessages(final Collection messages) { } final Any any = ORB.init().create_any(); CF.PropertiesHelper.insert(any, properties); - this.push(any); + try { + this._push(any, connectionId); + } catch( final org.omg.CORBA.MARSHAL ex ) { + // Sending them all at once failed, try send the messages individually + if (messages.size() == 1) { + this.logger.warn("Could not deliver the message. Maximum message size exceeded"); + } else { + this.logger.warn("Could not deliver the message. Maximum message size exceeded, trying individually."); + + for (CF.DataType prop : properties) { + final Any a = ORB.init().create_any(); + CF.PropertiesHelper.insert(a, new CF.DataType[]{prop}); + this.push(a, connectionId); + } + } + } } protected EventChannelOperations narrow(org.omg.CORBA.Object connection) @@ -210,6 +319,10 @@ private void removeConnection(String connectionId, boolean notifyConsumer) SupplierAdapter supplier = null; PushConsumer proxy_consumer = null; synchronized (this.updatingPortsLock) { + // Remove the original EventChannel object from the parent class' + // container + this.outPorts.remove(connectionId); + proxy_consumer = this.consumers.get(connectionId); if (proxy_consumer == null) { return; @@ -264,6 +377,19 @@ private void deactivateChild(org.omg.PortableServer.Servant servant) } } + private boolean _hasConnection(String connectionId) + { + return consumers.containsKey(connectionId); + } + + private boolean _isConnectionSelected(String connectionId, String targetId) + { + if (targetId.isEmpty()) { + return true; + } + return 
connectionId.equals(targetId); + } + @Deprecated public org.omg.CosEventChannelAdmin.ConsumerAdmin for_consumers() { @@ -281,13 +407,13 @@ public org.omg.CosEventChannelAdmin.SupplierAdmin for_suppliers() return null; } - public String getRepid() - { - return "IDL:ExtendedEvent/MessageEvent:1.0"; - } + public String getRepid() + { + return ExtendedEvent.MessageEventHelper.id(); + } - public String getDirection() - { - return "Uses"; - } + public String getDirection() + { + return CF.PortSet.DIRECTION_USES; + } } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/Subscriber.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/Subscriber.java index a2b7f302a..a61c4a483 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/events/Subscriber.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/events/Subscriber.java @@ -181,6 +181,25 @@ public int getData( Any ret ) { } + public Any getData() { + + Any retval=null; + try{ + + // check if callback method is enable.. 
it so then return + if ( dataArrivedCB != null ) return retval; + + // check if data is available + if ( events.size() < 1 ) return retval; + + return events.remove(); + } + catch( Throwable e) { + } + + return retval; + } + public void setDataArrivedListener( DataArrivedListener newListener ) { dataArrivedCB = newListener; } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/logging/logging.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/logging/logging.java index 8bee82e37..3f4fefaac 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/logging/logging.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/logging/logging.java @@ -33,6 +33,8 @@ import java.lang.Exception; import java.util.Properties; import java.io.FileOutputStream; +import java.io.FileInputStream; +import java.io.FileNotFoundException; import java.io.FileReader; import java.io.FileWriter; import java.io.BufferedReader; @@ -161,7 +163,7 @@ public ServiceCtx( String name, device_mgr = seg[n]; } } - + }; public void apply( MacroTable tbl ){ @@ -188,6 +190,29 @@ public DeviceCtx( String name, device_mgr = seg[n]; } } + + try { + String pid = GetPid(); + BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream("/proc/" + pid + "/stat"))); + String stat = reader.readLine(); + int openParen = stat.indexOf("("); + int closeParen = stat.lastIndexOf(")"); + String[] statElements = stat.substring(closeParen + 2).split(" "); + String script_pid = statElements[1]; + + reader = new BufferedReader(new InputStreamReader(new FileInputStream("/proc/" + pid + "/stat"))); + stat = reader.readLine(); + openParen = stat.indexOf("("); + closeParen = stat.lastIndexOf(")"); + statElements = stat.substring(closeParen + 2).split(" "); + String ppid = statElements[1]; + InetAddress addr = InetAddress.getLocalHost(); + String hname = addr.getHostName(); + + device_mgr_id = domain_name + ":" + hname + ":" + device_mgr + "_" + ppid; + } catch 
(FileNotFoundException ex) { + } catch (IOException ex) { + } }; @@ -319,7 +344,8 @@ public static void SetResourceInfo( MacroTable tbl, ResourceCtx ctx ) { public static void SetComponentInfo( MacroTable tbl, ComponentCtx ctx ) { SetResourceInfo( tbl, ctx ); tbl.put("@@@WAVEFORM.NAME@@@", ctx.waveform.replaceAll(":", "-" ) ); - tbl.put("@@@WAVEFORM.ID@@@", ctx.waveform_id.replaceAll(":", "-" ) ); + tbl.put("@@@WAVEFORM.INSTANCE@@@", ctx.waveform_id.replaceAll(":", "-" ) ); + tbl.put("@@@WAVEFORM.ID@@@", ctx.waveform_id.replaceAll(":", "-" ) ); tbl.put("@@@COMPONENT.NAME@@@", ctx.name.replaceAll(":", "-" ) ); tbl.put("@@@COMPONENT.INSTANCE@@@", ctx.instance_id.replaceAll(":", "-" ) ); tbl.put("@@@COMPONENT.PID@@@", GetPid()); @@ -388,6 +414,7 @@ public static Level ConvertToLog4Level( int newlevel ) { if ( newlevel == CF.LogLevels.DEBUG ) return Level.DEBUG; if ( newlevel == CF.LogLevels.TRACE ) return Level.TRACE; if ( newlevel == CF.LogLevels.ALL ) return Level.ALL; + if ( newlevel == -1 ) return null; return Level.INFO; }; @@ -442,7 +469,7 @@ public static String GetDefaultConfig() { "# Direct log messages to STDOUT\n" + "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + - "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"; + "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{3}:%L - %m%n\n"; return cfg; }; diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/AnyUtils.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/AnyUtils.java index 00c6deacd..1630c50b3 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/AnyUtils.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/AnyUtils.java @@ -29,7 +29,7 @@ import java.math.BigInteger; import java.util.Arrays; import java.util.List; - import org.apache.log4j.Logger; +import 
org.apache.log4j.Logger; import org.apache.commons.lang.ArrayUtils; import org.omg.CORBA.Any; @@ -67,6 +67,7 @@ import CF.complexULongHelper; import CF.complexLongLongHelper; import CF.complexULongLongHelper; +import CF.UTCTimeHelper; import CF.complexFloatSeqHelper; import CF.complexDoubleSeqHelper; @@ -79,6 +80,7 @@ import CF.complexULongSeqHelper; import CF.complexLongLongSeqHelper; import CF.complexULongLongSeqHelper; +import CF.UTCTimeSequenceHelper; public final class AnyUtils { @@ -105,6 +107,12 @@ public static Object convertString(final String stringValue, final String type) if ("true".equalsIgnoreCase(stringValue) || "false".equalsIgnoreCase(stringValue)) { return Boolean.parseBoolean(stringValue); } + else if ("0".equalsIgnoreCase(stringValue) ) { + return Boolean.FALSE; + } + else if ("1".equalsIgnoreCase(stringValue) ) { + return Boolean.TRUE; + } throw new IllegalArgumentException(stringValue + " is not a valid boolean value"); } else if (type.equals("char")) { if (stringValue.length() == 1) { @@ -334,6 +342,8 @@ public static Object convertAny(final Any theAny, final TypeCode typeCode) { return complexLongLongHelper.extract(theAny); } else if (typeCode.name().equals("complexULongLong")) { return complexULongLongHelper.extract(theAny); + } else if (typeCode.name().equals("UTCTime")) { + return UTCTimeHelper.extract(theAny); } case TCKind._tk_longdouble: case TCKind._tk_array: @@ -439,6 +449,8 @@ private static Object[] extractSequence(final Any theAny, return complexLongSeqHelper.extract(theAny); } else if (complexULongLongHelper.type().equivalent(contentType)) { return complexULongLongSeqHelper.extract(theAny); + } else if (UTCTimeHelper.type().equivalent(contentType)) { + return UTCTimeSequenceHelper.extract(theAny); } else { throw new IllegalArgumentException("Unsupported struct content type: " + contentType); } @@ -1125,7 +1137,7 @@ public static boolean compareAnys(final Any a, final Any b, final String action) } return false; } - + // Here is 
the test to determine if we have a struct sequence if(tmpA.getClass() == Any[].class) { Any[] anysA = (Any[])tmpA; @@ -1258,6 +1270,12 @@ public static boolean compareAnys(final Any a, final Any b, final String action) CF.complexULongLongHelper.extract(b), action, a.type()); + } else if (a.type().equivalent(UTCTimeHelper.type())) { + result = AnyUtils.performAction( + CF.UTCTimeHelper.extract(a), + CF.UTCTimeHelper.extract(b), + action, + a.type()); } else { result = false; } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/BooleanProperty.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/BooleanProperty.java index 4058bd533..b60279df9 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/BooleanProperty.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/BooleanProperty.java @@ -28,7 +28,11 @@ public BooleanProperty(String id, String name, Boolean value, Mode mode, protected Boolean extract(org.omg.CORBA.Any any) { try { - return (Boolean)AnyUtils.convertAny(any); + Object t = AnyUtils.convertAny(any); + if ( t instanceof String ) { + return parseString((String)t); + } + return (Boolean)t; } catch (ClassCastException ex) { throw new IllegalArgumentException("Incorrect any type recevied"); } @@ -39,6 +43,6 @@ protected void insert(org.omg.CORBA.Any any, Boolean value) { } protected Boolean parseString(String str) { - return Boolean.valueOf(str); + return (Boolean)AnyUtils.convertString(str,"boolean"); } } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/StructSequenceProperty.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/StructSequenceProperty.java index a474f1883..8ac8b3ddc 100644 --- a/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/StructSequenceProperty.java +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/StructSequenceProperty.java @@ -43,6 +43,7 @@ public StructSequenceProperty(String 
id, String name, Class structClass, List this(id, name, structClass, value, Mode.get(mode), Kind.get(kinds)); } + @SafeVarargs public static List asList(E... array) { return new ArrayList(Arrays.asList(array)); } diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/UTCTimeProperty.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/UTCTimeProperty.java new file mode 100644 index 000000000..170cc9799 --- /dev/null +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/UTCTimeProperty.java @@ -0,0 +1,64 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package org.ossie.properties; + +import org.omg.CORBA.Any; +import CF.UTCTime; +import CF.UTCTimeHelper; +import org.ossie.redhawk.time.utils; + +public class UTCTimeProperty extends AbstractSimpleProperty { + public UTCTimeProperty(String id, String name, UTCTime value, Mode mode, + Action action, Kind[] kinds) { + super(id, name, "utctime", value, mode, action, kinds); + } + + public UTCTimeProperty(String id, String name, UTCTime value, Mode mode, + Action action, Kind[] kinds, boolean optional) { + super(id, name, "utctime", value, mode, action, kinds, optional); + } + + public UTCTimeProperty(String id, String name, String value, Mode mode, + Action action, Kind[] kinds) { + super(id, name, "utctime", utils.convert(value), mode, action, kinds); + } + + public UTCTimeProperty(String id, String name, String value, Mode mode, + Action action, Kind[] kinds, boolean optional) { + super(id, name, "utctime", utils.convert(value), mode, action, kinds, optional); + } + + protected UTCTime extract(Any any) { + try { + return (UTCTime)AnyUtils.convertAny(any); + } catch (ClassCastException ex) { + throw new IllegalArgumentException("Incorrect any type recevied"); + } + } + + protected void insert(Any any, UTCTime value) { + UTCTimeHelper.insert(any, value); + } + + protected UTCTime parseString(String time) { + return utils.convert(time); + } +} diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/UTCTimeSequenceProperty.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/UTCTimeSequenceProperty.java new file mode 100644 index 000000000..2f55c49a4 --- /dev/null +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/properties/UTCTimeSequenceProperty.java @@ -0,0 +1,72 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +package org.ossie.properties; + +import java.util.Arrays; +import java.util.ArrayList; +import java.util.List; + +import org.omg.CORBA.Any; +import CF.UTCTime; +import CF.UTCTimeSequenceHelper; +import org.ossie.redhawk.time.utils; + +public class UTCTimeSequenceProperty extends AbstractSequenceProperty { + public UTCTimeSequenceProperty(String id, String name, List value, Mode mode, + Action action, Kind[] kinds) { + super(id, name, "utctime", value, mode, action, kinds); + } + + public UTCTimeSequenceProperty(String id, String name, List value, Mode mode, + Action action, Kind[] kinds, boolean optional) { + super(id, name, "utctime", value, mode, action, kinds, optional); + } + + public static List asList() { + return new ArrayList(); + } + + public static List asList(CF.UTCTime... array) { + return new ArrayList(Arrays.asList(array)); + } + + public static List asList(String... 
array) { + List list = new ArrayList(0); + for (String item : array) { + list.add(utils.convert(item)); + } + return list; + } + + protected List extract(Any any) { + CF.UTCTime[] array = CF.UTCTimeSequenceHelper.extract(any); + List list = new ArrayList(array.length); + for (CF.UTCTime item : array) { + list.add(item); + } + return list; + } + + protected void insert(Any any, List value) { + CF.UTCTime[] array = value.toArray(new CF.UTCTime[value.size()]); + CF.UTCTimeSequenceHelper.insert(any, array); + } +} diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/PortCallError.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/PortCallError.java new file mode 100644 index 000000000..38adb5a16 --- /dev/null +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/PortCallError.java @@ -0,0 +1,43 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package org.ossie.redhawk; +import java.util.List; +import java.util.ListIterator; + +public class PortCallError extends Exception { + public PortCallError(String msg, List connectionids) { + super(createPortCallErrorMessage(msg, connectionids)); + } + + private static String createPortCallErrorMessage(String msg, List connectionids) { + String _msg = msg; + if (connectionids.size() > 0) { + _msg += "Connections available: "; + for (ListIterator iter = connectionids.listIterator(); iter.hasNext(); ) { + _msg += iter.next(); + if (iter.hasNext()) { + _msg += ", "; + } + } + } + return _msg; + } +} diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/Comparator.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/Comparator.java new file mode 100644 index 000000000..70c1fbf98 --- /dev/null +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/Comparator.java @@ -0,0 +1,33 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package org.ossie.redhawk.time; + +import java.lang.System; +import CF.UTCTime; + +/** + * @generated + */ +public interface Comparator { + + public boolean compare(final UTCTime T1, final UTCTime T2); +} + diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/DefaultComparator.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/DefaultComparator.java new file mode 100644 index 000000000..38abfe0b6 --- /dev/null +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/DefaultComparator.java @@ -0,0 +1,41 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package org.ossie.redhawk.time; + +import java.lang.System; +import CF.UTCTime; + +/** + * @generated + */ +public class DefaultComparator implements org.ossie.redhawk.time.Comparator { + + public boolean compare(final UTCTime T1, final UTCTime T2) { + if (T1.tcstatus != T2.tcstatus) + return false; + if (T1.tfsec != T2.tfsec) + return false; + if (T1.twsec != T2.twsec) + return false; + return true; + } + +} diff --git a/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/utils.java b/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/utils.java new file mode 100644 index 000000000..3242befac --- /dev/null +++ b/redhawk/src/base/framework/java/ossie/src/org/ossie/redhawk/time/utils.java @@ -0,0 +1,178 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK bulkioInterfaces. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +package org.ossie.redhawk.time; + +import java.lang.System; +import java.util.Calendar; +import java.util.TimeZone; + +import CF.UTCTime; + +public class utils { + + public static UTCTime create( double wholesecs, double fractionalsecs) { + + double wsec = wholesecs; + double fsec = fractionalsecs; + if ( wsec < 0.0 || fsec < 0.0 ) { + long tmp_time = System.currentTimeMillis(); + wsec = tmp_time /1000; + fsec = (tmp_time % 1000)/1000.0; + } + UTCTime tstamp = new UTCTime(); + tstamp.tcstatus = 1; + tstamp.twsec = wsec; + tstamp.tfsec = fsec; + return tstamp; + } + + + public static UTCTime now() { + return create(-1.0,-1.0); + } + + public static UTCTime notSet() { + UTCTime tstamp = create(0.0,0.0); + tstamp.tcstatus = 0; + return tstamp; + } + + /** + * Normalizes a UTCTime, such that the whole portion contains an integral number of seconds, + * and the fractional portion is in the range [0.0, 1.0). + */ + public static void normalize(UTCTime time) { + // Get fractional adjustment from whole seconds + double fadj = time.twsec % 1.0; + time.twsec -= fadj; + + // Adjust fractional seconds and get whole seconds adjustment + time.tfsec += fadj; + double wadj = Math.floor(time.tfsec); + time.twsec += wadj; + time.tfsec -= wadj; + } + + /** + * Returns a new copy of a UTCTime. + */ + public static UTCTime copy(UTCTime time) { + return new UTCTime(time.tcstatus, time.twsec, time.tfsec); + } + + public static int compare(UTCTime time1, UTCTime time2) { + if (time1.twsec == time2.twsec) { + return Double.compare(time1.tfsec, time2.tfsec); + } + return Double.compare(time1.twsec, time2.twsec); + } + + /** + * Returns the result of adding an offset to a UTCTime. + */ + public static UTCTime add(UTCTime time, double seconds) { + return utils.increment(utils.copy(time), seconds); + } + + /** + * Adds an offset to a UTCTime. 
+ */ + public static UTCTime increment(UTCTime time, double seconds) { + // Separate the fractional and whole portions of the offset + double fractional = seconds % 1.0; + double whole = seconds - fractional; + time.tfsec += fractional; + time.twsec += (seconds - fractional); + utils.normalize(time); + return time; + } + + /** + * Returns the result of substracting an offset from a UTCTime. + */ + public static UTCTime subtract(UTCTime time, double seconds) { + return utils.add(time, -seconds); + } + + /** + * Subtracts an offset from a UTCTime. + */ + public static UTCTime decrement(UTCTime time, double seconds) { + return utils.increment(time, -seconds); + } + + /** + * Returns the difference, in seconds, between two UTCTime values (i.e., lhs - rhs). + */ + public static double difference(UTCTime lhs, UTCTime rhs) { + return (lhs.twsec - rhs.twsec) + (lhs.tfsec - rhs.tfsec); + } + + /** + * String format to produce YYYY:MM:DD::HH:MM:SS.SSSSSS output for UTCTime. + */ + private static final String TIME_FORMAT = "%1$tY:%1$tm:%1$td::%1$tH:%1$tM:%1$tS.%2$06d"; + + /** + * Formats a UTCTime as a human-readable string following the format: + * YYYY:MM:DD::HH:MM:SS.SSSSSS + */ + public static String toString(UTCTime time) { + // Use Calendar to hold the integral seconds, but since it is limited + // to millisecond precision, exclude the fractional seconds. It must be + // created with the UTC time zone, otherwise the formatter will return + // local time. + Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + calendar.setTimeInMillis((long)(time.twsec * 1000.0)); + + // Append the fractional seconds down to microsecond precision. 
+ int usec = (int) Math.round(time.tfsec * 1000000.0); + + return String.format(utils.TIME_FORMAT, calendar, usec); + } + + /** + * Converts a human-readable string following of the format: + * YYYY:MM:DD::HH:MM:SS.SSSSSS or 'now' + * to UTCTime + */ + public static UTCTime convert(String time) { + if (time.equals("now")) { + return now(); + } + String[] token = time.split(":"); + if (token.length != 7) + return new CF.UTCTime(); + int year = Integer.parseInt(token[0]); + int month = Integer.parseInt(token[1])-1; + int day = Integer.parseInt(token[2]); + int hours = Integer.parseInt(token[4]); + int minutes = Integer.parseInt(token[5]); + double full_seconds = Double.parseDouble(token[6]); + int seconds = (int)full_seconds; + Calendar _calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + + _calendar.set(year, month, day, hours, minutes, seconds); + double wsec = _calendar.getTimeInMillis()/1000; + double fsec = full_seconds - seconds; + return new CF.UTCTime((short)1, wsec, fsec); + } +} diff --git a/redhawk/src/base/framework/java/pom.xml b/redhawk/src/base/framework/java/pom.xml deleted file mode 100644 index ff4a6aaf9..000000000 --- a/redhawk/src/base/framework/java/pom.xml +++ /dev/null @@ -1,97 +0,0 @@ - - 4.0.0 - - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../../../../../pom.xml - - cf-interfaces - bundle - - ../../../idl/ossie/ - - - - redhawk.coreframework - omnijni - ${project.version} - - - - - - ${idl.dir} - - - - cf/src - - - org.codehaus.gmaven - gmaven-plugin - 1.3 - - - set-main-artifact - package - - execute - - - - project.artifact.setFile(new - File("${project.basedir}/CFInterfaces.jar")) - - - - - - - - org.codehaus.mojo - build-helper-maven-plugin - 1.7 - - - attach-artifacts - package - - attach-artifact - - - - - ${project.build.directory}/${project.artifactId}-${project.version}.jar - beta - jar - - - - - - - - - maven-assembly-plugin - 2.2-beta-5 - - - attach-idlzip - package - - single - - - - assembly.xml - - - - - - - - 
- diff --git a/redhawk/src/base/framework/logging/RH_LogEventAppender.cpp b/redhawk/src/base/framework/logging/RH_LogEventAppender.cpp index 154a7b42a..fd52a8898 100644 --- a/redhawk/src/base/framework/logging/RH_LogEventAppender.cpp +++ b/redhawk/src/base/framework/logging/RH_LogEventAppender.cpp @@ -113,9 +113,7 @@ channelName("LOG_CHANNEL"), _nameContext(""), _reconnect_retries(10), _reconnect_delay(10), - _cleanup_event_channel(0) -{ - + _cleanup_event_channel(0) { } diff --git a/redhawk/src/base/framework/logging/RH_LogEventAppender.h b/redhawk/src/base/framework/logging/RH_LogEventAppender.h index 095c57fb9..68126617c 100644 --- a/redhawk/src/base/framework/logging/RH_LogEventAppender.h +++ b/redhawk/src/base/framework/logging/RH_LogEventAppender.h @@ -69,7 +69,7 @@ class RH_LogEventAppender : public AppenderSkeleton std::vector< std::string > ArgList; - // + //saveLoggingContext // perform connect operation to establish a corba context // int connect_(); diff --git a/redhawk/src/base/framework/logging/RH_SyncRollingAppender.cpp b/redhawk/src/base/framework/logging/RH_SyncRollingAppender.cpp new file mode 100644 index 000000000..67268b2f2 --- /dev/null +++ b/redhawk/src/base/framework/logging/RH_SyncRollingAppender.cpp @@ -0,0 +1,395 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifdef HAVE_LOG4CXX +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "RH_SyncRollingAppender.h" + +typedef ipc::scoped_lock< _IPC_Mutex > _IPC_ScopedMLock; +typedef ipc::scoped_lock< ipc::file_lock > _IPC_ScopedFLock; + +using namespace log4cxx; +using namespace log4cxx::helpers; + +#define _LL_DEBUG( msg ) \ + { std::ostringstream __os; __os << msg; LogLog::debug(__os.str()); __os.str(""); } + +#define _LL_WARN( msg ) \ + { std::ostringstream __os; __os << msg; LogLog::warn(__os.str()); __os.str(""); } + +#define _LL_ERROR( msg ) \ + { std::ostringstream __os; __os << msg; LogLog::error(__os.str()); __os.str(""); } + +#define _LLS_DEBUG( os, msg ) \ + os << msg; LogLog::debug(os.str()); os.str(""); + + +namespace log4cxx { + + // + // Allows for same naming reference to LogEvent Appender class + // + class ClassRH_SyncRollingAppender : public Class + { + public: + ClassRH_SyncRollingAppender() : helpers::Class() {} + virtual LogString getName() const { + return LOG4CXX_STR("org.ossie.logging.RH_SyncRollingAppender"); + } + virtual ObjectPtr newInstance() const { + return new RH_SyncRollingAppender(); + } + }; + +}; + +// Register factory class with log4cxx for the appender +IMPLEMENT_LOG4CXX_OBJECT_WITH_CUSTOM_CLASS(RH_SyncRollingAppender, ClassRH_SyncRollingAppender) + +RH_SyncRollingAppender::RH_SyncRollingAppender(): +rolling::RollingFileAppenderSkeleton(), + wait_on_lock(50), + retries(0), + max_file_size(10*1024*1024), + max_bkup_index(1), + roll_count(0), + cleanup(false), + created(false), + sync_ctx(NULL) +{ + +} + + +RH_SyncRollingAppender::~RH_SyncRollingAppender() { + + int pid=getpid(); + _LL_DEBUG( "RH_SyncRollingAppender::DTOR START " << pid ); + + try { + 
_LL_DEBUG( "UNLOCK FILE LOCK " << pid ); + flock.unlock(); + } + catch(...){} + + if ( sync_ctx ) { + _LL_DEBUG( "RH_SyncRollingAppender::DTOR unlock shared memory <" << sync_ctx->fname << "> " << pid << "\n" ); + try { + sync_ctx->mutex.unlock(); + } + catch(...){} + + if ( cleanup ) { + _LL_DEBUG( "RH_SyncRollingAppender::DTPR clean up sharedg memory for: <" << sync_ctx->fname << "> " << pid << "\n" ); + // clean up shared memory opbject + std::string fname=_clean_fname(sync_ctx->fname); + ipc::shared_memory_object::remove(fname.c_str()); + } + else { + _LL_WARN( "RH_SyncRollingAppender: LEAVING SHARED MEMORY KEY <" << sync_ctx->fname << "> " << pid << "\n" ); + } + } + + _LL_DEBUG( "RH_SyncRollingAppender::DTOR END " << pid ); +} + +std::string RH_SyncRollingAppender::_clean_fname( const std::string &fname ) { + return boost::replace_all_copy( fname, "/", "-" ); +} + +int RH_SyncRollingAppender::_get_mem( const std::string &fname) { + + int retval=0; + created = false; + std::string clean_fname =_clean_fname(fname); + + // use the file name as the key for the shared memory segment... + try { + ipc::shared_memory_object shm_obj + (ipc::create_only, //only create + clean_fname.c_str(), //name + ipc::read_write //read-write mode + ); + + shm_obj.truncate(sizeof(sync_log_file)); + shm.swap(shm_obj); + _LL_DEBUG( "RH_SyncRollingAppender::get_mem Creating Named memory space <" << fname << ">" ); + created = true; + } + catch(...) 
{ + } + + if ( !created ) { + _LL_DEBUG( "RH_SyncRollingAppender::get_mem Attach to Existing <" << fname << ">" ); + ipc::shared_memory_object shm_obj + (ipc::open_only, //only create + clean_fname.c_str(), //name + ipc::read_write //read-write mode + ); + + shm.swap(shm_obj); + } + + //Map the whole shared memory in this process + ipc::mapped_region tregion(shm, ipc::read_write); + + void *addr = tregion.get_address(); + sync_ctx = new(addr) sync_log_file; + + if ( created ) { + _IPC_ScopedMLock lock(sync_ctx->mutex); + strcpy( sync_ctx->fname, fname.c_str() ); + sync_ctx->n_msgs = 0; + sync_ctx->max_size = 10*1024*1024; + sync_ctx->max_index = 10; + sync_ctx->roll_count = 0; + } + + _LL_DEBUG( "RH_SyncRollingAppender::get_mem key: <" << sync_ctx->fname << ">" ); + _LL_DEBUG( " n_msgs : " << sync_ctx->n_msgs ); + _LL_DEBUG( " max_size : " << sync_ctx->max_size ); + _LL_DEBUG( " max_index : " << sync_ctx->max_index ); + _LL_DEBUG( " roll_count : " << sync_ctx->roll_count ); + _LL_DEBUG( " fname : " << sync_ctx->fname << std::endl ); + + region.swap(tregion); + + return retval; +} + + +void RH_SyncRollingAppender::resync_rollover(log4cxx::helpers::Pool &p){ + + try { + _LL_DEBUG( "RH_SyncRollingAppender::resync_rollover... RESYNC START "); + synchronized sync(mutex); + setImmediateFlush(true); + closeWriter(); + helpers::OutputStreamPtr os(new helpers::FileOutputStream(getFile(), getAppend() )); + helpers::WriterPtr newWriter(createWriter(os)); + setFile(getFile()); + setWriter(newWriter); + if (getAppend()) { + fileLength = File().setPath(getFile()).length(p); + } else { + fileLength = 0; + } + _LL_DEBUG( "RH_SyncRollingAppender::resync_rollover... 
RESYNC COMPLETED "); + } catch (std::exception& ex) { + LogLog::warn(LOG4CXX_STR("Exception during resync-rollover")); + } +} + +void RH_SyncRollingAppender::subAppend(const spi::LoggingEventPtr& event, log4cxx::helpers::Pool& p){ + + int cretries = retries; + do { + _LL_DEBUG( "RH_SyncRollingAppender::subAppend Waiting to lock mutex."); + boost::posix_time::ptime abs_time = boost::posix_time::microsec_clock::universal_time()+boost::posix_time::millisec(wait_on_lock); + try { + _IPC_ScopedFLock lock(flock,abs_time); + + if ( lock ) { + + /// we are behind... need to reopen the base file... + if ( roll_count < sync_ctx->roll_count ) { + resync_rollover(p); + roll_count = sync_ctx->roll_count; + } + else { + + // get the current file status to test the trigger with + { + synchronized sync(mutex); + fileLength = File().setPath(getFile()).length(p); + } + + _LL_DEBUG( "RH_SyncRollingAppender::subAppend roll_count: " << roll_count << " length:" << fileLength); + // The rollover check must precede actual writing. This is the + // only correct behavior for time driven triggers. + if ( + triggeringPolicy->isTriggeringEvent(this, event, getFile(), getFileLength())) { + // + // wrap rollover request in try block since + // rollover may fail in case read access to directory + // is not provided. However appender should still be in good + // condition and the append should still happen. 
+ try { + _LL_DEBUG( "Rolling......for: " << getFile() << " PRE:" << sync_ctx->roll_count); + rollover(p); + sync_ctx->roll_count++; + roll_count = sync_ctx->roll_count; + _LL_DEBUG( "Rolling......for: " << getFile() << " POST:" << sync_ctx->roll_count); + } catch (std::exception& ex) { + LogLog::warn(LOG4CXX_STR("Exception during rollover attempt.")); + } + } + } + + FileAppender::subAppend(event, p); + } + else if ( cretries ) { + _LL_DEBUG( "RH_SyncRollingAppender::subAppend --- UNABLE TO LOCK...RETRY " << cretries); + } + } + catch(...){ + _LL_DEBUG( "RH_SyncRollingAppender::subAppend : exception during subAPPEND " << cretries); + } + } + while ( cretries && --cretries ); +} + +void RH_SyncRollingAppender::activateOptions(Pool& p) { + + if ( !sync_ctx) { + std::string fname; + log4cxx::helpers::Transcoder::encode( fileName, fname ); + _LL_DEBUG( "ACTIVATE OPTIONS FOR: " << fname ); + _get_mem( fname ); + if ( !sync_ctx ) { + throw MissingResourceException(LOG4CXX_STR("No Shared Memory Access")); + } + + if ( !created ) { + _LL_DEBUG( "IGNORING LOG4 OPTIONS.. 
USING OPTIONS FROM MEMORY KEY: " << fname << " max_size:" << sync_ctx->max_size << " max index:" << sync_ctx->max_index << "\n"); + setMaximumFileSize(sync_ctx->max_size); + setMaxBackupIndex(sync_ctx->max_index); + } + } + + log4cxx::rolling::SizeBasedTriggeringPolicyPtr trigger( + new log4cxx::rolling::SizeBasedTriggeringPolicy()); + trigger->setMaxFileSize(max_file_size); + trigger->activateOptions(pool); + setTriggeringPolicy(trigger); + + log4cxx::rolling::FixedWindowRollingPolicyPtr rolling( + new log4cxx::rolling::FixedWindowRollingPolicy()); + rolling->setMinIndex(1); + rolling->setMaxIndex(max_bkup_index); + rolling->setFileNamePattern(getFile() + LOG4CXX_STR(".%i")); + rolling->activateOptions(pool); + setRollingPolicy(rolling); + + if ( sync_ctx ) { + + // if we created then apply settings back to shared memory object + if ( created ) { + _IPC_ScopedMLock lock(sync_ctx->mutex); + sync_ctx->max_index = rolling->getMaxIndex(); + sync_ctx->max_size = trigger->getMaxFileSize(); + } + + _LL_DEBUG( "RH_SyncRollingAppender::memory access KEY:" << sync_ctx->fname ); + _LL_DEBUG( " created : " << created ); + _LL_DEBUG( " n_msgs : " << sync_ctx->n_msgs ); + _LL_DEBUG( " max_size : " << sync_ctx->max_size ); + _LL_DEBUG( " max_index : " << sync_ctx->max_index ); + _LL_DEBUG( " roll_count : " << sync_ctx->roll_count ); + _LL_DEBUG( " fname : " << sync_ctx->fname << std::endl ); + } + + rolling::RollingFileAppenderSkeleton::activateOptions(p); + + // enforce no buffered IO + setImmediateFlush(false); + + std::string fname; + log4cxx::helpers::Transcoder::encode( fileName, fname ); + flock = ipc::file_lock(fname.c_str()); + +} + +int RH_SyncRollingAppender::getMaxBackupIndex() const { + return max_bkup_index; +} + +size_t RH_SyncRollingAppender::getMaximumFileSize() const { + return max_file_size; +} + +void RH_SyncRollingAppender::setMaxBackupIndex(int maxBackups) { + max_bkup_index = maxBackups; +} + +void RH_SyncRollingAppender::setMaximumFileSize(size_t 
maxFileSize1) { + max_file_size = maxFileSize1; +} + +void RH_SyncRollingAppender::setMaxFileSize(const LogString& value) { + long maxFileSize=100; + max_file_size = OptionConverter::toFileSize(value, maxFileSize + 1); +} + +void RH_SyncRollingAppender::setOption(const LogString& option, const LogString& value) { + + RollingFileAppenderSkeleton::setOption( option, value ); + + + if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("CLEANUP"), LOG4CXX_STR("cleanup"))) { + synchronized sync(mutex); + cleanup=false; + cleanup = OptionConverter::toBoolean(value,false); + _LL_DEBUG( " RH_SyncRollingLogAppender: option: cleanup shared memory : " << cleanup ); + } + + if (StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("WAITONLOCK"), LOG4CXX_STR("waitonlock")) ) { + synchronized sync(mutex); + wait_on_lock = StringHelper::toInt(value); + _LL_DEBUG( " RH_SyncRollingAppender: option: wait_on_lock : " << wait_on_lock ); + } + + if (StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("RETRIES"), LOG4CXX_STR("retries")) ) { + synchronized sync(mutex); + retries = StringHelper::toInt(value); + _LL_DEBUG( " RH_SyncRollingAppender: option: retries : " << retries ); + } + + if (StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("MAXFILESIZE"), LOG4CXX_STR("maxfilesize")) + || StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("MAXIMUMFILESIZE"), LOG4CXX_STR("maximumfilesize"))) { + synchronized sync(mutex); + setMaxFileSize(value); + } + + if (StringHelper::equalsIgnoreCase(option,LOG4CXX_STR("MAXBACKUPINDEX"), LOG4CXX_STR("maxbackupindex")) + || StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("MAXIMUMBACKUPINDEX"), LOG4CXX_STR("maximumbackupindex"))) { + synchronized sync(mutex); + setMaxBackupIndex(StringHelper::toInt(value)); + } + +} +#endif // HAVE_LOG4CXX diff --git a/redhawk/src/base/framework/logging/RH_SyncRollingAppender.h b/redhawk/src/base/framework/logging/RH_SyncRollingAppender.h new file mode 100644 index 000000000..3f2cf9d42 --- /dev/null +++ 
b/redhawk/src/base/framework/logging/RH_SyncRollingAppender.h @@ -0,0 +1,117 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifdef HAVE_LOG4CXX + +#ifndef RH_SyncRollingAppender_H +#define RH_SyncRollingAppender_H +#include +#include + +#include +#include +#include +#include + +namespace ipc=boost::interprocess; + +typedef ipc::interprocess_mutex _IPC_Mutex; + + +namespace log4cxx +{ + + class RH_SyncRollingAppender : public rolling::RollingFileAppenderSkeleton { + + // shared memory object for synchronous file appender + struct sync_log_file { + uint64_t n_msgs; + uint64_t max_size; + uint64_t max_index; + uint64_t roll_count; + char fname[PATH_MAX]; + _IPC_Mutex mutex; + }; + + + public: + + DECLARE_LOG4CXX_OBJECT_WITH_CUSTOM_CLASS(RH_SyncRollingAppender, ClassRH_SyncRollingAppender ) + + BEGIN_LOG4CXX_CAST_MAP() + LOG4CXX_CAST_ENTRY(RH_SyncRollingAppender) + LOG4CXX_CAST_ENTRY_CHAIN(rolling::RollingFileAppenderSkeleton) + END_LOG4CXX_CAST_MAP() + + + RH_SyncRollingAppender(); + + RH_SyncRollingAppender( const LayoutPtr &layout, + const LogString &filename ); + + virtual ~RH_SyncRollingAppender(); + + // + // Called by log4cxx internals to process options + // + void 
setOption(const LogString& option, const LogString& value); + + void activateOptions( log4cxx::helpers::Pool& p); + + int getMaxBackupIndex() const; + size_t getMaximumFileSize() const; + void setMaxBackupIndex( int maxBackupIndex ); + void setMaxFileSize( const LogString & value ); + void setMaximumFileSize(size_t value ); + + protected: + + void subAppend (const spi::LoggingEventPtr &event, log4cxx::helpers::Pool &p); + void resync_rollover (log4cxx::helpers::Pool &p); + + private: + + int _get_mem( const std::string &fname ); + + // remove special characters from filename for shared memory context + std::string _clean_fname( const std::string &fname ); + + // prevent copy and assignment statements + RH_SyncRollingAppender(const RH_SyncRollingAppender&); + RH_SyncRollingAppender& operator=(const RH_SyncRollingAppender&); + + int wait_on_lock; + int retries; + size_t max_file_size; + int max_bkup_index; + uint64_t roll_count; + bool cleanup; + bool created; + sync_log_file *sync_ctx; + ipc::file_lock flock; + ipc::shared_memory_object shm; + ipc::mapped_region region; + + }; + +}; // end of namespace +#endif + + +#endif // HAVE_LOG4CXX diff --git a/redhawk/src/base/framework/logging/loghelpers.cpp b/redhawk/src/base/framework/logging/loghelpers.cpp index c63861bb0..639181645 100644 --- a/redhawk/src/base/framework/logging/loghelpers.cpp +++ b/redhawk/src/base/framework/logging/loghelpers.cpp @@ -73,11 +73,6 @@ namespace ossie { namespace logging { - // resolve logging config uri from command line - std::string ResolveLocalUri( const std::string &logfile_uri, - const std::string &rootPath, - std::string &validated_uri ); - static const std::string DomPrefix("dom"); static const std::string DevMgrPrefix("devmgr"); static const std::string DevicePrefix("dev"); @@ -349,6 +344,11 @@ namespace ossie { if ( t.size() > 0 ) { device_mgr = t[n]; } + + pid_t ppid = getppid(); + std::ostringstream os; + os << domain_name << ":" << boost::asio::ip::host_name() << ":" << 
device_mgr << "_" << ppid; + device_mgr_id = os.str(); } void DeviceCtx::apply( MacroTable &tbl ) { @@ -368,6 +368,10 @@ namespace ossie { domain_name = t[n]; n++; } + pid_t pid = getpid(); + std::ostringstream os; + os << domain_name << ":" << boost::asio::ip::host_name() << ":" << nodeName << "_" << pid; + instance_id = os.str(); } void DeviceMgrCtx::apply( MacroTable &tbl ) { @@ -425,6 +429,7 @@ namespace ossie { SetResourceInfo( tbl, ctx ); tbl["@@@WAVEFORM.NAME@@@"] = boost::replace_all_copy( ctx.waveform, ":", "-" ); tbl["@@@WAVEFORM.ID@@@"] = boost::replace_all_copy( ctx.waveform_id, ":", "-" ); + tbl["@@@WAVEFORM.INSTANCE@@@"] = boost::replace_all_copy( ctx.waveform_id, ":", "-" ); tbl["@@@COMPONENT.NAME@@@"] = boost::replace_all_copy( ctx.name, ":", "-" ); tbl["@@@COMPONENT.INSTANCE@@@"] = boost::replace_all_copy( ctx.instance_id, ":", "-" ); pid_t pid = getpid(); @@ -749,7 +754,7 @@ namespace ossie { "# Direct log messages to STDOUT\n" "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" -"log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n"; +"log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{3}:%L - %m%n\n"; return cfg; } @@ -1110,19 +1115,19 @@ namespace ossie { } if ( ptype == XML_PROPS ) { - STDOUT_DEBUG("Setting Logging Configuration, XML Properties: " << fname ); - log4cxx::xml::DOMConfigurator::configure(fname); - } - else { - STDOUT_DEBUG( "Setting Logging Configuration, Java Properties: " ); - log4cxx::helpers::Properties props; - // need to allocate heap object... log4cxx::helpers::Properties props takes care of deleting the memory... 
- log4cxx::helpers::InputStreamPtr is( new log4cxx::helpers::StringInputStream( fileContents ) ); - props.load(is); - STDOUT_DEBUG("Setting Logging Configuration, Properties using StringStream: " ); - log4cxx::PropertyConfigurator::configure(props); - - if (saveTemp) boost::filesystem::remove(fname); + STDOUT_DEBUG("Setting Logging Configuration, XML Properties: " << fname ); + log4cxx::xml::DOMConfigurator::configure(fname); + } else { + STDOUT_DEBUG( "Setting Logging Configuration, Java Properties: " ); + log4cxx::helpers::Properties props; + // need to allocate heap object... log4cxx::helpers::Properties props takes care of deleting the memory... + log4cxx::helpers::InputStreamPtr is( new log4cxx::helpers::StringInputStream( fileContents ) ); + props.load(is); + STDOUT_DEBUG("Setting Logging Configuration, Properties using StringStream: " ); + log4cxx::PropertyConfigurator::configure(props); + if (saveTemp) { + boost::filesystem::remove(fname); + } } cfgContents = fileContents; diff --git a/redhawk/src/base/framework/logging/rh_logger.cpp b/redhawk/src/base/framework/logging/rh_logger.cpp index ef3a06296..b6b5f9012 100644 --- a/redhawk/src/base/framework/logging/rh_logger.cpp +++ b/redhawk/src/base/framework/logging/rh_logger.cpp @@ -22,15 +22,17 @@ #include // logging macros used by redhawk resources +#include "ossie/logging/loghelpers.h" #include #ifdef HAVE_LOG4CXX -#include -#include -#include -#include -#include #include +#include +#include +#include +#include +#include "StringInputStream.h" +#include #endif // internal logging classes for std::out and log4cxx @@ -79,7 +81,10 @@ namespace rh_logger { }; - /* + const std::string Logger::USER_LOGS = "user"; + const std::string Logger::SYSTEM_LOGS = "system"; + + /* */ namespace spi { @@ -343,7 +348,7 @@ namespace rh_logger { // // save off the resource logger name when the resource gets it's initial logger // - static std::string _rsc_logger_name; + static std::string _rsc_logger_name = ""; // @@ -372,6 
+377,10 @@ namespace rh_logger { return _rsc_logger_name; } + LoggerPtr Logger::getInstanceLogger( const std::string &name ) { + return this->getLogger(name); + } + LoggerPtr Logger::getLogger( const std::string &name ) { STDOUT_DEBUG( "RH_LOGGER getLogger BEGIN "); LoggerPtr ret; @@ -382,10 +391,10 @@ namespace rh_logger { ret = StdOutLogger::getLogger( name ); #endif if ( ret->getLevel() ) { - STDOUT_DEBUG( "RH_LOGGER getLogger name/level :" << ret->getName() << "/" << ret->getLevel()->toString() ); + STDOUT_DEBUG( "RH_LOGGER getLogger name/level :" << ret->getName() << "/" << ret->getLevel()->toString() ); } else{ - STDOUT_DEBUG( "RH_LOGGER getLogger name/level :" << ret->getName() << "/UNSET"); + STDOUT_DEBUG( "RH_LOGGER getLogger name/level :" << ret->getName() << "/UNSET"); } } else { @@ -395,12 +404,62 @@ namespace rh_logger { return ret; } + LoggerPtr Logger::getNewHierarchy( const std::string &name ) { + STDOUT_DEBUG( "RH_LOGGER getLogger BEGIN "); + LoggerPtr ret; + if ( name != "" ) { +#ifdef HAVE_LOG4CXX + ret = L4Logger::getLogger( name, true ); +#else + ret = StdOutLogger::getLogger( name ); +#endif + if ( ret->getLevel() ) { + STDOUT_DEBUG( "RH_LOGGER getLogger name/level :" << ret->getName() << "/" << ret->getLevel()->toString() ); + } + else{ + STDOUT_DEBUG( "RH_LOGGER getLogger name/level :" << ret->getName() << "/UNSET"); + } + } + else { +#ifdef HAVE_LOG4CXX + ret = L4Logger::getLogger( name, true ); +#else + ret = getRootLogger(); +#endif + } + STDOUT_DEBUG( "RH_LOGGER getLogger END "); + return ret; + } + LoggerPtr Logger::getLogger( const char *name ) { std::string n(name); return getLogger(n); } + LoggerPtr Logger::getChildLogger( const std::string &logname, const std::string &ns ) { + std::string _full_name; + std::string _ns = ns; + if (_ns == "user") { + if (name.find('.') != std::string::npos) { + _ns.clear(); + } + } + if (not _ns.empty() and ((_ns!=Logger::USER_LOGS) or ((_ns==Logger::USER_LOGS) and 
(name.find("."+Logger::USER_LOGS+".") == std::string::npos)))) + _full_name = name+"."+_ns+"."+logname; + else + _full_name = name+"."+logname; +#ifdef HAVE_LOG4CXX + L4Logger* _this = (L4Logger*)this; + LoggerPtr tmp = _this->getInstanceLogger(_full_name); + L4Logger* tmpl4 = (L4Logger*)(tmp.get()); + tmpl4->setHierarchy(_this->getRootHierarchy()); + return tmp; +#else + return getLogger(_full_name); +#endif + } + void Logger::setLevel ( const LevelPtr &newLevel ) { STDOUT_DEBUG( " RH LOGGER setLevel - logger: " << name ); if ( newLevel ) { @@ -539,6 +598,51 @@ namespace rh_logger { return log_records; } + void Logger::configureLogger(const std::string &configuration, bool root_reset, int level) { + } + + void StdOutLogger::configureLogger(const std::string &configuration, bool root_reset, int level) { + } + + bool Logger::isLoggerInHierarchy(const std::string& search_name) { + std::vector loggers = _rootLogger->getNamedLoggers(); + for (std::vector::iterator it=loggers.begin(); it!=loggers.end(); ++it) { + size_t _idx = it->find(name); + if (_idx == 0) { + if (it->size() > name.size()) + if ((*it)[name.size()] != '.') + continue; + if (it->find(search_name) != 0) + continue; + if (it->size() > search_name.size()) { + if ((not search_name.empty()) and ((*it)[search_name.size()] != '.')) { + continue; + } + } + return true; + } + } + return false; + } + + void* Logger::getUnderlyingLogger() { + return NULL; + } + + std::vector Logger::getNamedLoggers() { + std::vector ret; + std::vector loggers = _rootLogger->getNamedLoggers(); + for (std::vector::iterator it=loggers.begin(); it!=loggers.end(); ++it) { + size_t _idx = it->find(name); + if (_idx == 0) { + if (it->size() > name.size()) + if ((*it)[name.size()] != '.') + continue; + ret.push_back(*it); + } + } + return ret; + } // append log record to circular buffer void Logger::appendLogRecord( const LevelPtr &level, const std::string &msg) { @@ -588,6 +692,26 @@ namespace rh_logger { return _rootLogger; } + bool 
StdOutLogger::isLoggerInHierarchy(const std::string& search_name) { + if (search_name == name) + return true; + return false; + } + + void* StdOutLogger::getUnderlyingLogger() { + return NULL; + } + + std::vector StdOutLogger::getNamedLoggers() { + std::vector ret; + ret.push_back(name); + return ret; + } + + + LoggerPtr StdOutLogger::getInstanceLogger( const std::string &name ) { + return this->getLogger(name); + } LoggerPtr StdOutLogger::getLogger( const std::string &name ) { @@ -677,6 +801,9 @@ namespace rh_logger { log4cxx::LevelPtr ConvertRHLevelToLog4 ( rh_logger::LevelPtr rh_level ) { + if (!rh_level) { + return log4cxx::LevelPtr(); + } if (rh_level == rh_logger::Level::getOff() ) return log4cxx::Level::getOff(); if (rh_level == rh_logger::Level::getFatal() ) return log4cxx::Level::getFatal(); if (rh_level == rh_logger::Level::getError() ) return log4cxx::Level::getError(); @@ -689,6 +816,9 @@ namespace rh_logger { }; rh_logger::LevelPtr ConvertLog4ToRHLevel ( log4cxx::LevelPtr l4_level ) { + if (!l4_level) { + return rh_logger::LevelPtr(); + } if (l4_level == log4cxx::Level::getOff() ) return rh_logger::Level::getOff(); if (l4_level == log4cxx::Level::getFatal() ) return rh_logger::Level::getFatal(); if (l4_level == log4cxx::Level::getError() ) return rh_logger::Level::getError(); @@ -697,7 +827,7 @@ namespace rh_logger { if (l4_level == log4cxx::Level::getDebug() ) return rh_logger::Level::getDebug(); if (l4_level == log4cxx::Level::getTrace() ) return rh_logger::Level::getTrace(); if (l4_level == log4cxx::Level::getAll() ) return rh_logger::Level::getAll(); - return rh_logger::Level::getInfo(); + return rh_logger::Level::getInfo(); }; L4Logger::L4LoggerPtr L4Logger::_rootLogger; @@ -710,18 +840,59 @@ namespace rh_logger { if ( !_rootLogger ) { _rootLogger = L4LoggerPtr( new L4Logger("") ); _rootLogger->l4logger = log4cxx::Logger::getRootLogger(); - LevelPtr l= _rootLogger->getLevel(); + LevelPtr l= _rootLogger->getLevel(); } STDOUT_DEBUG( " L4Logger 
getRootLogger: END "); return _rootLogger; } + LoggerPtr L4Logger::getInstanceLogger( const std::string &name ) { + LoggerPtr ret; + ret = LoggerPtr(new L4Logger( name, this->_rootHierarchy )); + return ret; + } + + void L4Logger::configureLogger(const std::string &configuration, bool root_reset, int level) { + log4cxx::helpers::Properties props; + log4cxx::helpers::InputStreamPtr is( new log4cxx::helpers::StringInputStream( configuration ) ); + props.load(is); + this->_rootHierarchy->resetConfiguration(); + log4cxx::spi::LoggerRepositoryPtr log_repo = this->_rootHierarchy; + prop_conf.doConfigure(props, log_repo); + if (root_reset) { + log4cxx::LoggerPtr new_root = this->_rootHierarchy->getRootLogger(); + if (level == -1) { + log4cxx::LoggerPtr global_root = log4cxx::Logger::getRootLogger(); + if (global_root->getEffectiveLevel()->toInt() != new_root->getEffectiveLevel()->toInt()) { + new_root->setLevel( global_root->getEffectiveLevel() ); + } + } else { + new_root->setLevel(ConvertRHLevelToLog4(ossie::logging::ConvertDebugToRHLevel(level))); + } + } + } + LoggerPtr L4Logger::getLogger( const std::string &name ) { + return getLogger(name, false); + } + + LoggerPtr L4Logger::getLogger( const std::string &name, bool newroot ) { STDOUT_DEBUG( " L4Logger::getLogger: BEGIN name: " << name ); LoggerPtr ret; if ( name != "" ) { - ret = LoggerPtr( new L4Logger( name ) ); + if (newroot) { + L4HierarchyPtr tmpHierarchy(new L4Hierarchy(name)); + + // make sure that the new root logger inherit the log level from the global root logger + log4cxx::LoggerPtr global_root = log4cxx::Logger::getRootLogger(); + log4cxx::LoggerPtr new_root = tmpHierarchy->getRootLogger(); + new_root->setLevel( global_root->getLevel() ); + + ret = LoggerPtr( new L4Logger( name, tmpHierarchy ) ); + } else { + ret = LoggerPtr( new L4Logger( name ) ); + } if ( ret->getLevel() ) { STDOUT_DEBUG( " L4Logger::getLogger: name /level " << ret->getName() << "/" << ret->getLevel()->toString() ); } @@ -747,7 
+918,19 @@ namespace rh_logger { l4logger() { l4logger = log4cxx::Logger::getLogger(name); - //_error_count = 0; + } + + L4Logger::L4Logger( const std::string &name, L4HierarchyPtr hierarchy ) : + Logger(name), + l4logger() + { + if (hierarchy) { + _instanceRootLogger = hierarchy->getRootLogger(); + l4logger = hierarchy->getLogger(name); + _rootHierarchy = hierarchy; + } else { + l4logger = log4cxx::Logger::getLogger(name); + } } L4Logger::L4Logger( const char *name ) : @@ -755,7 +938,6 @@ namespace rh_logger { l4logger() { l4logger = log4cxx::Logger::getLogger(name); - // _error_count = 0; } void L4Logger::setLevel ( const rh_logger::LevelPtr &newLevel ) { @@ -772,14 +954,63 @@ namespace rh_logger { } } + bool L4Logger::isLoggerInHierarchy(const std::string& search_name) { + log4cxx::LoggerList list = _rootHierarchy->getCurrentLoggers(); + for (log4cxx::LoggerList::iterator it=list.begin(); it!=list.end(); ++it) { + std::string _name((*it)->getName()); + size_t _idx = _name.find(name); + if (_idx == 0) { + if (_name.size() > name.size()) + if (_name[name.size()] != '.') + continue; + if (_name.find(search_name) != 0) + continue; + if (_name.size() > search_name.size()) { + if ((not search_name.empty()) and (_name[search_name.size()] != '.')) { + continue; + } + } + return true; + } + } + return false; + } + + void* L4Logger::getUnderlyingLogger() { + return static_cast(l4logger); + } + + std::vector L4Logger::getNamedLoggers() { + std::vector ret; + log4cxx::LoggerList list = _rootHierarchy->getCurrentLoggers(); + for (log4cxx::LoggerList::iterator it=list.begin(); it!=list.end(); ++it) { + std::string _name((*it)->getName()); + size_t _idx = _name.find(name); + if (_idx == 0) { + if (_name.size() > name.size()) + if (_name[name.size()] != '.') + continue; + ret.push_back(_name); + } + } + return ret; + } rh_logger::LevelPtr L4Logger::getLevel ( ) const { STDOUT_DEBUG( " L4Logger::getLevel: BEGIN logger: " << name ); if ( l4logger ) { - log4cxx::LevelPtr l4l = 
l4logger->getLevel(); - if ( l4l ) { - STDOUT_DEBUG( " L4Logger::getLevel: l4level: " << l4l->toString() ); - return ConvertLog4ToRHLevel( l4l ); + log4cxx::LevelPtr l4l; + log4cxx::LoggerPtr current_logger = l4logger; + while ( not l4l ) { + l4l = current_logger->getLevel(); + if ( l4l ) { + return ConvertLog4ToRHLevel( l4l ); + } else { + if (not current_logger->getParent()) { + return rh_logger::Logger::getLevel(); + } + current_logger = current_logger->getParent(); + } } } @@ -788,7 +1019,6 @@ namespace rh_logger { return rh_logger::Logger::getLevel(); } - bool L4Logger::isFatalEnabled() const { STDOUT_DEBUG( "--->> L4Logger::isFataEnabled" ); diff --git a/redhawk/src/base/framework/logging/rh_logger_p.h b/redhawk/src/base/framework/logging/rh_logger_p.h index c78525092..6c625d5e2 100644 --- a/redhawk/src/base/framework/logging/rh_logger_p.h +++ b/redhawk/src/base/framework/logging/rh_logger_p.h @@ -24,6 +24,16 @@ #include #include #include + +#ifdef HAVE_LOG4CXX +#include +#include +#include +#include +#include +#include +#endif + #include #include "rh_logger_stdout.h" @@ -34,16 +44,35 @@ namespace rh_logger { log4cxx::LevelPtr ConvertRHLevelToLog4 ( rh_logger::LevelPtr rh_level ); rh_logger::LevelPtr ConvertLog4ToRHLevel ( log4cxx::LevelPtr l4_level ); + class L4Hierarchy : public log4cxx::Hierarchy { + public: + L4Hierarchy(const std::string &name) : log4cxx::Hierarchy() { + _name = name; + }; + void resetConfiguration() { + log4cxx::Hierarchy::resetConfiguration(); + }; + std::string _name; + }; + class L4Logger : public Logger { + private: + //typedef boost::shared_ptr< L4Hierarchy > L4HierarchyPtr; + //typedef boost::shared_ptr< log4cxx::Hierarchy > L4HierarchyPtr; + typedef log4cxx::helpers::ObjectPtrT L4HierarchyPtr; public: static LoggerPtr getRootLogger( ); static LoggerPtr getLogger( const std::string &name ); + LoggerPtr getInstanceLogger( const std::string &name ); static LoggerPtr getLogger( const char *name ); + static LoggerPtr getLogger( const 
std::string &name, bool newroot ); virtual ~L4Logger() {} + L4Logger( const std::string &name, L4HierarchyPtr hierarchy ); + L4Logger( const std::string &name ); L4Logger( const char *name ); @@ -65,13 +94,33 @@ namespace rh_logger { const LevelPtr& getEffectiveLevel() const; + std::vector getNamedLoggers(); + + bool isLoggerInHierarchy(const std::string& search_name); + + void* getUnderlyingLogger(); + + L4HierarchyPtr getRootHierarchy() { + return _rootHierarchy; + }; + + void setHierarchy(L4HierarchyPtr hierarchy) { + _rootHierarchy = hierarchy; + }; + + void configureLogger(const std::string &configuration, bool root_reset=false, int level=-1); + private: + log4cxx::LoggerPtr l4logger; + typedef boost::shared_ptr< L4Logger > L4LoggerPtr; - static L4LoggerPtr _rootLogger; + static L4LoggerPtr _rootLogger; - log4cxx::LoggerPtr l4logger; + log4cxx::LoggerPtr _instanceRootLogger; + L4HierarchyPtr _rootHierarchy; + log4cxx::PropertyConfigurator prop_conf; uint32_t _error_count; }; diff --git a/redhawk/src/base/framework/logging/rh_logger_stdout.h b/redhawk/src/base/framework/logging/rh_logger_stdout.h index 97958eaee..acf395253 100644 --- a/redhawk/src/base/framework/logging/rh_logger_stdout.h +++ b/redhawk/src/base/framework/logging/rh_logger_stdout.h @@ -34,6 +34,7 @@ namespace rh_logger { static LoggerPtr getRootLogger( ); static LoggerPtr getLogger( const std::string &name ); + LoggerPtr getInstanceLogger( const std::string &name ); static LoggerPtr getLogger( const char *name ); virtual ~StdOutLogger() {} @@ -50,6 +51,14 @@ namespace rh_logger { const LevelPtr& getEffectiveLevel() const; + std::vector getNamedLoggers(); + + bool isLoggerInHierarchy(const std::string& search_name); + + void* getUnderlyingLogger(); + + virtual void configureLogger(const std::string &configuration, bool root_reset=false, int level=-1); + protected: diff --git a/redhawk/src/base/framework/prop_helpers.cpp b/redhawk/src/base/framework/prop_helpers.cpp index f81acb080..b5ed9eb18 
100644 --- a/redhawk/src/base/framework/prop_helpers.cpp +++ b/redhawk/src/base/framework/prop_helpers.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #if HAVE_OMNIORB4 #include "omniORB4/CORBA.h" #endif @@ -30,7 +31,7 @@ #include #include #include -#include //testing +#include using namespace ossie; @@ -171,8 +172,12 @@ bool ossie::compare_anys(const CORBA::Any& a, const CORBA::Any& b, std::string& */ template CORBA::Any ossie::convertComplexStringToAny(std::string value) { - char sign, j; // sign represents + or -, j represents the letter j in the string + + char sign = '+'; // sign represents + or - + char j = 0; // j represents the letter j in the string Type A, B; // A is the real value, B is the complex value + A = 0; + B = 0; CORBA::Any result; @@ -180,11 +185,24 @@ CORBA::Any ossie::convertComplexStringToAny(std::string value) { std::stringstream stream(value); stream >> A >> sign >> j >> B; + if (value.size() > 1) { + if (value[0] == 'j') { + std::stringstream stream(value); + stream >> j >> B; + } else if ((value[0] == '-') and (value[1] == 'j')) { + std::stringstream stream(value); + stream >> sign >> j >> B; + } + } + // if A-jB instead of A+jB, flip the sign of B if (sign == '-') { B *= -1; } + if (value.find('j') == std::string::npos) + B = 0; + // Create a complex representation and convert it to a CORBA::any. 
CFComplexType cfComplex; cfComplex.real = A; @@ -367,6 +385,113 @@ CORBA::Any ossie::stringToComplexAny(std::string value, std::string structName) return result; } +namespace redhawk { + namespace time { + namespace utils { + CF::UTCTime create( const double wholeSecs, const double fractionalSecs ) { + double wsec = wholeSecs; + double fsec = fractionalSecs; + if ( wsec < 0.0 || fsec < 0.0 ) { + struct timeval tmp_time; + struct timezone tmp_tz; + gettimeofday(&tmp_time, &tmp_tz); + wsec = tmp_time.tv_sec; + fsec = tmp_time.tv_usec / 1e6; + } + CF::UTCTime tstamp = CF::UTCTime(); + tstamp.tcstatus = 1; + struct tm t = {0}; + tstamp.twsec = wsec + t.tm_gmtoff; + tstamp.tfsec = fsec; + return tstamp; + } + + CF::UTCTime convert( const std::string formatted ) { + if (formatted == "now") { + return now(); + } + unsigned int year; + unsigned int month; + unsigned int day; + unsigned int hour; + unsigned int minute; + double second; + int retval = sscanf(formatted.c_str(), "%d:%d:%d::%d:%d:%lf",&year,&month,&day,&hour,&minute,&second); + if (retval != 6) { + return notSet(); + } + CF::UTCTime utctime; + utctime.tcstatus=1; + struct tm t = {0}; + t.tm_year = year - 1900; + t.tm_mon = month - 1; + t.tm_mday = day; + t.tm_hour = hour; + t.tm_min = minute; + t.tm_sec = (int)second; + utctime.twsec = mktime(&t) - timezone; + utctime.tfsec = second - (int)second; + return utctime; + } + + std::string toString( const CF::UTCTime utc ) { + struct tm time; + time_t seconds = utc.twsec; + std::ostringstream stream; + gmtime_r(&seconds, &time); + stream << (1900+time.tm_year) << ':'; + stream << std::setw(2) << std::setfill('0') << (time.tm_mon+1) << ':'; + stream << std::setw(2) << time.tm_mday << "::"; + stream << std::setw(2) << time.tm_hour << ":"; + stream << std::setw(2) << time.tm_min << ":"; + stream << std::setw(2) << time.tm_sec; + int usec = round(utc.tfsec * 1000000.0); + stream << "." 
<< std::setw(6) << usec; + return stream.str(); + } + + /* + * Create a time stamp object from the current time of day reported by the system + */ + CF::UTCTime now() { + return create(); + } + + /* + * Create a time stamp object from the current time of day reported by the system + */ + CF::UTCTime notSet() { + CF::UTCTime tstamp = CF::UTCTime(); + tstamp.tcstatus = 0; + tstamp.twsec = 0.0; + tstamp.tfsec = 0.0; + return tstamp; + } + + /* + * Adjust the whole and fractional portions of a time stamp object to + * ensure there is no fraction in the whole seconds, and vice-versa + */ + void normalize(CF::UTCTime& time) { + // Get fractional adjustment from whole seconds + double fadj = std::modf(time.twsec, &time.twsec); + + // Adjust fractional seconds and get whole seconds adjustment + double wadj = 0; + time.tfsec = std::modf(time.tfsec + fadj, &wadj); + + // If fractional seconds are negative, borrow a second from the whole + // seconds to make it positive, normalizing to [0,1) + if (time.tfsec < 0.0) { + time.tfsec += 1.0; + wadj -= 1.0; + } + time.twsec += wadj; + } + } + } +} + /* * Convert from a string to a simple CORBA::Any. 
* @@ -376,38 +501,71 @@ CORBA::Any ossie::stringToComplexAny(std::string value, std::string structName) */ CORBA::Any ossie::stringToSimpleAny(std::string value, CORBA::TCKind kind) { CORBA::Any result; + char *endptr = NULL; + std::string _type; + bool fixed_point = false; if (kind == CORBA::tk_boolean){ + _type = "boolean"; if ((value == "true") || (value == "True") || (value == "TRUE") || (value == "1")) { result <<= CORBA::Any::from_boolean(CORBA::Boolean(true)); } else { result <<= CORBA::Any::from_boolean(CORBA::Boolean(false)); } } else if (kind == CORBA::tk_char){ + _type = "char"; result <<= CORBA::Any::from_char(CORBA::Char(value[0])); } else if (kind == CORBA::tk_double){ - result <<= CORBA::Double(strtod(value.c_str(), NULL)); + _type = "double"; + result <<= CORBA::Double(strtod(value.c_str(), &endptr)); } else if (kind == CORBA::tk_octet){ - result <<= CORBA::Any::from_octet(CORBA::Octet(strtol(value.c_str(), NULL, 0))); + fixed_point = true; + _type = "octet"; + result <<= CORBA::Any::from_octet(CORBA::Octet(strtol(value.c_str(), &endptr, 0))); } else if (kind == CORBA::tk_ushort){ - result <<= CORBA::UShort(strtol(value.c_str(), NULL, 0)); + fixed_point = true; + _type = "ushort"; + result <<= CORBA::UShort(strtol(value.c_str(), &endptr, 0)); } else if (kind == CORBA::tk_short){ - result <<= CORBA::Short(strtol(value.c_str(), NULL, 0)); + fixed_point = true; + _type = "short"; + result <<= CORBA::Short(strtol(value.c_str(), &endptr, 0)); } else if (kind == CORBA::tk_float){ - result <<= CORBA::Float(strtof(value.c_str(), NULL)); + _type = "float"; + result <<= CORBA::Float(strtof(value.c_str(), &endptr)); } else if (kind == CORBA::tk_ulong){ - result <<= CORBA::ULong(strtol(value.c_str(), NULL, 0)); + fixed_point = true; + _type = "ulong"; + result <<= CORBA::ULong(strtol(value.c_str(), &endptr, 0)); } else if (kind == CORBA::tk_long){ - result <<= CORBA::Long(strtol(value.c_str(), NULL, 0)); + fixed_point = true; + _type = "long"; + result <<= 
CORBA::Long(strtol(value.c_str(), &endptr, 0)); } else if (kind == CORBA::tk_longlong){ - result <<= CORBA::LongLong(strtoll(value.c_str(), NULL, 0)); + fixed_point = true; + _type = "longlong"; + result <<= CORBA::LongLong(strtoll(value.c_str(), &endptr, 0)); } else if (kind == CORBA::tk_ulonglong){ - result <<= CORBA::ULongLong(strtoll(value.c_str(), NULL, 0)); + fixed_point = true; + _type = "ulonglong"; + result <<= CORBA::ULongLong(strtoll(value.c_str(), &endptr, 0)); } else if (kind == CORBA::tk_string){ + _type = "string"; result <<= value.c_str(); } else { + _type = "any"; result = CORBA::Any(); } // end of outer switch statement + if (endptr != NULL) { + if ((value != "(null)") and ((endptr == value.c_str()) or (*endptr != '\0'))) { + if (fixed_point) { + strtod(value.c_str(), &endptr); + if (*endptr == '\0') + return result; + } + throw ossie::badConversion(value, _type); + } + } return result; } @@ -426,7 +584,11 @@ CORBA::Any ossie::string_to_any(std::string value, CORBA::TypeCode_ptr type) // that are not explicitly supported will cause the function to // return a new, empty CORBA::Any. 
std::string structName = any_to_string(*(type->parameter(0))); - result = stringToComplexAny(value, structName); + if (structName == std::string("UTCTime")) { + result <<= redhawk::time::utils::convert(value); + } else { + result = stringToComplexAny(value, structName); + } } else { result = stringToSimpleAny(value, type->kind()); @@ -436,62 +598,125 @@ CORBA::Any ossie::string_to_any(std::string value, CORBA::TypeCode_ptr type) } CORBA::Any ossie::strings_to_any(const std::vector& values, CORBA::TCKind kind) +{ + return strings_to_any(values, kind, NULL); +} + +CORBA::Any ossie::strings_to_any(const std::vector& values, CORBA::TCKind kind, CORBA::TypeCode_ptr type) { CORBA::Any result; + if (type != NULL) { + if (type->kind() == CORBA::tk_struct) { + std::string structName = any_to_string(*(type->parameter(0))); + if (structName == std::string("UTCTime")) { + result <<= strings_to_utctime_sequence(values); + return result; + } + } + } switch (kind) { case CORBA::tk_boolean: result <<= strings_to_boolean_sequence(values); - break; - + return result; case CORBA::tk_char: result <<= strings_to_char_sequence(values); - break; - + return result; case CORBA::tk_double: result <<= strings_to_double_sequence(values); - break; - + return result; case CORBA::tk_octet: result <<= strings_to_octet_sequence(values); - break; - + return result; case CORBA::tk_ushort: result <<= strings_to_unsigned_short_sequence(values); - break; - + return result; case CORBA::tk_short: result <<= strings_to_short_sequence(values); - break; - + return result; case CORBA::tk_float: result <<= strings_to_float_sequence(values); - break; - + return result; case CORBA::tk_ulong: result <<= strings_to_unsigned_long_sequence(values); - break; - + return result; case CORBA::tk_long: result <<= strings_to_long_sequence(values); - break; - + return result; case CORBA::tk_longlong: result <<= strings_to_long_long_sequence(values); - break; - + return result; case CORBA::tk_ulonglong: result <<= 
strings_to_unsigned_long_long_sequence(values); - break; - + return result; case CORBA::tk_string: result <<= strings_to_string_sequence(values); break; + default: result = CORBA::Any(); } + if (type != NULL) { + std::string structName = any_to_string(*(type->parameter(0))); + if (structName == "complexFloat"){ + result <<= strings_to_complex_float_sequence(values); + } else if (structName == "complexBoolean"){ + result <<= strings_to_complex_boolean_sequence(values); + } else if (structName == "complexULong"){ + result <<= strings_to_complex_unsigned_long_sequence(values); + } else if (structName == "complexShort"){ + result <<= strings_to_complex_short_sequence(values); + } else if (structName == "complexOctet"){ + result <<= strings_to_complex_octet_sequence(values); + } else if (structName == "complexChar"){ + result <<= strings_to_complex_char_sequence(values); + } else if (structName == "complexUShort"){ + result <<= strings_to_complex_unsigned_short_sequence(values); + } else if (structName == "complexDouble"){ + result <<= strings_to_complex_double_sequence(values); + } else if (structName == "complexLong"){ + result <<= strings_to_complex_long_sequence(values); + } else if (structName == "complexLongLong"){ + result <<= strings_to_complex_long_long_sequence(values); + } else if (structName == "complexULongLong"){ + result <<= strings_to_complex_unsigned_long_long_sequence(values); + } + } return result; } +/*CORBA::Any ossie::strings_to_any(const std::vector& values, CORBA::TypeCode_ptr type) +{ + CORBA::Any result; + std::string structName = any_to_string(*(type->parameter(0))); + if (structName == "complexFloat"){ + result <<= strings_to_complex_float_sequence(values); + } else if (structName == "complexBoolean"){ + result <<= strings_to_complex_boolean_sequence(values); + } else if (structName == "complexULong"){ + result <<= strings_to_complex_unsigned_long_sequence(values); + } else if (structName == "complexShort"){ + result <<= 
strings_to_complex_short_sequence(values); + } else if (structName == "complexOctet"){ + result <<= strings_to_complex_octet_sequence(values); + } else if (structName == "complexChar"){ + result <<= strings_to_complex_char_sequence(values); + } else if (structName == "complexUShort"){ + result <<= strings_to_complex_unsigned_short_sequence(values); + } else if (structName == "complexDouble"){ + result <<= strings_to_complex_double_sequence(values); + } else if (structName == "complexLong"){ + result <<= strings_to_complex_long_sequence(values); + } else if (structName == "complexLongLong"){ + result <<= strings_to_complex_long_long_sequence(values); + } else if (structName == "complexULongLong"){ + result <<= strings_to_complex_unsigned_long_long_sequence(values); + } else { + result = CORBA::Any(); + } + + return result; +}*/ + /* * Convert a CORBA::Any to a string in the format A+jB. * @@ -558,6 +783,10 @@ std::string ossie::simpleAnyToString(const CORBA::Any& value) CORBA::TypeCode_var typeValue = value.type(); switch (typeValue->kind()) { + case CORBA::tk_null: + result << "(null)"; + break; + case CORBA::tk_boolean: { CORBA::Boolean tmp; value >>= CORBA::Any::to_boolean(tmp); @@ -658,7 +887,9 @@ std::string ossie::any_to_string(const CORBA::Any& value) std::string result; CORBA::TypeCode_var valueType = value.type(); - if (valueType->kind() == CORBA::tk_struct) { + if (valueType->equivalent(CF::_tc_Properties)) { + result = redhawk::Value::cast(value).asProperties().toString(); + } else if (valueType->kind() == CORBA::tk_struct) { result = complexAnyToString(value); } else { @@ -794,18 +1025,49 @@ CORBA::BooleanSeq* ossie::strings_to_boolean_sequence(const std::vector &values) +{ + CF::complexBooleanSeq_var result = new CF::complexBooleanSeq; + + result->length(values.size()); + CF::complexBoolean *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = 
vtmp->imag; + result[i].real = vtmp->real; + } + return result._retn(); +} + CORBA::CharSeq* ossie::strings_to_char_sequence(const std::vector &values) { CORBA::CharSeq_var result = new CORBA::CharSeq; result->length(values.size()); for (unsigned int i = 0; i < values.size(); ++i) { - result[i] = values[i][0]; } return result._retn(); } +CF::complexCharSeq* ossie::strings_to_complex_char_sequence(const std::vector &values) +{ + CF::complexCharSeq_var result = new CF::complexCharSeq; + + result->length(values.size()); + CF::complexChar *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + result[i].real = vtmp->real; + } + return result._retn(); +} + CORBA::DoubleSeq* ossie::strings_to_double_sequence(const std::vector &values) { CORBA::DoubleSeq_var result = new CORBA::DoubleSeq; @@ -818,6 +1080,23 @@ CORBA::DoubleSeq* ossie::strings_to_double_sequence(const std::vector &values) +{ + CF::complexDoubleSeq_var result = new CF::complexDoubleSeq; + + result->length(values.size()); + CF::complexDouble *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::FloatSeq* ossie::strings_to_float_sequence(const std::vector &values) { CORBA::FloatSeq_var result = new CORBA::FloatSeq; @@ -831,6 +1110,24 @@ CORBA::FloatSeq* ossie::strings_to_float_sequence(const std::vector return result._retn(); } +CF::complexFloatSeq* ossie::strings_to_complex_float_sequence(const std::vector &values) +{ + CF::complexFloatSeq_var result = new CF::complexFloatSeq; + + result->length(values.size()); + + CF::complexFloat *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + 
result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::ShortSeq* ossie::strings_to_short_sequence(const std::vector &values) { CORBA::ShortSeq_var result = new CORBA::ShortSeq; @@ -844,6 +1141,24 @@ CORBA::ShortSeq* ossie::strings_to_short_sequence(const std::vector return result._retn(); } +CF::complexShortSeq* ossie::strings_to_complex_short_sequence(const std::vector &values) +{ + CF::complexShortSeq_var result = new CF::complexShortSeq; + + result->length(values.size()); + + CF::complexShort*vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::LongSeq* ossie::strings_to_long_sequence(const std::vector &values) { CORBA::LongSeq_var result = new CORBA::LongSeq; @@ -857,6 +1172,24 @@ CORBA::LongSeq* ossie::strings_to_long_sequence(const std::vector & return result._retn(); } +CF::complexLongSeq* ossie::strings_to_complex_long_sequence(const std::vector &values) +{ + CF::complexLongSeq_var result = new CF::complexLongSeq; + + result->length(values.size()); + + CF::complexLong *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::LongLongSeq* ossie::strings_to_long_long_sequence(const std::vector &values) { CORBA::LongLongSeq_var result = new CORBA::LongLongSeq; @@ -870,6 +1203,24 @@ CORBA::LongLongSeq* ossie::strings_to_long_long_sequence(const std::vector &values) +{ + CF::complexLongLongSeq_var result = new CF::complexLongLongSeq; + + result->length(values.size()); + + CF::complexLongLong *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + 
result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::ULongLongSeq* ossie::strings_to_unsigned_long_long_sequence(const std::vector &values) { CORBA::ULongLongSeq_var result = new CORBA::ULongLongSeq; @@ -883,6 +1234,24 @@ CORBA::ULongLongSeq* ossie::strings_to_unsigned_long_long_sequence(const std::ve return result._retn(); } +CF::complexULongLongSeq* ossie::strings_to_complex_unsigned_long_long_sequence(const std::vector &values) +{ + CF::complexULongLongSeq_var result = new CF::complexULongLongSeq; + + result->length(values.size()); + + CF::complexULongLong *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::OctetSeq* ossie::strings_to_octet_sequence(const std::vector &values) { CORBA::OctetSeq_var result = new CORBA::OctetSeq; @@ -896,6 +1265,24 @@ CORBA::OctetSeq* ossie::strings_to_octet_sequence(const std::vector return result._retn(); } +CF::complexOctetSeq* ossie::strings_to_complex_octet_sequence(const std::vector &values) +{ + CF::complexOctetSeq_var result = new CF::complexOctetSeq; + + result->length(values.size()); + + CF::complexOctet *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::UShortSeq* ossie::strings_to_unsigned_short_sequence(const std::vector &values) { CORBA::UShortSeq_var result = new CORBA::UShortSeq; @@ -909,6 +1296,24 @@ CORBA::UShortSeq* ossie::strings_to_unsigned_short_sequence(const std::vector &values) +{ + CF::complexUShortSeq_var result = new CF::complexUShortSeq; + + result->length(values.size()); + + CF::complexUShort *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = 
convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::ULongSeq* ossie::strings_to_unsigned_long_sequence(const std::vector &values) { CORBA::ULongSeq_var result = new CORBA::ULongSeq; @@ -922,6 +1327,24 @@ CORBA::ULongSeq* ossie::strings_to_unsigned_long_sequence(const std::vector &values) +{ + CF::complexULongSeq_var result = new CF::complexULongSeq; + + result->length(values.size()); + + CF::complexULong *vtmp; + CORBA::Any tmp; + for (unsigned int i = 0; i < values.size(); ++i) { + tmp = convertComplexStringToAny(values[i]); + tmp >>= vtmp; + result[i].imag = vtmp->imag; + result[i].real = vtmp->real; + } + + return result._retn(); +} + CORBA::StringSeq* ossie::strings_to_string_sequence(const std::vector &values) { CORBA::StringSeq_var result = new CORBA::StringSeq; @@ -935,6 +1358,19 @@ CORBA::StringSeq* ossie::strings_to_string_sequence(const std::vector &values) +{ + CF::UTCTimeSequence_var result = new CF::UTCTimeSequence; + + result->length(values.size()); + + for (unsigned int i = 0; i < values.size(); ++i) { + result[i] = redhawk::time::utils::convert(values[i].c_str()); + } + + return result._retn(); +} + /* * Get the TypeCode kind based on a string. 
Note that complex * types are not supported, as the TypeCode kind for all complex @@ -1000,6 +1436,8 @@ CORBA::TypeCode_ptr ossie::getTypeCode(std::string type) { kind = CORBA::_tc_ulonglong; } else if (type == "string"){ kind = CORBA::_tc_string; + } else if (type == "utctime"){ + kind = CF::_tc_UTCTime; } else if (type == "complexDouble") { kind = CF::_tc_complexDouble; } else if (type == "complexFloat") { @@ -1094,23 +1532,20 @@ CORBA::TypeCode_ptr ossie::getTypeCode(CORBA::TCKind kind, std::string structNam } -CF::Properties ossie::getNonNilProperties(CF::Properties& originalProperties) +CF::Properties ossie::getNonNilProperties(const CF::Properties& originalProperties) { - CF::Properties nonNilProperties; - CORBA::TypeCode_var typeProp; - - for (unsigned int i = 0; i < originalProperties.length(); i++) { - CF::DataType prop = originalProperties[i]; - typeProp = prop.value.type(); - if (typeProp->kind() != CORBA::tk_null) { - nonNilProperties.length(nonNilProperties.length() + 1); - nonNilProperties[nonNilProperties.length()-1] = prop; + redhawk::PropertyMap nonNilProperties; + const redhawk::PropertyMap& properties = redhawk::PropertyMap::cast(originalProperties); + + for (redhawk::PropertyMap::const_iterator prop = properties.begin(); prop != properties.end(); ++prop) { + if (!prop->getValue().isNil()) { + nonNilProperties.push_back(*prop); } } return nonNilProperties; } -CF::Properties ossie::getNonNilConfigureProperties(CF::Properties& originalProperties) +CF::Properties ossie::getNonNilConfigureProperties(const CF::Properties& originalProperties) { return getNonNilProperties(originalProperties); } diff --git a/redhawk/src/base/framework/python/.gitignore b/redhawk/src/base/framework/python/.gitignore index 95d2220a5..b2af74f2f 100644 --- a/redhawk/src/base/framework/python/.gitignore +++ b/redhawk/src/base/framework/python/.gitignore @@ -1,3 +1,2 @@ build - -.pythonInstallFiles +ossiepy.egg-info diff --git a/redhawk/src/base/framework/python/Makefile.am 
b/redhawk/src/base/framework/python/Makefile.am index c675eeb76..be8cf48b7 100644 --- a/redhawk/src/base/framework/python/Makefile.am +++ b/redhawk/src/base/framework/python/Makefile.am @@ -50,6 +50,7 @@ BUILT_SOURCES = \ ossie/cf/ExtendedEvent_idl.py \ ossie/cf/AggregateDevices_idl.py \ ossie/cf/QueryablePort_idl.py \ + ossie/cf/NegotiablePort_idl.py \ ossie/cf/WellKnownProperties_idl.py \ ossie/cf/sandbox_idl.py \ ossie/cf/LogInterfaces_idl.py \ @@ -57,10 +58,5 @@ BUILT_SOURCES = \ CLEANFILES = $(BUILT_SOURCES) -PYTHON_INSTALL_FILES=".pythonInstallFiles" - install-exec-hook: - python setup.py install -f --$(PYTHON_INSTALL_SCHEME)=$(DESTDIR)$(prefix) --record $(PYTHON_INSTALL_FILES) - -uninstall: - rm `cat $(PYTHON_INSTALL_FILES)` + $(PYTHON) setup.py install -f --$(PYTHON_INSTALL_SCHEME)=$(DESTDIR)$(prefix) --old-and-unmanageable diff --git a/redhawk/src/base/framework/python/ossie/apps/cleanomni b/redhawk/src/base/framework/python/ossie/apps/cleanomni index 5cc12f467..ff46224d2 100755 --- a/redhawk/src/base/framework/python/ossie/apps/cleanomni +++ b/redhawk/src/base/framework/python/ossie/apps/cleanomni @@ -86,7 +86,8 @@ if __name__ == '__main__': retval=commands.getstatusoutput('rm -rf /var/log/omniORB/*') if options.verbose: print "Removing omniEvents log" - retval=commands.getstatusoutput('rm -rf /var/lib/omniEvents/*') + retval=commands.getstatusoutput('[ -d /var/lib/omniEvents ] && rm -rf /var/lib/omniEvents/*') + retval=commands.getstatusoutput('[ -d /var/log/omniEvents ] && rm -rf /var/log/omniEvents/*') if options.verbose: print "Starting omniNames" retval=os.system('/sbin/service omniNames start') diff --git a/redhawk/src/base/framework/python/ossie/apps/qtbrowse/browsewindow.py b/redhawk/src/base/framework/python/ossie/apps/qtbrowse/browsewindow.py index 2ace342ec..6b92ded08 100644 --- a/redhawk/src/base/framework/python/ossie/apps/qtbrowse/browsewindow.py +++ b/redhawk/src/base/framework/python/ossie/apps/qtbrowse/browsewindow.py @@ -365,6 +365,9 @@ 
def createSelected (self): except CF.ApplicationFactory.CreateApplicationError, e: QMessageBox.critical(self, 'Creation of waveform failed.', e.msg, QMessageBox.Ok) return + except: + QMessageBox.critical(self, 'Creation of waveform failed', str(sys.exc_info()[1]), QMessageBox.Ok) + return if app_inst == None: QMessageBox.critical(self, 'Creation of waveform failed.', 'Unable to create Application instance for $SDRROOT'+app, QMessageBox.Ok) diff --git a/redhawk/src/base/framework/python/ossie/apps/qtbrowse/browsewindowbase.py b/redhawk/src/base/framework/python/ossie/apps/qtbrowse/browsewindowbase.py index 78206269c..221bd5afd 100644 --- a/redhawk/src/base/framework/python/ossie/apps/qtbrowse/browsewindowbase.py +++ b/redhawk/src/base/framework/python/ossie/apps/qtbrowse/browsewindowbase.py @@ -33,6 +33,7 @@ from ossie.utils import sb import copy from ossie.utils import redhawk,prop_helpers,type_helpers +from ossie.cf import CF import structdialog def createPlotMenu(parent): @@ -461,9 +462,19 @@ def contextMenuEvent(self, event): if resp == 'Release': self.callbackObject.addRequest(('releaseApplication(QString)',appname)) elif resp == 'Start': - contref.start() + try: + contref.start() + except CF.Resource.StartError as err: + QMessageBox.critical(self, 'Start error', err.msg, QMessageBox.Ok) + except: + QMessageBox.critical(self, 'Start error', 'Unable to start application', QMessageBox.Ok) elif resp == 'Stop': - contref.stop() + try: + contref.stop() + except CF.Resource.StopError as err: + QMessageBox.critical(self, 'Stop error', err.msg, QMessageBox.Ok) + except: + QMessageBox.critical(self, 'Stop error', 'Unable to stop application', QMessageBox.Ok) elif plotResponse(resp): plot = createPlot(resp) plot.start() diff --git a/redhawk/src/base/framework/python/ossie/apps/rh_net_diag b/redhawk/src/base/framework/python/ossie/apps/rh_net_diag index f094a73a7..7c9e29b16 100755 --- a/redhawk/src/base/framework/python/ossie/apps/rh_net_diag +++ 
b/redhawk/src/base/framework/python/ossie/apps/rh_net_diag @@ -34,6 +34,30 @@ import subprocess def is_readable(path): return os.access(path,os.R_OK) +def is_executable(path): + return os.access(path,os.X_OK) + +def is_writable(path): + return os.access(path,os.W_OK) + +def directoriesReadAccess(path): + for dirname, subdirlist, files in os.walk(path): + if not is_readable(dirname): + return False + return True + +def directoriesWriteAccess(path): + for dirname, subdirlist, files in os.walk(path): + if not is_readable(dirname): + return False + return True + +def directoriesExecuteAccess(path): + for dirname, subdirlist, files in os.walk(path): + if not is_executable(dirname): + return False + return True + def is_running(process): s = subprocess.Popen(["ps", "axw"],stdout=subprocess.PIPE) @@ -242,6 +266,7 @@ if __name__ == '__main__': endPointsChecked = False parser = OptionParser(usage=USAGE) + parser.add_option("--disk", action="store_true", dest="disk", help="Check $SDRROOT, $OSSIEHOME, and system configuration for minimum read/write access") parser.add_option("--ns", action="store_true", dest="namingService", help="Whether or not omniNames is expected to be running on this host") parser.add_option("--dom", action="store_true", dest="domain", help="Whether or not the Domain Manager is expected to be running on this host") parser.add_option("--dev", action="store_true", dest="device", help="Whether or not the Device Manager is expected to be running on this host") @@ -252,10 +277,12 @@ if __name__ == '__main__': options, args = parser.parse_args() if options.namingService == None and \ options.domain == None and \ + options.disk == None and \ options.device == None: options.namingService = True options.domain = True options.device = True + options.disk = True # Get hostname hostname = (socket.gethostname()).split(".")[0] @@ -264,19 +291,34 @@ if __name__ == '__main__': print "==========================================================================" # Make sure 
OSSIEHOME, SDRROOT, and /etc/omniORB.cfg are readable + ossiehome_set = False if os.environ.has_key('OSSIEHOME'): + ossiehome_set = True if not is_readable(os.environ['OSSIEHOME']): print "WARNING: $OSSIEHOME is not readable by this user." print "==========================================================================" + else: + print "ERROR: $OSSIEHOME is not set" + print "==========================================================================" + sdrroot_set = False if os.environ.has_key('SDRROOT'): + sdrroot_set = True if not is_readable(os.environ['SDRROOT']): print "WARNING: $SDRROOT is not readable by this user." print "==========================================================================" + else: + print "ERROR: $SDRROOT is not set" + print "==========================================================================" if not is_readable('/etc/omniORB.cfg'): print "ERROR: /etc/omniORB.cfg is not readable by this user. This will cause nameclt to fail." print "SOLUTION: Set file permissions on /etc/omniORB.cfg to make it accessible to this user." print "==========================================================================" sys.exit() + if not is_readable('/etc/hosts'): + print "ERROR: /etc/hosts is not readable by this user. This may cause errors" + print "SOLUTION: Set file permissions on /etc/hosts to make it accessible to this user." + print "==========================================================================" + sys.exit() if len(checkCatior()) == 0: print 'ERROR: Neither catior (version 4.1.6 of omniORB) nor catior.omni (version 4.1.0 of omniORB) is present. Is omniORB installed?' 
print 'SOLUTION: Make sure omniORB is installed and confirm that either catior or catior.omni is visible in the current path by using the following commands: \"which catior\" and \"which catior.omni\"' @@ -284,6 +326,47 @@ if __name__ == '__main__': sys.exit() # If this script is running on the omniNames host, run the following diagnostics: + errorFound = False + if options.disk and ossiehome_set and sdrroot_set: + if not directoriesReadAccess(os.getenv('SDRROOT')): + errorFound = True + print "ERROR: One or more directories in SDRROOT are not readable by this user. This may lead to deployment errors." + print "SOLUTION: Set directory access permissions on SDRROOT to make it accessible to this user." + print "==========================================================================" + if not directoriesWriteAccess(os.getenv('SDRROOT')): + errorFound = True + print "WARNING: One or more directories in SDRROOT are not writable by this user. This may lead to component or device installation errors." + print "SOLUTION: Set directory access permissions on SDRROOT to make it writable to this user." + print "==========================================================================" + if not directoriesExecuteAccess(os.getenv('SDRROOT')): + errorFound = True + print "ERROR: One or more directories in SDRROOT are not executable by this user. This may lead to deployment errors." + print "SOLUTION: Set directory access permissions on SDRROOT to make it accessible to this user." + print "==========================================================================" + if not is_writable(os.getenv('SDRROOT')+'/dev'): + errorFound = True + print "ERROR: SDRROOT/dev is not writable by this user. This will lead to deployment errors." + print "SOLUTION: Set directory write permissions on SDRROOT/dev to make it writable to this user." 
+ print "==========================================================================" + if not directoriesReadAccess(os.getenv('OSSIEHOME')): + errorFound = True + print "ERROR: One or more directories in SDRROOT are not readable by this user. This may lead to deployment errors." + print "SOLUTION: Set directory access permissions on SDRROOT to make it accessible to this user." + print "==========================================================================" + if not directoriesWriteAccess(os.getenv('OSSIEHOME')): + errorFound = True + print "WARNING: One or more directories in OSSIEHOME are not writable by this user. This may lead to IDL project installation errors." + print "SOLUTION: Set directory access permissions on OSSIEHOME to make it writable to this user." + print "==========================================================================" + if not directoriesExecuteAccess(os.getenv('OSSIEHOME')): + errorFound = True + print "ERROR: One or more directories in SDRROOT are not executable by this user. This may lead to deployment errors." + print "SOLUTION: Set directory access permissions on SDRROOT to make it accessible to this user." + print "==========================================================================" + if not errorFound: + print "No errors detected with $SDRROOT, $OSSIEHOME, or system configuration permissions" + print "==========================================================================" + errorFound = False if options.namingService: # 1. Is omniNames process running? 
@@ -601,7 +684,7 @@ if __name__ == '__main__': while True: line = proc.stdout.readline() if line != '': - if line.find("inet addr:"+ hostIP) != -1: + if line.find("inet addr:"+ hostIP) != -1 or line.find("inet "+ hostIP) != -1: foundIP = True break else: diff --git a/redhawk/src/base/framework/python/ossie/device.py b/redhawk/src/base/framework/python/ossie/device.py index 74036f594..d899fc247 100644 --- a/redhawk/src/base/framework/python/ossie/device.py +++ b/redhawk/src/base/framework/python/ossie/device.py @@ -188,8 +188,11 @@ class Device(resource.Resource): def __init__(self, devmgr, identifier, label, softwareProfile, compositeDevice, execparams, propertydefs=(),loggerName=None): if not loggerName and label: loggerName = label.rsplit("_", 1)[0] - resource.Resource.__init__(self, identifier, execparams, propertydefs, loggerName=loggerName) - self._log.debug("Initializing Device %s %s %s %s", identifier, execparams, propertydefs, loggerName) + resource.Resource.__init__(self, identifier, execparams, propertydefs, loggerName=loggerName, baseLogName=label) + self._deviceLog = self._baseLog.getChildLogger('Device', ossie.utils.log4py.SYSTEM_LOGS) + self._loadableDeviceLog = self._baseLog.getChildLogger('LoadableDevice', ossie.utils.log4py.SYSTEM_LOGS) + self._executableDeviceLog = self._baseLog.getChildLogger('ExecutableDevice', ossie.utils.log4py.SYSTEM_LOGS) + self._deviceLog.debug("Initializing Device %s %s %s %s", identifier, execparams, propertydefs, loggerName) self._label = label self._name = label self._softwareProfile = softwareProfile @@ -222,11 +225,11 @@ def registerDevice(self): This should be called by the process that instantiates the device.
""" if self._devmgr: - self._log.info("Registering Device:" + str(self._label) ) + self._deviceLog.info("Registering Device:" + str(self._label) ) self._register() if self._compositeDevice: - self._log.info("Adding Device:" + str(self._label) + " to parent" ) + self._deviceLog.info("Adding Device:" + str(self._label) + " to parent" ) deviceAdded = False while not deviceAdded: try: @@ -253,7 +256,7 @@ def postConstruction(self, registrar_ior=None, idm_ior=None ): def connectIDMChannel(self, idm_ior=None ): - self._log.debug("Connecting to IDM CHANNEL idm_ior:" + str(idm_ior) ) + self._deviceLog.debug("Connecting to IDM CHANNEL idm_ior:" + str(idm_ior) ) if self._idm_publisher == None: if idm_ior != None and idm_ior != "": @@ -264,26 +267,26 @@ def connectIDMChannel(self, idm_ior=None ): idm_channel_obj = resource.createOrb().string_to_object(idm_ior) idm_channel = idm_channel_obj._narrow(CosEventChannelAdmin.EventChannel) self._idm_publisher = Publisher( idm_channel ) - self._log.info("Connected to IDM CHANNEL, (command line IOR).... DEV-ID:" + self._id ) + self._deviceLog.info("Connected to IDM CHANNEL, (command line IOR).... DEV-ID:" + self._id ) except Exception, err: #traceback.print_exc() - self._log.warn("Unable to connect to IDM channel (command line IOR).... DEV-ID:" + self._id ) + self._deviceLog.warn("Unable to connect to IDM channel (command line IOR).... DEV-ID:" + self._id ) else: try: # make sure we have access to get to the EventChanneManager for the domain if self._domMgr: if self._ecm == None: - self._log.debug("Setting up EventManager .... DEV-ID:" + self._id ) + self._deviceLog.debug("Setting up EventManager .... DEV-ID:" + self._id ) evt_mgr= Manager.GetManager(self) self._ecm = evt_mgr else: evt_mgr = self._ecm - self._log.debug("Requesting registration with IDM Channel .... DEV-ID:" + self._id ) + self._deviceLog.debug("Requesting registration with IDM Channel .... 
DEV-ID:" + self._id ) self._idm_publisher = evt_mgr.Publisher( ossie.events.IDM_Channel_Spec ) - self._log.info("Registered with IDM CHANNEL (Domain::EventChannelManager).... DEV-ID:" + self._id ) + self._deviceLog.info("Registered with IDM CHANNEL (Domain::EventChannelManager).... DEV-ID:" + self._id ) except: #traceback.print_exc() - self._log.warn("Unable to connect to IDM channel (Domain::EventChannelManager).... DEV-ID:" + self._id ) + self._deviceLog.warn("Unable to connect to IDM channel (Domain::EventChannelManager).... DEV-ID:" + self._id ) @@ -295,7 +298,7 @@ def initialize(self): self.__initialize() def releaseObject(self): - self._log.debug("releaseObject()") + self._resourceLog.debug("releaseObject()") if self._adminState == CF.Device.UNLOCKED: self._adminState = CF.Device.SHUTTING_DOWN @@ -328,19 +331,19 @@ def releaseObject(self): try: resource.Resource.releaseObject(self) except: - self._log.error("failed releaseObject()") + self._resourceLog.error("failed releaseObject()") ########################################### # CF::Device def _validateAllocProps(self, properties): - self._log.debug("validating") + self._deviceLog.debug("validating") # Validate before trying to consume for prop in properties: try: if not self._props.isAllocatable(prop.id): raise exceptions.Exception() except: - self._log.error("Property %s is not allocatable", prop.id) + self._deviceLog.error("Property %s is not allocatable", prop.id) raise CF.Device.InvalidCapacity("Invalid capacity %s" % prop.id, [prop]) def _allocateCapacities(self, propDict={}): @@ -358,7 +361,7 @@ def _allocateCapacities(self, propDict={}): otherwise """ - self._log.debug("_allocateCapacities(%s)" % str(propDict)) + self._deviceLog.debug("_allocateCapacities(%s)" % str(propDict)) if self.isEnabled() and self.isUnLocked(): successfulAllocations = {} self._capacityLock.acquire() @@ -377,7 +380,7 @@ def _allocateCapacities(self, propDict={}): if success: successfulAllocations[key] = val else: - 
self._log.debug("property %s, could not be set to %s" % + self._deviceLog.debug("property %s, could not be set to %s" % (key, val)) break # If we couldn't allocate enough capacity, add it back @@ -385,26 +388,26 @@ def _allocateCapacities(self, propDict={}): # if the allocations were not successful, then deallocate if not success: - self._log.debug("failed") + self._deviceLog.debug("failed") self._deallocateCapacities(successfulAllocations) finally: self._capacityLock.release() - self._log.debug("update") + self._deviceLog.debug("update") # Update usage state self.updateUsageState() - self._log.debug("allocateCapacity() --> %s", success) + self._deviceLog.debug("allocateCapacity() --> %s", success) return success else: - self._log.error("allocate capacity failed due to InvalidState") - self._log.debug("%s %s %s", self._adminState, self._operationalState, self._usageState) + self._deviceLog.error("allocate capacity failed due to InvalidState") + self._deviceLog.debug("%s %s %s", self._adminState, self._operationalState, self._usageState) raise CF.Device.InvalidState("System is not ENABLED and UNLOCKED") def _allocateCapacity(self, propname, value): """Override this if you want if you don't want magic dispatch""" - self._log.debug("_allocateCapacity(%s, %s)", propname, value) + self._deviceLog.debug("_allocateCapacity(%s, %s)", propname, value) if self._allocationCallbacks.has_key(propname): return self._allocationCallbacks[propname]._allocate(value) @@ -416,10 +419,10 @@ def _allocateCapacity(self, propname, value): modified_propname += '_' allocate = _getCallback(self, "allocate_%s" % modified_propname) if allocate: - self._log.debug("using callback for _allocateCapacity()", ) + self._deviceLog.debug("using callback for _allocateCapacity()", ) return allocate(value) else: - self._log.debug("no callback for _allocateCapacity()", ) + self._deviceLog.debug("no callback for _allocateCapacity()", ) return False def updateUsageState(self): @@ -447,7 +450,7 @@ def 
allocateCapacity(self, properties): Returns true if all the allocations were done successfully or false otherwise """ - self._log.debug("allocateCapacity(%s)", properties) + self._deviceLog.debug("allocateCapacity(%s)", properties) # Validate self._validateAllocProps(properties) # Consume @@ -463,7 +466,7 @@ def allocateCapacity(self, properties): except CF.Device.InvalidState: raise # re-raise valid exceptions except Exception, e: - self._log.exception("Unexpected error in _allocateCapacities: %s", str(e)) + self._deviceLog.exception("Unexpected error in _allocateCapacities: %s", str(e)) return False @@ -481,7 +484,7 @@ def _deallocateCapacities(self, propDict): Output: None """ - self._log.debug("_deallocateCapacities(%s)" % str(propDict)) + self._deviceLog.debug("_deallocateCapacities(%s)" % str(propDict)) if self.isEnabled() and self.isUnLocked(): # Determine if the deallocateCapacities method exists deallocate = _getCallback(self, 'deallocateCapacities') @@ -495,8 +498,8 @@ def _deallocateCapacities(self, propDict): propname = self._props.getPropName(id) self._deallocateCapacity(propname, val) else: - self._log.error("deallocate capacity failed due to InvalidState") - self._log.debug("%s %s %s", self._adminState, self._operationalState, self._usageState) + self._deviceLog.error("deallocate capacity failed due to InvalidState") + self._deviceLog.debug("%s %s %s", self._adminState, self._operationalState, self._usageState) raise CF.Device.InvalidState("Cannot deallocate capacity. 
System is not DISABLED and UNLOCKED") def _deallocateCapacity(self, propname, value): @@ -519,7 +522,7 @@ def deallocateCapacity(self, properties): Output: None """ - self._log.debug("deallocateCapacity(%s)", properties) + self._deviceLog.debug("deallocateCapacity(%s)", properties) # Validate self._validateAllocProps(properties) # Consume @@ -537,7 +540,7 @@ def deallocateCapacity(self, properties): # Update usage state self.updateUsageState() - self._log.debug("deallocateCapacity() -->") + self._deviceLog.debug("deallocateCapacity() -->") def _get_usageState(self): return self._usageState @@ -553,7 +556,7 @@ def _get_adminState(self): def _set_adminState(self, state): if (self._adminState, state) not in Device._adminStateTransitions: - self._log.debug("Ignoring invalid admin state transition %s->%s", self._adminState, state) + self._deviceLog.debug("Ignoring invalid admin state transition %s->%s", self._adminState, state) return self._adminState = state @@ -579,11 +582,11 @@ def _unregister(self): """ def _logUnregisterFailure(msg = ""): - self._log.error("Could not unregister from DeviceManager: %s", msg) + self._deviceLog.error("Could not unregister from DeviceManager: %s", msg) def _unregisterThreadFunction(): if self._devmgr: - self._log.debug("Unregistering from DeviceManager") + self._deviceLog.debug("Unregistering from DeviceManager") try: self._devmgr.unregisterDevice(self._this()) except CORBA.Exception, e: @@ -608,14 +611,14 @@ def _register(self): """ def _logRegisterFailure(msg = ""): - self._log.error("Could not register with DeviceManager: %s", msg) + self._deviceLog.error("Could not register with DeviceManager: %s", msg) def _logRegisterWarning(msg = ""): - self._log.warn("May not have registered with DeviceManager: %s", msg) + self._deviceLog.warn("May not have registered with DeviceManager: %s", msg) def _registerThreadFunction(): if self._devmgr: - self._log.debug("Registering with DeviceManager") + self._deviceLog.debug("Registering with 
DeviceManager") try: self._devmgr.registerDevice(self._this()) except CORBA.Exception, e: @@ -648,13 +651,13 @@ def __sendStateChangeEvent(self, eventType, fromState, toState): event = StandardEvent.StateChangeEventType(self._id, self._id, eventType, stateMap[fromState], stateMap[toState]) except: - self._log.warn("Error creating StateChangeEvent") + self._deviceLog.warn("Error creating StateChangeEvent") try: if self._idm_publisher.push(any.to_any(event)) != 0 : - self._log.debug("Sending state change event......" + str((self._id, self._id, eventType, stateMap[fromState], stateMap[toState])) ) + self._deviceLog.debug("Sending state change event......" + str((self._id, self._id, eventType, stateMap[fromState], stateMap[toState])) ) except: - self._log.warn("Error sending event") + self._deviceLog.warn("Error sending event") def __setattr__ (self, attr, value): # Detect when the usage state and administrative state change, so that @@ -709,6 +712,7 @@ def __init__(self, devmgr, identifier, label, softwareProfile, compositeDevice, self.__cacheLock = threading.Lock() self._sharedPkgs = {} self.initialState = envStateContainer() + self._cacheDirectory = os.getcwd() def releaseObject(self): self._unloadAll() @@ -720,7 +724,7 @@ def load(self, fileSystem, fileName, loadType): try: self._cmdLock.acquire() loadedPaths = [] - self._log.debug("load(%s, %s)", fileName, loadType) + self._loadableDeviceLog.debug("load(%s, %s)", fileName, loadType) if not fileName.startswith("/"): raise CF.InvalidFileName(CF.CF_EINVAL, "Filename must be absolute, given '%s'"%fileName) @@ -741,7 +745,7 @@ def load(self, fileSystem, fileName, loadType): try: for dir in dirs: if dir != "": - self._log.debug("Creating dir %s", dir) + self._loadableDeviceLog.debug("Creating dir %s", dir) loadPoint = os.path.join(loadPoint, dir) if not os.path.exists(loadPoint): os.mkdir(loadPoint) @@ -759,12 +763,12 @@ def load(self, fileSystem, fileName, loadType): if exist == False: break exist = exist & 
os.path.exists(loadedFile) - self._log.debug("File %s has reference count %s and local file existence is %s", fileName, refCnt, exist) + self._loadableDeviceLog.debug("File %s has reference count %s and local file existence is %s", fileName, refCnt, exist) # Check if the remote file is newer than the local file, and if so, update the file # in the cache. No consideration is given to clock sync differences between systems. if exist and self._modTime(fileSystem, fileName) > os.path.getmtime(localFilePath): - self._log.debug("Remote file is newer than local file") + self._loadableDeviceLog.debug("Remote file is newer than local file") exist = False if refCnt == 0 or not exist: loadedFiles = self._loadTree(fileSystem, os.path.normpath(fileName), loadPoint) @@ -781,7 +785,7 @@ def load(self, fileSystem, fileName, loadType): self._setEnvVars(localFilePath, fileName) except Exception, e: - self._log.exception(e) + self._loadableDeviceLog.exception(e) raise CF.LoadableDevice.LoadFail(CF.CF_EINVAL, "Unknown Error loading '%s'"%fileName) finally: self._cmdLock.release() @@ -883,7 +887,7 @@ def _setEnvVars(self, localFilePath, fileName): newFileValue = importFile candidatePath = currentdir+'/'+aggregateChange env_changes.addModification('PYTHONPATH', candidatePath) - self._log.debug("PYTHONPATH : ADDING:" + str(candidatePath) + " PATH:" + str(os.environ['PYTHONPATH']) ) + self._loadableDeviceLog.debug("PYTHONPATH : ADDING:" + str(candidatePath) + " PATH:" + str(os.environ['PYTHONPATH']) ) matchesPattern = True except: @@ -926,7 +930,7 @@ def _modTime(self, fileSystem, remotePath): return 0 def _copyFile(self, fileSystem, remotePath, localPath): - self._log.debug("Copy file %s -> %s", remotePath, os.path.abspath(localPath)) + self._loadableDeviceLog.debug("Copy file %s -> %s", remotePath, os.path.abspath(localPath)) modifiedName = None fileToLoad = fileSystem.open(remotePath, True) try: @@ -957,20 +961,20 @@ def _loadTree(self, fileSystem, remotePath, localPath): # This is a 
breadth-first load loadedFiles = [] fis = fileSystem.list(remotePath) - self._log.debug("Loading Tree %s %s %s", remotePath, localPath, fis) + self._loadableDeviceLog.debug("Loading Tree %s %s %s", remotePath, localPath, fis) if len(fis) == 0: # check to see if this is an empty directory if remotePath[-1] == '/': fis = fileSystem.list(remotePath[:-1]) return loadedFiles # SR:431 - self._log.error("File %s could not be loaded", remotePath) + self._loadableDeviceLog.error("File %s could not be loaded", remotePath) raise CF.InvalidFileName(CF.CF_EINVAL, "File could not be found %s" % remotePath) for fileInformation in fis: if fileInformation.kind == CF.FileSystem.PLAIN: localFile = os.path.join(localPath, fileInformation.name) - self._log.debug("Reading file %s -> %s", fileInformation.name, localFile) + self._loadableDeviceLog.debug("Reading file %s -> %s", fileInformation.name, localFile) if remotePath.endswith("/"): modified_file = self._copyFile(fileSystem, remotePath + fileInformation.name, localFile) else: @@ -986,10 +990,10 @@ def _loadTree(self, fileSystem, remotePath, localPath): elif fileInformation.kind == CF.FileSystem.DIRECTORY: localDirectory = os.path.join(localPath, fileInformation.name) if not os.path.exists(localDirectory): - self._log.debug("Making directory %s", localDirectory) + self._loadableDeviceLog.debug("Making directory %s", localDirectory) os.mkdir(localDirectory) if remotePath.endswith("/"): - self._log.debug("From %s loading directory %s -> %s", remotePath, fileInformation.name, localPath) + self._loadableDeviceLog.debug("From %s loading directory %s -> %s", remotePath, fileInformation.name, localPath) loadedFiles.append(localDirectory) loadedFiles.extend(self._loadTree(fileSystem, remotePath + "/" + fileInformation.name, localPath)) else: @@ -1000,10 +1004,10 @@ def _loadTree(self, fileSystem, remotePath, localPath): def _unloadAll(self): for fileName in self._loadedFiles.keys(): try: - self._log.debug("Forcing unload(%s)", fileName) + 
self._loadableDeviceLog.debug("Forcing unload(%s)", fileName) self._unload(fileName, force=True) except Exception: - self._log.exception("Failed to unload file %s", fileName) + self._loadableDeviceLog.exception("Failed to unload file %s", fileName) def _unload(self, fileName, force=False): self.__cacheLock.acquire() @@ -1019,7 +1023,7 @@ def _unload(self, fileName, force=False): refCnt = refCnt - 1 except KeyError: # SR:336 - self._log.error("File %s could not be unloaded", fileName) + self._loadableDeviceLog.error("File %s could not be unloaded", fileName) raise CF.InvalidFileName(CF.CF_EINVAL, "File %s could not be found" % fileName) if refCnt == 0: @@ -1056,7 +1060,7 @@ def _unload(self, fileName, force=False): def unload(self, fileName): try: self._cmdLock.acquire() - self._log.debug("unload(%s)", fileName) + self._loadableDeviceLog.debug("unload(%s)", fileName) # SR:435 if self.isLocked(): raise CF.Device.InvalidState("System is locked down") if self.isDisabled(): raise CF.Device.InvalidState("System is disabled") @@ -1090,7 +1094,7 @@ def releaseObject(self): def execute(self, name, options, parameters): try: self._cmdLock.acquire() - self._log.debug("execute(%s, %s, %s)", name, options, parameters) + self._executableDeviceLog.debug("execute(%s, %s, %s)", name, options, parameters) if not name.startswith("/"): raise CF.InvalidFileName(CF.CF_EINVAL, "Filename must be absolute") @@ -1113,11 +1117,11 @@ def execute(self, name, options, parameters): else: stack_size = val if len(invalidOptions) > 0: - self._log.error("execute() received invalid options %s", invalidOptions) + self._executableDeviceLog.error("execute() received invalid options %s", invalidOptions) raise CF.ExecutableDevice.InvalidOptions(invalidOptions) command = name[1:] # This is relative to our CWD - self._log.debug("Running %s %s", command, os.getcwd()) + self._executableDeviceLog.debug("Running %s %s", command, os.getcwd()) if not os.path.isfile(command): raise CF.InvalidFileName(CF.CF_EINVAL, 
"File could not be found %s" % command) @@ -1138,6 +1142,7 @@ def executeLinked(self, name, options, parameters, deps): if self._sharedPkgs.has_key(dep): selected_paths.append(self._sharedPkgs[dep]) self._update_selected_paths(selected_paths) + parameters.append(CF.DataType('RH::DEPLOYMENT_ROOT', any.to_any(self._cacheDirectory))) pid = self.execute(name, options, parameters) finally: self._cmdLock.release() @@ -1146,7 +1151,7 @@ def executeLinked(self, name, options, parameters, deps): def terminate(self, pid): try: self._cmdLock.acquire() - self._log.debug("terminate(%s)", pid) + self._executableDeviceLog.debug("terminate(%s)", pid) # SR:457 if self.isLocked(): raise CF.Device.InvalidState("System is locked down") if self.isDisabled(): raise CF.Device.InvalidState("System is disabled") @@ -1177,7 +1182,7 @@ def _execute(self, command, options, parameters): args.append(str(param.value.value())) except: raise CF.ExecutableDevice.InvalidParameters([param]) - self._log.debug("Popen %s %s", command, args) + self._executableDeviceLog.debug("Popen %s %s", command, args) # SR:445 try: @@ -1186,14 +1191,14 @@ def _execute(self, command, options, parameters): # SR:455 # CF error codes do not map directly to errno codes, so at present # we omit the enumerated value. - self._log.error("subprocess.Popen: %s", e.strerror) + self._executableDeviceLog.error("subprocess.Popen: %s", e.strerror) raise CF.ExecutableDevice.ExecuteFail(CF.CF_NOTSET, e.strerror) pid = sp.pid self._applications[pid] = sp # SR:449 - self._log.debug("execute() --> %s", pid) - self._log.debug("APPLICATIONS %s", self._applications) + self._executableDeviceLog.debug("execute() --> %s", pid) + self._executableDeviceLog.debug("APPLICATIONS %s", self._applications) return pid def _terminate(self, pid): @@ -1203,7 +1208,7 @@ def _terminate(self, pid): subclasses to have more control over the termination of components. 
""" # SR:458 - self._log.debug("%s", self._applications) + self._executableDeviceLog.debug("%s", self._applications) if not self._applications.has_key(pid): raise CF.ExecutableDevice.InvalidProcess(CF.CF_ENOENT, "Cannot terminate. Process %s does not exist." % str(pid)) @@ -1215,7 +1220,7 @@ def _terminate(self, pid): break try: - self._log.debug('Sending signal %d to process group %d', sig, pid) + self._executableDeviceLog.debug('Sending signal %d to process group %d', sig, pid) # the group id is used to handle child processes (if they exist) of the component being cleaned up os.killpg(pid, sig) except OSError: @@ -1226,14 +1231,14 @@ def _terminate(self, pid): time.sleep(0.1) try: - self._log.debug(' Delete APP (_terminate) %d', pid) + self._executableDeviceLog.debug(' Delete APP (_terminate) %d', pid) proc = self._applications[pid] del self._applications[pid] # check if pid has finished status = proc.poll() if status is not None: - self._log.debug('Process has stopped...process group %d status %d', pid, status) + self._executableDeviceLog.debug('Process has stopped...process group %d status %d', pid, status) except KeyError: # The SIGCHLD handler must have been called in the interim, and # removed the entry @@ -1256,9 +1261,9 @@ def _child_handler(self, signal, frame): if status == None: continue if status < 0: - self._log.error("Child process %d terminated with signal %s", pid, -status) + self._executableDeviceLog.error("Child process %d terminated with signal %s", pid, -status) try: - self._log.debug(' Delete APP (_child_handler) %d', pid) + self._executableDeviceLog.debug(' Delete APP (_child_handler) %d', pid) del self._applications[pid] except: pass @@ -1276,11 +1281,11 @@ def __init__(self): ########################################### # CF::AggregateDevice def addDevice(self, associatedDevice): - self._log.debug("addDevice(%s)", associatedDevice) + self._deviceLog.debug("addDevice(%s)", associatedDevice) self._childDevices.append(associatedDevice) def 
removeDevice(self, associatedDevice): - self._log.debug("removeDevice(%s)", associatedDevice) + self._deviceLog.debug("removeDevice(%s)", associatedDevice) for childdev in self._childDevices: if childdev._get_identifier() == associatedDevice._get_identifier(): @@ -1326,6 +1331,11 @@ def start_device(deviceclass, interactive_callback=None, thread_policy=None,logg start_device(MyDeviceImpl) """ execparams, interactive = resource.parseCommandLineArgs(deviceclass) + + if interactive: + print "Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain" + sys.exit(-1) + if not skip_run: resource.setupSignalHandlers() signal.signal(signal.SIGINT, signal.SIG_IGN) diff --git a/redhawk/src/base/framework/python/ossie/events/Manager.py b/redhawk/src/base/framework/python/ossie/events/Manager.py index b56516bd1..323a695e6 100644 --- a/redhawk/src/base/framework/python/ossie/events/Manager.py +++ b/redhawk/src/base/framework/python/ossie/events/Manager.py @@ -48,7 +48,9 @@ def __del__(self): self.terminate() def terminate(self): - if self._ecm: self._ecm._unregister( self._creg ) + if self._ecm: + self._ecm._unregister( self._creg ) + Publisher.terminate(self) class EM_Subscriber(Subscriber): def __init__(self, ecm, creg ): @@ -60,7 +62,9 @@ def __del__(self): self.terminate() def terminate(self): - if self._ecm: self._ecm._unregister( self._creg ) + if self._ecm: + self._ecm._unregister( self._creg ) + Subscriber.terminate(self) class Manager: @@ -82,7 +86,7 @@ def __init__(self, resource=None ): self._logger.debug("Acquired reference to EventChannelManager") except: #print traceback.format_exc() - self._logger.warn("EventChannelManager - unable to resolve DomainManager's EventChannelManager ") + self._logger.debug("EventChannelManager - unable to resolve DomainManager's EventChannelManager ") pass @@ -114,16 +118,14 @@ def Publisher(self, channel_name, registrationId=""): if self._ecm: ereg = 
EventChannelManager.EventRegistration( channel_name = channel_name, reg_id = registrationId) - self._logger.debug("Requesting Channel:" + str(channel_name) + " from Domain's EventChannelManager ") registration = self._ecm.registerResource( ereg ) - + pub = EM_Publisher( self, registration ) self._logger.debug("Channel:" + str(channel_name) + " Reg-Id:" + str(registration.reg.reg_id)) - + self._registrations.append( registration ) - except: #print traceback.format_exc() self._logger.error("Unable to create Publisher for Channel:" + str(channel_name )) @@ -195,7 +197,7 @@ def _unregister( self, reg ): self._ecm.unregister( reg.reg ); except: self._logger.error( "UNREGISTER FAILED, REG-ID:" + str(reg.reg.reg_id) + " CHANNEL:" + str(reg.reg.channel_name ) ) - + self._registrations.remove(creg) break diff --git a/redhawk/src/base/framework/python/ossie/events/Publisher.py b/redhawk/src/base/framework/python/ossie/events/Publisher.py index 4a9cbde72..586a100b3 100644 --- a/redhawk/src/base/framework/python/ossie/events/Publisher.py +++ b/redhawk/src/base/framework/python/ossie/events/Publisher.py @@ -29,6 +29,7 @@ from omniORB import any, URI, CORBA import CosEventComm__POA import CosEventChannelAdmin, CosEventChannelAdmin__POA +from ossie.cf import CF class Receiver(CosEventComm__POA.PushSupplier): @@ -83,8 +84,27 @@ def __init__(self,parent): Receiver.__init__(self) class Publisher: - def __init__(self, channel ): - self.channel = channel + def __init__(self, remote_obj, channel_name=''): + ''' + remote_obj is either an Event Channel or a Domain Manager + if remote_obj is a Domain Manager, a channel_name must be supplied + ''' + self.channel = None + self.domain = None + self.channel_name = channel_name + self.registration = CF.EventChannelManager.EventRegistration( channel_name = self.channel_name, reg_id = "") + self.reg_resp = None + if hasattr(remote_obj, 'ref'): + if remote_obj.ref._narrow(CF.DomainManager) != None: + self.domain = remote_obj.ref + elif 
remote_obj._narrow(CosEventChannelAdmin.EventChannel) != None: + self.channel = remote_obj._narrow(CosEventChannelAdmin.EventChannel) + elif remote_obj._narrow(CF.DomainManager) != None: + self.domain = remote_obj._narrow(CF.DomainManager) + if self.channel == None and self.domain == None: + raise Exception("Error. remote_obj must either be an Event Channel or a Domain Manager") + if self.domain != None and channel_name == '': + raise Exception("Error. When passing a Domain Manager as an argument, channel_name must have a valid Event Channel name") self.proxy = None self.logger = logging.getLogger("ossie.events.Publisher") self.disconnectReceiver = DefaultReceiver(self) @@ -107,7 +127,7 @@ def __del__(self): def terminate(self): self.logger.debug("Publisher::terminate START") - if self.disconnectReceiver and self.disconnectReceiver.get_disconnect() == False: + if self.disconnectReceiver: self.logger.debug("Publisher::terminate DISCONNECT") self.disconnect() @@ -148,6 +168,15 @@ def push(self,data ): def disconnect(self, retries=10, retry_wait=.01): + if self.channel != None: + self.disconnectEvtChan(retries, retry_wait) + else: + self.disconnectDomMgr() + + def disconnectDomMgr(self): + self.ecm.unregister(self.reg_resp.reg) + + def disconnectEvtChan(self, retries=10, retry_wait=.01): retval=0 if self.channel == None: return retval @@ -173,6 +202,17 @@ def disconnect(self, retries=10, retry_wait=.01): def connect(self, retries=10, retry_wait=.01): + if self.channel != None: + self.connectEvtChan(retries, retry_wait) + else: + self.connectDomMgr() + + def connectDomMgr(self): + self.ecm = self.domain._get_eventChannelMgr() + self.reg_resp = self.ecm.registerPublisher(self.registration, self.disconnectReceiver._this()) + self.proxy = self.reg_resp.proxy_consumer + + def connectEvtChan(self, retries=10, retry_wait=.01): retval=-1 if self.channel == None: diff --git a/redhawk/src/base/framework/python/ossie/events/Subscriber.py 
b/redhawk/src/base/framework/python/ossie/events/Subscriber.py index 8fafa2791..11defdb40 100644 --- a/redhawk/src/base/framework/python/ossie/events/Subscriber.py +++ b/redhawk/src/base/framework/python/ossie/events/Subscriber.py @@ -88,15 +88,34 @@ def __init__(self,parent): def push(self, data): if self.parent.dataArrivedCB != None: self.parent.logger.trace('Received (callback) DATA: ' + str(data)) - self.parent.dataArrivedCB( data ) + self.parent.dataArrivedCB(data) else: self.parent.logger.trace('Received (queue) DATA: ' + str(data)) self.parent.events.put(data) class Subscriber: - def __init__(self, channel, dataArrivedCB=None): - self.channel = channel + def __init__(self, remote_obj, channel_name='', dataArrivedCB=None): + ''' + remote_obj is either an Event Channel or a Domain Manager + if remote_obj is a Domain Manager, a channel_name must be supplied + ''' + self.channel = None + self.domain = None + self.channel_name = channel_name + self.registration = CF.EventChannelManager.EventRegistration( channel_name = self.channel_name, reg_id = "") + self.reg_resp = None + if hasattr(remote_obj, 'ref'): + if remote_obj.ref._narrow(CF.DomainManager) != None: + self.domain = remote_obj.ref + elif remote_obj._narrow(CosEventChannelAdmin.EventChannel) != None: + self.channel = remote_obj._narrow(CosEventChannelAdmin.EventChannel) + elif remote_obj._narrow(CF.DomainManager) != None: + self.domain = remote_obj._narrow(CF.DomainManager) + if self.channel == None and self.domain == None: + raise Exception("Error. remote_obj must either be an Event Channel or a Domain Manager") + if self.domain != None and channel_name == '': + raise Exception("Error. 
When passing a Domain Manager as an argument, channel_name must have a valid Event Channel name") self.proxy = None self.logger = logging.getLogger('ossie.events.Subscriber') self.dataArrivedCB=dataArrivedCB @@ -105,10 +124,9 @@ def __init__(self, channel, dataArrivedCB=None): self.consumer = DefaultConsumer(self) self.connect() - def __del__(self): self.logger.debug("Subscriber DTOR START") - if self.consumer and self.consumer.get_disconnect() == False: + if self.consumer: self.logger.debug("Subscriber::DTOR DISCONNECT") self.disconnect() @@ -124,7 +142,7 @@ def setDataArrivedCB(self, newCB=None ): def terminate(self): self.logger.debug("Subscriber::terminate START") - if self.consumer and self.consumer.get_disconnect() == False: + if self.consumer: self.logger.debug("Subscriber::terminate DISCONNECT") self.disconnect() @@ -141,17 +159,24 @@ def getData(self): return retval; try: - tmp = self.events.get(False,.01) - self.logger.debug('getData: ' + str(tmp)) - retval = any.from_any(tmp) + retval = self.events.get(False,.01) + self.logger.debug('getData: ' + str(retval)) except: - #print traceback.print_exc() retval=None return retval def disconnect(self, retries=10, retry_wait=.01): + if self.channel != None: + self.disconnectEvtChan(retries, retry_wait) + else: + self.disconnectDomMgr() + + def disconnectDomMgr(self): + self.ecm.unregister(self.reg_resp.reg) + + def disconnectEvtChan(self, retries=10, retry_wait=.01): if self.channel == None: return -1 @@ -177,7 +202,16 @@ def disconnect(self, retries=10, retry_wait=.01): def connect(self, retries=10, retry_wait=.01): + if self.channel != None: + self.connectEvtChan(retries, retry_wait) + else: + self.connectDomMgr() + def connectDomMgr(self): + self.ecm = self.domain._get_eventChannelMgr() + self.reg_resp = self.ecm.registerConsumer(self.consumer._this(), self.registration) + + def connectEvtChan(self, retries=10, retry_wait=.01): if self.channel == None: return -1 diff --git 
a/redhawk/src/base/framework/python/ossie/events/__init__.py b/redhawk/src/base/framework/python/ossie/events/__init__.py index f4285045d..f9179623b 100644 --- a/redhawk/src/base/framework/python/ossie/events/__init__.py +++ b/redhawk/src/base/framework/python/ossie/events/__init__.py @@ -28,8 +28,9 @@ from omniORB import any, URI, CORBA from ossie.cf import CF, CF__POA +from ossie.cf import ExtendedCF, ExtendedCF__POA from ossie.cf import ExtendedEvent, ExtendedEvent__POA -from ossie.properties import struct_from_any, struct_to_any, props_to_any +from ossie.properties import struct_from_any, struct_to_any, props_to_any, prop_to_dict from ossie.properties import simple_property, simpleseq_property, struct_property, structseq_property import traceback @@ -310,7 +311,9 @@ def connectPushConsumer(self, channel, consumer): # is terminated on exit and avoid exception messages _consumers = [] def _cleanup_consumers(): - for consumer in _consumers: + # Iterate through a copy to avoid ordering problems at shutdown when + # consumers try to de-register themselves + for consumer in _consumers[:]: consumer.terminate() atexit.register(_cleanup_consumers) @@ -321,7 +324,7 @@ def __init__(self, parent, instance_id): self.parent = parent self.instance_id = instance_id self.existence_lock = threading.Lock() - + def push(self, data): self.parent.actionQueue.put(('message',data)) @@ -350,7 +353,7 @@ def obtain_push_consumer(self): self.parent.consumer_lock.release() return objref - def __init__(self, thread_sleep=0.1, parent=None): + def __init__(self, thread_sleep=0.1, parent=None, storeMessages = False): self.consumer_lock = threading.Lock() threading.Thread.__init__(self) self._terminateMe=False @@ -365,6 +368,8 @@ def __init__(self, thread_sleep=0.1, parent=None): self.consumers = {} self.supplier_admin = self.SupplierAdmin_i(self) self._parent_comp = parent + self._storeMessages = storeMessages + self._storedMessages = [] self.startPort() @@ -444,6 +449,11 @@ def run(self): # 
Stop tracking this thread _consumers.remove(self) + def getMessages(self): + retval = copy.deepcopy(self._storedMessages) + self._storedMessages = [] + return retval + def _run(self): while not self._terminateMe: while not self.actionQueue.empty(): @@ -472,6 +482,8 @@ def _run(self): except Exception, e: print "Callback for message "+str(id)+" failed with exception: "+str(e) for allMsg in self._allMsg: + if self._storeMessages: + self._storedMessages.append(prop_to_dict(value)) callback = allMsg[1] try: callback(id, value) @@ -480,7 +492,7 @@ def _run(self): else: _time.sleep(self.thread_sleep) -class MessageSupplierPort(CF__POA.Port): +class MessageSupplierPort(ExtendedCF__POA.QueryablePort): class Supplier_i(CosEventComm__POA.PushSupplier): def disconnect_push_supplier(self): pass @@ -531,55 +543,121 @@ def _connectSupplierToEventChannel(self, channel): return connection # CosEventComm.PushSupplier delegation - def push(self, data): - self.portInterfaceAccess.acquire() - for connection in self._connections: - try: - self._connections[connection]['proxy_consumer'].push(data) - except: - print "WARNING: Unable to send data to",connection - self.portInterfaceAccess.release() - - def sendMessage(self, data_struct): - self.portInterfaceAccess.acquire() + def push(self, data, connectionId=None): + """ + Sends pre-serialized messages. + + Args: + data: Messages serialized to a CORBA.Any + connectionId: Target connection (default: all). + + Raises: + ValueError: If connectionId is given and does not match any + connection. + """ + + try: + self._push( data, connectionId ) + except CORBA.MARSHAL: + self._port_log.warn("Could not deliver the message. Maximum message size exceeded") + + + # CosEventComm.PushSupplier delegation + def _push(self, data, connectionId=None): + """ + Sends pre-serialized messages. + + Args: + data: Messages serialized to a CORBA.Any + connectionId: Target connection (default: all). 
+ + Raises: + ValueError: If connectionId is given and does not match any + connection. + """ + with self.portInterfaceAccess: + self._checkConnectionId(connectionId) + + for identifier, connection in self._connections.iteritems(): + if not self._isConnectionSelected(identifier, connectionId): + continue + + try: + connection['proxy_consumer'].push(data) + except CORBA.MARSHAL, e: + raise e + except: + self._port_log.warn("WARNING: Unable to send data to " + identifier) + + + def sendMessage(self, data_struct, connectionId=None): + """ + Sends a single message. + + Args: + data_struct: Message structure or CORBA.Any to send. + connectionId: Target connection (default: all). + + Raises: + ValueError: If connectionId is given and does not match any + connection. + """ if not isinstance(data_struct, CORBA.Any): - try: - outgoing = [CF.DataType(id=data_struct.getId(),value=struct_to_any(data_struct))] - outmsg = props_to_any(outgoing) - except: - self.portInterfaceAccess.release() - raise + outgoing = [CF.DataType(id=data_struct.getId(),value=struct_to_any(data_struct))] + outmsg = props_to_any(outgoing) else: outmsg = data_struct + self.push(outmsg, connectionId) - for connection in self._connections: - try: - self._connections[connection]['proxy_consumer'].push(outmsg) - except CORBA.MARSHAL: - self._port_log.warn("Could not deliver the message. Maximum message size exceeded") - except: - print "WARNING: Unable to send data to",connection - self.portInterfaceAccess.release() - - def sendMessages(self, data_structs): - self.portInterfaceAccess.acquire() - try: - outgoing = [] - for msg in data_structs: - outgoing.append(CF.DataType(id=msg.getId(),value=struct_to_any(msg))) - outmsg = props_to_any(outgoing) - except: - self.portInterfaceAccess.release() - raise + def sendMessages(self, data_structs, connectionId=None): + """ + Sends a list of messages. 
- for connection in self._connections: - try: - self._connections[connection]['proxy_consumer'].push(outmsg) - except CORBA.MARSHAL: - self._port_log.warn("Could not deliver the message. Maximum message size exceeded") - except: - print "WARNING: Unable to send data to",connection - self.portInterfaceAccess.release() + Args: + data_structs: Sequence of messages to send. + connectionId: Target connection (default: all). + + Raises: + ValueError: If connectionId is given and does not match any + connection. + """ + outgoing = [] + msgid=None + for msg in data_structs: + msgid=msg.getId() + outgoing.append(CF.DataType(id=msg.getId(),value=struct_to_any(msg))) + outmsg = props_to_any(outgoing) + + try: + # try to push entire message set + self._push(outmsg, connectionId) + except CORBA.MARSHAL: + if len(data_structs) == 1: + self._port_log.warn("Could not deliver the message id="+str(msgid)+". Maximum message size exceeded") + else: + self._port_log.warn("Could not deliver the message. Maximum message size exceeded, trying individually") + # try resending individually + for msg in data_structs: + outm = props_to_any([CF.DataType(id=msg.getId(),value=struct_to_any(msg))]) + try: + self._push(outm,connectionId) + except CORBA.MARSHAL: + self._port_log.warn("Could not deliver the message id="+str(msg.getId())+". 
Maximum message size exceeded") + break + except: + print "WARNING: Unable to send data to",connection def disconnect_push_supplier(self): pass + + def _get_connections(self): + return [ExtendedCF.UsesConnection(k, v['port']) for k, v in self._connections.iteritems()] + + def _isConnectionSelected(self, connectionId, targetId): + if not targetId: + return True + return connectionId == targetId + + def _checkConnectionId(self, connectionId): + if connectionId and not connectionId in self._connections: + raise ValueError("invalid connection '"+connectionId+"'") diff --git a/redhawk/src/base/framework/python/ossie/logger/__init__.py b/redhawk/src/base/framework/python/ossie/logger/__init__.py index 2979ca644..90557b045 100644 --- a/redhawk/src/base/framework/python/ossie/logger/__init__.py +++ b/redhawk/src/base/framework/python/ossie/logger/__init__.py @@ -129,17 +129,20 @@ def apply(self, tbl): class DeviceCtx(ResourceCtx): def __init__(self, name, id, dpath): - ResourceCtx.__init__(self, name, id, dpath) - self.device_mgr="" - self.device_mgr_id="" - n=0 - seg=self._split_path(dpath) - if len(seg) > 1 : + ResourceCtx.__init__(self, name, id, dpath) + self.device_mgr="" + self.device_mgr_id="" + n=0 + seg=self._split_path(dpath) + if len(seg) > 1 : self.domain_name=seg[n] n = n + 1 - if len(seg) > 0 : + if len(seg) > 0 : self.device_mgr=seg[n] + ppid = str(os.getppid()) + self.device_mgr_id = self.domain_name+':'+os.uname()[1]+':'+self.device_mgr+'_'+ppid + def apply(self, tbl): SetDeviceInfo(tbl,self) @@ -159,6 +162,7 @@ def SetComponentInfo( tbl, ctx ): SetResourceInfo( tbl, ctx ) tbl["@@@WAVEFORM.NAME@@@"] = ctx.waveform.replace( ":", "-" ) tbl["@@@WAVEFORM.ID@@@"] = ctx.waveform_id.replace( ":", "-" ) + tbl["@@@WAVEFORM.INSTANCE@@@"] = ctx.waveform_id.replace( ":", "-" ) tbl["@@@COMPONENT.NAME@@@"] = ctx.name.replace( ":", "-" ) tbl["@@@COMPONENT.INSTANCE@@@"] = ctx.instance_id.replace( ":", "-" ) tbl["@@@COMPONENT.PID@@@"] = str(os.getpid()) @@ -212,7 +216,7 @@ 
def ConvertLogLevel( oldstyle_level ): return CF.LogLevels.INFO def ConvertLog4ToCFLevel( log4level ): - if log4level == logging.FATAL+1 : + if log4level == logging.OFF : return CF.LogLevels.OFF if log4level == logging.FATAL : return CF.LogLevels.FATAL @@ -226,14 +230,16 @@ def ConvertLog4ToCFLevel( log4level ): return CF.LogLevels.DEBUG if log4level == logging.TRACE : return CF.LogLevels.TRACE - if log4level == logging.NOTSET: + if log4level == logging.ALL: return CF.LogLevels.ALL return CF.LogLevels.INFO def ConvertToLog4Level( newLevel ): level = logging.INFO + if newLevel == -1 : + level=logging.NOTSET if newLevel == CF.LogLevels.OFF : - level=logging.FATAL+1 + level=logging.OFF if newLevel == CF.LogLevels.FATAL : level=logging.FATAL if newLevel == CF.LogLevels.ERROR : @@ -247,10 +253,34 @@ def ConvertToLog4Level( newLevel ): if newLevel == CF.LogLevels.TRACE: level=logging.TRACE if newLevel == CF.LogLevels.ALL: - level=logging.TRACE + level=logging.ALL return level + +def ConvertLevelNameToDebugLevel( level_name ): + if level_name == "OFF" : return 0 + if level_name == "FATAL" : return 0 + if level_name == "ERROR" : return 1 + if level_name == "WARN" : return 2 + if level_name == "INFO" : return 3 + if level_name == "DEBUG" : return 4 + if level_name == "TRACE": return 5 + if level_name == "ALL" : return 5 + return 3 + +def ConvertLevelNameToCFLevel( level_name ): + if level_name == "OFF" : return CF.LogLevels.OFF + if level_name == "FATAL" : return CF.LogLevels.FATAL + if level_name == "ERROR" : return CF.LogLevels.ERROR + if level_name == "WARN" : return CF.LogLevels.WARN + if level_name == "INFO" : return CF.LogLevels.INFO + if level_name == "DEBUG" : return CF.LogLevels.DEBUG + if level_name == "TRACE": return CF.LogLevels.TRACE + if level_name == "ALL" : return CF.LogLevels.ALL + return CF.LogLevels.INFO + + def SupportedCFLevel( newLevel ): level = True if newLevel != CF.LogLevels.OFF and \ @@ -285,7 +315,7 @@ def GetDefaultConfig(): "# Direct log 
messages to STDOUT\n" + \ "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ - "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n" + "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n\n" return cfg diff --git a/redhawk/src/base/framework/python/ossie/parsers/dcd.py b/redhawk/src/base/framework/python/ossie/parsers/dcd.py index 91f8fe4fe..f50a7498d 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/dcd.py +++ b/redhawk/src/base/framework/python/ossie/parsers/dcd.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -# +# -*- coding: utf-8 -*- + # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # @@ -18,70 +18,97 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. -# # -# Generated Thu Sep 12 14:49:30 2013 by generateDS.py version 2.7c. +# Generated Mon Jul 30 12:29:34 2018 by generateDS.py version 2.29.14. 
+# Python 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] +# +# Command line options: +# ('-f', '') +# ('--silence', '') +# ('-m', '') +# ('-o', 'ossie/parsers/dcd.py') +# +# Command line arguments: +# ../../../xml/xsd/dcd.xsd +# +# Command line: +# /usr/bin/generateDS.py -f --silence -m -o "ossie/parsers/dcd.py" ../../../xml/xsd/dcd.xsd +# +# Current working directory (os.getcwd()): +# python # import sys -import getopt import re as re_ - -etree_ = None -Verbose_import_ = False -( XMLParser_import_none, XMLParser_import_lxml, - XMLParser_import_elementtree - ) = range(3) -XMLParser_import_library = None +import base64 +import datetime as datetime_ +import warnings as warnings_ try: - # lxml from lxml import etree as etree_ - XMLParser_import_library = XMLParser_import_lxml - if Verbose_import_: - print("running with lxml.etree") except ImportError: - try: - # cElementTree from Python 2.5+ - import xml.etree.cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree on Python 2.5+") - except ImportError: - try: - # ElementTree from Python 2.5+ - import xml.etree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree on Python 2.5+") - except ImportError: - try: - # normal cElementTree install - import cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree") - except ImportError: - try: - # normal ElementTree install - import elementtree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree") - except ImportError: - raise ImportError("Failed to import ElementTree from any known place") - -def parsexml_(*args, **kwargs): - if (XMLParser_import_library == XMLParser_import_lxml and - 'parser' not in kwargs): + from 
xml.etree import ElementTree as etree_ + + +Validate_simpletypes_ = True +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str + + +def parsexml_(infile, parser=None, **kwargs): + if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. - kwargs['parser'] = etree_.ETCompatXMLParser() - doc = etree_.parse(*args, **kwargs) + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + doc = etree_.parse(infile, parser=parser, **kwargs) return doc +def parsexmlstring_(instring, parser=None, **kwargs): + if parser is None: + # Use the lxml ElementTree compatible parser so that, e.g., + # we ignore comments. + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + element = etree_.fromstring(instring, parser=parser, **kwargs) + return element + +# +# Namespace prefix definition table (and other attributes, too) +# +# The module generatedsnamespaces, if it is importable, must contain +# a dictionary named GeneratedsNamespaceDefs. This Python dictionary +# should map element type names (strings) to XML schema namespace prefix +# definitions. The export method for any class for which there is +# a namespace prefix definition, will export that definition in the +# XML representation of that element. See the export method of +# any generated element type class for a example of the use of this +# table. 
+# A sample table is: +# +# # File: generatedsnamespaces.py +# +# GenerateDSNamespaceDefs = { +# "ElementtypeA": "http://www.xxx.com/namespaceA", +# "ElementtypeB": "http://www.xxx.com/namespaceB", +# } # -# User methods + +try: + from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_ +except ImportError: + GenerateDSNamespaceDefs_ = {} + +# +# The root super-class for element type classes # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class @@ -89,67 +116,273 @@ def parsexml_(*args, **kwargs): try: from generatedssuper import GeneratedsSuper -except ImportError, exp: - +except ImportError as exp: + class GeneratedsSuper(object): + tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$') + class _FixedOffsetTZ(datetime_.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime_.timedelta(minutes=offset) + self.__name = name + def utcoffset(self, dt): + return self.__offset + def tzname(self, dt): + return self.__name + def dst(self, dt): + return None def gds_format_string(self, input_data, input_name=''): return input_data - def gds_validate_string(self, input_data, node, input_name=''): + def gds_validate_string(self, input_data, node=None, input_name=''): + if not input_data: + return '' + else: + return input_data + def gds_format_base64(self, input_data, input_name=''): + return base64.b64encode(input_data) + def gds_validate_base64(self, input_data, node=None, input_name=''): return input_data def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data - def gds_validate_integer(self, input_data, node, input_name=''): + def gds_validate_integer(self, input_data, node=None, input_name=''): return input_data def gds_format_integer_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_integer_list(self, input_data, node, input_name=''): + return '%s' % ' 
'.join(input_data) + def gds_validate_integer_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + int(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of integers') - return input_data + return values def gds_format_float(self, input_data, input_name=''): - return '%f' % input_data - def gds_validate_float(self, input_data, node, input_name=''): + return ('%.15f' % input_data).rstrip('0') + def gds_validate_float(self, input_data, node=None, input_name=''): return input_data def gds_format_float_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_float_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_float_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of floats') - return input_data + return values def gds_format_double(self, input_data, input_name=''): return '%e' % input_data - def gds_validate_double(self, input_data, node, input_name=''): + def gds_validate_double(self, input_data, node=None, input_name=''): return input_data def gds_format_double_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_double_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_double_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of doubles') - return input_data + return values def gds_format_boolean(self, input_data, input_name=''): - return 
'%s' % input_data - def gds_validate_boolean(self, input_data, node, input_name=''): + return ('%s' % input_data).lower() + def gds_validate_boolean(self, input_data, node=None, input_name=''): return input_data def gds_format_boolean_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_boolean_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_boolean_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: if value not in ('true', '1', 'false', '0', ): - raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")') + raise_parse_error( + node, + 'Requires sequence of booleans ' + '("true", "1", "false", "0")') + return values + def gds_validate_datetime(self, input_data, node=None, input_name=''): + return input_data + def gds_format_datetime(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + @classmethod + def gds_parse_datetime(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = 
input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + time_parts = input_data.split('.') + if len(time_parts) > 1: + micro_seconds = int(float('0.' + time_parts[1]) * 1000000) + input_data = '%s.%s' % (time_parts[0], micro_seconds, ) + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt + def gds_validate_date(self, input_data, node=None, input_name=''): + return input_data + def gds_format_date(self, input_data, input_name=''): + _svalue = '%04d-%02d-%02d' % ( + input_data.year, + input_data.month, + input_data.day, + ) + try: + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format( + hours, minutes) + except AttributeError: + pass + return _svalue + @classmethod + def gds_parse_date(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + dt = 
datetime_.datetime.strptime(input_data, '%Y-%m-%d') + dt = dt.replace(tzinfo=tz) + return dt.date() + def gds_validate_time(self, input_data, node=None, input_name=''): return input_data + def gds_format_time(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%02d:%02d:%02d' % ( + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%02d:%02d:%02d.%s' % ( + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + def gds_validate_simple_patterns(self, patterns, target): + # pat is a list of lists of strings/patterns. 
We should: + # - AND the outer elements + # - OR the inner elements + found1 = True + for patterns1 in patterns: + found2 = False + for patterns2 in patterns1: + if re_.search(patterns2, target) is not None: + found2 = True + break + if not found2: + found1 = False + break + return found1 + @classmethod + def gds_parse_time(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + if len(input_data.split('.')) > 1: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt.time() def gds_str_lower(self, instring): return instring.lower() def get_path_(self, node): @@ -180,6 +413,38 @@ def get_class_obj_(self, node, default_class=None): return class_obj1 def gds_build_any(self, node, type_name=None): return None + @classmethod + def gds_reverse_node_mapping(cls, mapping): + return dict(((v, k) for k, v in mapping.iteritems())) + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring + @staticmethod + def convert_unicode(instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result + def __eq__(self, other): + if type(self) != type(other): + return False + return self.__dict__ == other.__dict__ + def __ne__(self, other): + return not self.__eq__(other) + + def 
getSubclassFromModule_(module, class_): + '''Get the subclass of a class from a specific module.''' + name = class_.__name__ + 'Sub' + if hasattr(module, name): + return getattr(module, name) + else: + return None # @@ -205,29 +470,50 @@ def gds_build_any(self, node, type_name=None): Tag_pattern_ = re_.compile(r'({.*})?(.*)') String_cleanup_pat_ = re_.compile(r"[\n\r\s]+") Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)') +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +# Change this to redirect the generated superclass module to use a +# specific subclass module. +CurrentSubclassModule_ = None # # Support/utility functions. # + def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write(' ') + def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." if not inStr: return '' - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') @@ -240,6 +526,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -255,6 +542,7 @@ def quote_python(inStr): else: return '"""%s"""' % s1 + def get_all_text_(node): if node.text is not None: text = node.text @@ -265,6 +553,7 @@ def get_all_text_(node): text += child.tail return text + def 
find_attr_value_(attr_name, node): attrs = node.attrib attr_parts = attr_name.split(':') @@ -282,11 +571,9 @@ def find_attr_value_(attr_name, node): class GDSParseError(Exception): pass + def raise_parse_error(node, msg): - if XMLParser_import_library == XMLParser_import_lxml: - msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) - else: - msg = '%s (element %s)' % (msg, node.tag, ) + msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) raise GDSParseError(msg) @@ -305,6 +592,7 @@ class MixedContainer: TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + TypeBase64 = 8 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type @@ -318,49 +606,104 @@ def getValue(self): return self.value def getName(self): return self.name - def export(self, outfile, level, name, namespace, pretty_print=True): + def export(self, outfile, level, name, namespace, + pretty_print=True): if self.category == MixedContainer.CategoryText: # Prevent exporting empty content as empty lines. 
- if self.value.strip(): + if self.value.strip(): outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace, name, pretty_print) + self.value.export( + outfile, level, namespace, name, + pretty_print=pretty_print) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) + outfile.write('<%s>%s' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) + outfile.write('<%s>%d' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) + outfile.write('<%s>%f' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeBase64: + outfile.write('<%s>%s' % ( + self.name, + base64.b64encode(self.value), + self.name)) + def to_etree(self, element): + if self.category == MixedContainer.CategoryText: + # Prevent exporting empty content as empty lines. 
+ if self.value.strip(): + if len(element) > 0: + if element[-1].tail is None: + element[-1].tail = self.value + else: + element[-1].tail += self.value + else: + if element.text is None: + element.text = self.value + else: + element.text += self.value + elif self.category == MixedContainer.CategorySimple: + subelement = etree_.SubElement( + element, '%s' % self.name) + subelement.text = self.to_etree_simple() + else: # category == MixedContainer.CategoryComplex + self.value.to_etree(element) + def to_etree_simple(self): + if self.content_type == MixedContainer.TypeString: + text = self.value + elif (self.content_type == MixedContainer.TypeInteger or + self.content_type == MixedContainer.TypeBoolean): + text = '%d' % self.value + elif (self.content_type == MixedContainer.TypeFloat or + self.content_type == MixedContainer.TypeDecimal): + text = '%f' % self.value elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) + text = '%g' % self.value + elif self.content_type == MixedContainer.TypeBase64: + text = '%s' % base64.b64encode(self.value) + return text def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \ - 
(self.category, self.content_type, self.name,)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s",\n' % ( + self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class MemberSpec_(object): - def __init__(self, name='', data_type='', container=0): + def __init__(self, name='', data_type='', container=0, + optional=0, child_attrs=None, choice=None): self.name = name self.data_type = data_type self.container = container + self.child_attrs = child_attrs + self.choice = choice + self.optional = optional def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -375,6 +718,13 @@ def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container + def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs + def get_child_attrs(self): return self.child_attrs + def set_choice(self, choice): self.choice = choice + def get_choice(self): return self.choice + def set_optional(self, optional): self.optional = optional + def get_optional(self): return self.optional + def _cast(typ, value): if typ is None or value is None: @@ -385,6 +735,7 @@ def _cast(typ, value): # Data representation classes. # + class deviceconfiguration(GeneratedsSuper): """The DCD is based on the SAD (e.g., componentfiles, partitioning, etc.) DTD. 
The intent of the DCD is to provide the means of @@ -405,6 +756,7 @@ class deviceconfiguration(GeneratedsSuper): subclass = None superclass = None def __init__(self, id_=None, name=None, description=None, devicemanagersoftpkg=None, componentfiles=None, partitioning=None, connections=None, domainmanager=None, filesystemnames=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.name = _cast(None, name) self.description = description @@ -415,6 +767,11 @@ def __init__(self, id_=None, name=None, description=None, devicemanagersoftpkg=N self.domainmanager = domainmanager self.filesystemnames = filesystemnames def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, deviceconfiguration) + if subclass is not None: + return subclass(*args_, **kwargs_) if deviceconfiguration.subclass: return deviceconfiguration.subclass(*args_, **kwargs_) else: @@ -442,34 +799,52 @@ def get_filesystemnames(self): return self.filesystemnames def set_filesystemnames(self, filesystemnames): self.filesystemnames = filesystemnames filesystemnamesProp = property(get_filesystemnames, set_filesystemnames) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + self.description is not None or + self.devicemanagersoftpkg is not None or + self.componentfiles is not None or + self.partitioning is not None or + self.connections is not None or + self.domainmanager is not None or + self.filesystemnames is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='deviceconfiguration', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('deviceconfiguration') + if imported_ns_def_ is not 
None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='deviceconfiguration') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='deviceconfiguration', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='deviceconfiguration'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='deviceconfiguration', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -477,7 +852,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='deviceconfigurati eol_ = '' if self.description is not None: showIndent(outfile, level, 
pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.devicemanagersoftpkg is not None: self.devicemanagersoftpkg.export(outfile, level, namespace_, name_='devicemanagersoftpkg', pretty_print=pretty_print) if self.componentfiles is not None: @@ -490,86 +865,21 @@ def exportChildren(self, outfile, level, namespace_='', name_='deviceconfigurati self.domainmanager.export(outfile, level, namespace_, name_='domainmanager', pretty_print=pretty_print) if self.filesystemnames is not None: self.filesystemnames.export(outfile, level, namespace_, name_='filesystemnames', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.devicemanagersoftpkg is not None or - self.componentfiles is not None or - self.partitioning is not None or - self.connections is not None or - self.domainmanager is not None or - self.filesystemnames is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='deviceconfiguration'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % 
quote_python(self.description).encode(ExternalEncoding)) - if self.devicemanagersoftpkg is not None: - showIndent(outfile, level) - outfile.write('devicemanagersoftpkg=model_.devicemanagersoftpkg(\n') - self.devicemanagersoftpkg.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.componentfiles is not None: - showIndent(outfile, level) - outfile.write('componentfiles=model_.componentfiles(\n') - self.componentfiles.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.partitioning is not None: - showIndent(outfile, level) - outfile.write('partitioning=model_.partitioning(\n') - self.partitioning.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.connections is not None: - showIndent(outfile, level) - outfile.write('connections=model_.connections(\n') - self.connections.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.domainmanager is not None: - showIndent(outfile, level) - outfile.write('domainmanager=model_.domainmanager(\n') - self.domainmanager.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.filesystemnames is not None: - showIndent(outfile, level) - outfile.write('filesystemnames=model_.filesystemnames(\n') - self.filesystemnames.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value value = 
find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': @@ -579,27 +889,33 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'devicemanagersoftpkg': obj_ = devicemanagersoftpkg.factory() obj_.build(child_) - self.set_devicemanagersoftpkg(obj_) + self.devicemanagersoftpkg = obj_ + obj_.original_tagname_ = 'devicemanagersoftpkg' elif nodeName_ == 'componentfiles': obj_ = componentfiles.factory() obj_.build(child_) - self.set_componentfiles(obj_) + self.componentfiles = obj_ + obj_.original_tagname_ = 'componentfiles' elif nodeName_ == 'partitioning': obj_ = partitioning.factory() obj_.build(child_) - self.set_partitioning(obj_) + self.partitioning = obj_ + obj_.original_tagname_ = 'partitioning' elif nodeName_ == 'connections': obj_ = connections.factory() obj_.build(child_) - self.set_connections(obj_) + self.connections = obj_ + obj_.original_tagname_ = 'connections' elif nodeName_ == 'domainmanager': obj_ = domainmanager.factory() obj_.build(child_) - self.set_domainmanager(obj_) + self.domainmanager = obj_ + obj_.original_tagname_ = 'domainmanager' elif nodeName_ == 'filesystemnames': obj_ = filesystemnames.factory() obj_.build(child_) - self.set_filesystemnames(obj_) + self.filesystemnames = obj_ + obj_.original_tagname_ = 'filesystemnames' # end class deviceconfiguration @@ -607,8 +923,14 @@ class devicemanagersoftpkg(GeneratedsSuper): subclass = None superclass = None def __init__(self, localfile=None): + self.original_tagname_ = None self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, devicemanagersoftpkg) + if subclass is not None: + return subclass(*args_, **kwargs_) if 
devicemanagersoftpkg.subclass: return devicemanagersoftpkg.subclass(*args_, **kwargs_) else: @@ -617,18 +939,30 @@ def factory(*args_, **kwargs_): def get_localfile(self): return self.localfile def set_localfile(self, localfile): self.localfile = localfile localfileProp = property(get_localfile, set_localfile) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='devicemanagersoftpkg', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('devicemanagersoftpkg') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='devicemanagersoftpkg') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='devicemanagersoftpkg', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -642,39 +976,21 @@ def exportChildren(self, outfile, level, namespace_='', name_='devicemanagersoft eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='devicemanagersoftpkg'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - 
def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localfile(\n') - self.localfile.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localfile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class devicemanagersoftpkg @@ -682,11 +998,17 @@ class componentfiles(GeneratedsSuper): subclass = None superclass = None def __init__(self, componentfile=None): + self.original_tagname_ = None if componentfile is None: self.componentfile = [] else: self.componentfile = componentfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentfiles) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentfiles.subclass: return componentfiles.subclass(*args_, **kwargs_) else: @@ -695,20 +1017,33 @@ def factory(*args_, **kwargs_): def get_componentfile(self): return self.componentfile def set_componentfile(self, componentfile): self.componentfile = componentfile def add_componentfile(self, value): self.componentfile.append(value) - def insert_componentfile(self, index, value): self.componentfile[index] = value + def insert_componentfile_at(self, index, value): self.componentfile.insert(index, value) + def 
replace_componentfile_at(self, index, value): self.componentfile[index] = value componentfileProp = property(get_componentfile, set_componentfile) + def hasContent_(self): + if ( + self.componentfile + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentfiles', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentfiles') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentfiles') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentfiles', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -722,38 +1057,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentfiles', eol_ = '' for componentfile_ in self.componentfile: componentfile_.export(outfile, level, namespace_, name_='componentfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.componentfile - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentfiles'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - 
outfile.write('componentfile=[\n') - level += 1 - for componentfile_ in self.componentfile: - showIndent(outfile, level) - outfile.write('model_.componentfile(\n') - componentfile_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -761,17 +1071,24 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = componentfile.factory() obj_.build(child_) self.componentfile.append(obj_) + obj_.original_tagname_ = 'componentfile' # end class componentfiles class componentfile(GeneratedsSuper): subclass = None superclass = None - def __init__(self, type_=None, id_=None, localfile=None): - self.type_ = _cast(None, type_) + def __init__(self, id_=None, type_=None, localfile=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) + self.type_ = _cast(None, type_) self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentfile) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentfile.subclass: return componentfile.subclass(*args_, **kwargs_) else: @@ -780,35 +1097,47 @@ def factory(*args_, **kwargs_): def get_localfile(self): return self.localfile def set_localfile(self, localfile): self.localfile = localfile localfileProp = property(get_localfile, set_localfile) + def get_id(self): return self.id_ + def set_id(self, id_): self.id_ = id_ + idProp = property(get_id, set_id) def get_type(self): 
return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) - def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id - idProp = property(get_id, set_id) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentfile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentfile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentfile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentfile', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentfile'): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % 
(self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.type_ is not None and 'type_' not in already_processed: + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentfile', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -816,53 +1145,28 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentfile', f eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentfile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localfile(\n') - self.localfile.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self 
def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('type', node) - if value is not None and 'type' not in already_processed: - already_processed.append('type') - self.type_ = value value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value + value = find_attr_value_('type', node) + if value is not None and 'type' not in already_processed: + already_processed.add('type') + self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localfile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class componentfile @@ -870,9 +1174,14 @@ class localfile(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, localfile) + if subclass is not None: + return subclass(*args_, **kwargs_) if localfile.subclass: return localfile.subclass(*args_, **kwargs_) else: @@ -881,55 +1190,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='localfile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('localfile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + 
namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='localfile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='localfile', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='localfile'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='localfile', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='localfile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def 
buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -940,11 +1244,17 @@ class partitioning(GeneratedsSuper): subclass = None superclass = None def __init__(self, componentplacement=None): + self.original_tagname_ = None if componentplacement is None: self.componentplacement = [] else: self.componentplacement = componentplacement def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, partitioning) + if subclass is not None: + return subclass(*args_, **kwargs_) if partitioning.subclass: return partitioning.subclass(*args_, **kwargs_) else: @@ -953,20 +1263,33 @@ def factory(*args_, **kwargs_): def get_componentplacement(self): return self.componentplacement def set_componentplacement(self, componentplacement): self.componentplacement = componentplacement def add_componentplacement(self, value): self.componentplacement.append(value) - def insert_componentplacement(self, index, value): self.componentplacement[index] = value + def insert_componentplacement_at(self, index, value): self.componentplacement.insert(index, value) + def replace_componentplacement_at(self, index, value): self.componentplacement[index] = value componentplacementProp = property(get_componentplacement, set_componentplacement) + def hasContent_(self): + if ( + self.componentplacement + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='partitioning', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('partitioning') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = 
self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='partitioning') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='partitioning', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -980,38 +1303,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='partitioning', fr eol_ = '' for componentplacement_ in self.componentplacement: componentplacement_.export(outfile, level, namespace_, name_='componentplacement', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.componentplacement - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='partitioning'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('componentplacement=[\n') - level += 1 - for componentplacement_ in self.componentplacement: - showIndent(outfile, level) - outfile.write('model_.componentplacement(\n') - componentplacement_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] 
self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -1019,6 +1317,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = componentplacement.factory() obj_.build(child_) self.componentplacement.append(obj_) + obj_.original_tagname_ = 'componentplacement' # end class partitioning @@ -1026,6 +1325,7 @@ class componentplacement(GeneratedsSuper): subclass = None superclass = None def __init__(self, componentfileref=None, deployondevice=None, compositepartofdevice=None, devicepkgfile=None, componentinstantiation=None): + self.original_tagname_ = None self.componentfileref = componentfileref self.deployondevice = deployondevice self.compositepartofdevice = compositepartofdevice @@ -1035,6 +1335,11 @@ def __init__(self, componentfileref=None, deployondevice=None, compositepartofde else: self.componentinstantiation = componentinstantiation def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentplacement) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentplacement.subclass: return componentplacement.subclass(*args_, **kwargs_) else: @@ -1055,20 +1360,37 @@ def set_devicepkgfile(self, devicepkgfile): self.devicepkgfile = devicepkgfile def get_componentinstantiation(self): return self.componentinstantiation def set_componentinstantiation(self, componentinstantiation): self.componentinstantiation = componentinstantiation def add_componentinstantiation(self, value): self.componentinstantiation.append(value) - def insert_componentinstantiation(self, index, value): self.componentinstantiation[index] = value + def insert_componentinstantiation_at(self, index, value): self.componentinstantiation.insert(index, value) + def replace_componentinstantiation_at(self, index, value): 
self.componentinstantiation[index] = value componentinstantiationProp = property(get_componentinstantiation, set_componentinstantiation) + def hasContent_(self): + if ( + self.componentfileref is not None or + self.deployondevice is not None or + self.compositepartofdevice is not None or + self.devicepkgfile is not None or + self.componentinstantiation + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentplacement', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentplacement') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentplacement') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentplacement', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1090,89 +1412,41 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentplacemen self.devicepkgfile.export(outfile, level, namespace_, name_='devicepkgfile', pretty_print=pretty_print) for componentinstantiation_ in self.componentinstantiation: componentinstantiation_.export(outfile, level, namespace_, name_='componentinstantiation', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.componentfileref is not None or - self.deployondevice is not None or - self.compositepartofdevice is not None or - self.devicepkgfile is not None or - 
self.componentinstantiation - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentplacement'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.componentfileref is not None: - showIndent(outfile, level) - outfile.write('componentfileref=model_.componentfileref(\n') - self.componentfileref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deployondevice is not None: - showIndent(outfile, level) - outfile.write('deployondevice=model_.deployondevice(\n') - self.deployondevice.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.compositepartofdevice is not None: - showIndent(outfile, level) - outfile.write('compositepartofdevice=model_.compositepartofdevice(\n') - self.compositepartofdevice.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.devicepkgfile is not None: - showIndent(outfile, level) - outfile.write('devicepkgfile=model_.devicepkgfile(\n') - self.devicepkgfile.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('componentinstantiation=[\n') - level += 1 - for componentinstantiation_ in self.componentinstantiation: - showIndent(outfile, level) - outfile.write('model_.componentinstantiation(\n') - componentinstantiation_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = 
Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'componentfileref': obj_ = componentfileref.factory() obj_.build(child_) - self.set_componentfileref(obj_) + self.componentfileref = obj_ + obj_.original_tagname_ = 'componentfileref' elif nodeName_ == 'deployondevice': obj_ = deployondevice.factory() obj_.build(child_) - self.set_deployondevice(obj_) + self.deployondevice = obj_ + obj_.original_tagname_ = 'deployondevice' elif nodeName_ == 'compositepartofdevice': obj_ = compositepartofdevice.factory() obj_.build(child_) - self.set_compositepartofdevice(obj_) + self.compositepartofdevice = obj_ + obj_.original_tagname_ = 'compositepartofdevice' elif nodeName_ == 'devicepkgfile': obj_ = devicepkgfile.factory() obj_.build(child_) - self.set_devicepkgfile(obj_) + self.devicepkgfile = obj_ + obj_.original_tagname_ = 'devicepkgfile' elif nodeName_ == 'componentinstantiation': obj_ = componentinstantiation.factory() obj_.build(child_) self.componentinstantiation.append(obj_) + obj_.original_tagname_ = 'componentinstantiation' # end class componentplacement @@ -1184,9 +1458,14 @@ class componentfileref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentfileref) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentfileref.subclass: return componentfileref.subclass(*args_, **kwargs_) else: @@ -1195,55 +1474,50 @@ def factory(*args_, **kwargs_): def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): 
+ return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentfileref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentfileref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentfileref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentfileref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentfileref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentfileref', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentfileref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, 
already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1257,9 +1531,14 @@ class deployondevice(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, deployondevice) + if subclass is not None: + return subclass(*args_, **kwargs_) if deployondevice.subclass: return deployondevice.subclass(*args_, **kwargs_) else: @@ -1268,55 +1547,50 @@ def factory(*args_, **kwargs_): def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='deployondevice', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('deployondevice') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = 
self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='deployondevice') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='deployondevice', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='deployondevice'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='deployondevice', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='deployondevice'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, 
node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1326,15 +1600,20 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class compositepartofdevice(GeneratedsSuper): """The compositepartofdevice element is used when an aggregate relationship exists to reference the componentinstantiation - element that describes the whole Device for which this - Device’s componentinstantiation element describes a part of - the aggregate Device.""" + element that describes the whole Device for which this Device’s + componentinstantiation element describes a part of the aggregate + Device.""" subclass = None superclass = None def __init__(self, refid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, compositepartofdevice) + if subclass is not None: + return subclass(*args_, **kwargs_) if compositepartofdevice.subclass: return compositepartofdevice.subclass(*args_, **kwargs_) else: @@ -1343,55 +1622,50 @@ def factory(*args_, **kwargs_): def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='compositepartofdevice', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('compositepartofdevice') + if imported_ns_def_ is not None: + 
namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='compositepartofdevice') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='compositepartofdevice', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='compositepartofdevice'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='compositepartofdevice', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='compositepartofdevice'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def 
exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1404,9 +1678,15 @@ class devicepkgfile(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, localfile=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, devicepkgfile) + if subclass is not None: + return subclass(*args_, **kwargs_) if devicepkgfile.subclass: return devicepkgfile.subclass(*args_, **kwargs_) else: @@ -1418,26 +1698,38 @@ def set_localfile(self, localfile): self.localfile = localfile def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='devicepkgfile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('devicepkgfile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + 
namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='devicepkgfile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='devicepkgfile', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='devicepkgfile'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='devicepkgfile', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1445,45 +1737,24 @@ def exportChildren(self, outfile, level, namespace_='', name_='devicepkgfile', f eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='devicepkgfile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - 
def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localfile(\n') - self.localfile.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localfile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class devicepkgfile @@ -1494,11 +1765,21 @@ class componentinstantiation(GeneratedsSuper): attribute is a DCE UUID that uniquely identifier the component.""" subclass = None superclass = None - def __init__(self, id_=None, usagename=None, componentproperties=None): + def __init__(self, id_=None, startorder=None, usagename=None, componentproperties=None, affinity=None, loggingconfig=None, deployerrequires=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) + self.startorder = _cast(None, startorder) self.usagename = usagename self.componentproperties = componentproperties + self.affinity = affinity + self.loggingconfig = loggingconfig + self.deployerrequires = deployerrequires def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentinstantiation) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentinstantiation.subclass: 
return componentinstantiation.subclass(*args_, **kwargs_) else: @@ -1510,29 +1791,60 @@ def set_usagename(self, usagename): self.usagename = usagename def get_componentproperties(self): return self.componentproperties def set_componentproperties(self, componentproperties): self.componentproperties = componentproperties componentpropertiesProp = property(get_componentproperties, set_componentproperties) + def get_affinity(self): return self.affinity + def set_affinity(self, affinity): self.affinity = affinity + affinityProp = property(get_affinity, set_affinity) + def get_loggingconfig(self): return self.loggingconfig + def set_loggingconfig(self, loggingconfig): self.loggingconfig = loggingconfig + loggingconfigProp = property(get_loggingconfig, set_loggingconfig) + def get_deployerrequires(self): return self.deployerrequires + def set_deployerrequires(self, deployerrequires): self.deployerrequires = deployerrequires + deployerrequiresProp = property(get_deployerrequires, set_deployerrequires) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) + def get_startorder(self): return self.startorder + def set_startorder(self, startorder): self.startorder = startorder + startorderProp = property(get_startorder, set_startorder) + def hasContent_(self): + if ( + self.usagename is not None or + self.componentproperties is not None or + self.affinity is not None or + self.loggingconfig is not None or + self.deployerrequires is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentinstantiation', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentinstantiation') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, 
pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentinstantiation') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentinstantiation', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentinstantiation'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.startorder is not None and 'startorder' not in already_processed: + already_processed.add('startorder') + outfile.write(' startorder=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.startorder), input_name='startorder')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentinstantiation', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1540,47 +1852,31 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentinstanti eol_ = '' if self.usagename is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%susagename>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.usagename).encode(ExternalEncoding), input_name='usagename'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.usagename), input_name='usagename')), 
eol_)) if self.componentproperties is not None: self.componentproperties.export(outfile, level, namespace_, name_='componentproperties', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.usagename is not None or - self.componentproperties is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentinstantiation'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.usagename is not None: - showIndent(outfile, level) - outfile.write('usagename=%s,\n' % quote_python(self.usagename).encode(ExternalEncoding)) - if self.componentproperties is not None: - showIndent(outfile, level) - outfile.write('componentproperties=model_.componentproperties(\n') - self.componentproperties.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') + if self.affinity is not None: + self.affinity.export(outfile, level, namespace_, name_='affinity', pretty_print=pretty_print) + if self.loggingconfig is not None: + self.loggingconfig.export(outfile, level, namespace_, name_='loggingconfig', pretty_print=pretty_print) + if self.deployerrequires is not None: + self.deployerrequires.export(outfile, level, namespace_, name_='deployerrequires', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, 
already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value + value = find_attr_value_('startorder', node) + if value is not None and 'startorder' not in already_processed: + already_processed.add('startorder') + self.startorder = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'usagename': usagename_ = child_.text @@ -1589,14 +1885,403 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentproperties': obj_ = componentproperties.factory() obj_.build(child_) - self.set_componentproperties(obj_) + self.componentproperties = obj_ + obj_.original_tagname_ = 'componentproperties' + elif nodeName_ == 'affinity': + obj_ = affinity.factory() + obj_.build(child_) + self.affinity = obj_ + obj_.original_tagname_ = 'affinity' + elif nodeName_ == 'loggingconfig': + obj_ = loggingconfig.factory() + obj_.build(child_) + self.loggingconfig = obj_ + obj_.original_tagname_ = 'loggingconfig' + elif nodeName_ == 'deployerrequires': + obj_ = deployerrequires.factory() + obj_.build(child_) + self.deployerrequires = obj_ + obj_.original_tagname_ = 'deployerrequires' # end class componentinstantiation +class loggingconfig(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, level=None, valueOf_=None): + self.original_tagname_ = None + self.level = _cast(None, level) + self.value = valueOf_ + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, loggingconfig) + if subclass is not None: + return subclass(*args_, **kwargs_) + if loggingconfig.subclass: + return loggingconfig.subclass(*args_, **kwargs_) + else: + return loggingconfig(*args_, **kwargs_) + factory = staticmethod(factory) + def get_level(self): return self.level + def set_level(self, level): self.level 
= level + levelProp = property(get_level, set_level) + def get_value(self): return self.value + def set_value(self, valueOf_): self.value = valueOf_ + def hasContent_(self): + if ( + (1 if type(self.value) in [int,float] else self.value) + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='loggingconfig', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('loggingconfig') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='loggingconfig') + if self.hasContent_(): + outfile.write('>') + outfile.write(self.convert_unicode(self.value)) + self.exportChildren(outfile, level + 1, namespace_='', name_='loggingconfig', pretty_print=pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='loggingconfig'): + if self.level is not None and 'level' not in already_processed: + already_processed.add('level') + outfile.write(' level=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.level), input_name='level')), )) + def exportChildren(self, outfile, level, namespace_='', name_='loggingconfig', fromsubclass_=False, pretty_print=True): + pass + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + self.value = get_all_text_(node) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, 
already_processed): + value = find_attr_value_('level', node) + if value is not None and 'level' not in already_processed: + already_processed.add('level') + self.level = value + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + pass +# end class loggingconfig + + +class affinity(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): + self.original_tagname_ = None + if simpleref is None: + self.simpleref = [] + else: + self.simpleref = simpleref + if simplesequenceref is None: + self.simplesequenceref = [] + else: + self.simplesequenceref = simplesequenceref + if structref is None: + self.structref = [] + else: + self.structref = structref + if structsequenceref is None: + self.structsequenceref = [] + else: + self.structsequenceref = structsequenceref + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, affinity) + if subclass is not None: + return subclass(*args_, **kwargs_) + if affinity.subclass: + return affinity.subclass(*args_, **kwargs_) + else: + return affinity(*args_, **kwargs_) + factory = staticmethod(factory) + def get_simpleref(self): return self.simpleref + def set_simpleref(self, simpleref): self.simpleref = simpleref + def add_simpleref(self, value): self.simpleref.append(value) + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value + simplerefProp = property(get_simpleref, set_simpleref) + def get_simplesequenceref(self): return self.simplesequenceref + def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref + def add_simplesequenceref(self, value): self.simplesequenceref.append(value) + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def 
replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value + simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) + def get_structref(self): return self.structref + def set_structref(self, structref): self.structref = structref + def add_structref(self, value): self.structref.append(value) + def insert_structref_at(self, index, value): self.structref.insert(index, value) + def replace_structref_at(self, index, value): self.structref[index] = value + structrefProp = property(get_structref, set_structref) + def get_structsequenceref(self): return self.structsequenceref + def set_structsequenceref(self, structsequenceref): self.structsequenceref = structsequenceref + def add_structsequenceref(self, value): self.structsequenceref.append(value) + def insert_structsequenceref_at(self, index, value): self.structsequenceref.insert(index, value) + def replace_structsequenceref_at(self, index, value): self.structsequenceref[index] = value + structsequencerefProp = property(get_structsequenceref, set_structsequenceref) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref or + self.structref or + self.structsequenceref + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='affinity', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('affinity') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='affinity') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', 
name_='affinity', pretty_print=pretty_print) + showIndent(outfile, level, pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='affinity'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='affinity', fromsubclass_=False, pretty_print=True): + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + for simpleref_ in self.simpleref: + simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) + for simplesequenceref_ in self.simplesequenceref: + simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) + for structref_ in self.structref: + structref_.export(outfile, level, namespace_, name_='structref', pretty_print=pretty_print) + for structsequenceref_ in self.structsequenceref: + structsequenceref_.export(outfile, level, namespace_, name_='structsequenceref', pretty_print=pretty_print) + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + pass + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'simpleref': + obj_ = simpleref.factory() + obj_.build(child_) + self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' + elif nodeName_ == 'simplesequenceref': + obj_ = simplesequenceref.factory() + obj_.build(child_) + self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' + elif nodeName_ == 'structref': + obj_ = structref.factory() + obj_.build(child_) + self.structref.append(obj_) + obj_.original_tagname_ = 'structref' + elif nodeName_ == 'structsequenceref': + obj_ = structsequenceref.factory() + 
obj_.build(child_) + self.structsequenceref.append(obj_) + obj_.original_tagname_ = 'structsequenceref' +# end class affinity + + +class deployerrequires(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, requires=None): + self.original_tagname_ = None + if requires is None: + self.requires = [] + else: + self.requires = requires + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, deployerrequires) + if subclass is not None: + return subclass(*args_, **kwargs_) + if deployerrequires.subclass: + return deployerrequires.subclass(*args_, **kwargs_) + else: + return deployerrequires(*args_, **kwargs_) + factory = staticmethod(factory) + def get_requires(self): return self.requires + def set_requires(self, requires): self.requires = requires + def add_requires(self, value): self.requires.append(value) + def insert_requires_at(self, index, value): self.requires.insert(index, value) + def replace_requires_at(self, index, value): self.requires[index] = value + requiresProp = property(get_requires, set_requires) + def hasContent_(self): + if ( + self.requires + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='deployerrequires', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('deployerrequires') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='deployerrequires') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', 
name_='deployerrequires', pretty_print=pretty_print) + showIndent(outfile, level, pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='deployerrequires'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='deployerrequires', fromsubclass_=False, pretty_print=True): + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + for requires_ in self.requires: + requires_.export(outfile, level, namespace_, name_='requires', pretty_print=pretty_print) + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + pass + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'requires': + obj_ = idvalue.factory() + obj_.build(child_) + self.requires.append(obj_) + obj_.original_tagname_ = 'requires' +# end class deployerrequires + + +class idvalue(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, id_=None, value=None): + self.original_tagname_ = None + self.id = _cast(None, id_) + self.value = _cast(None, value) + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, idvalue) + if subclass is not None: + return subclass(*args_, **kwargs_) + if idvalue.subclass: + return idvalue.subclass(*args_, **kwargs_) + else: + return idvalue(*args_, **kwargs_) + factory = staticmethod(factory) + def get_id(self): return self.id + def set_id(self, id_): self.id = id_ + idProp = property(get_id, set_id) + def get_value(self): return self.value + def set_value(self, value): self.value = value + valueProp = property(get_value, set_value) + def 
hasContent_(self): + if ( + + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='idvalue', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('idvalue') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='idvalue') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='idvalue', pretty_print=pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='idvalue'): + if self.id is not None and 'id' not in already_processed: + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), )) + if self.value is not None and 'value' not in already_processed: + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) + def exportChildren(self, outfile, level, namespace_='', name_='idvalue', fromsubclass_=False, pretty_print=True): + pass + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + value = find_attr_value_('id', node) + if value is not None and 'id' not in already_processed: + 
already_processed.add('id') + self.id = value + value = find_attr_value_('value', node) + if value is not None and 'value' not in already_processed: + already_processed.add('value') + self.value = value + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + pass +# end class idvalue + + class componentproperties(GeneratedsSuper): subclass = None superclass = None def __init__(self, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): + self.original_tagname_ = None if simpleref is None: self.simpleref = [] else: @@ -1614,6 +2299,11 @@ def __init__(self, simpleref=None, simplesequenceref=None, structref=None, struc else: self.structsequenceref = structsequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentproperties) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentproperties.subclass: return componentproperties.subclass(*args_, **kwargs_) else: @@ -1622,35 +2312,54 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) def get_simplesequenceref(self): return self.simplesequenceref def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref def add_simplesequenceref(self, value): self.simplesequenceref.append(value) - def insert_simplesequenceref(self, index, value): self.simplesequenceref[index] = value + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def 
replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) def get_structref(self): return self.structref def set_structref(self, structref): self.structref = structref def add_structref(self, value): self.structref.append(value) - def insert_structref(self, index, value): self.structref[index] = value + def insert_structref_at(self, index, value): self.structref.insert(index, value) + def replace_structref_at(self, index, value): self.structref[index] = value structrefProp = property(get_structref, set_structref) def get_structsequenceref(self): return self.structsequenceref def set_structsequenceref(self, structsequenceref): self.structsequenceref = structsequenceref def add_structsequenceref(self, value): self.structsequenceref.append(value) - def insert_structsequenceref(self, index, value): self.structsequenceref[index] = value + def insert_structsequenceref_at(self, index, value): self.structsequenceref.insert(index, value) + def replace_structsequenceref_at(self, index, value): self.structsequenceref[index] = value structsequencerefProp = property(get_structsequenceref, set_structsequenceref) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref or + self.structref or + self.structsequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentproperties', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentproperties') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, 
already_processed, namespace_, name_='componentproperties') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentproperties', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1670,77 +2379,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentproperti structref_.export(outfile, level, namespace_, name_='structref', pretty_print=pretty_print) for structsequenceref_ in self.structsequenceref: structsequenceref_.export(outfile, level, namespace_, name_='structsequenceref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref or - self.simplesequenceref or - self.structref or - self.structsequenceref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentproperties'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simplesequenceref=[\n') - level += 1 - for simplesequenceref_ in self.simplesequenceref: - showIndent(outfile, level) - outfile.write('model_.simplesequenceref(\n') - simplesequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - 
outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structref=[\n') - level += 1 - for structref_ in self.structref: - showIndent(outfile, level) - outfile.write('model_.structref(\n') - structref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structsequenceref=[\n') - level += 1 - for structsequenceref_ in self.structsequenceref: - showIndent(outfile, level) - outfile.write('model_.structsequenceref(\n') - structsequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -1748,18 +2393,22 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' elif nodeName_ == 'simplesequenceref': obj_ = simplesequenceref.factory() obj_.build(child_) self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' elif nodeName_ == 'structref': obj_ = structref.factory() obj_.build(child_) self.structref.append(obj_) + obj_.original_tagname_ = 'structref' elif nodeName_ == 'structsequenceref': obj_ = structsequenceref.factory() obj_.build(child_) self.structsequenceref.append(obj_) + obj_.original_tagname_ = 'structsequenceref' # end class componentproperties @@ -1767,9 +2416,14 @@ class devicethatloadedthiscomponentref(GeneratedsSuper): subclass = None 
superclass = None def __init__(self, refid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, devicethatloadedthiscomponentref) + if subclass is not None: + return subclass(*args_, **kwargs_) if devicethatloadedthiscomponentref.subclass: return devicethatloadedthiscomponentref.subclass(*args_, **kwargs_) else: @@ -1778,55 +2432,50 @@ def factory(*args_, **kwargs_): def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='devicethatloadedthiscomponentref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('devicethatloadedthiscomponentref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='devicethatloadedthiscomponentref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='devicethatloadedthiscomponentref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='devicethatloadedthiscomponentref'): if self.refid is not None and 'refid' not in already_processed: - 
already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='devicethatloadedthiscomponentref', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='devicethatloadedthiscomponentref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1836,83 +2485,79 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class deviceusedbythiscomponentref(GeneratedsSuper): subclass = None superclass = None - def __init__(self, usesrefid=None, refid=None): - self.usesrefid = _cast(None, usesrefid) + def 
__init__(self, refid=None, usesrefid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass + self.usesrefid = _cast(None, usesrefid) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, deviceusedbythiscomponentref) + if subclass is not None: + return subclass(*args_, **kwargs_) if deviceusedbythiscomponentref.subclass: return deviceusedbythiscomponentref.subclass(*args_, **kwargs_) else: return deviceusedbythiscomponentref(*args_, **kwargs_) factory = staticmethod(factory) - def get_usesrefid(self): return self.usesrefid - def set_usesrefid(self, usesrefid): self.usesrefid = usesrefid - usesrefidProp = property(get_usesrefid, set_usesrefid) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def get_usesrefid(self): return self.usesrefid + def set_usesrefid(self, usesrefid): self.usesrefid = usesrefid + usesrefidProp = property(get_usesrefid, set_usesrefid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='deviceusedbythiscomponentref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('deviceusedbythiscomponentref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='deviceusedbythiscomponentref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, 
level + 1, namespace_='', name_='deviceusedbythiscomponentref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='deviceusedbythiscomponentref'): - if self.usesrefid is not None and 'usesrefid' not in already_processed: - already_processed.append('usesrefid') - outfile.write(' usesrefid=%s' % (self.gds_format_string(quote_attrib(self.usesrefid).encode(ExternalEncoding), input_name='usesrefid'), )) if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='deviceusedbythiscomponentref', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='deviceusedbythiscomponentref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) if self.usesrefid is not None and 'usesrefid' not in already_processed: - already_processed.append('usesrefid') - showIndent(outfile, level) - outfile.write('usesrefid = "%s",\n' % (self.usesrefid,)) - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('usesrefid') + outfile.write(' usesrefid=%s' % 
(self.gds_encode(self.gds_format_string(quote_attrib(self.usesrefid), input_name='usesrefid')), )) + def exportChildren(self, outfile, level, namespace_='', name_='deviceusedbythiscomponentref', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('usesrefid', node) - if value is not None and 'usesrefid' not in already_processed: - already_processed.append('usesrefid') - self.usesrefid = value value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value + value = find_attr_value_('usesrefid', node) + if value is not None and 'usesrefid' not in already_processed: + already_processed.add('usesrefid') + self.usesrefid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class deviceusedbythiscomponentref @@ -1922,10 +2567,15 @@ class simpleref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, value=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.value = _cast(None, value) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simpleref) + if subclass is not None: + return subclass(*args_, **kwargs_) if simpleref.subclass: return simpleref.subclass(*args_, **kwargs_) else: @@ -1937,66 +2587,57 @@ def set_refid(self, refid): self.refid = refid def get_value(self): return self.value def set_value(self, value): self.value = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + 
+ ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='simpleref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simpleref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simpleref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simpleref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simpleref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), )) + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) def exportChildren(self, outfile, level, namespace_='', name_='simpleref', fromsubclass_=False, pretty_print=True): pass - def 
hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='simpleref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - showIndent(outfile, level) - outfile.write('value = "%s",\n' % (self.value,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value value = find_attr_value_('value', node) if value is not None and 'value' not in already_processed: - already_processed.append('value') + already_processed.add('value') self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2007,9 +2648,15 @@ class simplesequenceref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, values=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.values = values def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simplesequenceref) + if subclass is not None: + 
return subclass(*args_, **kwargs_) if simplesequenceref.subclass: return simplesequenceref.subclass(*args_, **kwargs_) else: @@ -2021,26 +2668,38 @@ def set_values(self, values): self.values = values def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.values is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='simplesequenceref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simplesequenceref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simplesequenceref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simplesequenceref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simplesequenceref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, 
namespace_='', name_='simplesequenceref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2048,58 +2707,47 @@ def exportChildren(self, outfile, level, namespace_='', name_='simplesequenceref eol_ = '' if self.values is not None: self.values.export(outfile, level, namespace_, name_='values', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.values is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='simplesequenceref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.values is not None: - showIndent(outfile, level) - outfile.write('values=model_.values(\n') - self.values.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'values': obj_ = values.factory() obj_.build(child_) - self.set_values(obj_) + self.values = obj_ + obj_.original_tagname_ = 'values' # end class simplesequenceref class 
structref(GeneratedsSuper): subclass = None superclass = None - def __init__(self, refid=None, simpleref=None): + def __init__(self, refid=None, simpleref=None, simplesequenceref=None): + self.original_tagname_ = None self.refid = _cast(None, refid) if simpleref is None: self.simpleref = [] else: self.simpleref = simpleref + if simplesequenceref is None: + self.simplesequenceref = [] + else: + self.simplesequenceref = simplesequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structref) + if subclass is not None: + return subclass(*args_, **kwargs_) if structref.subclass: return structref.subclass(*args_, **kwargs_) else: @@ -2108,31 +2756,51 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) + def get_simplesequenceref(self): return self.simplesequenceref + def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref + def add_simplesequenceref(self, value): self.simplesequenceref.append(value) + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value + simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref + ): + return True + else: + return 
False def export(self, outfile, level, namespace_='', name_='structref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='structref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='structref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2140,51 +2808,31 @@ def exportChildren(self, outfile, level, namespace_='', name_='structref', froms eol_ = '' for simpleref_ in self.simpleref: simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref - ): - return True - else: - return False - def exportLiteral(self, 
outfile, level, name_='structref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + for simplesequenceref_ in self.simplesequenceref: + simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'simpleref': obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' + elif nodeName_ == 'simplesequenceref': + obj_ = simplesequenceref.factory() + obj_.build(child_) + self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' # end class structref @@ -2192,12 +2840,18 @@ class 
structsequenceref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, structvalue=None): + self.original_tagname_ = None self.refid = _cast(None, refid) if structvalue is None: self.structvalue = [] else: self.structvalue = structvalue def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structsequenceref) + if subclass is not None: + return subclass(*args_, **kwargs_) if structsequenceref.subclass: return structsequenceref.subclass(*args_, **kwargs_) else: @@ -2206,31 +2860,44 @@ def factory(*args_, **kwargs_): def get_structvalue(self): return self.structvalue def set_structvalue(self, structvalue): self.structvalue = structvalue def add_structvalue(self, value): self.structvalue.append(value) - def insert_structvalue(self, index, value): self.structvalue[index] = value + def insert_structvalue_at(self, index, value): self.structvalue.insert(index, value) + def replace_structvalue_at(self, index, value): self.structvalue[index] = value structvalueProp = property(get_structvalue, set_structvalue) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.structvalue + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structsequenceref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structsequenceref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, 
name_='structsequenceref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structsequenceref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='structsequenceref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='structsequenceref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2238,63 +2905,46 @@ def exportChildren(self, outfile, level, namespace_='', name_='structsequenceref eol_ = '' for structvalue_ in self.structvalue: structvalue_.export(outfile, level, namespace_, name_='structvalue', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.structvalue - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structsequenceref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('structvalue=[\n') - 
level += 1 - for structvalue_ in self.structvalue: - showIndent(outfile, level) - outfile.write('model_.structvalue(\n') - structvalue_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'structvalue': obj_ = structvalue.factory() obj_.build(child_) self.structvalue.append(obj_) + obj_.original_tagname_ = 'structvalue' # end class structsequenceref class structvalue(GeneratedsSuper): subclass = None superclass = None - def __init__(self, simpleref=None): + def __init__(self, simpleref=None, simplesequenceref=None): + self.original_tagname_ = None if simpleref is None: self.simpleref = [] else: self.simpleref = simpleref + if simplesequenceref is None: + self.simplesequenceref = [] + else: + self.simplesequenceref = simplesequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structvalue) + if subclass is not None: + return subclass(*args_, **kwargs_) if structvalue.subclass: return structvalue.subclass(*args_, **kwargs_) else: @@ -2303,20 +2953,40 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def 
insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) + def get_simplesequenceref(self): return self.simplesequenceref + def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref + def add_simplesequenceref(self, value): self.simplesequenceref.append(value) + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value + simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structvalue', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structvalue') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structvalue') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structvalue', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2330,38 +3000,15 @@ def exportChildren(self, outfile, level, namespace_='', 
name_='structvalue', fro eol_ = '' for simpleref_ in self.simpleref: simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structvalue'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + for simplesequenceref_ in self.simplesequenceref: + simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -2369,6 +3016,12 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' + elif nodeName_ == 'simplesequenceref': + obj_ = simplesequenceref.factory() + obj_.build(child_) + self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' # end class structvalue @@ -2376,11 +3029,17 @@ class 
values(GeneratedsSuper): subclass = None superclass = None def __init__(self, value=None): + self.original_tagname_ = None if value is None: self.value = [] else: self.value = value def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, values) + if subclass is not None: + return subclass(*args_, **kwargs_) if values.subclass: return values.subclass(*args_, **kwargs_) else: @@ -2389,20 +3048,33 @@ def factory(*args_, **kwargs_): def get_value(self): return self.value def set_value(self, value): self.value = value def add_value(self, value): self.value.append(value) - def insert_value(self, index, value): self.value[index] = value + def insert_value_at(self, index, value): self.value.insert(index, value) + def replace_value_at(self, index, value): self.value[index] = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + self.value + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='values', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('values') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='values') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='values', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2416,36 +3088,14 @@ def 
exportChildren(self, outfile, level, namespace_='', name_='values', fromsubc eol_ = '' for value_ in self.value: showIndent(outfile, level, pretty_print) - outfile.write('<%svalue>%s%s' % (namespace_, self.gds_format_string(quote_xml(value_).encode(ExternalEncoding), input_name='value'), namespace_, eol_)) - def hasContent_(self): - if ( - self.value - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='values'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('value=[\n') - level += 1 - for value_ in self.value: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(value_).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(value_), input_name='value')), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -2460,9 +3110,14 @@ class componentinstantiationref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentinstantiationref) + if subclass is not None: + return subclass(*args_, **kwargs_) if 
componentinstantiationref.subclass: return componentinstantiationref.subclass(*args_, **kwargs_) else: @@ -2471,55 +3126,50 @@ def factory(*args_, **kwargs_): def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentinstantiationref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentinstantiationref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentinstantiationref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentinstantiationref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentinstantiationref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentinstantiationref', 
fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentinstantiationref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2532,9 +3182,15 @@ class domainmanager(GeneratedsSuper): subclass = None superclass = None def __init__(self, namingservice=None, stringifiedobjectref=None): + self.original_tagname_ = None self.namingservice = namingservice self.stringifiedobjectref = stringifiedobjectref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, domainmanager) + if subclass is not None: + return subclass(*args_, **kwargs_) if domainmanager.subclass: return domainmanager.subclass(*args_, **kwargs_) else: @@ -2546,18 +3202,31 @@ def set_namingservice(self, namingservice): self.namingservice = namingservice def get_stringifiedobjectref(self): 
return self.stringifiedobjectref def set_stringifiedobjectref(self, stringifiedobjectref): self.stringifiedobjectref = stringifiedobjectref stringifiedobjectrefProp = property(get_stringifiedobjectref, set_stringifiedobjectref) + def hasContent_(self): + if ( + self.namingservice is not None or + self.stringifiedobjectref is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='domainmanager', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('domainmanager') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='domainmanager') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='domainmanager', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2573,44 +3242,22 @@ def exportChildren(self, outfile, level, namespace_='', name_='domainmanager', f self.namingservice.export(outfile, level, namespace_, name_='namingservice', pretty_print=pretty_print) if self.stringifiedobjectref is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sstringifiedobjectref>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.stringifiedobjectref).encode(ExternalEncoding), input_name='stringifiedobjectref'), namespace_, eol_)) - def hasContent_(self): - if ( - self.namingservice is not None or - self.stringifiedobjectref is not None - ): - return True - else: - 
return False - def exportLiteral(self, outfile, level, name_='domainmanager'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.namingservice is not None: - showIndent(outfile, level) - outfile.write('namingservice=model_.namingservice(\n') - self.namingservice.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.stringifiedobjectref is not None: - showIndent(outfile, level) - outfile.write('stringifiedobjectref=%s,\n' % quote_python(self.stringifiedobjectref).encode(ExternalEncoding)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.stringifiedobjectref), input_name='stringifiedobjectref')), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'namingservice': obj_ = namingservice.factory() obj_.build(child_) - self.set_namingservice(obj_) + self.namingservice = obj_ + obj_.original_tagname_ = 'namingservice' elif nodeName_ == 'stringifiedobjectref': stringifiedobjectref_ = child_.text stringifiedobjectref_ = self.gds_validate_string(stringifiedobjectref_, node, 'stringifiedobjectref') @@ -2622,10 +3269,16 @@ class findby(GeneratedsSuper): subclass = None superclass = None def __init__(self, namingservice=None, stringifiedobjectref=None, domainfinder=None): + self.original_tagname_ = None self.namingservice = namingservice self.stringifiedobjectref = 
stringifiedobjectref self.domainfinder = domainfinder def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, findby) + if subclass is not None: + return subclass(*args_, **kwargs_) if findby.subclass: return findby.subclass(*args_, **kwargs_) else: @@ -2640,18 +3293,32 @@ def set_stringifiedobjectref(self, stringifiedobjectref): self.stringifiedobject def get_domainfinder(self): return self.domainfinder def set_domainfinder(self, domainfinder): self.domainfinder = domainfinder domainfinderProp = property(get_domainfinder, set_domainfinder) + def hasContent_(self): + if ( + self.namingservice is not None or + self.stringifiedobjectref is not None or + self.domainfinder is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='findby', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('findby') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='findby') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='findby', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2667,53 +3334,24 @@ def exportChildren(self, outfile, level, namespace_='', name_='findby', fromsubc self.namingservice.export(outfile, level, namespace_, name_='namingservice', pretty_print=pretty_print) if 
self.stringifiedobjectref is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sstringifiedobjectref>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.stringifiedobjectref).encode(ExternalEncoding), input_name='stringifiedobjectref'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.stringifiedobjectref), input_name='stringifiedobjectref')), eol_)) if self.domainfinder is not None: self.domainfinder.export(outfile, level, namespace_, name_='domainfinder', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.namingservice is not None or - self.stringifiedobjectref is not None or - self.domainfinder is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='findby'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.namingservice is not None: - showIndent(outfile, level) - outfile.write('namingservice=model_.namingservice(\n') - self.namingservice.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.stringifiedobjectref is not None: - showIndent(outfile, level) - outfile.write('stringifiedobjectref=%s,\n' % quote_python(self.stringifiedobjectref).encode(ExternalEncoding)) - if self.domainfinder is not None: - showIndent(outfile, level) - outfile.write('domainfinder=model_.domainfinder(\n') - self.domainfinder.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, 
node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'namingservice': obj_ = namingservice.factory() obj_.build(child_) - self.set_namingservice(obj_) + self.namingservice = obj_ + obj_.original_tagname_ = 'namingservice' elif nodeName_ == 'stringifiedobjectref': stringifiedobjectref_ = child_.text stringifiedobjectref_ = self.gds_validate_string(stringifiedobjectref_, node, 'stringifiedobjectref') @@ -2721,7 +3359,8 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'domainfinder': obj_ = domainfinder.factory() obj_.build(child_) - self.set_domainfinder(obj_) + self.domainfinder = obj_ + obj_.original_tagname_ = 'domainfinder' # end class findby @@ -2729,9 +3368,14 @@ class namingservice(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, namingservice) + if subclass is not None: + return subclass(*args_, **kwargs_) if namingservice.subclass: return namingservice.subclass(*args_, **kwargs_) else: @@ -2740,55 +3384,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='namingservice', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('namingservice') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' 
% (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='namingservice') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='namingservice', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='namingservice'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='namingservice', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='namingservice'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] 
self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2799,10 +3438,15 @@ class domainfinder(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, name=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, domainfinder) + if subclass is not None: + return subclass(*args_, **kwargs_) if domainfinder.subclass: return domainfinder.subclass(*args_, **kwargs_) else: @@ -2814,66 +3458,57 @@ def set_type(self, type_): self.type_ = type_ def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='domainfinder', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('domainfinder') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='domainfinder') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, 
namespace_='', name_='domainfinder', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='domainfinder'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='domainfinder', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='domainfinder'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + 
already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2886,11 +3521,17 @@ class filesystemnames(GeneratedsSuper): subclass = None superclass = None def __init__(self, filesystemname=None): + self.original_tagname_ = None if filesystemname is None: self.filesystemname = [] else: self.filesystemname = filesystemname def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, filesystemnames) + if subclass is not None: + return subclass(*args_, **kwargs_) if filesystemnames.subclass: return filesystemnames.subclass(*args_, **kwargs_) else: @@ -2899,20 +3540,33 @@ def factory(*args_, **kwargs_): def get_filesystemname(self): return self.filesystemname def set_filesystemname(self, filesystemname): self.filesystemname = filesystemname def add_filesystemname(self, value): self.filesystemname.append(value) - def insert_filesystemname(self, index, value): self.filesystemname[index] = value + def insert_filesystemname_at(self, index, value): self.filesystemname.insert(index, value) + def replace_filesystemname_at(self, index, value): self.filesystemname[index] = value filesystemnameProp = property(get_filesystemname, set_filesystemname) + def hasContent_(self): + if ( + self.filesystemname + ): + return True + else: + return False def 
export(self, outfile, level, namespace_='', name_='filesystemnames', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('filesystemnames') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='filesystemnames') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='filesystemnames', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2926,38 +3580,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='filesystemnames', eol_ = '' for filesystemname_ in self.filesystemname: filesystemname_.export(outfile, level, namespace_, name_='filesystemname', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.filesystemname - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='filesystemnames'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('filesystemname=[\n') - level += 1 - for filesystemname_ in self.filesystemname: - showIndent(outfile, level) - outfile.write('model_.filesystemname(\n') - filesystemname_.exportLiteral(outfile, level) - showIndent(outfile, level) - 
outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -2965,6 +3594,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = filesystemname.factory() obj_.build(child_) self.filesystemname.append(obj_) + obj_.original_tagname_ = 'filesystemname' # end class filesystemnames @@ -2972,10 +3602,15 @@ class filesystemname(GeneratedsSuper): subclass = None superclass = None def __init__(self, mountname=None, deviceid=None): + self.original_tagname_ = None self.mountname = _cast(None, mountname) self.deviceid = _cast(None, deviceid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, filesystemname) + if subclass is not None: + return subclass(*args_, **kwargs_) if filesystemname.subclass: return filesystemname.subclass(*args_, **kwargs_) else: @@ -2987,66 +3622,57 @@ def set_mountname(self, mountname): self.mountname = mountname def get_deviceid(self): return self.deviceid def set_deviceid(self, deviceid): self.deviceid = deviceid deviceidProp = property(get_deviceid, set_deviceid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='filesystemname', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('filesystemname') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = 
self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='filesystemname') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='filesystemname', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='filesystemname'): if self.mountname is not None and 'mountname' not in already_processed: - already_processed.append('mountname') - outfile.write(' mountname=%s' % (self.gds_format_string(quote_attrib(self.mountname).encode(ExternalEncoding), input_name='mountname'), )) + already_processed.add('mountname') + outfile.write(' mountname=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.mountname), input_name='mountname')), )) if self.deviceid is not None and 'deviceid' not in already_processed: - already_processed.append('deviceid') - outfile.write(' deviceid=%s' % (self.gds_format_string(quote_attrib(self.deviceid).encode(ExternalEncoding), input_name='deviceid'), )) + already_processed.add('deviceid') + outfile.write(' deviceid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.deviceid), input_name='deviceid')), )) def exportChildren(self, outfile, level, namespace_='', name_='filesystemname', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='filesystemname'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - 
def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.mountname is not None and 'mountname' not in already_processed: - already_processed.append('mountname') - showIndent(outfile, level) - outfile.write('mountname = "%s",\n' % (self.mountname,)) - if self.deviceid is not None and 'deviceid' not in already_processed: - already_processed.append('deviceid') - showIndent(outfile, level) - outfile.write('deviceid = "%s",\n' % (self.deviceid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('mountname', node) if value is not None and 'mountname' not in already_processed: - already_processed.append('mountname') + already_processed.add('mountname') self.mountname = value value = find_attr_value_('deviceid', node) if value is not None and 'deviceid' not in already_processed: - already_processed.append('deviceid') + already_processed.add('deviceid') self.deviceid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -3061,17 +3687,23 @@ class connections(GeneratedsSuper): in the DCD. The CF DomainManager will parse the connections element and make the connections when the CF DeviceManager registers with the CF DomainManager. 
To establish connections to - a CF DeviceManager, the DCD’s deviceconfiguration element’s - id attribute value is used for the SAD’s usesport element’s + a CF DeviceManager, the DCD’s deviceconfiguration element’s id + attribute value is used for the SAD’s usesport element’s componentinstantiationref element’s refid attribute value.""" subclass = None superclass = None def __init__(self, connectinterface=None): + self.original_tagname_ = None if connectinterface is None: self.connectinterface = [] else: self.connectinterface = connectinterface def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, connections) + if subclass is not None: + return subclass(*args_, **kwargs_) if connections.subclass: return connections.subclass(*args_, **kwargs_) else: @@ -3080,20 +3712,33 @@ def factory(*args_, **kwargs_): def get_connectinterface(self): return self.connectinterface def set_connectinterface(self, connectinterface): self.connectinterface = connectinterface def add_connectinterface(self, value): self.connectinterface.append(value) - def insert_connectinterface(self, index, value): self.connectinterface[index] = value + def insert_connectinterface_at(self, index, value): self.connectinterface.insert(index, value) + def replace_connectinterface_at(self, index, value): self.connectinterface[index] = value connectinterfaceProp = property(get_connectinterface, set_connectinterface) + def hasContent_(self): + if ( + self.connectinterface + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='connections', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('connections') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) 
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='connections') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='connections', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3107,38 +3752,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='connections', fro eol_ = '' for connectinterface_ in self.connectinterface: connectinterface_.export(outfile, level, namespace_, name_='connectinterface', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.connectinterface - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='connections'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('connectinterface=[\n') - level += 1 - for connectinterface_ in self.connectinterface: - showIndent(outfile, level) - outfile.write('model_.connectinterface(\n') - connectinterface_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, 
attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3146,6 +3766,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = connectinterface.factory() obj_.build(child_) self.connectinterface.append(obj_) + obj_.original_tagname_ = 'connectinterface' # end class connections @@ -3153,12 +3774,18 @@ class connectinterface(GeneratedsSuper): subclass = None superclass = None def __init__(self, id_=None, usesport=None, providesport=None, componentsupportedinterface=None, findby=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.usesport = usesport self.providesport = providesport self.componentsupportedinterface = componentsupportedinterface self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, connectinterface) + if subclass is not None: + return subclass(*args_, **kwargs_) if connectinterface.subclass: return connectinterface.subclass(*args_, **kwargs_) else: @@ -3177,28 +3804,43 @@ def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) + def hasContent_(self): + if ( + self.usesport is not None or + self.providesport is not None or + self.componentsupportedinterface is not None or + self.findby is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='connectinterface', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('connectinterface') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, 
pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='connectinterface') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='connectinterface', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='connectinterface'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) def exportChildren(self, outfile, level, namespace_='', name_='connectinterface', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -3212,78 +3854,39 @@ def exportChildren(self, outfile, level, namespace_='', name_='connectinterface' self.componentsupportedinterface.export(outfile, level, namespace_, name_='componentsupportedinterface', pretty_print=pretty_print) if self.findby is not None: self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.usesport is not None or - self.providesport is not None or - self.componentsupportedinterface is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='connectinterface'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - 
self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.usesport is not None: - showIndent(outfile, level) - outfile.write('usesport=model_.usesport(\n') - self.usesport.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.providesport is not None: - showIndent(outfile, level) - outfile.write('providesport=model_.providesport(\n') - self.providesport.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.componentsupportedinterface is not None: - showIndent(outfile, level) - outfile.write('componentsupportedinterface=model_.componentsupportedinterface(\n') - self.componentsupportedinterface.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'usesport': obj_ = usesport.factory() obj_.build(child_) - self.set_usesport(obj_) + self.usesport = obj_ + 
obj_.original_tagname_ = 'usesport' elif nodeName_ == 'providesport': obj_ = providesport.factory() obj_.build(child_) - self.set_providesport(obj_) + self.providesport = obj_ + obj_.original_tagname_ = 'providesport' elif nodeName_ == 'componentsupportedinterface': obj_ = componentsupportedinterface.factory() obj_.build(child_) - self.set_componentsupportedinterface(obj_) + self.componentsupportedinterface = obj_ + obj_.original_tagname_ = 'componentsupportedinterface' elif nodeName_ == 'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class connectinterface @@ -3291,12 +3894,18 @@ class usesport(GeneratedsSuper): subclass = None superclass = None def __init__(self, usesidentifier=None, componentinstantiationref=None, devicethatloadedthiscomponentref=None, deviceusedbythiscomponentref=None, findby=None): + self.original_tagname_ = None self.usesidentifier = usesidentifier self.componentinstantiationref = componentinstantiationref self.devicethatloadedthiscomponentref = devicethatloadedthiscomponentref self.deviceusedbythiscomponentref = deviceusedbythiscomponentref self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, usesport) + if subclass is not None: + return subclass(*args_, **kwargs_) if usesport.subclass: return usesport.subclass(*args_, **kwargs_) else: @@ -3317,18 +3926,34 @@ def set_deviceusedbythiscomponentref(self, deviceusedbythiscomponentref): self.d def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) + def hasContent_(self): + if ( + self.usesidentifier is not None or + self.componentinstantiationref is not None or + self.devicethatloadedthiscomponentref is not None or + self.deviceusedbythiscomponentref is not None or + self.findby is not None + ): + return True + else: + 
return False def export(self, outfile, level, namespace_='', name_='usesport', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('usesport') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='usesport') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='usesport', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3342,7 +3967,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='usesport', fromsu eol_ = '' if self.usesidentifier is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%susesidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.usesidentifier).encode(ExternalEncoding), input_name='usesidentifier'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.usesidentifier), input_name='usesidentifier')), eol_)) if self.componentinstantiationref is not None: self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) if self.devicethatloadedthiscomponentref is not None: @@ -3351,57 +3976,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='usesport', fromsu self.deviceusedbythiscomponentref.export(outfile, level, namespace_, name_='deviceusedbythiscomponentref', pretty_print=pretty_print) if self.findby is not None: 
self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.usesidentifier is not None or - self.componentinstantiationref is not None or - self.devicethatloadedthiscomponentref is not None or - self.deviceusedbythiscomponentref is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='usesport'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.usesidentifier is not None: - showIndent(outfile, level) - outfile.write('usesidentifier=%s,\n' % quote_python(self.usesidentifier).encode(ExternalEncoding)) - if self.componentinstantiationref is not None: - showIndent(outfile, level) - outfile.write('componentinstantiationref=model_.componentinstantiationref(\n') - self.componentinstantiationref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.devicethatloadedthiscomponentref is not None: - showIndent(outfile, level) - outfile.write('devicethatloadedthiscomponentref=model_.devicethatloadedthiscomponentref(\n') - self.devicethatloadedthiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deviceusedbythiscomponentref is not None: - showIndent(outfile, level) - outfile.write('deviceusedbythiscomponentref=model_.deviceusedbythiscomponentref(\n') - self.deviceusedbythiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - 
self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3412,19 +3993,23 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentinstantiationref': obj_ = componentinstantiationref.factory() obj_.build(child_) - self.set_componentinstantiationref(obj_) + self.componentinstantiationref = obj_ + obj_.original_tagname_ = 'componentinstantiationref' elif nodeName_ == 'devicethatloadedthiscomponentref': obj_ = devicethatloadedthiscomponentref.factory() obj_.build(child_) - self.set_devicethatloadedthiscomponentref(obj_) + self.devicethatloadedthiscomponentref = obj_ + obj_.original_tagname_ = 'devicethatloadedthiscomponentref' elif nodeName_ == 'deviceusedbythiscomponentref': obj_ = deviceusedbythiscomponentref.factory() obj_.build(child_) - self.set_deviceusedbythiscomponentref(obj_) + self.deviceusedbythiscomponentref = obj_ + obj_.original_tagname_ = 'deviceusedbythiscomponentref' elif nodeName_ == 'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class usesport @@ -3432,12 +4017,18 @@ class providesport(GeneratedsSuper): subclass = None superclass = None def __init__(self, providesidentifier=None, componentinstantiationref=None, devicethatloadedthiscomponentref=None, deviceusedbythiscomponentref=None, findby=None): + self.original_tagname_ = None self.providesidentifier = providesidentifier self.componentinstantiationref = componentinstantiationref self.devicethatloadedthiscomponentref = devicethatloadedthiscomponentref self.deviceusedbythiscomponentref = deviceusedbythiscomponentref 
self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, providesport) + if subclass is not None: + return subclass(*args_, **kwargs_) if providesport.subclass: return providesport.subclass(*args_, **kwargs_) else: @@ -3458,18 +4049,34 @@ def set_deviceusedbythiscomponentref(self, deviceusedbythiscomponentref): self.d def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) + def hasContent_(self): + if ( + self.providesidentifier is not None or + self.componentinstantiationref is not None or + self.devicethatloadedthiscomponentref is not None or + self.deviceusedbythiscomponentref is not None or + self.findby is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='providesport', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('providesport') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='providesport') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='providesport', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3483,7 +4090,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='providesport', fr eol_ = '' if self.providesidentifier is not None: 
showIndent(outfile, level, pretty_print) - outfile.write('<%sprovidesidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.providesidentifier).encode(ExternalEncoding), input_name='providesidentifier'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.providesidentifier), input_name='providesidentifier')), eol_)) if self.componentinstantiationref is not None: self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) if self.devicethatloadedthiscomponentref is not None: @@ -3492,57 +4099,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='providesport', fr self.deviceusedbythiscomponentref.export(outfile, level, namespace_, name_='deviceusedbythiscomponentref', pretty_print=pretty_print) if self.findby is not None: self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.providesidentifier is not None or - self.componentinstantiationref is not None or - self.devicethatloadedthiscomponentref is not None or - self.deviceusedbythiscomponentref is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='providesport'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.providesidentifier is not None: - showIndent(outfile, level) - outfile.write('providesidentifier=%s,\n' % quote_python(self.providesidentifier).encode(ExternalEncoding)) - if self.componentinstantiationref is not None: - showIndent(outfile, level) - outfile.write('componentinstantiationref=model_.componentinstantiationref(\n') - 
self.componentinstantiationref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.devicethatloadedthiscomponentref is not None: - showIndent(outfile, level) - outfile.write('devicethatloadedthiscomponentref=model_.devicethatloadedthiscomponentref(\n') - self.devicethatloadedthiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deviceusedbythiscomponentref is not None: - showIndent(outfile, level) - outfile.write('deviceusedbythiscomponentref=model_.deviceusedbythiscomponentref(\n') - self.deviceusedbythiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3553,19 +4116,23 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentinstantiationref': obj_ = componentinstantiationref.factory() obj_.build(child_) - self.set_componentinstantiationref(obj_) + self.componentinstantiationref = obj_ + obj_.original_tagname_ = 'componentinstantiationref' elif nodeName_ == 'devicethatloadedthiscomponentref': obj_ = devicethatloadedthiscomponentref.factory() obj_.build(child_) - self.set_devicethatloadedthiscomponentref(obj_) + self.devicethatloadedthiscomponentref = obj_ + obj_.original_tagname_ = 'devicethatloadedthiscomponentref' elif nodeName_ == 
'deviceusedbythiscomponentref': obj_ = deviceusedbythiscomponentref.factory() obj_.build(child_) - self.set_deviceusedbythiscomponentref(obj_) + self.deviceusedbythiscomponentref = obj_ + obj_.original_tagname_ = 'deviceusedbythiscomponentref' elif nodeName_ == 'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class providesport @@ -3573,10 +4140,16 @@ class componentsupportedinterface(GeneratedsSuper): subclass = None superclass = None def __init__(self, supportedidentifier=None, componentinstantiationref=None, findby=None): + self.original_tagname_ = None self.supportedidentifier = supportedidentifier self.componentinstantiationref = componentinstantiationref self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentsupportedinterface) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentsupportedinterface.subclass: return componentsupportedinterface.subclass(*args_, **kwargs_) else: @@ -3591,18 +4164,32 @@ def set_componentinstantiationref(self, componentinstantiationref): self.compone def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) + def hasContent_(self): + if ( + self.supportedidentifier is not None or + self.componentinstantiationref is not None or + self.findby is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentsupportedinterface', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentsupportedinterface') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) 
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentsupportedinterface') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentsupportedinterface', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3616,48 +4203,18 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentsupporte eol_ = '' if self.supportedidentifier is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%ssupportedidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.supportedidentifier).encode(ExternalEncoding), input_name='supportedidentifier'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.supportedidentifier), input_name='supportedidentifier')), eol_)) if self.componentinstantiationref is not None: self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) if self.findby is not None: self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.supportedidentifier is not None or - self.componentinstantiationref is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentsupportedinterface'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, 
level, name_): - if self.supportedidentifier is not None: - showIndent(outfile, level) - outfile.write('supportedidentifier=%s,\n' % quote_python(self.supportedidentifier).encode(ExternalEncoding)) - if self.componentinstantiationref is not None: - showIndent(outfile, level) - outfile.write('componentinstantiationref=model_.componentinstantiationref(\n') - self.componentinstantiationref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3668,31 +4225,74 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentinstantiationref': obj_ = componentinstantiationref.factory() obj_.build(child_) - self.set_componentinstantiationref(obj_) + self.componentinstantiationref = obj_ + obj_.original_tagname_ = 'componentinstantiationref' elif nodeName_ == 'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class componentsupportedinterface +GDSClassesMapping = { + 'componentfile': componentfile, + 'componentfileref': componentfileref, + 'componentfiles': componentfiles, + 'componentinstantiation': componentinstantiation, + 'componentinstantiationref': componentinstantiationref, + 'componentplacement': componentplacement, + 'componentproperties': componentproperties, + 'componentsupportedinterface': 
componentsupportedinterface, + 'compositepartofdevice': compositepartofdevice, + 'connectinterface': connectinterface, + 'connections': connections, + 'deployondevice': deployondevice, + 'deviceconfiguration': deviceconfiguration, + 'devicemanagersoftpkg': devicemanagersoftpkg, + 'devicepkgfile': devicepkgfile, + 'devicethatloadedthiscomponentref': devicethatloadedthiscomponentref, + 'deviceusedbythiscomponentref': deviceusedbythiscomponentref, + 'domainfinder': domainfinder, + 'domainmanager': domainmanager, + 'filesystemname': filesystemname, + 'filesystemnames': filesystemnames, + 'findby': findby, + 'localfile': localfile, + 'namingservice': namingservice, + 'partitioning': partitioning, + 'providesport': providesport, + 'simpleref': simpleref, + 'simplesequenceref': simplesequenceref, + 'structref': structref, + 'structsequenceref': structsequenceref, + 'structvalue': structvalue, + 'usesport': usesport, + 'values': values, +} + + USAGE_TEXT = """ Usage: python .py [ -s ] """ + def usage(): - print USAGE_TEXT + print(USAGE_TEXT) sys.exit(1) def get_root_tag(node): tag = Tag_pattern_.match(node.tag).groups()[-1] - rootClass = globals().get(tag) + rootClass = GDSClassesMapping.get(tag) + if rootClass is None: + rootClass = globals().get(tag) return tag, rootClass -def parse(inFileName): - doc = parsexml_(inFileName) +def parse(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -3702,16 +4302,18 @@ def parse(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_=rootTag, -## namespacedef_='', -## pretty_print=True) +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='', +## pretty_print=True) return rootObj -def parseString(inString): - from StringIO import StringIO - doc = parsexml_(StringIO(inString)) +def parseEtree(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -3721,14 +4323,47 @@ def parseString(inString): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_="deviceconfiguration", -## namespacedef_='') + mapping = {} + rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) + reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) +## if not silence: +## content = etree_.tostring( +## rootElement, pretty_print=True, +## xml_declaration=True, encoding="utf-8") +## sys.stdout.write(content) +## sys.stdout.write('\n') + return rootObj, rootElement, mapping, reverse_mapping + + +def parseString(inString, silence=False): + '''Parse a string, create the object tree, and export it. + + Arguments: + - inString -- A string. This XML fragment should not start + with an XML declaration containing an encoding. + - silence -- A boolean. If False, export the object. + Returns -- The root object in the tree. + ''' + parser = None + rootNode= parsexmlstring_(inString, parser) + rootTag, rootClass = get_root_tag(rootNode) + if rootClass is None: + rootTag = 'deviceconfiguration' + rootClass = deviceconfiguration + rootObj = rootClass.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. 
+## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='') return rootObj -def parseLiteral(inFileName): - doc = parsexml_(inFileName) +def parseLiteral(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -3738,11 +4373,12 @@ def parseLiteral(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('#from dcd import *\n\n') -## sys.stdout.write('import dcd as model_\n\n') -## sys.stdout.write('rootObj = model_.rootTag(\n') -## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) -## sys.stdout.write(')\n') +## if not silence: +## sys.stdout.write('#from dcd import *\n\n') +## sys.stdout.write('import dcd as model_\n\n') +## sys.stdout.write('rootObj = model_.rootClass(\n') +## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) +## sys.stdout.write(')\n') return rootObj @@ -3760,6 +4396,7 @@ def main(): __all__ = [ + "affinity", "componentfile", "componentfileref", "componentfiles", @@ -3771,6 +4408,7 @@ def main(): "compositepartofdevice", "connectinterface", "connections", + "deployerrequires", "deployondevice", "deviceconfiguration", "devicemanagersoftpkg", @@ -3782,7 +4420,9 @@ def main(): "filesystemname", "filesystemnames", "findby", + "idvalue", "localfile", + "loggingconfig", "namingservice", "partitioning", "providesport", @@ -3793,4 +4433,4 @@ def main(): "structvalue", "usesport", "values" - ] +] diff --git a/redhawk/src/base/framework/python/ossie/parsers/dmd.py b/redhawk/src/base/framework/python/ossie/parsers/dmd.py index 29e18e42c..7041da59a 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/dmd.py +++ b/redhawk/src/base/framework/python/ossie/parsers/dmd.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -# +# -*- coding: utf-8 -*- + # This file is protected by Copyright. 
Please refer to the COPYRIGHT file # distributed with this source distribution. # @@ -18,70 +18,97 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. -# # -# Generated Thu Sep 12 14:49:30 2013 by generateDS.py version 2.7c. +# Generated Mon Jul 30 12:29:34 2018 by generateDS.py version 2.29.14. +# Python 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] +# +# Command line options: +# ('-f', '') +# ('--silence', '') +# ('-m', '') +# ('-o', 'ossie/parsers/dmd.py') +# +# Command line arguments: +# ../../../xml/xsd/dmd.xsd +# +# Command line: +# /usr/bin/generateDS.py -f --silence -m -o "ossie/parsers/dmd.py" ../../../xml/xsd/dmd.xsd +# +# Current working directory (os.getcwd()): +# python # import sys -import getopt import re as re_ - -etree_ = None -Verbose_import_ = False -( XMLParser_import_none, XMLParser_import_lxml, - XMLParser_import_elementtree - ) = range(3) -XMLParser_import_library = None +import base64 +import datetime as datetime_ +import warnings as warnings_ try: - # lxml from lxml import etree as etree_ - XMLParser_import_library = XMLParser_import_lxml - if Verbose_import_: - print("running with lxml.etree") except ImportError: - try: - # cElementTree from Python 2.5+ - import xml.etree.cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree on Python 2.5+") - except ImportError: - try: - # ElementTree from Python 2.5+ - import xml.etree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree on Python 2.5+") - except ImportError: - try: - # normal cElementTree install - import cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree") - except ImportError: - try: - # normal ElementTree 
install - import elementtree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree") - except ImportError: - raise ImportError("Failed to import ElementTree from any known place") - -def parsexml_(*args, **kwargs): - if (XMLParser_import_library == XMLParser_import_lxml and - 'parser' not in kwargs): + from xml.etree import ElementTree as etree_ + + +Validate_simpletypes_ = True +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str + + +def parsexml_(infile, parser=None, **kwargs): + if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. - kwargs['parser'] = etree_.ETCompatXMLParser() - doc = etree_.parse(*args, **kwargs) + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + doc = etree_.parse(infile, parser=parser, **kwargs) return doc +def parsexmlstring_(instring, parser=None, **kwargs): + if parser is None: + # Use the lxml ElementTree compatible parser so that, e.g., + # we ignore comments. + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + element = etree_.fromstring(instring, parser=parser, **kwargs) + return element + +# +# Namespace prefix definition table (and other attributes, too) +# +# The module generatedsnamespaces, if it is importable, must contain +# a dictionary named GeneratedsNamespaceDefs. This Python dictionary +# should map element type names (strings) to XML schema namespace prefix +# definitions. The export method for any class for which there is +# a namespace prefix definition, will export that definition in the +# XML representation of that element. See the export method of +# any generated element type class for a example of the use of this +# table. 
+# A sample table is: +# +# # File: generatedsnamespaces.py # -# User methods +# GenerateDSNamespaceDefs = { +# "ElementtypeA": "http://www.xxx.com/namespaceA", +# "ElementtypeB": "http://www.xxx.com/namespaceB", +# } +# + +try: + from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_ +except ImportError: + GenerateDSNamespaceDefs_ = {} + +# +# The root super-class for element type classes # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class @@ -89,67 +116,273 @@ def parsexml_(*args, **kwargs): try: from generatedssuper import GeneratedsSuper -except ImportError, exp: - +except ImportError as exp: + class GeneratedsSuper(object): + tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$') + class _FixedOffsetTZ(datetime_.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime_.timedelta(minutes=offset) + self.__name = name + def utcoffset(self, dt): + return self.__offset + def tzname(self, dt): + return self.__name + def dst(self, dt): + return None def gds_format_string(self, input_data, input_name=''): return input_data - def gds_validate_string(self, input_data, node, input_name=''): + def gds_validate_string(self, input_data, node=None, input_name=''): + if not input_data: + return '' + else: + return input_data + def gds_format_base64(self, input_data, input_name=''): + return base64.b64encode(input_data) + def gds_validate_base64(self, input_data, node=None, input_name=''): return input_data def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data - def gds_validate_integer(self, input_data, node, input_name=''): + def gds_validate_integer(self, input_data, node=None, input_name=''): return input_data def gds_format_integer_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_integer_list(self, input_data, node, input_name=''): + return '%s' % ' 
'.join(input_data) + def gds_validate_integer_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + int(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of integers') - return input_data + return values def gds_format_float(self, input_data, input_name=''): - return '%f' % input_data - def gds_validate_float(self, input_data, node, input_name=''): + return ('%.15f' % input_data).rstrip('0') + def gds_validate_float(self, input_data, node=None, input_name=''): return input_data def gds_format_float_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_float_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_float_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of floats') - return input_data + return values def gds_format_double(self, input_data, input_name=''): return '%e' % input_data - def gds_validate_double(self, input_data, node, input_name=''): + def gds_validate_double(self, input_data, node=None, input_name=''): return input_data def gds_format_double_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_double_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_double_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of doubles') - return input_data + return values def gds_format_boolean(self, input_data, input_name=''): - return 
'%s' % input_data - def gds_validate_boolean(self, input_data, node, input_name=''): + return ('%s' % input_data).lower() + def gds_validate_boolean(self, input_data, node=None, input_name=''): return input_data def gds_format_boolean_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_boolean_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_boolean_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: if value not in ('true', '1', 'false', '0', ): - raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")') + raise_parse_error( + node, + 'Requires sequence of booleans ' + '("true", "1", "false", "0")') + return values + def gds_validate_datetime(self, input_data, node=None, input_name=''): + return input_data + def gds_format_datetime(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + @classmethod + def gds_parse_datetime(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = 
input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + time_parts = input_data.split('.') + if len(time_parts) > 1: + micro_seconds = int(float('0.' + time_parts[1]) * 1000000) + input_data = '%s.%s' % (time_parts[0], micro_seconds, ) + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt + def gds_validate_date(self, input_data, node=None, input_name=''): return input_data + def gds_format_date(self, input_data, input_name=''): + _svalue = '%04d-%02d-%02d' % ( + input_data.year, + input_data.month, + input_data.day, + ) + try: + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format( + hours, minutes) + except AttributeError: + pass + return _svalue + @classmethod + def gds_parse_date(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + dt = 
datetime_.datetime.strptime(input_data, '%Y-%m-%d') + dt = dt.replace(tzinfo=tz) + return dt.date() + def gds_validate_time(self, input_data, node=None, input_name=''): + return input_data + def gds_format_time(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%02d:%02d:%02d' % ( + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%02d:%02d:%02d.%s' % ( + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + def gds_validate_simple_patterns(self, patterns, target): + # pat is a list of lists of strings/patterns. 
We should: + # - AND the outer elements + # - OR the inner elements + found1 = True + for patterns1 in patterns: + found2 = False + for patterns2 in patterns1: + if re_.search(patterns2, target) is not None: + found2 = True + break + if not found2: + found1 = False + break + return found1 + @classmethod + def gds_parse_time(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + if len(input_data.split('.')) > 1: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt.time() def gds_str_lower(self, instring): return instring.lower() def get_path_(self, node): @@ -180,6 +413,38 @@ def get_class_obj_(self, node, default_class=None): return class_obj1 def gds_build_any(self, node, type_name=None): return None + @classmethod + def gds_reverse_node_mapping(cls, mapping): + return dict(((v, k) for k, v in mapping.iteritems())) + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring + @staticmethod + def convert_unicode(instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result + def __eq__(self, other): + if type(self) != type(other): + return False + return self.__dict__ == other.__dict__ + def __ne__(self, other): + return not self.__eq__(other) + + def 
getSubclassFromModule_(module, class_): + '''Get the subclass of a class from a specific module.''' + name = class_.__name__ + 'Sub' + if hasattr(module, name): + return getattr(module, name) + else: + return None # @@ -205,29 +470,50 @@ def gds_build_any(self, node, type_name=None): Tag_pattern_ = re_.compile(r'({.*})?(.*)') String_cleanup_pat_ = re_.compile(r"[\n\r\s]+") Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)') +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +# Change this to redirect the generated superclass module to use a +# specific subclass module. +CurrentSubclassModule_ = None # # Support/utility functions. # + def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write(' ') + def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." if not inStr: return '' - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') @@ -240,6 +526,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -255,6 +542,7 @@ def quote_python(inStr): else: return '"""%s"""' % s1 + def get_all_text_(node): if node.text is not None: text = node.text @@ -265,6 +553,7 @@ def get_all_text_(node): text += child.tail return text + def 
find_attr_value_(attr_name, node): attrs = node.attrib attr_parts = attr_name.split(':') @@ -282,11 +571,9 @@ def find_attr_value_(attr_name, node): class GDSParseError(Exception): pass + def raise_parse_error(node, msg): - if XMLParser_import_library == XMLParser_import_lxml: - msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) - else: - msg = '%s (element %s)' % (msg, node.tag, ) + msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) raise GDSParseError(msg) @@ -305,6 +592,7 @@ class MixedContainer: TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + TypeBase64 = 8 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type @@ -318,49 +606,104 @@ def getValue(self): return self.value def getName(self): return self.name - def export(self, outfile, level, name, namespace, pretty_print=True): + def export(self, outfile, level, name, namespace, + pretty_print=True): if self.category == MixedContainer.CategoryText: # Prevent exporting empty content as empty lines. 
- if self.value.strip(): + if self.value.strip(): outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace, name, pretty_print) + self.value.export( + outfile, level, namespace, name, + pretty_print=pretty_print) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) + outfile.write('<%s>%s' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) + outfile.write('<%s>%d' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) + outfile.write('<%s>%f' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeBase64: + outfile.write('<%s>%s' % ( + self.name, + base64.b64encode(self.value), + self.name)) + def to_etree(self, element): + if self.category == MixedContainer.CategoryText: + # Prevent exporting empty content as empty lines. 
+ if self.value.strip(): + if len(element) > 0: + if element[-1].tail is None: + element[-1].tail = self.value + else: + element[-1].tail += self.value + else: + if element.text is None: + element.text = self.value + else: + element.text += self.value + elif self.category == MixedContainer.CategorySimple: + subelement = etree_.SubElement( + element, '%s' % self.name) + subelement.text = self.to_etree_simple() + else: # category == MixedContainer.CategoryComplex + self.value.to_etree(element) + def to_etree_simple(self): + if self.content_type == MixedContainer.TypeString: + text = self.value + elif (self.content_type == MixedContainer.TypeInteger or + self.content_type == MixedContainer.TypeBoolean): + text = '%d' % self.value + elif (self.content_type == MixedContainer.TypeFloat or + self.content_type == MixedContainer.TypeDecimal): + text = '%f' % self.value elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) + text = '%g' % self.value + elif self.content_type == MixedContainer.TypeBase64: + text = '%s' % base64.b64encode(self.value) + return text def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \ - 
(self.category, self.content_type, self.name,)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s",\n' % ( + self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class MemberSpec_(object): - def __init__(self, name='', data_type='', container=0): + def __init__(self, name='', data_type='', container=0, + optional=0, child_attrs=None, choice=None): self.name = name self.data_type = data_type self.container = container + self.child_attrs = child_attrs + self.choice = choice + self.optional = optional def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -375,6 +718,13 @@ def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container + def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs + def get_child_attrs(self): return self.child_attrs + def set_choice(self, choice): self.choice = choice + def get_choice(self): return self.choice + def set_optional(self, optional): self.optional = optional + def get_optional(self): return self.optional + def _cast(typ, value): if typ is None or value is None: @@ -385,6 +735,7 @@ def _cast(typ, value): # Data representation classes. 
# + class domainmanagerconfiguration(GeneratedsSuper): """DOMAINMANAGER CONFIGURATION DESCRIPTOR.The domainmanagerconfiguration element id attribute is a DCE UUID @@ -392,12 +743,18 @@ class domainmanagerconfiguration(GeneratedsSuper): subclass = None superclass = None def __init__(self, id_=None, name=None, description=None, domainmanagersoftpkg=None, services=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.name = _cast(None, name) self.description = description self.domainmanagersoftpkg = domainmanagersoftpkg self.services = services def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, domainmanagerconfiguration) + if subclass is not None: + return subclass(*args_, **kwargs_) if domainmanagerconfiguration.subclass: return domainmanagerconfiguration.subclass(*args_, **kwargs_) else: @@ -413,34 +770,48 @@ def get_services(self): return self.services def set_services(self, services): self.services = services servicesProp = property(get_services, set_services) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + self.description is not None or + self.domainmanagersoftpkg is not None or + self.services is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='domainmanagerconfiguration', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('domainmanagerconfiguration') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, 
name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='domainmanagerconfiguration') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='domainmanagerconfiguration', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='domainmanagerconfiguration'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='domainmanagerconfiguration', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -448,63 +819,26 @@ def exportChildren(self, outfile, level, namespace_='', name_='domainmanagerconf eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % 
(self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.domainmanagersoftpkg is not None: self.domainmanagersoftpkg.export(outfile, level, namespace_, name_='domainmanagersoftpkg', pretty_print=pretty_print) if self.services is not None: self.services.export(outfile, level, namespace_, name_='services', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.domainmanagersoftpkg is not None or - self.services is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='domainmanagerconfiguration'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.domainmanagersoftpkg is not None: - showIndent(outfile, level) - outfile.write('domainmanagersoftpkg=model_.domainmanagersoftpkg(\n') - self.domainmanagersoftpkg.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.services is not None: - showIndent(outfile, level) - outfile.write('services=model_.services(\n') - self.services.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + 
self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': @@ -514,11 +848,13 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'domainmanagersoftpkg': obj_ = domainmanagersoftpkg.factory() obj_.build(child_) - self.set_domainmanagersoftpkg(obj_) + self.domainmanagersoftpkg = obj_ + obj_.original_tagname_ = 'domainmanagersoftpkg' elif nodeName_ == 'services': obj_ = services.factory() obj_.build(child_) - self.set_services(obj_) + self.services = obj_ + obj_.original_tagname_ = 'services' # end class domainmanagerconfiguration @@ -526,8 +862,14 @@ class domainmanagersoftpkg(GeneratedsSuper): subclass = None superclass = None def __init__(self, localfile=None): + self.original_tagname_ = None self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, domainmanagersoftpkg) + if subclass is not None: + return subclass(*args_, **kwargs_) if domainmanagersoftpkg.subclass: return domainmanagersoftpkg.subclass(*args_, **kwargs_) else: @@ -536,18 +878,30 @@ def factory(*args_, **kwargs_): def get_localfile(self): return self.localfile def set_localfile(self, localfile): self.localfile = localfile localfileProp = property(get_localfile, set_localfile) + def hasContent_(self): + if ( + 
self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='domainmanagersoftpkg', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('domainmanagersoftpkg') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='domainmanagersoftpkg') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='domainmanagersoftpkg', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -561,39 +915,21 @@ def exportChildren(self, outfile, level, namespace_='', name_='domainmanagersoft eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='domainmanagersoftpkg'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localfile(\n') - self.localfile.exportLiteral(outfile, level) - showIndent(outfile, level) - 
outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localfile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class domainmanagersoftpkg @@ -601,9 +937,14 @@ class localfile(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, localfile) + if subclass is not None: + return subclass(*args_, **kwargs_) if localfile.subclass: return localfile.subclass(*args_, **kwargs_) else: @@ -612,55 +953,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='localfile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('localfile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, 
name_='localfile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='localfile', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='localfile'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='localfile', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='localfile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - 
already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -671,11 +1007,17 @@ class services(GeneratedsSuper): subclass = None superclass = None def __init__(self, service=None): + self.original_tagname_ = None if service is None: self.service = [] else: self.service = service def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, services) + if subclass is not None: + return subclass(*args_, **kwargs_) if services.subclass: return services.subclass(*args_, **kwargs_) else: @@ -684,20 +1026,33 @@ def factory(*args_, **kwargs_): def get_service(self): return self.service def set_service(self, service): self.service = service def add_service(self, value): self.service.append(value) - def insert_service(self, index, value): self.service[index] = value + def insert_service_at(self, index, value): self.service.insert(index, value) + def replace_service_at(self, index, value): self.service[index] = value serviceProp = property(get_service, set_service) + def hasContent_(self): + if ( + self.service + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='services', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('services') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='services') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, 
pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='services', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -711,38 +1066,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='services', fromsu eol_ = '' for service_ in self.service: service_.export(outfile, level, namespace_, name_='service', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.service - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='services'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('service=[\n') - level += 1 - for service_ in self.service: - showIndent(outfile, level) - outfile.write('model_.service(\n') - service_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -750,6 +1080,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = service.factory() obj_.build(child_) self.service.append(obj_) + obj_.original_tagname_ = 'service' # end class services @@ -757,9 +1088,15 @@ class service(GeneratedsSuper): subclass = None superclass = None def __init__(self, 
usesidentifier=None, findby=None): + self.original_tagname_ = None self.usesidentifier = usesidentifier self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, service) + if subclass is not None: + return subclass(*args_, **kwargs_) if service.subclass: return service.subclass(*args_, **kwargs_) else: @@ -771,18 +1108,31 @@ def set_usesidentifier(self, usesidentifier): self.usesidentifier = usesidentifi def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) + def hasContent_(self): + if ( + self.usesidentifier is not None or + self.findby is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='service', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('service') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='service') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='service', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -796,39 +1146,16 @@ def exportChildren(self, outfile, level, namespace_='', name_='service', fromsub eol_ = '' if self.usesidentifier is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%susesidentifier>%s%s' % (namespace_, 
self.gds_format_string(quote_xml(self.usesidentifier).encode(ExternalEncoding), input_name='usesidentifier'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.usesidentifier), input_name='usesidentifier')), eol_)) if self.findby is not None: self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.usesidentifier is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='service'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.usesidentifier is not None: - showIndent(outfile, level) - outfile.write('usesidentifier=%s,\n' % quote_python(self.usesidentifier).encode(ExternalEncoding)) - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -839,7 +1166,8 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class service @@ -847,10 +1175,16 @@ class findby(GeneratedsSuper): subclass = None 
superclass = None def __init__(self, namingservice=None, stringifiedobjectref=None, domainfinder=None): + self.original_tagname_ = None self.namingservice = namingservice self.stringifiedobjectref = stringifiedobjectref self.domainfinder = domainfinder def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, findby) + if subclass is not None: + return subclass(*args_, **kwargs_) if findby.subclass: return findby.subclass(*args_, **kwargs_) else: @@ -865,18 +1199,32 @@ def set_stringifiedobjectref(self, stringifiedobjectref): self.stringifiedobject def get_domainfinder(self): return self.domainfinder def set_domainfinder(self, domainfinder): self.domainfinder = domainfinder domainfinderProp = property(get_domainfinder, set_domainfinder) + def hasContent_(self): + if ( + self.namingservice is not None or + self.stringifiedobjectref is not None or + self.domainfinder is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='findby', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('findby') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='findby') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='findby', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -892,53 +1240,24 @@ def 
exportChildren(self, outfile, level, namespace_='', name_='findby', fromsubc self.namingservice.export(outfile, level, namespace_, name_='namingservice', pretty_print=pretty_print) if self.stringifiedobjectref is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sstringifiedobjectref>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.stringifiedobjectref).encode(ExternalEncoding), input_name='stringifiedobjectref'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.stringifiedobjectref), input_name='stringifiedobjectref')), eol_)) if self.domainfinder is not None: self.domainfinder.export(outfile, level, namespace_, name_='domainfinder', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.namingservice is not None or - self.stringifiedobjectref is not None or - self.domainfinder is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='findby'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.namingservice is not None: - showIndent(outfile, level) - outfile.write('namingservice=model_.namingservice(\n') - self.namingservice.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.stringifiedobjectref is not None: - showIndent(outfile, level) - outfile.write('stringifiedobjectref=%s,\n' % quote_python(self.stringifiedobjectref).encode(ExternalEncoding)) - if self.domainfinder is not None: - showIndent(outfile, level) - outfile.write('domainfinder=model_.domainfinder(\n') - self.domainfinder.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + 
already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'namingservice': obj_ = namingservice.factory() obj_.build(child_) - self.set_namingservice(obj_) + self.namingservice = obj_ + obj_.original_tagname_ = 'namingservice' elif nodeName_ == 'stringifiedobjectref': stringifiedobjectref_ = child_.text stringifiedobjectref_ = self.gds_validate_string(stringifiedobjectref_, node, 'stringifiedobjectref') @@ -946,7 +1265,8 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'domainfinder': obj_ = domainfinder.factory() obj_.build(child_) - self.set_domainfinder(obj_) + self.domainfinder = obj_ + obj_.original_tagname_ = 'domainfinder' # end class findby @@ -954,9 +1274,14 @@ class namingservice(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, namingservice) + if subclass is not None: + return subclass(*args_, **kwargs_) if namingservice.subclass: return namingservice.subclass(*args_, **kwargs_) else: @@ -965,55 +1290,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='namingservice', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('namingservice') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ 
if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='namingservice') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='namingservice', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='namingservice'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') outfile.write(' name=%s' % (quote_attrib(self.name), )) def exportChildren(self, outfile, level, namespace_='', name_='namingservice', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='namingservice'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = %s,\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] 
self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1024,10 +1344,15 @@ class domainfinder(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, name=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, domainfinder) + if subclass is not None: + return subclass(*args_, **kwargs_) if domainfinder.subclass: return domainfinder.subclass(*args_, **kwargs_) else: @@ -1039,89 +1364,96 @@ def set_type(self, type_): self.type_ = type_ def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='domainfinder', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('domainfinder') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='domainfinder') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, 
namespace_='', name_='domainfinder', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='domainfinder'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='domainfinder', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='domainfinder'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + 
already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class domainfinder +GDSClassesMapping = { + 'domainfinder': domainfinder, + 'domainmanagerconfiguration': domainmanagerconfiguration, + 'domainmanagersoftpkg': domainmanagersoftpkg, + 'findby': findby, + 'localfile': localfile, + 'namingservice': namingservice, + 'service': service, + 'services': services, +} + + USAGE_TEXT = """ Usage: python .py [ -s ] """ + def usage(): - print USAGE_TEXT + print(USAGE_TEXT) sys.exit(1) def get_root_tag(node): tag = Tag_pattern_.match(node.tag).groups()[-1] - rootClass = globals().get(tag) + rootClass = GDSClassesMapping.get(tag) + if rootClass is None: + rootClass = globals().get(tag) return tag, rootClass -def parse(inFileName): - doc = parsexml_(inFileName) +def parse(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1131,16 +1463,18 @@ def parse(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_=rootTag, -## namespacedef_='', -## pretty_print=True) +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='', +## pretty_print=True) return rootObj -def parseString(inString): - from StringIO import StringIO - doc = parsexml_(StringIO(inString)) +def parseEtree(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1150,14 +1484,47 @@ def parseString(inString): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_="domainmanagerconfiguration", -## namespacedef_='') + mapping = {} + rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) + reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) +## if not silence: +## content = etree_.tostring( +## rootElement, pretty_print=True, +## xml_declaration=True, encoding="utf-8") +## sys.stdout.write(content) +## sys.stdout.write('\n') + return rootObj, rootElement, mapping, reverse_mapping + + +def parseString(inString, silence=False): + '''Parse a string, create the object tree, and export it. + + Arguments: + - inString -- A string. This XML fragment should not start + with an XML declaration containing an encoding. + - silence -- A boolean. If False, export the object. + Returns -- The root object in the tree. + ''' + parser = None + rootNode= parsexmlstring_(inString, parser) + rootTag, rootClass = get_root_tag(rootNode) + if rootClass is None: + rootTag = 'domainmanagerconfiguration' + rootClass = domainmanagerconfiguration + rootObj = rootClass.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. 
+## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='') return rootObj -def parseLiteral(inFileName): - doc = parsexml_(inFileName) +def parseLiteral(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1167,11 +1534,12 @@ def parseLiteral(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('#from dmd import *\n\n') -## sys.stdout.write('import dmd as model_\n\n') -## sys.stdout.write('rootObj = model_.rootTag(\n') -## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) -## sys.stdout.write(')\n') +## if not silence: +## sys.stdout.write('#from dmd import *\n\n') +## sys.stdout.write('import dmd as model_\n\n') +## sys.stdout.write('rootObj = model_.rootClass(\n') +## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) +## sys.stdout.write(')\n') return rootObj @@ -1197,4 +1565,4 @@ def main(): "namingservice", "service", "services" - ] +] diff --git a/redhawk/src/base/framework/python/ossie/parsers/dpd.py b/redhawk/src/base/framework/python/ossie/parsers/dpd.py index 8a67690fc..88d5647b6 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/dpd.py +++ b/redhawk/src/base/framework/python/ossie/parsers/dpd.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -# +# -*- coding: utf-8 -*- + # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # @@ -18,70 +18,97 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. -# # -# Generated Thu Sep 12 14:49:30 2013 by generateDS.py version 2.7c. +# Generated Mon Jul 30 12:29:34 2018 by generateDS.py version 2.29.14. 
+# Python 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] +# +# Command line options: +# ('-f', '') +# ('--silence', '') +# ('-m', '') +# ('-o', 'ossie/parsers/dpd.py') +# +# Command line arguments: +# ../../../xml/xsd/dpd.xsd +# +# Command line: +# /usr/bin/generateDS.py -f --silence -m -o "ossie/parsers/dpd.py" ../../../xml/xsd/dpd.xsd +# +# Current working directory (os.getcwd()): +# python # import sys -import getopt import re as re_ - -etree_ = None -Verbose_import_ = False -( XMLParser_import_none, XMLParser_import_lxml, - XMLParser_import_elementtree - ) = range(3) -XMLParser_import_library = None +import base64 +import datetime as datetime_ +import warnings as warnings_ try: - # lxml from lxml import etree as etree_ - XMLParser_import_library = XMLParser_import_lxml - if Verbose_import_: - print("running with lxml.etree") except ImportError: - try: - # cElementTree from Python 2.5+ - import xml.etree.cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree on Python 2.5+") - except ImportError: - try: - # ElementTree from Python 2.5+ - import xml.etree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree on Python 2.5+") - except ImportError: - try: - # normal cElementTree install - import cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree") - except ImportError: - try: - # normal ElementTree install - import elementtree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree") - except ImportError: - raise ImportError("Failed to import ElementTree from any known place") - -def parsexml_(*args, **kwargs): - if (XMLParser_import_library == XMLParser_import_lxml and - 'parser' not in kwargs): + from 
xml.etree import ElementTree as etree_ + + +Validate_simpletypes_ = True +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str + + +def parsexml_(infile, parser=None, **kwargs): + if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. - kwargs['parser'] = etree_.ETCompatXMLParser() - doc = etree_.parse(*args, **kwargs) + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + doc = etree_.parse(infile, parser=parser, **kwargs) return doc +def parsexmlstring_(instring, parser=None, **kwargs): + if parser is None: + # Use the lxml ElementTree compatible parser so that, e.g., + # we ignore comments. + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + element = etree_.fromstring(instring, parser=parser, **kwargs) + return element + # -# User methods +# Namespace prefix definition table (and other attributes, too) +# +# The module generatedsnamespaces, if it is importable, must contain +# a dictionary named GeneratedsNamespaceDefs. This Python dictionary +# should map element type names (strings) to XML schema namespace prefix +# definitions. The export method for any class for which there is +# a namespace prefix definition, will export that definition in the +# XML representation of that element. See the export method of +# any generated element type class for a example of the use of this +# table. 
+# A sample table is: +# +# # File: generatedsnamespaces.py +# +# GenerateDSNamespaceDefs = { +# "ElementtypeA": "http://www.xxx.com/namespaceA", +# "ElementtypeB": "http://www.xxx.com/namespaceB", +# } +# + +try: + from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_ +except ImportError: + GenerateDSNamespaceDefs_ = {} + +# +# The root super-class for element type classes # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class @@ -89,67 +116,273 @@ def parsexml_(*args, **kwargs): try: from generatedssuper import GeneratedsSuper -except ImportError, exp: - +except ImportError as exp: + class GeneratedsSuper(object): + tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$') + class _FixedOffsetTZ(datetime_.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime_.timedelta(minutes=offset) + self.__name = name + def utcoffset(self, dt): + return self.__offset + def tzname(self, dt): + return self.__name + def dst(self, dt): + return None def gds_format_string(self, input_data, input_name=''): return input_data - def gds_validate_string(self, input_data, node, input_name=''): + def gds_validate_string(self, input_data, node=None, input_name=''): + if not input_data: + return '' + else: + return input_data + def gds_format_base64(self, input_data, input_name=''): + return base64.b64encode(input_data) + def gds_validate_base64(self, input_data, node=None, input_name=''): return input_data def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data - def gds_validate_integer(self, input_data, node, input_name=''): + def gds_validate_integer(self, input_data, node=None, input_name=''): return input_data def gds_format_integer_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_integer_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def 
gds_validate_integer_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + int(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of integers') - return input_data + return values def gds_format_float(self, input_data, input_name=''): - return '%f' % input_data - def gds_validate_float(self, input_data, node, input_name=''): + return ('%.15f' % input_data).rstrip('0') + def gds_validate_float(self, input_data, node=None, input_name=''): return input_data def gds_format_float_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_float_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_float_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of floats') - return input_data + return values def gds_format_double(self, input_data, input_name=''): return '%e' % input_data - def gds_validate_double(self, input_data, node, input_name=''): + def gds_validate_double(self, input_data, node=None, input_name=''): return input_data def gds_format_double_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_double_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_double_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of doubles') - return input_data + return values def gds_format_boolean(self, input_data, input_name=''): - return '%s' % input_data - def 
gds_validate_boolean(self, input_data, node, input_name=''): + return ('%s' % input_data).lower() + def gds_validate_boolean(self, input_data, node=None, input_name=''): return input_data def gds_format_boolean_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_boolean_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_boolean_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: if value not in ('true', '1', 'false', '0', ): - raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")') + raise_parse_error( + node, + 'Requires sequence of booleans ' + '("true", "1", "false", "0")') + return values + def gds_validate_datetime(self, input_data, node=None, input_name=''): return input_data + def gds_format_datetime(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + @classmethod + def gds_parse_datetime(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + 
results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + time_parts = input_data.split('.') + if len(time_parts) > 1: + micro_seconds = int(float('0.' + time_parts[1]) * 1000000) + input_data = '%s.%s' % (time_parts[0], micro_seconds, ) + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt + def gds_validate_date(self, input_data, node=None, input_name=''): + return input_data + def gds_format_date(self, input_data, input_name=''): + _svalue = '%04d-%02d-%02d' % ( + input_data.year, + input_data.month, + input_data.day, + ) + try: + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format( + hours, minutes) + except AttributeError: + pass + return _svalue + @classmethod + def gds_parse_date(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + dt = datetime_.datetime.strptime(input_data, 
'%Y-%m-%d') + dt = dt.replace(tzinfo=tz) + return dt.date() + def gds_validate_time(self, input_data, node=None, input_name=''): + return input_data + def gds_format_time(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%02d:%02d:%02d' % ( + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%02d:%02d:%02d.%s' % ( + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + def gds_validate_simple_patterns(self, patterns, target): + # pat is a list of lists of strings/patterns. 
We should: + # - AND the outer elements + # - OR the inner elements + found1 = True + for patterns1 in patterns: + found2 = False + for patterns2 in patterns1: + if re_.search(patterns2, target) is not None: + found2 = True + break + if not found2: + found1 = False + break + return found1 + @classmethod + def gds_parse_time(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + if len(input_data.split('.')) > 1: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt.time() def gds_str_lower(self, instring): return instring.lower() def get_path_(self, node): @@ -180,6 +413,38 @@ def get_class_obj_(self, node, default_class=None): return class_obj1 def gds_build_any(self, node, type_name=None): return None + @classmethod + def gds_reverse_node_mapping(cls, mapping): + return dict(((v, k) for k, v in mapping.iteritems())) + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring + @staticmethod + def convert_unicode(instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result + def __eq__(self, other): + if type(self) != type(other): + return False + return self.__dict__ == other.__dict__ + def __ne__(self, other): + return not self.__eq__(other) + + def 
getSubclassFromModule_(module, class_): + '''Get the subclass of a class from a specific module.''' + name = class_.__name__ + 'Sub' + if hasattr(module, name): + return getattr(module, name) + else: + return None # @@ -205,29 +470,50 @@ def gds_build_any(self, node, type_name=None): Tag_pattern_ = re_.compile(r'({.*})?(.*)') String_cleanup_pat_ = re_.compile(r"[\n\r\s]+") Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)') +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +# Change this to redirect the generated superclass module to use a +# specific subclass module. +CurrentSubclassModule_ = None # # Support/utility functions. # + def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write(' ') + def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." if not inStr: return '' - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') @@ -240,6 +526,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -255,6 +542,7 @@ def quote_python(inStr): else: return '"""%s"""' % s1 + def get_all_text_(node): if node.text is not None: text = node.text @@ -265,6 +553,7 @@ def get_all_text_(node): text += child.tail return text + def 
find_attr_value_(attr_name, node): attrs = node.attrib attr_parts = attr_name.split(':') @@ -282,11 +571,9 @@ def find_attr_value_(attr_name, node): class GDSParseError(Exception): pass + def raise_parse_error(node, msg): - if XMLParser_import_library == XMLParser_import_lxml: - msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) - else: - msg = '%s (element %s)' % (msg, node.tag, ) + msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) raise GDSParseError(msg) @@ -305,6 +592,7 @@ class MixedContainer: TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + TypeBase64 = 8 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type @@ -318,49 +606,104 @@ def getValue(self): return self.value def getName(self): return self.name - def export(self, outfile, level, name, namespace, pretty_print=True): + def export(self, outfile, level, name, namespace, + pretty_print=True): if self.category == MixedContainer.CategoryText: # Prevent exporting empty content as empty lines. 
- if self.value.strip(): + if self.value.strip(): outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace, name, pretty_print) + self.value.export( + outfile, level, namespace, name, + pretty_print=pretty_print) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) + outfile.write('<%s>%s' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) + outfile.write('<%s>%d' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) + outfile.write('<%s>%f' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeBase64: + outfile.write('<%s>%s' % ( + self.name, + base64.b64encode(self.value), + self.name)) + def to_etree(self, element): + if self.category == MixedContainer.CategoryText: + # Prevent exporting empty content as empty lines. 
+ if self.value.strip(): + if len(element) > 0: + if element[-1].tail is None: + element[-1].tail = self.value + else: + element[-1].tail += self.value + else: + if element.text is None: + element.text = self.value + else: + element.text += self.value + elif self.category == MixedContainer.CategorySimple: + subelement = etree_.SubElement( + element, '%s' % self.name) + subelement.text = self.to_etree_simple() + else: # category == MixedContainer.CategoryComplex + self.value.to_etree(element) + def to_etree_simple(self): + if self.content_type == MixedContainer.TypeString: + text = self.value + elif (self.content_type == MixedContainer.TypeInteger or + self.content_type == MixedContainer.TypeBoolean): + text = '%d' % self.value + elif (self.content_type == MixedContainer.TypeFloat or + self.content_type == MixedContainer.TypeDecimal): + text = '%f' % self.value elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) + text = '%g' % self.value + elif self.content_type == MixedContainer.TypeBase64: + text = '%s' % base64.b64encode(self.value) + return text def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \ - 
(self.category, self.content_type, self.name,)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s",\n' % ( + self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class MemberSpec_(object): - def __init__(self, name='', data_type='', container=0): + def __init__(self, name='', data_type='', container=0, + optional=0, child_attrs=None, choice=None): self.name = name self.data_type = data_type self.container = container + self.child_attrs = child_attrs + self.choice = choice + self.optional = optional def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -375,6 +718,13 @@ def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container + def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs + def get_child_attrs(self): return self.child_attrs + def set_choice(self, choice): self.choice = choice + def get_choice(self): return self.choice + def set_optional(self, optional): self.optional = optional + def get_optional(self): return self.optional + def _cast(typ, value): if typ is None or value is None: @@ -385,6 +735,7 @@ def _cast(typ, value): # Data representation classes. 
# + class devicepkg(GeneratedsSuper): """DEVICE PACKAGE The SCA Device Package Descriptor (DPD) is the part of a Device Profile that contains hardware device Registration @@ -408,10 +759,11 @@ class devicepkg(GeneratedsSuper): (e.g., "1,0,0,0").""" subclass = None superclass = None - def __init__(self, version=None, id_=None, name=None, title=None, author=None, description=None, hwdeviceregistration=None): - self.version = _cast(None, version) + def __init__(self, id_=None, name=None, version=None, title=None, author=None, description=None, hwdeviceregistration=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.name = _cast(None, name) + self.version = _cast(None, version) self.title = title if author is None: self.author = [] @@ -420,6 +772,11 @@ def __init__(self, version=None, id_=None, name=None, title=None, author=None, d self.description = description self.hwdeviceregistration = hwdeviceregistration def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, devicepkg) + if subclass is not None: + return subclass(*args_, **kwargs_) if devicepkg.subclass: return devicepkg.subclass(*args_, **kwargs_) else: @@ -431,7 +788,8 @@ def set_title(self, title): self.title = title def get_author(self): return self.author def set_author(self, author): self.author = author def add_author(self, value): self.author.append(value) - def insert_author(self, index, value): self.author[index] = value + def insert_author_at(self, index, value): self.author.insert(index, value) + def replace_author_at(self, index, value): self.author[index] = value authorProp = property(get_author, set_author) def get_description(self): return self.description def set_description(self, description): self.description = description @@ -439,41 +797,56 @@ def set_description(self, description): self.description = description def get_hwdeviceregistration(self): return self.hwdeviceregistration def 
set_hwdeviceregistration(self, hwdeviceregistration): self.hwdeviceregistration = hwdeviceregistration hwdeviceregistrationProp = property(get_hwdeviceregistration, set_hwdeviceregistration) - def get_version(self): return self.version - def set_version(self, version): self.version = version - versionProp = property(get_version, set_version) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def get_version(self): return self.version + def set_version(self, version): self.version = version + versionProp = property(get_version, set_version) + def hasContent_(self): + if ( + self.title is not None or + self.author or + self.description is not None or + self.hwdeviceregistration is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='devicepkg', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('devicepkg') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='devicepkg') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='devicepkg', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, 
level, already_processed, namespace_='', name_='devicepkg'): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) + if self.version is not None and 'version' not in already_processed: + already_processed.add('version') + outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')), )) def exportChildren(self, outfile, level, namespace_='', name_='devicepkg', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -481,85 +854,34 @@ def exportChildren(self, outfile, level, namespace_='', name_='devicepkg', froms eol_ = '' if self.title is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%stitle>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.title), input_name='title')), eol_)) for author_ in self.author: author_.export(outfile, level, namespace_, name_='author', pretty_print=pretty_print) if self.description 
is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.hwdeviceregistration is not None: self.hwdeviceregistration.export(outfile, level, namespace_, name_='hwdeviceregistration', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.title is not None or - self.author or - self.description is not None or - self.hwdeviceregistration is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='devicepkg'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.title is not None: - showIndent(outfile, level) - outfile.write('title=%s,\n' % quote_python(self.title).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('author=[\n') - level += 1 - for author_ in self.author: - showIndent(outfile, level) - outfile.write('model_.author(\n') - author_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - 
level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.hwdeviceregistration is not None: - showIndent(outfile, level) - outfile.write('hwdeviceregistration=model_.hwdeviceregistration(\n') - self.hwdeviceregistration.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('version', node) - if value is not None and 'version' not in already_processed: - already_processed.append('version') - self.version = value value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value + value = find_attr_value_('version', node) + if value is not None and 'version' not in already_processed: + already_processed.add('version') + self.version = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'title': title_ = child_.text @@ -569,6 +891,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = author.factory() obj_.build(child_) self.author.append(obj_) + obj_.original_tagname_ = 'author' elif nodeName_ == 'description': description_ = child_.text description_ = self.gds_validate_string(description_, node, 'description') @@ -576,7 +899,8 @@ def 
buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'hwdeviceregistration': obj_ = hwdeviceregistration.factory() obj_.build(child_) - self.set_hwdeviceregistration(obj_) + self.hwdeviceregistration = obj_ + obj_.original_tagname_ = 'hwdeviceregistration' # end class devicepkg @@ -584,6 +908,7 @@ class author(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None, company=None, webpage=None): + self.original_tagname_ = None if name is None: self.name = [] else: @@ -591,6 +916,11 @@ def __init__(self, name=None, company=None, webpage=None): self.company = company self.webpage = webpage def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, author) + if subclass is not None: + return subclass(*args_, **kwargs_) if author.subclass: return author.subclass(*args_, **kwargs_) else: @@ -599,7 +929,8 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name def add_name(self, value): self.name.append(value) - def insert_name(self, index, value): self.name[index] = value + def insert_name_at(self, index, value): self.name.insert(index, value) + def replace_name_at(self, index, value): self.name[index] = value nameProp = property(get_name, set_name) def get_company(self): return self.company def set_company(self, company): self.company = company @@ -607,18 +938,32 @@ def set_company(self, company): self.company = company def get_webpage(self): return self.webpage def set_webpage(self, webpage): self.webpage = webpage webpageProp = property(get_webpage, set_webpage) + def hasContent_(self): + if ( + self.name or + self.company is not None or + self.webpage is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='author', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('author') + if 
imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='author') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='author', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -632,50 +977,20 @@ def exportChildren(self, outfile, level, namespace_='', name_='author', fromsubc eol_ = '' for name_ in self.name: showIndent(outfile, level, pretty_print) - outfile.write('<%sname>%s%s' % (namespace_, self.gds_format_string(quote_xml(name_).encode(ExternalEncoding), input_name='name'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(name_), input_name='name')), eol_)) if self.company is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%scompany>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.company).encode(ExternalEncoding), input_name='company'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.company), input_name='company')), eol_)) if self.webpage is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%swebpage>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.webpage).encode(ExternalEncoding), input_name='webpage'), namespace_, eol_)) - def hasContent_(self): - if ( - self.name or - self.company is not None or - self.webpage is not None - ): - return True - else: - return False - def exportLiteral(self, 
outfile, level, name_='author'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('name=[\n') - level += 1 - for name_ in self.name: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(name_).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.company is not None: - showIndent(outfile, level) - outfile.write('company=%s,\n' % quote_python(self.company).encode(ExternalEncoding)) - if self.webpage is not None: - showIndent(outfile, level) - outfile.write('webpage=%s,\n' % quote_python(self.webpage).encode(ExternalEncoding)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.webpage), input_name='webpage')), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -696,8 +1011,8 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class hwdeviceregistration(GeneratedsSuper): """At a minimum, the hwdeviceregistration element must include a - description, the manufacturer, the model number and the - device’s hardware class(es) (Refer to SCA section 4, Hardware + description, the manufacturer, the model number and the device’s + hardware class(es) (Refer to SCA section 4, Hardware Architecture Definition).The hwdeviceregistration id attribute uniquely identifies the device and is a DCE UUID, as defined in 
paragraph D.2.1.The name attribute is a userfriendlylabel for @@ -707,10 +1022,11 @@ class hwdeviceregistration(GeneratedsSuper): version numbers separated by commas (e.g., "1,0,0,0").""" subclass = None superclass = None - def __init__(self, version=None, id_=None, name=None, propertyfile=None, description=None, manufacturer=None, modelnumber=None, deviceclass=None, childhwdevice=None): - self.version = _cast(None, version) + def __init__(self, id_=None, name=None, version=None, propertyfile=None, description=None, manufacturer=None, modelnumber=None, deviceclass=None, childhwdevice=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.name = _cast(None, name) + self.version = _cast(None, version) self.propertyfile = propertyfile self.description = description self.manufacturer = manufacturer @@ -721,6 +1037,11 @@ def __init__(self, version=None, id_=None, name=None, propertyfile=None, descrip else: self.childhwdevice = childhwdevice def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, hwdeviceregistration) + if subclass is not None: + return subclass(*args_, **kwargs_) if hwdeviceregistration.subclass: return hwdeviceregistration.subclass(*args_, **kwargs_) else: @@ -744,43 +1065,61 @@ def set_deviceclass(self, deviceclass): self.deviceclass = deviceclass def get_childhwdevice(self): return self.childhwdevice def set_childhwdevice(self, childhwdevice): self.childhwdevice = childhwdevice def add_childhwdevice(self, value): self.childhwdevice.append(value) - def insert_childhwdevice(self, index, value): self.childhwdevice[index] = value + def insert_childhwdevice_at(self, index, value): self.childhwdevice.insert(index, value) + def replace_childhwdevice_at(self, index, value): self.childhwdevice[index] = value childhwdeviceProp = property(get_childhwdevice, set_childhwdevice) - def get_version(self): return self.version - def set_version(self, version): self.version 
= version - versionProp = property(get_version, set_version) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def get_version(self): return self.version + def set_version(self, version): self.version = version + versionProp = property(get_version, set_version) + def hasContent_(self): + if ( + self.propertyfile is not None or + self.description is not None or + self.manufacturer is not None or + self.modelnumber is not None or + self.deviceclass is not None or + self.childhwdevice + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='hwdeviceregistration', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('hwdeviceregistration') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='hwdeviceregistration') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='hwdeviceregistration', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='hwdeviceregistration'): - if self.version is not None and 'version' not in already_processed: - 
already_processed.append('version') - outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) + if self.version is not None and 'version' not in already_processed: + already_processed.add('version') + outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')), )) def exportChildren(self, outfile, level, namespace_='', name_='hwdeviceregistration', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -790,104 +1129,43 @@ def exportChildren(self, outfile, level, namespace_='', name_='hwdeviceregistrat self.propertyfile.export(outfile, level, namespace_, name_='propertyfile', pretty_print=pretty_print) if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.manufacturer is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%smanufacturer>%s%s' % 
(namespace_, self.gds_format_string(quote_xml(self.manufacturer).encode(ExternalEncoding), input_name='manufacturer'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.manufacturer), input_name='manufacturer')), eol_)) if self.modelnumber is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%smodelnumber>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.modelnumber).encode(ExternalEncoding), input_name='modelnumber'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.modelnumber), input_name='modelnumber')), eol_)) if self.deviceclass is not None: self.deviceclass.export(outfile, level, namespace_, name_='deviceclass', pretty_print=pretty_print) for childhwdevice_ in self.childhwdevice: childhwdevice_.export(outfile, level, namespace_, name_='childhwdevice', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.propertyfile is not None or - self.description is not None or - self.manufacturer is not None or - self.modelnumber is not None or - self.deviceclass is not None or - self.childhwdevice - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='hwdeviceregistration'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - 
outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.propertyfile is not None: - showIndent(outfile, level) - outfile.write('propertyfile=model_.propertyfile(\n') - self.propertyfile.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.manufacturer is not None: - showIndent(outfile, level) - outfile.write('manufacturer=%s,\n' % quote_python(self.manufacturer).encode(ExternalEncoding)) - if self.modelnumber is not None: - showIndent(outfile, level) - outfile.write('modelnumber=%s,\n' % quote_python(self.modelnumber).encode(ExternalEncoding)) - if self.deviceclass is not None: - showIndent(outfile, level) - outfile.write('deviceclass=model_.deviceclass(\n') - self.deviceclass.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('childhwdevice=[\n') - level += 1 - for childhwdevice_ in self.childhwdevice: - showIndent(outfile, level) - outfile.write('model_.childhwdevice(\n') - childhwdevice_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('version', node) - if value is not None and 'version' not in already_processed: - already_processed.append('version') - self.version = value value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - 
already_processed.append('id') + already_processed.add('id') self.id_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value + value = find_attr_value_('version', node) + if value is not None and 'version' not in already_processed: + already_processed.add('version') + self.version = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'propertyfile': obj_ = propertyfile.factory() obj_.build(child_) - self.set_propertyfile(obj_) + self.propertyfile = obj_ + obj_.original_tagname_ = 'propertyfile' elif nodeName_ == 'description': description_ = child_.text description_ = self.gds_validate_string(description_, node, 'description') @@ -903,11 +1181,13 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'deviceclass': obj_ = deviceclass.factory() obj_.build(child_) - self.set_deviceclass(obj_) + self.deviceclass = obj_ + obj_.original_tagname_ = 'deviceclass' elif nodeName_ == 'childhwdevice': obj_ = childhwdevice.factory() obj_.build(child_) self.childhwdevice.append(obj_) + obj_.original_tagname_ = 'childhwdevice' # end class hwdeviceregistration @@ -915,9 +1195,15 @@ class propertyfile(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, localfile=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, propertyfile) + if subclass is not None: + return subclass(*args_, **kwargs_) if propertyfile.subclass: return propertyfile.subclass(*args_, **kwargs_) else: @@ -929,26 +1215,38 @@ def set_localfile(self, localfile): self.localfile = localfile def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = 
property(get_type, set_type) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='propertyfile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('propertyfile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertyfile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='propertyfile', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propertyfile'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='propertyfile', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -956,45 +1254,24 @@ def exportChildren(self, outfile, level, namespace_='', name_='propertyfile', fr eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', 
pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='propertyfile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localfile(\n') - self.localfile.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localfile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class propertyfile @@ -1002,9 +1279,14 @@ class localfile(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, 
localfile) + if subclass is not None: + return subclass(*args_, **kwargs_) if localfile.subclass: return localfile.subclass(*args_, **kwargs_) else: @@ -1013,55 +1295,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='localfile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('localfile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='localfile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='localfile', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='localfile'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='localfile', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - 
return True - else: - return False - def exportLiteral(self, outfile, level, name_='localfile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1075,34 +1352,53 @@ class deviceclass(GeneratedsSuper): Structure).""" subclass = None superclass = None - def __init__(self, classxx=None): - if classxx is None: - self.classxx = [] + def __init__(self, class_=None): + self.original_tagname_ = None + if class_ is None: + self.class_ = [] else: - self.classxx = classxx + self.class_ = class_ def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, deviceclass) + if subclass is not None: + return subclass(*args_, **kwargs_) if deviceclass.subclass: return deviceclass.subclass(*args_, **kwargs_) else: return deviceclass(*args_, **kwargs_) factory = staticmethod(factory) - def get_class(self): return self.classxx - def set_class(self, classxx): self.classxx = classxx - def 
add_class(self, value): self.classxx.append(value) - def insert_class(self, index, value): self.classxx[index] = value + def get_class(self): return self.class_ + def set_class(self, class_): self.class_ = class_ + def add_class(self, value): self.class_.append(value) + def insert_class_at(self, index, value): self.class_.insert(index, value) + def replace_class_at(self, index, value): self.class_[index] = value classProp = property(get_class, set_class) + def hasContent_(self): + if ( + self.class_ + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='deviceclass', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('deviceclass') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='deviceclass') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='deviceclass', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1114,45 +1410,23 @@ def exportChildren(self, outfile, level, namespace_='', name_='deviceclass', fro eol_ = '\n' else: eol_ = '' - for class_ in self.classxx: + for class_ in self.class_: showIndent(outfile, level, pretty_print) - outfile.write('<%sclass>%s%s' % (namespace_, self.gds_format_string(quote_xml(class_).encode(ExternalEncoding), input_name='class'), namespace_, eol_)) - def hasContent_(self): - if ( - self.classxx - ): - return True - else: - return 
False - def exportLiteral(self, outfile, level, name_='deviceclass'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('classxx=[\n') - level += 1 - for class_ in self.classxx: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(class_).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(class_), input_name='class')), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'class': class_ = child_.text class_ = self.gds_validate_string(class_, node, 'class') - self.classxx.append(class_) + self.class_.append(class_) # end class deviceclass @@ -1160,8 +1434,8 @@ class childhwdevice(GeneratedsSuper): """The childhwdevice element (see Figure D-10) indicates additional device-specific information for hardware devices that make up the root or parent hardware device registration. An example of - childhwdevice would be a radio’s RF module that has receiver - and exciter functions within it. In this case, a CF Device + childhwdevice would be a radio’s RF module that has receiver and + exciter functions within it. In this case, a CF Device representing the RF module itself would be a parent Device with its DPD, and the receiver and exciter are child devices to the module. 
The parent / child relationship indicates that when the @@ -1170,9 +1444,15 @@ class childhwdevice(GeneratedsSuper): subclass = None superclass = None def __init__(self, hwdeviceregistration=None, devicepkgref=None): + self.original_tagname_ = None self.hwdeviceregistration = hwdeviceregistration self.devicepkgref = devicepkgref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, childhwdevice) + if subclass is not None: + return subclass(*args_, **kwargs_) if childhwdevice.subclass: return childhwdevice.subclass(*args_, **kwargs_) else: @@ -1184,18 +1464,31 @@ def set_hwdeviceregistration(self, hwdeviceregistration): self.hwdeviceregistrat def get_devicepkgref(self): return self.devicepkgref def set_devicepkgref(self, devicepkgref): self.devicepkgref = devicepkgref devicepkgrefProp = property(get_devicepkgref, set_devicepkgref) + def hasContent_(self): + if ( + self.hwdeviceregistration is not None or + self.devicepkgref is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='childhwdevice', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('childhwdevice') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='childhwdevice') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='childhwdevice', pretty_print=pretty_print) showIndent(outfile, level, 
pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1211,50 +1504,26 @@ def exportChildren(self, outfile, level, namespace_='', name_='childhwdevice', f self.hwdeviceregistration.export(outfile, level, namespace_, name_='hwdeviceregistration', pretty_print=pretty_print) if self.devicepkgref is not None: self.devicepkgref.export(outfile, level, namespace_, name_='devicepkgref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.hwdeviceregistration is not None or - self.devicepkgref is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='childhwdevice'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.hwdeviceregistration is not None: - showIndent(outfile, level) - outfile.write('hwdeviceregistration=model_.hwdeviceregistration(\n') - self.hwdeviceregistration.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.devicepkgref is not None: - showIndent(outfile, level) - outfile.write('devicepkgref=model_.devicepkgref(\n') - self.devicepkgref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'hwdeviceregistration': obj_ = hwdeviceregistration.factory() obj_.build(child_) - self.set_hwdeviceregistration(obj_) + self.hwdeviceregistration 
= obj_ + obj_.original_tagname_ = 'hwdeviceregistration' elif nodeName_ == 'devicepkgref': obj_ = devicepkgref.factory() obj_.build(child_) - self.set_devicepkgref(obj_) + self.devicepkgref = obj_ + obj_.original_tagname_ = 'devicepkgref' # end class childhwdevice @@ -1262,9 +1531,15 @@ class devicepkgref(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, localfile=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, devicepkgref) + if subclass is not None: + return subclass(*args_, **kwargs_) if devicepkgref.subclass: return devicepkgref.subclass(*args_, **kwargs_) else: @@ -1276,26 +1551,38 @@ def set_localfile(self, localfile): self.localfile = localfile def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='devicepkgref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('devicepkgref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='devicepkgref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='devicepkgref', pretty_print=pretty_print) 
showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='devicepkgref'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='devicepkgref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1303,65 +1590,60 @@ def exportChildren(self, outfile, level, namespace_='', name_='devicepkgref', fr eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='devicepkgref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localfile(\n') - self.localfile.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, 
already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localfile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class devicepkgref +GDSClassesMapping = { + 'author': author, + 'childhwdevice': childhwdevice, + 'deviceclass': deviceclass, + 'devicepkg': devicepkg, + 'devicepkgref': devicepkgref, + 'hwdeviceregistration': hwdeviceregistration, + 'localfile': localfile, + 'propertyfile': propertyfile, +} + + USAGE_TEXT = """ Usage: python .py [ -s ] """ + def usage(): - print USAGE_TEXT + print(USAGE_TEXT) sys.exit(1) def get_root_tag(node): tag = Tag_pattern_.match(node.tag).groups()[-1] - rootClass = globals().get(tag) + rootClass = GDSClassesMapping.get(tag) + if rootClass is None: + rootClass = globals().get(tag) return tag, rootClass -def parse(inFileName): - doc = parsexml_(inFileName) +def parse(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1371,16 +1653,18 @@ def parse(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_=rootTag, -## namespacedef_='', -## pretty_print=True) +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='', +## pretty_print=True) return rootObj -def parseString(inString): - from StringIO import StringIO - doc = parsexml_(StringIO(inString)) +def parseEtree(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1390,14 +1674,47 @@ def parseString(inString): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_="devicepkg", -## namespacedef_='') + mapping = {} + rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) + reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) +## if not silence: +## content = etree_.tostring( +## rootElement, pretty_print=True, +## xml_declaration=True, encoding="utf-8") +## sys.stdout.write(content) +## sys.stdout.write('\n') + return rootObj, rootElement, mapping, reverse_mapping + + +def parseString(inString, silence=False): + '''Parse a string, create the object tree, and export it. + + Arguments: + - inString -- A string. This XML fragment should not start + with an XML declaration containing an encoding. + - silence -- A boolean. If False, export the object. + Returns -- The root object in the tree. + ''' + parser = None + rootNode= parsexmlstring_(inString, parser) + rootTag, rootClass = get_root_tag(rootNode) + if rootClass is None: + rootTag = 'devicepkg' + rootClass = devicepkg + rootObj = rootClass.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. 
+## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='') return rootObj -def parseLiteral(inFileName): - doc = parsexml_(inFileName) +def parseLiteral(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1407,11 +1724,12 @@ def parseLiteral(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('#from dpd import *\n\n') -## sys.stdout.write('import dpd as model_\n\n') -## sys.stdout.write('rootObj = model_.rootTag(\n') -## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) -## sys.stdout.write(')\n') +## if not silence: +## sys.stdout.write('#from dpd import *\n\n') +## sys.stdout.write('import dpd as model_\n\n') +## sys.stdout.write('rootObj = model_.rootClass(\n') +## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) +## sys.stdout.write(')\n') return rootObj @@ -1437,4 +1755,4 @@ def main(): "hwdeviceregistration", "localfile", "propertyfile" - ] +] diff --git a/redhawk/src/base/framework/python/ossie/parsers/generatedssuper.py b/redhawk/src/base/framework/python/ossie/parsers/generatedssuper.py index ea4146df8..9c053fa9c 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/generatedssuper.py +++ b/redhawk/src/base/framework/python/ossie/parsers/generatedssuper.py @@ -20,9 +20,49 @@ # REDHAWK-specific version of generateDS base class # Implements only the methods that are explicitly used by the parsers + +import sys +import re as re_ + +ExternalEncoding = 'ascii' +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." 
+ if not inStr: + return '' + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') + s1 = s1.replace('<', '<') + s1 = s1.replace('>', '>') + return s1 + class GeneratedsSuper(object): def gds_format_string(self, input_data, input_name=''): return input_data + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring def gds_validate_string(self, input_data, node, input_name=''): if input_data is None: # ElementTree parsers return None for empty text nodes @@ -32,3 +72,11 @@ def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data def gds_validate_integer(self, input_data, node, input_name=''): return input_data + def convert_unicode(self, instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result diff --git a/redhawk/src/base/framework/python/ossie/parsers/prf.py b/redhawk/src/base/framework/python/ossie/parsers/prf.py index d105f6eae..6daf8860f 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/prf.py +++ b/redhawk/src/base/framework/python/ossie/parsers/prf.py @@ -1,5 +1,6 @@ #!/usr/bin/env python -# +# -*- coding: utf-8 -*- + # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # @@ -17,71 +18,97 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# -*- coding: utf-8 -*- # -# Generated Thu Oct 1 16:02:25 2015 by generateDS.py version 2.7c. +# Generated Mon Jul 30 12:29:35 2018 by generateDS.py version 2.29.14. +# Python 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] +# +# Command line options: +# ('-f', '') +# ('--silence', '') +# ('-m', '') +# ('-o', 'ossie/parsers/prf.py') +# +# Command line arguments: +# ../../../xml/xsd/prf.xsd +# +# Command line: +# /usr/bin/generateDS.py -f --silence -m -o "ossie/parsers/prf.py" ../../../xml/xsd/prf.xsd +# +# Current working directory (os.getcwd()): +# python # import sys -import getopt import re as re_ - -etree_ = None -Verbose_import_ = False -( XMLParser_import_none, XMLParser_import_lxml, - XMLParser_import_elementtree - ) = range(3) -XMLParser_import_library = None +import base64 +import datetime as datetime_ +import warnings as warnings_ try: - # lxml from lxml import etree as etree_ - XMLParser_import_library = XMLParser_import_lxml - if Verbose_import_: - print("running with lxml.etree") except ImportError: - try: - # cElementTree from Python 2.5+ - import xml.etree.cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree on Python 2.5+") - except ImportError: - try: - # ElementTree from Python 2.5+ - import xml.etree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree on Python 2.5+") - except ImportError: - try: - # normal cElementTree install - import cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree") - except ImportError: - try: - # normal ElementTree install - import elementtree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree") - except ImportError: - raise ImportError("Failed to import 
ElementTree from any known place") - -def parsexml_(*args, **kwargs): - if (XMLParser_import_library == XMLParser_import_lxml and - 'parser' not in kwargs): + from xml.etree import ElementTree as etree_ + + +Validate_simpletypes_ = True +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str + + +def parsexml_(infile, parser=None, **kwargs): + if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. - kwargs['parser'] = etree_.ETCompatXMLParser() - doc = etree_.parse(*args, **kwargs) + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + doc = etree_.parse(infile, parser=parser, **kwargs) return doc +def parsexmlstring_(instring, parser=None, **kwargs): + if parser is None: + # Use the lxml ElementTree compatible parser so that, e.g., + # we ignore comments. + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + element = etree_.fromstring(instring, parser=parser, **kwargs) + return element + +# +# Namespace prefix definition table (and other attributes, too) +# +# The module generatedsnamespaces, if it is importable, must contain +# a dictionary named GeneratedsNamespaceDefs. This Python dictionary +# should map element type names (strings) to XML schema namespace prefix +# definitions. The export method for any class for which there is +# a namespace prefix definition, will export that definition in the +# XML representation of that element. See the export method of +# any generated element type class for a example of the use of this +# table. 
+# A sample table is: +# +# # File: generatedsnamespaces.py +# +# GenerateDSNamespaceDefs = { +# "ElementtypeA": "http://www.xxx.com/namespaceA", +# "ElementtypeB": "http://www.xxx.com/namespaceB", +# } # -# User methods + +try: + from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_ +except ImportError: + GenerateDSNamespaceDefs_ = {} + +# +# The root super-class for element type classes # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class @@ -89,67 +116,273 @@ def parsexml_(*args, **kwargs): try: from generatedssuper import GeneratedsSuper -except ImportError, exp: - +except ImportError as exp: + class GeneratedsSuper(object): + tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$') + class _FixedOffsetTZ(datetime_.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime_.timedelta(minutes=offset) + self.__name = name + def utcoffset(self, dt): + return self.__offset + def tzname(self, dt): + return self.__name + def dst(self, dt): + return None def gds_format_string(self, input_data, input_name=''): return input_data - def gds_validate_string(self, input_data, node, input_name=''): + def gds_validate_string(self, input_data, node=None, input_name=''): + if not input_data: + return '' + else: + return input_data + def gds_format_base64(self, input_data, input_name=''): + return base64.b64encode(input_data) + def gds_validate_base64(self, input_data, node=None, input_name=''): return input_data def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data - def gds_validate_integer(self, input_data, node, input_name=''): + def gds_validate_integer(self, input_data, node=None, input_name=''): return input_data def gds_format_integer_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_integer_list(self, input_data, node, input_name=''): + return '%s' % ' 
'.join(input_data) + def gds_validate_integer_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + int(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of integers') - return input_data + return values def gds_format_float(self, input_data, input_name=''): - return '%f' % input_data - def gds_validate_float(self, input_data, node, input_name=''): + return ('%.15f' % input_data).rstrip('0') + def gds_validate_float(self, input_data, node=None, input_name=''): return input_data def gds_format_float_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_float_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_float_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of floats') - return input_data + return values def gds_format_double(self, input_data, input_name=''): return '%e' % input_data - def gds_validate_double(self, input_data, node, input_name=''): + def gds_validate_double(self, input_data, node=None, input_name=''): return input_data def gds_format_double_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_double_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_double_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of doubles') - return input_data + return values def gds_format_boolean(self, input_data, input_name=''): - return 
'%s' % input_data - def gds_validate_boolean(self, input_data, node, input_name=''): + return ('%s' % input_data).lower() + def gds_validate_boolean(self, input_data, node=None, input_name=''): return input_data def gds_format_boolean_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_boolean_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_boolean_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: if value not in ('true', '1', 'false', '0', ): - raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")') + raise_parse_error( + node, + 'Requires sequence of booleans ' + '("true", "1", "false", "0")') + return values + def gds_validate_datetime(self, input_data, node=None, input_name=''): + return input_data + def gds_format_datetime(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + @classmethod + def gds_parse_datetime(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = 
input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + time_parts = input_data.split('.') + if len(time_parts) > 1: + micro_seconds = int(float('0.' + time_parts[1]) * 1000000) + input_data = '%s.%s' % (time_parts[0], micro_seconds, ) + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt + def gds_validate_date(self, input_data, node=None, input_name=''): + return input_data + def gds_format_date(self, input_data, input_name=''): + _svalue = '%04d-%02d-%02d' % ( + input_data.year, + input_data.month, + input_data.day, + ) + try: + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format( + hours, minutes) + except AttributeError: + pass + return _svalue + @classmethod + def gds_parse_date(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + dt = 
datetime_.datetime.strptime(input_data, '%Y-%m-%d') + dt = dt.replace(tzinfo=tz) + return dt.date() + def gds_validate_time(self, input_data, node=None, input_name=''): return input_data + def gds_format_time(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%02d:%02d:%02d' % ( + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%02d:%02d:%02d.%s' % ( + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + def gds_validate_simple_patterns(self, patterns, target): + # pat is a list of lists of strings/patterns. 
We should: + # - AND the outer elements + # - OR the inner elements + found1 = True + for patterns1 in patterns: + found2 = False + for patterns2 in patterns1: + if re_.search(patterns2, target) is not None: + found2 = True + break + if not found2: + found1 = False + break + return found1 + @classmethod + def gds_parse_time(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + if len(input_data.split('.')) > 1: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt.time() def gds_str_lower(self, instring): return instring.lower() def get_path_(self, node): @@ -180,6 +413,38 @@ def get_class_obj_(self, node, default_class=None): return class_obj1 def gds_build_any(self, node, type_name=None): return None + @classmethod + def gds_reverse_node_mapping(cls, mapping): + return dict(((v, k) for k, v in mapping.iteritems())) + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring + @staticmethod + def convert_unicode(instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result + def __eq__(self, other): + if type(self) != type(other): + return False + return self.__dict__ == other.__dict__ + def __ne__(self, other): + return not self.__eq__(other) + + def 
getSubclassFromModule_(module, class_): + '''Get the subclass of a class from a specific module.''' + name = class_.__name__ + 'Sub' + if hasattr(module, name): + return getattr(module, name) + else: + return None # @@ -205,29 +470,50 @@ def gds_build_any(self, node, type_name=None): Tag_pattern_ = re_.compile(r'({.*})?(.*)') String_cleanup_pat_ = re_.compile(r"[\n\r\s]+") Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)') +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +# Change this to redirect the generated superclass module to use a +# specific subclass module. +CurrentSubclassModule_ = None # # Support/utility functions. # + def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write(' ') + def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." if not inStr: return '' - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') @@ -240,6 +526,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -255,6 +542,7 @@ def quote_python(inStr): else: return '"""%s"""' % s1 + def get_all_text_(node): if node.text is not None: text = node.text @@ -265,6 +553,7 @@ def get_all_text_(node): text += child.tail return text + def 
find_attr_value_(attr_name, node): attrs = node.attrib attr_parts = attr_name.split(':') @@ -282,11 +571,9 @@ def find_attr_value_(attr_name, node): class GDSParseError(Exception): pass + def raise_parse_error(node, msg): - if XMLParser_import_library == XMLParser_import_lxml: - msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) - else: - msg = '%s (element %s)' % (msg, node.tag, ) + msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) raise GDSParseError(msg) @@ -305,6 +592,7 @@ class MixedContainer: TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + TypeBase64 = 8 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type @@ -318,49 +606,104 @@ def getValue(self): return self.value def getName(self): return self.name - def export(self, outfile, level, name, namespace, pretty_print=True): + def export(self, outfile, level, name, namespace, + pretty_print=True): if self.category == MixedContainer.CategoryText: # Prevent exporting empty content as empty lines. 
- if self.value.strip(): + if self.value.strip(): outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace, name, pretty_print) + self.value.export( + outfile, level, namespace, name, + pretty_print=pretty_print) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) + outfile.write('<%s>%s' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) + outfile.write('<%s>%d' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) + outfile.write('<%s>%f' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeBase64: + outfile.write('<%s>%s' % ( + self.name, + base64.b64encode(self.value), + self.name)) + def to_etree(self, element): + if self.category == MixedContainer.CategoryText: + # Prevent exporting empty content as empty lines. 
+ if self.value.strip(): + if len(element) > 0: + if element[-1].tail is None: + element[-1].tail = self.value + else: + element[-1].tail += self.value + else: + if element.text is None: + element.text = self.value + else: + element.text += self.value + elif self.category == MixedContainer.CategorySimple: + subelement = etree_.SubElement( + element, '%s' % self.name) + subelement.text = self.to_etree_simple() + else: # category == MixedContainer.CategoryComplex + self.value.to_etree(element) + def to_etree_simple(self): + if self.content_type == MixedContainer.TypeString: + text = self.value + elif (self.content_type == MixedContainer.TypeInteger or + self.content_type == MixedContainer.TypeBoolean): + text = '%d' % self.value + elif (self.content_type == MixedContainer.TypeFloat or + self.content_type == MixedContainer.TypeDecimal): + text = '%f' % self.value elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) + text = '%g' % self.value + elif self.content_type == MixedContainer.TypeBase64: + text = '%s' % base64.b64encode(self.value) + return text def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \ - 
(self.category, self.content_type, self.name,)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s",\n' % ( + self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class MemberSpec_(object): - def __init__(self, name='', data_type='', container=0): + def __init__(self, name='', data_type='', container=0, + optional=0, child_attrs=None, choice=None): self.name = name self.data_type = data_type self.container = container + self.child_attrs = child_attrs + self.choice = choice + self.optional = optional def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -375,6 +718,13 @@ def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container + def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs + def get_child_attrs(self): return self.child_attrs + def set_choice(self, choice): self.choice = choice + def get_choice(self): return self.choice + def set_optional(self, optional): self.optional = optional + def get_optional(self): return self.optional + def _cast(typ, value): if typ is None or value is None: @@ -385,13 +735,19 @@ def _cast(typ, value): # Data representation classes. 
# + class action(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_='external'): + self.original_tagname_ = None self.type_ = _cast(None, type_) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, action) + if subclass is not None: + return subclass(*args_, **kwargs_) if action.subclass: return action.subclass(*args_, **kwargs_) else: @@ -402,56 +758,60 @@ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) def validate_ActionType(self, value): # Validate type ActionType, a restriction on xs:string. - pass + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['ge', 'gt', 'external', 'le', 'lt', 'ne', 'eq'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on ActionType' % {"value" : value.encode("utf-8")} ) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='action', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('action') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='action') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='action', 
pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='action'): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') + if self.type_ != "external" and 'type_' not in already_processed: + already_processed.add('type_') outfile.write(' type=%s' % (quote_attrib(self.type_), )) def exportChildren(self, outfile, level, namespace_='', name_='action', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='action'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value self.validate_ActionType(self.type_) # validate type ActionType def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -463,9 +823,14 @@ class configurationKind(GeneratedsSuper): subclass = None superclass = None def __init__(self, kindtype='configure'): 
+ self.original_tagname_ = None self.kindtype = _cast(None, kindtype) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, configurationKind) + if subclass is not None: + return subclass(*args_, **kwargs_) if configurationKind.subclass: return configurationKind.subclass(*args_, **kwargs_) else: @@ -476,56 +841,69 @@ def set_kindtype(self, kindtype): self.kindtype = kindtype kindtypeProp = property(get_kindtype, set_kindtype) def validate_StructPropertyConfigurationType(self, value): # Validate type StructPropertyConfigurationType, a restriction on PropertyConfigurationType. - pass + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['configure', 'allocation', 'factoryparam', 'test', 'event', 'message', 'property'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on StructPropertyConfigurationType' % {"value" : value.encode("utf-8")} ) + value = str(value) + enumerations = ['configure', 'execparam', 'allocation', 'factoryparam', 'test', 'event', 'message', 'property'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on StructPropertyConfigurationType' % {"value" : value.encode("utf-8")} ) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='configurationKind', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('configurationKind') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is 
not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='configurationKind') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='configurationKind', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='configurationKind'): - if self.kindtype is not None and 'kindtype' not in already_processed: - already_processed.append('kindtype') + if self.kindtype != "configure" and 'kindtype' not in already_processed: + already_processed.add('kindtype') outfile.write(' kindtype=%s' % (quote_attrib(self.kindtype), )) def exportChildren(self, outfile, level, namespace_='', name_='configurationKind', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='configurationKind'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.kindtype is not None and 'kindtype' not in already_processed: - already_processed.append('kindtype') - showIndent(outfile, level) - outfile.write('kindtype = "%s",\n' % (self.kindtype,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: 
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('kindtype', node) if value is not None and 'kindtype' not in already_processed: - already_processed.append('kindtype') + already_processed.add('kindtype') self.kindtype = value self.validate_StructPropertyConfigurationType(self.kindtype) # validate type StructPropertyConfigurationType def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -536,83 +914,79 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class enumeration(GeneratedsSuper): subclass = None superclass = None - def __init__(self, value=None, label=None): - self.value = _cast(None, value) + def __init__(self, label=None, value=None): + self.original_tagname_ = None self.label = _cast(None, label) - pass + self.value = _cast(None, value) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, enumeration) + if subclass is not None: + return subclass(*args_, **kwargs_) if enumeration.subclass: return enumeration.subclass(*args_, **kwargs_) else: return enumeration(*args_, **kwargs_) factory = staticmethod(factory) - def get_value(self): return self.value - def set_value(self, value): self.value = value - valueProp = property(get_value, set_value) def get_label(self): return self.label def set_label(self, label): self.label = label labelProp = property(get_label, set_label) + def get_value(self): return self.value + def set_value(self, value): self.value = value + valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='enumeration', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('enumeration') + if imported_ns_def_ is not None: + 
namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='enumeration') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='enumeration', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='enumeration'): - if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), )) if self.label is not None and 'label' not in already_processed: - already_processed.append('label') - outfile.write(' label=%s' % (self.gds_format_string(quote_attrib(self.label).encode(ExternalEncoding), input_name='label'), )) - def exportChildren(self, outfile, level, namespace_='', name_='enumeration', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='enumeration'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('label') + outfile.write(' label=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.label), input_name='label')), )) if self.value is not None 
and 'value' not in already_processed: - already_processed.append('value') - showIndent(outfile, level) - outfile.write('value = "%s",\n' % (self.value,)) - if self.label is not None and 'label' not in already_processed: - already_processed.append('label') - showIndent(outfile, level) - outfile.write('label = "%s",\n' % (self.label,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) + def exportChildren(self, outfile, level, namespace_='', name_='enumeration', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('value', node) - if value is not None and 'value' not in already_processed: - already_processed.append('value') - self.value = value value = find_attr_value_('label', node) if value is not None and 'label' not in already_processed: - already_processed.append('label') + already_processed.add('label') self.label = value + value = find_attr_value_('value', node) + if value is not None and 'value' not in already_processed: + already_processed.add('value') + self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class enumeration @@ -622,11 +996,17 @@ class enumerations(GeneratedsSuper): subclass = None superclass = None def __init__(self, enumeration=None): + self.original_tagname_ = None if enumeration is None: self.enumeration = [] else: self.enumeration = enumeration def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + 
CurrentSubclassModule_, enumerations) + if subclass is not None: + return subclass(*args_, **kwargs_) if enumerations.subclass: return enumerations.subclass(*args_, **kwargs_) else: @@ -635,20 +1015,33 @@ def factory(*args_, **kwargs_): def get_enumeration(self): return self.enumeration def set_enumeration(self, enumeration): self.enumeration = enumeration def add_enumeration(self, value): self.enumeration.append(value) - def insert_enumeration(self, index, value): self.enumeration[index] = value + def insert_enumeration_at(self, index, value): self.enumeration.insert(index, value) + def replace_enumeration_at(self, index, value): self.enumeration[index] = value enumerationProp = property(get_enumeration, set_enumeration) + def hasContent_(self): + if ( + self.enumeration + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='enumerations', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('enumerations') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='enumerations') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='enumerations', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -662,38 +1055,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='enumerations', fr eol_ = '' for enumeration_ in self.enumeration: enumeration_.export(outfile, level, 
namespace_, name_='enumeration', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.enumeration - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='enumerations'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('enumeration=[\n') - level += 1 - for enumeration_ in self.enumeration: - showIndent(outfile, level) - outfile.write('model_.enumeration(\n') - enumeration_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -701,6 +1069,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = enumeration.factory() obj_.build(child_) self.enumeration.append(obj_) + obj_.original_tagname_ = 'enumeration' # end class enumerations @@ -708,11 +1077,17 @@ class inputValue(GeneratedsSuper): subclass = None superclass = None def __init__(self, simple=None): + self.original_tagname_ = None if simple is None: self.simple = [] else: self.simple = simple def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, inputValue) + if subclass is not None: + return subclass(*args_, **kwargs_) if inputValue.subclass: return 
inputValue.subclass(*args_, **kwargs_) else: @@ -721,20 +1096,33 @@ def factory(*args_, **kwargs_): def get_simple(self): return self.simple def set_simple(self, simple): self.simple = simple def add_simple(self, value): self.simple.append(value) - def insert_simple(self, index, value): self.simple[index] = value + def insert_simple_at(self, index, value): self.simple.insert(index, value) + def replace_simple_at(self, index, value): self.simple[index] = value simpleProp = property(get_simple, set_simple) + def hasContent_(self): + if ( + self.simple + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='inputValue', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('inputValue') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='inputValue') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='inputValue', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -748,38 +1136,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='inputValue', from eol_ = '' for simple_ in self.simple: simple_.export(outfile, level, namespace_, name_='simple', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simple - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='inputValue'): - level += 1 - 
self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simple=[\n') - level += 1 - for simple_ in self.simple: - showIndent(outfile, level) - outfile.write('model_.simple(\n') - simple_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -787,6 +1150,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simple.factory() obj_.build(child_) self.simple.append(obj_) + obj_.original_tagname_ = 'simple' # end class inputValue @@ -794,9 +1158,14 @@ class kind(GeneratedsSuper): subclass = None superclass = None def __init__(self, kindtype='configure'): + self.original_tagname_ = None self.kindtype = _cast(None, kindtype) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, kind) + if subclass is not None: + return subclass(*args_, **kwargs_) if kind.subclass: return kind.subclass(*args_, **kwargs_) else: @@ -807,56 +1176,60 @@ def set_kindtype(self, kindtype): self.kindtype = kindtype kindtypeProp = property(get_kindtype, set_kindtype) def validate_PropertyConfigurationType(self, value): # Validate type PropertyConfigurationType, a restriction on xs:string. 
- pass + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['configure', 'execparam', 'allocation', 'factoryparam', 'test', 'event', 'message', 'property'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on PropertyConfigurationType' % {"value" : value.encode("utf-8")} ) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='kind', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('kind') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='kind') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='kind', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='kind'): - if self.kindtype is not None and 'kindtype' not in already_processed: - already_processed.append('kindtype') + if self.kindtype != "configure" and 'kindtype' not in already_processed: + already_processed.add('kindtype') outfile.write(' kindtype=%s' % (quote_attrib(self.kindtype), )) def exportChildren(self, outfile, level, namespace_='', name_='kind', fromsubclass_=False, 
pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='kind'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.kindtype is not None and 'kindtype' not in already_processed: - already_processed.append('kindtype') - showIndent(outfile, level) - outfile.write('kindtype = "%s",\n' % (self.kindtype,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('kindtype', node) if value is not None and 'kindtype' not in already_processed: - already_processed.append('kindtype') + already_processed.add('kindtype') self.kindtype = value self.validate_PropertyConfigurationType(self.kindtype) # validate type PropertyConfigurationType def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -874,6 +1247,7 @@ class properties(GeneratedsSuper): subclass = None superclass = None def __init__(self, description=None, simple=None, simplesequence=None, test=None, struct=None, structsequence=None): + self.original_tagname_ = None self.description = description if simple is None: self.simple = [] @@ -896,6 +1270,11 @@ def __init__(self, description=None, simple=None, simplesequence=None, test=None else: self.structsequence = structsequence def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, properties) + if subclass is not None: + 
return subclass(*args_, **kwargs_) if properties.subclass: return properties.subclass(*args_, **kwargs_) else: @@ -907,40 +1286,62 @@ def set_description(self, description): self.description = description def get_simple(self): return self.simple def set_simple(self, simple): self.simple = simple def add_simple(self, value): self.simple.append(value) - def insert_simple(self, index, value): self.simple[index] = value + def insert_simple_at(self, index, value): self.simple.insert(index, value) + def replace_simple_at(self, index, value): self.simple[index] = value simpleProp = property(get_simple, set_simple) def get_simplesequence(self): return self.simplesequence def set_simplesequence(self, simplesequence): self.simplesequence = simplesequence def add_simplesequence(self, value): self.simplesequence.append(value) - def insert_simplesequence(self, index, value): self.simplesequence[index] = value + def insert_simplesequence_at(self, index, value): self.simplesequence.insert(index, value) + def replace_simplesequence_at(self, index, value): self.simplesequence[index] = value simplesequenceProp = property(get_simplesequence, set_simplesequence) def get_test(self): return self.test def set_test(self, test): self.test = test def add_test(self, value): self.test.append(value) - def insert_test(self, index, value): self.test[index] = value + def insert_test_at(self, index, value): self.test.insert(index, value) + def replace_test_at(self, index, value): self.test[index] = value testProp = property(get_test, set_test) def get_struct(self): return self.struct def set_struct(self, struct): self.struct = struct def add_struct(self, value): self.struct.append(value) - def insert_struct(self, index, value): self.struct[index] = value + def insert_struct_at(self, index, value): self.struct.insert(index, value) + def replace_struct_at(self, index, value): self.struct[index] = value structProp = property(get_struct, set_struct) def get_structsequence(self): return 
self.structsequence def set_structsequence(self, structsequence): self.structsequence = structsequence def add_structsequence(self, value): self.structsequence.append(value) - def insert_structsequence(self, index, value): self.structsequence[index] = value + def insert_structsequence_at(self, index, value): self.structsequence.insert(index, value) + def replace_structsequence_at(self, index, value): self.structsequence[index] = value structsequenceProp = property(get_structsequence, set_structsequence) + def hasContent_(self): + if ( + self.description is not None or + self.simple or + self.simplesequence or + self.test or + self.struct or + self.structsequence + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='properties', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('properties') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='properties') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='properties', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -954,7 +1355,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='properties', from eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, 
self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) for simple_ in self.simple: simple_.export(outfile, level, namespace_, name_='simple', pretty_print=pretty_print) for simplesequence_ in self.simplesequence: @@ -965,94 +1366,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='properties', from struct_.export(outfile, level, namespace_, name_='struct', pretty_print=pretty_print) for structsequence_ in self.structsequence: structsequence_.export(outfile, level, namespace_, name_='structsequence', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.simple or - self.simplesequence or - self.test or - self.struct or - self.structsequence - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='properties'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('simple=[\n') - level += 1 - for simple_ in self.simple: - showIndent(outfile, level) - outfile.write('model_.simple(\n') - simple_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simplesequence=[\n') - level += 1 - for simplesequence_ in self.simplesequence: - showIndent(outfile, level) - outfile.write('model_.simpleSequence(\n') - 
simplesequence_.exportLiteral(outfile, level, name_='simpleSequence') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('test=[\n') - level += 1 - for test_ in self.test: - showIndent(outfile, level) - outfile.write('model_.test(\n') - test_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('struct=[\n') - level += 1 - for struct_ in self.struct: - showIndent(outfile, level) - outfile.write('model_.struct(\n') - struct_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structsequence=[\n') - level += 1 - for structsequence_ in self.structsequence: - showIndent(outfile, level) - outfile.write('model_.structSequence(\n') - structsequence_.exportLiteral(outfile, level, name_='structSequence') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -1064,22 +1384,27 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simple.factory() obj_.build(child_) self.simple.append(obj_) + obj_.original_tagname_ = 'simple' elif nodeName_ == 'simplesequence': obj_ = simpleSequence.factory() obj_.build(child_) self.simplesequence.append(obj_) + obj_.original_tagname_ = 
'simplesequence' elif nodeName_ == 'test': obj_ = test.factory() obj_.build(child_) self.test.append(obj_) + obj_.original_tagname_ = 'test' elif nodeName_ == 'struct': obj_ = struct.factory() obj_.build(child_) self.struct.append(obj_) + obj_.original_tagname_ = 'struct' elif nodeName_ == 'structsequence': obj_ = structSequence.factory() obj_.build(child_) self.structsequence.append(obj_) + obj_.original_tagname_ = 'structsequence' # end class properties @@ -1087,10 +1412,15 @@ class range_(GeneratedsSuper): subclass = None superclass = None def __init__(self, max=None, min=None): + self.original_tagname_ = None self.max = _cast(None, max) self.min = _cast(None, min) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, range_) + if subclass is not None: + return subclass(*args_, **kwargs_) if range_.subclass: return range_.subclass(*args_, **kwargs_) else: @@ -1102,81 +1432,78 @@ def set_max(self, max): self.max = max def get_min(self): return self.min def set_min(self, min): self.min = min minProp = property(get_min, set_min) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='range', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('range') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='range') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + 
self.exportChildren(outfile, level + 1, namespace_='', name_='range', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='range'): if self.max is not None and 'max' not in already_processed: - already_processed.append('max') - outfile.write(' max=%s' % (self.gds_format_string(quote_attrib(self.max).encode(ExternalEncoding), input_name='max'), )) + already_processed.add('max') + outfile.write(' max=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.max), input_name='max')), )) if self.min is not None and 'min' not in already_processed: - already_processed.append('min') - outfile.write(' min=%s' % (self.gds_format_string(quote_attrib(self.min).encode(ExternalEncoding), input_name='min'), )) + already_processed.add('min') + outfile.write(' min=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.min), input_name='min')), )) def exportChildren(self, outfile, level, namespace_='', name_='range', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='range'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.max is not None and 'max' not in already_processed: - already_processed.append('max') - showIndent(outfile, level) - outfile.write('max = "%s",\n' % (self.max,)) - if self.min is not None and 'min' not in already_processed: - already_processed.append('min') - showIndent(outfile, level) - outfile.write('min = "%s",\n' % (self.min,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + 
self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('max', node) if value is not None and 'max' not in already_processed: - already_processed.append('max') + already_processed.add('max') self.max = value value = find_attr_value_('min', node) if value is not None and 'min' not in already_processed: - already_processed.append('min') + already_processed.add('min') self.min = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass -# end class range +# end class range_ class resultValue(GeneratedsSuper): subclass = None superclass = None def __init__(self, simple=None): + self.original_tagname_ = None if simple is None: self.simple = [] else: self.simple = simple def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, resultValue) + if subclass is not None: + return subclass(*args_, **kwargs_) if resultValue.subclass: return resultValue.subclass(*args_, **kwargs_) else: @@ -1185,20 +1512,33 @@ def factory(*args_, **kwargs_): def get_simple(self): return self.simple def set_simple(self, simple): self.simple = simple def add_simple(self, value): self.simple.append(value) - def insert_simple(self, index, value): self.simple[index] = value + def insert_simple_at(self, index, value): self.simple.insert(index, value) + def replace_simple_at(self, index, value): self.simple[index] = value simpleProp = property(get_simple, set_simple) + def hasContent_(self): + if ( + self.simple + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='resultValue', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('resultValue') + if imported_ns_def_ is not None: + namespacedef_ = 
imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='resultValue') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='resultValue', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1212,38 +1552,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='resultValue', fro eol_ = '' for simple_ in self.simple: simple_.export(outfile, level, namespace_, name_='simple', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simple - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='resultValue'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simple=[\n') - level += 1 - for simple_ in self.simple: - showIndent(outfile, level) - outfile.write('model_.simple(\n') - simple_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] 
self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -1251,6 +1566,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simple.factory() obj_.build(child_) self.simple.append(obj_) + obj_.original_tagname_ = 'simple' # end class resultValue @@ -1278,17 +1594,19 @@ class simple(GeneratedsSuper): the type of the kind element is "configure".""" subclass = None superclass = None - def __init__(self, name=None, type_=None, commandline='false', complex='false', mode='readwrite', optional='false', id_=None, description=None, value=None, units=None, range_=None, enumerations=None, kind=None, action=None): + def __init__(self, id_=None, mode='readwrite', name=None, complex='false', commandline='false', optional='false', type_=None, description=None, value=None, units=None, range_=None, enumerations=None, kind=None, action=None): + self.original_tagname_ = None + self.id_ = _cast(None, id_) + self.mode = _cast(None, mode) self.name = _cast(None, name) - self.type_ = _cast(None, type_) - self.commandline = _cast(None, commandline) self.complex = _cast(None, complex) - self.mode = _cast(None, mode) + self.commandline = _cast(None, commandline) self.optional = _cast(None, optional) - self.id_ = _cast(None, id_) + self.type_ = _cast(None, type_) self.description = description self.value = value self.units = units + self.validate_Unit(self.units) self.range_ = range_ self.enumerations = enumerations if kind is None: @@ -1297,6 +1615,11 @@ def __init__(self, name=None, type_=None, commandline='false', complex='false', self.kind = kind self.action = action def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simple) + if subclass is not None: + return subclass(*args_, **kwargs_) if simple.subclass: return simple.subclass(*args_, 
**kwargs_) else: @@ -1311,9 +1634,6 @@ def set_value(self, value): self.value = value def get_units(self): return self.units def set_units(self, units): self.units = units unitsProp = property(get_units, set_units) - def validate_Unit(self, value): - # Validate type Unit, a restriction on xs:string. - pass def get_range(self): return self.range_ def set_range(self, range_): self.range_ = range_ rangeProp = property(get_range, set_range) @@ -1323,85 +1643,153 @@ def set_enumerations(self, enumerations): self.enumerations = enumerations def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def add_kind(self, value): self.kind.append(value) - def insert_kind(self, index, value): self.kind[index] = value + def insert_kind_at(self, index, value): self.kind.insert(index, value) + def replace_kind_at(self, index, value): self.kind[index] = value kindProp = property(get_kind, set_kind) def get_action(self): return self.action def set_action(self, action): self.action = action actionProp = property(get_action, set_action) + def get_id(self): return self.id_ + def set_id(self, id_): self.id_ = id_ + idProp = property(get_id, set_id) + def get_mode(self): return self.mode + def set_mode(self, mode): self.mode = mode + modeProp = property(get_mode, set_mode) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - typeProp = property(get_type, set_type) - def validate_PropertyValueType(self, value): - # Validate type PropertyValueType, a restriction on xs:string. - pass - def get_commandline(self): return self.commandline - def set_commandline(self, commandline): self.commandline = commandline - commandlineProp = property(get_commandline, set_commandline) - def validate_IsCommandLine(self, value): - # Validate type IsCommandLine, a restriction on xs:string. 
- pass def get_complex(self): return self.complex def set_complex(self, complex): self.complex = complex complexProp = property(get_complex, set_complex) - def validate_IsComplex(self, value): - # Validate type IsComplex, a restriction on xs:string. - pass - def get_mode(self): return self.mode - def set_mode(self, mode): self.mode = mode - modeProp = property(get_mode, set_mode) - def validate_AccessType(self, value): - # Validate type AccessType, a restriction on xs:string. - pass + def get_commandline(self): return self.commandline + def set_commandline(self, commandline): self.commandline = commandline + commandlineProp = property(get_commandline, set_commandline) def get_optional(self): return self.optional def set_optional(self, optional): self.optional = optional optionalProp = property(get_optional, set_optional) + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + typeProp = property(get_type, set_type) + def validate_Unit(self, value): + # Validate type Unit, a restriction on xs:string. + if value is not None and Validate_simpletypes_: + pass + def validate_AccessType(self, value): + # Validate type AccessType, a restriction on xs:string. + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['writeonly', 'readonly', 'readwrite'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AccessType' % {"value" : value.encode("utf-8")} ) + def validate_IsComplex(self, value): + # Validate type IsComplex, a restriction on xs:string. 
+ if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['false', 'true'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on IsComplex' % {"value" : value.encode("utf-8")} ) + def validate_IsCommandLine(self, value): + # Validate type IsCommandLine, a restriction on xs:string. + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['false', 'true'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on IsCommandLine' % {"value" : value.encode("utf-8")} ) def validate_IsOptional(self, value): # Validate type IsOptional, a restriction on xs:string. - pass - def get_id(self): return self.id_ - def set_id(self, id_): self.id_ = id_ - idProp = property(get_id, set_id) + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['false', 'true'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on IsOptional' % {"value" : value.encode("utf-8")} ) + def validate_PropertyValueType(self, value): + # Validate type PropertyValueType, a restriction on xs:string. 
+ if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['string', 'boolean', 'ulong', 'objref', 'short', 'float', 'octet', 'char', 'ushort', 'double', 'long', 'longlong', 'ulonglong', 'utctime'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on PropertyValueType' % {"value" : value.encode("utf-8")} ) + def hasContent_(self): + if ( + self.description is not None or + self.value is not None or + self.units is not None or + self.range_ is not None or + self.enumerations is not None or + self.kind or + self.action is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='simple', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simple') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simple') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simple', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simple'): + if self.id_ is not None and 'id' not in already_processed: + already_processed.add('id') + outfile.write(' id=%s' % 
(self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.mode != "readwrite" and 'mode' not in already_processed: + already_processed.add('mode') + outfile.write(' mode=%s' % (quote_attrib(self.mode), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (quote_attrib(self.type_), )) - if self.commandline is not None and 'commandline' not in already_processed: - already_processed.append('commandline') - outfile.write(' commandline=%s' % (quote_attrib(self.commandline), )) - if self.complex is not None and 'complex' not in already_processed: - already_processed.append('complex') + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) + if self.complex != "false" and 'complex' not in already_processed: + already_processed.add('complex') outfile.write(' complex=%s' % (quote_attrib(self.complex), )) - if self.mode is not None and 'mode' not in already_processed: - already_processed.append('mode') - outfile.write(' mode=%s' % (quote_attrib(self.mode), )) - if self.optional is not None and 'optional' not in already_processed: - already_processed.append('optional') + if self.commandline != "false" and 'commandline' not in already_processed: + already_processed.add('commandline') + outfile.write(' commandline=%s' % (quote_attrib(self.commandline), )) + if self.optional != "false" and 'optional' not in already_processed: + already_processed.add('optional') outfile.write(' optional=%s' % (quote_attrib(self.optional), )) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % 
(self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + if self.type_ is not None and 'type_' not in already_processed: + already_processed.add('type_') + outfile.write(' type=%s' % (quote_attrib(self.type_), )) def exportChildren(self, outfile, level, namespace_='', name_='simple', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1409,13 +1797,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='simple', fromsubc eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.value is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%svalue>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.value).encode(ExternalEncoding), input_name='value'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.value), input_name='value')), eol_)) if self.units is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sunits>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.units).encode(ExternalEncoding), input_name='units'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.units), input_name='units')), eol_)) if self.range_ is not None: self.range_.export(outfile, level, namespace_, name_='range', pretty_print=pretty_print) if self.enumerations is not None: @@ -1424,132 +1812,47 @@ def exportChildren(self, outfile, level, namespace_='', name_='simple', fromsubc kind_.export(outfile, level, namespace_, name_='kind', pretty_print=pretty_print) if self.action is not None: self.action.export(outfile, level, namespace_, 
name_='action', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.value is not None or - self.units is not None or - self.range_ is not None or - self.enumerations is not None or - self.kind or - self.action is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='simple'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.commandline is not None and 'commandline' not in already_processed: - already_processed.append('commandline') - showIndent(outfile, level) - outfile.write('commandline = "%s",\n' % (self.commandline,)) - if self.complex is not None and 'complex' not in already_processed: - already_processed.append('complex') - showIndent(outfile, level) - outfile.write('complex = "%s",\n' % (self.complex,)) - if self.mode is not None and 'mode' not in already_processed: - already_processed.append('mode') - showIndent(outfile, level) - outfile.write('mode = "%s",\n' % (self.mode,)) - if self.optional is not None and 'optional' not in already_processed: - already_processed.append('optional') - showIndent(outfile, level) - outfile.write('optional = "%s",\n' % (self.optional,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - 
showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.value is not None: - showIndent(outfile, level) - outfile.write('value=%s,\n' % quote_python(self.value).encode(ExternalEncoding)) - if self.units is not None: - showIndent(outfile, level) - outfile.write('units=%s,\n' % quote_python(self.units).encode(ExternalEncoding)) - if self.range_ is not None: - showIndent(outfile, level) - outfile.write('range=model_.range(\n') - self.range_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.enumerations is not None: - showIndent(outfile, level) - outfile.write('enumerations=model_.enumerations(\n') - self.enumerations.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('kind=[\n') - level += 1 - for kind_ in self.kind: - showIndent(outfile, level) - outfile.write('model_.kind(\n') - kind_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.action is not None: - showIndent(outfile, level) - outfile.write('action=model_.action(\n') - self.action.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): + value = find_attr_value_('id', node) + if value is not None and 'id' not in already_processed: + already_processed.add('id') + self.id_ = value + value = find_attr_value_('mode', node) + if value is not None and 'mode' not in already_processed: + already_processed.add('mode') + self.mode = value + 
self.validate_AccessType(self.mode) # validate type AccessType value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value - value = find_attr_value_('type', node) - if value is not None and 'type' not in already_processed: - already_processed.append('type') - self.type_ = value - self.validate_PropertyValueType(self.type_) # validate type PropertyValueType - value = find_attr_value_('commandline', node) - if value is not None and 'commandline' not in already_processed: - already_processed.append('commandline') - self.commandline = value - self.validate_IsCommandLine(self.commandline) # validate type IsCommandLine value = find_attr_value_('complex', node) if value is not None and 'complex' not in already_processed: - already_processed.append('complex') + already_processed.add('complex') self.complex = value self.validate_IsComplex(self.complex) # validate type IsComplex - value = find_attr_value_('mode', node) - if value is not None and 'mode' not in already_processed: - already_processed.append('mode') - self.mode = value - self.validate_AccessType(self.mode) # validate type AccessType + value = find_attr_value_('commandline', node) + if value is not None and 'commandline' not in already_processed: + already_processed.add('commandline') + self.commandline = value + self.validate_IsCommandLine(self.commandline) # validate type IsCommandLine value = find_attr_value_('optional', node) if value is not None and 'optional' not in already_processed: - already_processed.append('optional') + already_processed.add('optional') self.optional = value self.validate_IsOptional(self.optional) # validate type IsOptional - value = find_attr_value_('id', node) - if value is not None and 'id' not in already_processed: - already_processed.append('id') - self.id_ = value + value = find_attr_value_('type', node) + if value is not None and 'type' not in 
already_processed: + already_processed.add('type') + self.type_ = value + self.validate_PropertyValueType(self.type_) # validate type PropertyValueType def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': description_ = child_.text @@ -1563,23 +1866,28 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): units_ = child_.text units_ = self.gds_validate_string(units_, node, 'units') self.units = units_ - self.validate_Unit(self.units) # validate type Unit + # validate type Unit + self.validate_Unit(self.units) elif nodeName_ == 'range': obj_ = range_.factory() obj_.build(child_) - self.set_range(obj_) + self.range_ = obj_ + obj_.original_tagname_ = 'range' elif nodeName_ == 'enumerations': obj_ = enumerations.factory() obj_.build(child_) - self.set_enumerations(obj_) + self.enumerations = obj_ + obj_.original_tagname_ = 'enumerations' elif nodeName_ == 'kind': obj_ = kind.factory() obj_.build(child_) self.kind.append(obj_) + obj_.original_tagname_ = 'kind' elif nodeName_ == 'action': obj_ = action.factory() obj_.build(child_) - self.set_action(obj_) + self.action = obj_ + obj_.original_tagname_ = 'action' # end class simple @@ -1587,10 +1895,15 @@ class simpleRef(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, value=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.value = _cast(None, value) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simpleRef) + if subclass is not None: + return subclass(*args_, **kwargs_) if simpleRef.subclass: return simpleRef.subclass(*args_, **kwargs_) else: @@ -1602,66 +1915,57 @@ def set_refid(self, refid): self.refid = refid def get_value(self): return self.value def set_value(self, value): self.value = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + + ): + return True + else: + 
return False def export(self, outfile, level, namespace_='', name_='simpleRef', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simpleRef') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simpleRef') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simpleRef', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simpleRef'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), )) + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) def exportChildren(self, outfile, level, namespace_='', name_='simpleRef', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - 
): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='simpleRef'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - showIndent(outfile, level) - outfile.write('value = "%s",\n' % (self.value,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value value = find_attr_value_('value', node) if value is not None and 'value' not in already_processed: - already_processed.append('value') + already_processed.add('value') self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1672,9 +1976,15 @@ class simpleSequenceRef(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, values=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.values = values def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simpleSequenceRef) + if subclass is not None: + return subclass(*args_, 
**kwargs_) if simpleSequenceRef.subclass: return simpleSequenceRef.subclass(*args_, **kwargs_) else: @@ -1686,26 +1996,38 @@ def set_values(self, values): self.values = values def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.values is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='simpleSequenceRef', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simpleSequenceRef') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simpleSequenceRef') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simpleSequenceRef', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simpleSequenceRef'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', 
name_='simpleSequenceRef', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1713,61 +2035,42 @@ def exportChildren(self, outfile, level, namespace_='', name_='simpleSequenceRef eol_ = '' if self.values is not None: self.values.export(outfile, level, namespace_, name_='values', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.values is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='simpleSequenceRef'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.values is not None: - showIndent(outfile, level) - outfile.write('values=model_.values(\n') - self.values.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'values': obj_ = values.factory() obj_.build(child_) - self.set_values(obj_) + self.values = obj_ + obj_.original_tagname_ = 'values' # end class simpleSequenceRef class simpleSequence(GeneratedsSuper): subclass 
= None superclass = None - def __init__(self, name=None, optional='false', complex='false', mode='readwrite', type_=None, id_=None, description=None, values=None, units=None, range_=None, kind=None, action=None): - self.name = _cast(None, name) - self.optional = _cast(None, optional) - self.complex = _cast(None, complex) + def __init__(self, id_=None, mode='readwrite', name=None, type_=None, complex='false', optional='false', description=None, values=None, units=None, range_=None, kind=None, action=None): + self.original_tagname_ = None + self.id_ = _cast(None, id_) self.mode = _cast(None, mode) + self.name = _cast(None, name) self.type_ = _cast(None, type_) - self.id_ = _cast(None, id_) + self.complex = _cast(None, complex) + self.optional = _cast(None, optional) self.description = description self.values = values self.units = units + self.validate_Unit(self.units) self.range_ = range_ if kind is None: self.kind = [] @@ -1775,6 +2078,11 @@ def __init__(self, name=None, optional='false', complex='false', mode='readwrite self.kind = kind self.action = action def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simpleSequence) + if subclass is not None: + return subclass(*args_, **kwargs_) if simpleSequence.subclass: return simpleSequence.subclass(*args_, **kwargs_) else: @@ -1789,85 +2097,140 @@ def set_values(self, values): self.values = values def get_units(self): return self.units def set_units(self, units): self.units = units unitsProp = property(get_units, set_units) - def validate_Unit(self, value): - # Validate type Unit, a restriction on xs:string. 
- pass def get_range(self): return self.range_ def set_range(self, range_): self.range_ = range_ rangeProp = property(get_range, set_range) def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def add_kind(self, value): self.kind.append(value) - def insert_kind(self, index, value): self.kind[index] = value + def insert_kind_at(self, index, value): self.kind.insert(index, value) + def replace_kind_at(self, index, value): self.kind[index] = value kindProp = property(get_kind, set_kind) def get_action(self): return self.action def set_action(self, action): self.action = action actionProp = property(get_action, set_action) + def get_id(self): return self.id_ + def set_id(self, id_): self.id_ = id_ + idProp = property(get_id, set_id) + def get_mode(self): return self.mode + def set_mode(self, mode): self.mode = mode + modeProp = property(get_mode, set_mode) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) - def get_optional(self): return self.optional - def set_optional(self, optional): self.optional = optional - optionalProp = property(get_optional, set_optional) - def validate_IsOptional(self, value): - # Validate type IsOptional, a restriction on xs:string. - pass + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + typeProp = property(get_type, set_type) def get_complex(self): return self.complex def set_complex(self, complex): self.complex = complex complexProp = property(get_complex, set_complex) - def validate_IsComplex(self, value): - # Validate type IsComplex, a restriction on xs:string. 
- pass - def get_mode(self): return self.mode - def set_mode(self, mode): self.mode = mode - modeProp = property(get_mode, set_mode) + def get_optional(self): return self.optional + def set_optional(self, optional): self.optional = optional + optionalProp = property(get_optional, set_optional) + def validate_Unit(self, value): + # Validate type Unit, a restriction on xs:string. + if value is not None and Validate_simpletypes_: + pass def validate_AccessType(self, value): # Validate type AccessType, a restriction on xs:string. - pass - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - typeProp = property(get_type, set_type) + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['writeonly', 'readonly', 'readwrite'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AccessType' % {"value" : value.encode("utf-8")} ) def validate_PropertyValueType(self, value): # Validate type PropertyValueType, a restriction on xs:string. - pass - def get_id(self): return self.id_ - def set_id(self, id_): self.id_ = id_ - idProp = property(get_id, set_id) + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['string', 'boolean', 'ulong', 'objref', 'short', 'float', 'octet', 'char', 'ushort', 'double', 'long', 'longlong', 'ulonglong', 'utctime'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on PropertyValueType' % {"value" : value.encode("utf-8")} ) + def validate_IsComplex(self, value): + # Validate type IsComplex, a restriction on xs:string. 
+ if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['false', 'true'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on IsComplex' % {"value" : value.encode("utf-8")} ) + def validate_IsOptional(self, value): + # Validate type IsOptional, a restriction on xs:string. + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['false', 'true'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on IsOptional' % {"value" : value.encode("utf-8")} ) + def hasContent_(self): + if ( + self.description is not None or + self.values is not None or + self.units is not None or + self.range_ is not None or + self.kind or + self.action is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='simpleSequence', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simpleSequence') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simpleSequence') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simpleSequence', 
pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simpleSequence'): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - if self.optional is not None and 'optional' not in already_processed: - already_processed.append('optional') - outfile.write(' optional=%s' % (quote_attrib(self.optional), )) - if self.complex is not None and 'complex' not in already_processed: - already_processed.append('complex') - outfile.write(' complex=%s' % (quote_attrib(self.complex), )) - if self.mode is not None and 'mode' not in already_processed: - already_processed.append('mode') + if self.id_ is not None and 'id' not in already_processed: + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.mode != "readwrite" and 'mode' not in already_processed: + already_processed.add('mode') outfile.write(' mode=%s' % (quote_attrib(self.mode), )) + if self.name is not None and 'name' not in already_processed: + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') + already_processed.add('type_') outfile.write(' type=%s' % (quote_attrib(self.type_), )) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + if self.complex != "false" and 'complex' not in already_processed: + 
already_processed.add('complex') + outfile.write(' complex=%s' % (quote_attrib(self.complex), )) + if self.optional != "false" and 'optional' not in already_processed: + already_processed.add('optional') + outfile.write(' optional=%s' % (quote_attrib(self.optional), )) def exportChildren(self, outfile, level, namespace_='', name_='simpleSequence', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1875,131 +2238,54 @@ def exportChildren(self, outfile, level, namespace_='', name_='simpleSequence', eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.values is not None: self.values.export(outfile, level, namespace_, name_='values', pretty_print=pretty_print) if self.units is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sunits>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.units).encode(ExternalEncoding), input_name='units'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.units), input_name='units')), eol_)) if self.range_ is not None: self.range_.export(outfile, level, namespace_, name_='range', pretty_print=pretty_print) for kind_ in self.kind: kind_.export(outfile, level, namespace_, name_='kind', pretty_print=pretty_print) if self.action is not None: self.action.export(outfile, level, namespace_, name_='action', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.values is not None or - self.units is not None or - self.range_ is not None or - self.kind or - self.action is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, 
name_='simpleSequence'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - if self.optional is not None and 'optional' not in already_processed: - already_processed.append('optional') - showIndent(outfile, level) - outfile.write('optional = "%s",\n' % (self.optional,)) - if self.complex is not None and 'complex' not in already_processed: - already_processed.append('complex') - showIndent(outfile, level) - outfile.write('complex = "%s",\n' % (self.complex,)) - if self.mode is not None and 'mode' not in already_processed: - already_processed.append('mode') - showIndent(outfile, level) - outfile.write('mode = "%s",\n' % (self.mode,)) - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.values is not None: - showIndent(outfile, level) - outfile.write('values=model_.values(\n') - self.values.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.units is not None: - showIndent(outfile, level) - outfile.write('units=%s,\n' % quote_python(self.units).encode(ExternalEncoding)) - if self.range_ is not None: - showIndent(outfile, level) - 
outfile.write('range=model_.range(\n') - self.range_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('kind=[\n') - level += 1 - for kind_ in self.kind: - showIndent(outfile, level) - outfile.write('model_.kind(\n') - kind_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.action is not None: - showIndent(outfile, level) - outfile.write('action=model_.action(\n') - self.action.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('name', node) - if value is not None and 'name' not in already_processed: - already_processed.append('name') - self.name = value - value = find_attr_value_('optional', node) - if value is not None and 'optional' not in already_processed: - already_processed.append('optional') - self.optional = value - self.validate_IsOptional(self.optional) # validate type IsOptional - value = find_attr_value_('complex', node) - if value is not None and 'complex' not in already_processed: - already_processed.append('complex') - self.complex = value - self.validate_IsComplex(self.complex) # validate type IsComplex + value = find_attr_value_('id', node) + if value is not None and 'id' not in already_processed: + already_processed.add('id') + self.id_ = value value = find_attr_value_('mode', node) if value is not None and 'mode' not in already_processed: - already_processed.append('mode') + already_processed.add('mode') self.mode = value self.validate_AccessType(self.mode) # 
validate type AccessType + value = find_attr_value_('name', node) + if value is not None and 'name' not in already_processed: + already_processed.add('name') + self.name = value value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value self.validate_PropertyValueType(self.type_) # validate type PropertyValueType - value = find_attr_value_('id', node) - if value is not None and 'id' not in already_processed: - already_processed.append('id') - self.id_ = value + value = find_attr_value_('complex', node) + if value is not None and 'complex' not in already_processed: + already_processed.add('complex') + self.complex = value + self.validate_IsComplex(self.complex) # validate type IsComplex + value = find_attr_value_('optional', node) + if value is not None and 'optional' not in already_processed: + already_processed.add('optional') + self.optional = value + self.validate_IsOptional(self.optional) # validate type IsOptional def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': description_ = child_.text @@ -2008,34 +2294,40 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'values': obj_ = values.factory() obj_.build(child_) - self.set_values(obj_) + self.values = obj_ + obj_.original_tagname_ = 'values' elif nodeName_ == 'units': units_ = child_.text units_ = self.gds_validate_string(units_, node, 'units') self.units = units_ - self.validate_Unit(self.units) # validate type Unit + # validate type Unit + self.validate_Unit(self.units) elif nodeName_ == 'range': obj_ = range_.factory() obj_.build(child_) - self.set_range(obj_) + self.range_ = obj_ + obj_.original_tagname_ = 'range' elif nodeName_ == 'kind': obj_ = kind.factory() obj_.build(child_) self.kind.append(obj_) + obj_.original_tagname_ = 'kind' elif nodeName_ == 'action': obj_ = action.factory() 
obj_.build(child_) - self.set_action(obj_) + self.action = obj_ + obj_.original_tagname_ = 'action' # end class simpleSequence class struct(GeneratedsSuper): subclass = None superclass = None - def __init__(self, id_=None, name=None, mode='readwrite', description=None, simple=None, simplesequence=None, configurationkind=None): + def __init__(self, id_=None, mode='readwrite', name=None, description=None, simple=None, simplesequence=None, configurationkind=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) - self.name = _cast(None, name) self.mode = _cast(None, mode) + self.name = _cast(None, name) self.description = description if simple is None: self.simple = [] @@ -2050,6 +2342,11 @@ def __init__(self, id_=None, name=None, mode='readwrite', description=None, simp else: self.configurationkind = configurationkind def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, struct) + if subclass is not None: + return subclass(*args_, **kwargs_) if struct.subclass: return struct.subclass(*args_, **kwargs_) else: @@ -2061,56 +2358,83 @@ def set_description(self, description): self.description = description def get_simple(self): return self.simple def set_simple(self, simple): self.simple = simple def add_simple(self, value): self.simple.append(value) - def insert_simple(self, index, value): self.simple[index] = value + def insert_simple_at(self, index, value): self.simple.insert(index, value) + def replace_simple_at(self, index, value): self.simple[index] = value simpleProp = property(get_simple, set_simple) def get_simplesequence(self): return self.simplesequence def set_simplesequence(self, simplesequence): self.simplesequence = simplesequence def add_simplesequence(self, value): self.simplesequence.append(value) - def insert_simplesequence(self, index, value): self.simplesequence[index] = value + def insert_simplesequence_at(self, index, value): 
self.simplesequence.insert(index, value) + def replace_simplesequence_at(self, index, value): self.simplesequence[index] = value simplesequenceProp = property(get_simplesequence, set_simplesequence) def get_configurationkind(self): return self.configurationkind def set_configurationkind(self, configurationkind): self.configurationkind = configurationkind def add_configurationkind(self, value): self.configurationkind.append(value) - def insert_configurationkind(self, index, value): self.configurationkind[index] = value + def insert_configurationkind_at(self, index, value): self.configurationkind.insert(index, value) + def replace_configurationkind_at(self, index, value): self.configurationkind[index] = value configurationkindProp = property(get_configurationkind, set_configurationkind) def get_id(self): return self.id_ def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) - def get_name(self): return self.name - def set_name(self, name): self.name = name - nameProp = property(get_name, set_name) def get_mode(self): return self.mode def set_mode(self, mode): self.mode = mode modeProp = property(get_mode, set_mode) + def get_name(self): return self.name + def set_name(self, name): self.name = name + nameProp = property(get_name, set_name) def validate_AccessType(self, value): # Validate type AccessType, a restriction on xs:string. 
- pass + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['writeonly', 'readonly', 'readwrite'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AccessType' % {"value" : value.encode("utf-8")} ) + def hasContent_(self): + if ( + self.description is not None or + self.simple or + self.simplesequence or + self.configurationkind + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='struct', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('struct') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='struct') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='struct', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='struct'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - 
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - if self.mode is not None and 'mode' not in already_processed: - already_processed.append('mode') + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.mode != "readwrite" and 'mode' not in already_processed: + already_processed.add('mode') outfile.write(' mode=%s' % (quote_attrib(self.mode), )) + if self.name is not None and 'name' not in already_processed: + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='struct', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2118,100 +2442,34 @@ def exportChildren(self, outfile, level, namespace_='', name_='struct', fromsubc eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) for simple_ in self.simple: simple_.export(outfile, level, namespace_, name_='simple', pretty_print=pretty_print) for simplesequence_ in self.simplesequence: simplesequence_.export(outfile, level, namespace_, name_='simplesequence', pretty_print=pretty_print) for configurationkind_ in self.configurationkind: configurationkind_.export(outfile, level, namespace_, name_='configurationkind', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.simple or - self.simplesequence or - self.configurationkind - ): - return True - else: - return False - def exportLiteral(self, outfile, 
level, name_='struct'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - if self.mode is not None and 'mode' not in already_processed: - already_processed.append('mode') - showIndent(outfile, level) - outfile.write('mode = "%s",\n' % (self.mode,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('simple=[\n') - level += 1 - for simple_ in self.simple: - showIndent(outfile, level) - outfile.write('model_.simple(\n') - simple_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simplesequence=[\n') - level += 1 - for simplesequence_ in self.simplesequence: - showIndent(outfile, level) - outfile.write('model_.simpleSequence(\n') - simplesequence_.exportLiteral(outfile, level, name_='simpleSequence') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('configurationkind=[\n') - level += 1 - for configurationkind_ in self.configurationkind: - showIndent(outfile, level) - outfile.write('model_.configurationKind(\n') - configurationkind_.exportLiteral(outfile, level, name_='configurationKind') - 
showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value - value = find_attr_value_('name', node) - if value is not None and 'name' not in already_processed: - already_processed.append('name') - self.name = value value = find_attr_value_('mode', node) if value is not None and 'mode' not in already_processed: - already_processed.append('mode') + already_processed.add('mode') self.mode = value self.validate_AccessType(self.mode) # validate type AccessType + value = find_attr_value_('name', node) + if value is not None and 'name' not in already_processed: + already_processed.add('name') + self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': description_ = child_.text @@ -2221,24 +2479,28 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simple.factory() obj_.build(child_) self.simple.append(obj_) + obj_.original_tagname_ = 'simple' elif nodeName_ == 'simplesequence': obj_ = simpleSequence.factory() obj_.build(child_) self.simplesequence.append(obj_) + obj_.original_tagname_ = 'simplesequence' elif nodeName_ == 'configurationkind': obj_ = configurationKind.factory() obj_.build(child_) self.configurationkind.append(obj_) + obj_.original_tagname_ = 'configurationkind' # end class struct class structSequence(GeneratedsSuper): subclass = None superclass = None - def __init__(self, id_=None, name=None, 
mode='readwrite', description=None, struct=None, structvalue=None, configurationkind=None): + def __init__(self, id_=None, mode='readwrite', name=None, description=None, struct=None, structvalue=None, configurationkind=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) - self.name = _cast(None, name) self.mode = _cast(None, mode) + self.name = _cast(None, name) self.description = description self.struct = struct if structvalue is None: @@ -2250,6 +2512,11 @@ def __init__(self, id_=None, name=None, mode='readwrite', description=None, stru else: self.configurationkind = configurationkind def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structSequence) + if subclass is not None: + return subclass(*args_, **kwargs_) if structSequence.subclass: return structSequence.subclass(*args_, **kwargs_) else: @@ -2264,51 +2531,77 @@ def set_struct(self, struct): self.struct = struct def get_structvalue(self): return self.structvalue def set_structvalue(self, structvalue): self.structvalue = structvalue def add_structvalue(self, value): self.structvalue.append(value) - def insert_structvalue(self, index, value): self.structvalue[index] = value + def insert_structvalue_at(self, index, value): self.structvalue.insert(index, value) + def replace_structvalue_at(self, index, value): self.structvalue[index] = value structvalueProp = property(get_structvalue, set_structvalue) def get_configurationkind(self): return self.configurationkind def set_configurationkind(self, configurationkind): self.configurationkind = configurationkind def add_configurationkind(self, value): self.configurationkind.append(value) - def insert_configurationkind(self, index, value): self.configurationkind[index] = value + def insert_configurationkind_at(self, index, value): self.configurationkind.insert(index, value) + def replace_configurationkind_at(self, index, value): self.configurationkind[index] = value 
configurationkindProp = property(get_configurationkind, set_configurationkind) def get_id(self): return self.id_ def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) - def get_name(self): return self.name - def set_name(self, name): self.name = name - nameProp = property(get_name, set_name) def get_mode(self): return self.mode def set_mode(self, mode): self.mode = mode modeProp = property(get_mode, set_mode) + def get_name(self): return self.name + def set_name(self, name): self.name = name + nameProp = property(get_name, set_name) def validate_AccessType(self, value): # Validate type AccessType, a restriction on xs:string. - pass + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['writeonly', 'readonly', 'readwrite'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on AccessType' % {"value" : value.encode("utf-8")} ) + def hasContent_(self): + if ( + self.description is not None or + self.struct is not None or + self.structvalue or + self.configurationkind + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structSequence', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structSequence') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structSequence') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, 
namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structSequence', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='structSequence'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - if self.mode is not None and 'mode' not in already_processed: - already_processed.append('mode') + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.mode != "readwrite" and 'mode' not in already_processed: + already_processed.add('mode') outfile.write(' mode=%s' % (quote_attrib(self.mode), )) + if self.name is not None and 'name' not in already_processed: + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='structSequence', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2316,94 +2609,34 @@ def exportChildren(self, outfile, level, namespace_='', name_='structSequence', eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % 
(self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.struct is not None: self.struct.export(outfile, level, namespace_, name_='struct', pretty_print=pretty_print) for structvalue_ in self.structvalue: structvalue_.export(outfile, level, namespace_, name_='structvalue', pretty_print=pretty_print) for configurationkind_ in self.configurationkind: configurationkind_.export(outfile, level, namespace_, name_='configurationkind', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.struct is not None or - self.structvalue or - self.configurationkind - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structSequence'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - if self.mode is not None and 'mode' not in already_processed: - already_processed.append('mode') - showIndent(outfile, level) - outfile.write('mode = "%s",\n' % (self.mode,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.struct is not None: - showIndent(outfile, level) - outfile.write('struct=model_.struct(\n') - self.struct.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('structvalue=[\n') - 
level += 1 - for structvalue_ in self.structvalue: - showIndent(outfile, level) - outfile.write('model_.structValue(\n') - structvalue_.exportLiteral(outfile, level, name_='structValue') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('configurationkind=[\n') - level += 1 - for configurationkind_ in self.configurationkind: - showIndent(outfile, level) - outfile.write('model_.configurationKind(\n') - configurationkind_.exportLiteral(outfile, level, name_='configurationKind') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value - value = find_attr_value_('name', node) - if value is not None and 'name' not in already_processed: - already_processed.append('name') - self.name = value value = find_attr_value_('mode', node) if value is not None and 'mode' not in already_processed: - already_processed.append('mode') + already_processed.add('mode') self.mode = value self.validate_AccessType(self.mode) # validate type AccessType + value = find_attr_value_('name', node) + if value is not None and 'name' not in already_processed: + already_processed.add('name') + self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': description_ = child_.text @@ -2412,15 +2645,18 @@ def buildChildren(self, child_, node, nodeName_, 
fromsubclass_=False): elif nodeName_ == 'struct': obj_ = struct.factory() obj_.build(child_) - self.set_struct(obj_) + self.struct = obj_ + obj_.original_tagname_ = 'struct' elif nodeName_ == 'structvalue': obj_ = structValue.factory() obj_.build(child_) self.structvalue.append(obj_) + obj_.original_tagname_ = 'structvalue' elif nodeName_ == 'configurationkind': obj_ = configurationKind.factory() obj_.build(child_) self.configurationkind.append(obj_) + obj_.original_tagname_ = 'configurationkind' # end class structSequence @@ -2428,6 +2664,7 @@ class structValue(GeneratedsSuper): subclass = None superclass = None def __init__(self, simpleref=None, simplesequenceref=None): + self.original_tagname_ = None if simpleref is None: self.simpleref = [] else: @@ -2437,6 +2674,11 @@ def __init__(self, simpleref=None, simplesequenceref=None): else: self.simplesequenceref = simplesequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structValue) + if subclass is not None: + return subclass(*args_, **kwargs_) if structValue.subclass: return structValue.subclass(*args_, **kwargs_) else: @@ -2445,25 +2687,40 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) def get_simplesequenceref(self): return self.simplesequenceref def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref def add_simplesequenceref(self, value): self.simplesequenceref.append(value) - def insert_simplesequenceref(self, index, value): 
self.simplesequenceref[index] = value + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structValue', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structValue') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structValue') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structValue', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2479,51 +2736,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='structValue', fro simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) for simplesequenceref_ in self.simplesequenceref: simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref or - self.simplesequenceref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structValue'): - level += 1 - 
self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleRef(\n') - simpleref_.exportLiteral(outfile, level, name_='simpleRef') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simplesequenceref=[\n') - level += 1 - for simplesequenceref_ in self.simplesequenceref: - showIndent(outfile, level) - outfile.write('model_.simpleSequenceRef(\n') - simplesequenceref_.exportLiteral(outfile, level, name_='simpleSequenceRef') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -2531,10 +2750,12 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simpleRef.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' elif nodeName_ == 'simplesequenceref': obj_ = simpleSequenceRef.factory() obj_.build(child_) self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' # end class structValue @@ -2542,11 +2763,17 @@ class test(GeneratedsSuper): subclass = None superclass = None def __init__(self, id_=None, 
description=None, inputvalue=None, resultvalue=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.description = description self.inputvalue = inputvalue self.resultvalue = resultvalue def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, test) + if subclass is not None: + return subclass(*args_, **kwargs_) if test.subclass: return test.subclass(*args_, **kwargs_) else: @@ -2564,26 +2791,40 @@ def set_resultvalue(self, resultvalue): self.resultvalue = resultvalue def get_id(self): return self.id_ def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) + def hasContent_(self): + if ( + self.description is not None or + self.inputvalue is not None or + self.resultvalue is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='test', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('test') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='test') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='test', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='test'): if self.id_ is not None and 'id' not in already_processed: - 
already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) def exportChildren(self, outfile, level, namespace_='', name_='test', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2591,55 +2832,22 @@ def exportChildren(self, outfile, level, namespace_='', name_='test', fromsubcla eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.inputvalue is not None: self.inputvalue.export(outfile, level, namespace_, name_='inputvalue', pretty_print=pretty_print) if self.resultvalue is not None: self.resultvalue.export(outfile, level, namespace_, name_='resultvalue', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.inputvalue is not None or - self.resultvalue is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='test'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % 
quote_python(self.description).encode(ExternalEncoding)) - if self.inputvalue is not None: - showIndent(outfile, level) - outfile.write('inputvalue=model_.inputValue(\n') - self.inputvalue.exportLiteral(outfile, level, name_='inputvalue') - showIndent(outfile, level) - outfile.write('),\n') - if self.resultvalue is not None: - showIndent(outfile, level) - outfile.write('resultvalue=model_.resultValue(\n') - self.resultvalue.exportLiteral(outfile, level, name_='resultvalue') - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': @@ -2649,11 +2857,13 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'inputvalue': obj_ = inputValue.factory() obj_.build(child_) - self.set_inputvalue(obj_) + self.inputvalue = obj_ + obj_.original_tagname_ = 'inputvalue' elif nodeName_ == 'resultvalue': obj_ = resultValue.factory() obj_.build(child_) - self.set_resultvalue(obj_) + self.resultvalue = obj_ + obj_.original_tagname_ = 'resultvalue' # end class test @@ -2661,11 +2871,17 @@ class values(GeneratedsSuper): subclass = None superclass = None def __init__(self, value=None): + self.original_tagname_ = None if value is None: self.value = [] else: self.value = value def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, values) + if subclass is not None: 
+ return subclass(*args_, **kwargs_) if values.subclass: return values.subclass(*args_, **kwargs_) else: @@ -2674,20 +2890,33 @@ def factory(*args_, **kwargs_): def get_value(self): return self.value def set_value(self, value): self.value = value def add_value(self, value): self.value.append(value) - def insert_value(self, index, value): self.value[index] = value + def insert_value_at(self, index, value): self.value.insert(index, value) + def replace_value_at(self, index, value): self.value[index] = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + self.value + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='values', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('values') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='values') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='values', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2701,36 +2930,14 @@ def exportChildren(self, outfile, level, namespace_='', name_='values', fromsubc eol_ = '' for value_ in self.value: showIndent(outfile, level, pretty_print) - outfile.write('<%svalue>%s%s' % (namespace_, self.gds_format_string(quote_xml(value_).encode(ExternalEncoding), input_name='value'), namespace_, eol_)) - def hasContent_(self): - if ( - self.value - ): - return 
True - else: - return False - def exportLiteral(self, outfile, level, name_='values'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('value=[\n') - level += 1 - for value_ in self.value: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(value_).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(value_), input_name='value')), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -2741,23 +2948,32 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): # end class values +GDSClassesMapping = { + 'properties': properties, +} + + USAGE_TEXT = """ Usage: python .py [ -s ] """ + def usage(): - print USAGE_TEXT + print(USAGE_TEXT) sys.exit(1) def get_root_tag(node): tag = Tag_pattern_.match(node.tag).groups()[-1] - rootClass = globals().get(tag) + rootClass = GDSClassesMapping.get(tag) + if rootClass is None: + rootClass = globals().get(tag) return tag, rootClass -def parse(inFileName): - doc = parsexml_(inFileName) +def parse(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -2767,16 +2983,18 @@ def parse(inFileName): 
rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_=rootTag, -## namespacedef_='', -## pretty_print=True) +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='', +## pretty_print=True) return rootObj -def parseString(inString): - from StringIO import StringIO - doc = parsexml_(StringIO(inString)) +def parseEtree(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -2786,14 +3004,47 @@ def parseString(inString): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_="properties", -## namespacedef_='') + mapping = {} + rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) + reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) +## if not silence: +## content = etree_.tostring( +## rootElement, pretty_print=True, +## xml_declaration=True, encoding="utf-8") +## sys.stdout.write(content) +## sys.stdout.write('\n') + return rootObj, rootElement, mapping, reverse_mapping + + +def parseString(inString, silence=False): + '''Parse a string, create the object tree, and export it. + + Arguments: + - inString -- A string. This XML fragment should not start + with an XML declaration containing an encoding. + - silence -- A boolean. If False, export the object. + Returns -- The root object in the tree. + ''' + parser = None + rootNode= parsexmlstring_(inString, parser) + rootTag, rootClass = get_root_tag(rootNode) + if rootClass is None: + rootTag = 'properties' + rootClass = properties + rootObj = rootClass.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. 
+## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='') return rootObj -def parseLiteral(inFileName): - doc = parsexml_(inFileName) +def parseLiteral(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -2803,11 +3054,12 @@ def parseLiteral(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('#from prf import *\n\n') -## sys.stdout.write('import prf as model_\n\n') -## sys.stdout.write('rootObj = model_.rootTag(\n') -## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) -## sys.stdout.write(')\n') +## if not silence: +## sys.stdout.write('#from prf import *\n\n') +## sys.stdout.write('import prf as model_\n\n') +## sys.stdout.write('rootObj = model_.rootClass(\n') +## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) +## sys.stdout.write(')\n') return rootObj @@ -2843,4 +3095,4 @@ def main(): "structValue", "test", "values" - ] +] diff --git a/redhawk/src/base/framework/python/ossie/parsers/profile.py b/redhawk/src/base/framework/python/ossie/parsers/profile.py index 1527a39d3..5976aba9b 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/profile.py +++ b/redhawk/src/base/framework/python/ossie/parsers/profile.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -# +# -*- coding: utf-8 -*- + # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # @@ -18,70 +18,97 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. -# # -# Generated Thu Sep 12 14:49:30 2013 by generateDS.py version 2.7c. +# Generated Mon Jul 30 12:29:35 2018 by generateDS.py version 2.29.14. 
+# Python 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] +# +# Command line options: +# ('-f', '') +# ('--silence', '') +# ('-m', '') +# ('-o', 'ossie/parsers/profile.py') +# +# Command line arguments: +# ../../../xml/xsd/profile.xsd +# +# Command line: +# /usr/bin/generateDS.py -f --silence -m -o "ossie/parsers/profile.py" ../../../xml/xsd/profile.xsd +# +# Current working directory (os.getcwd()): +# python # import sys -import getopt import re as re_ - -etree_ = None -Verbose_import_ = False -( XMLParser_import_none, XMLParser_import_lxml, - XMLParser_import_elementtree - ) = range(3) -XMLParser_import_library = None +import base64 +import datetime as datetime_ +import warnings as warnings_ try: - # lxml from lxml import etree as etree_ - XMLParser_import_library = XMLParser_import_lxml - if Verbose_import_: - print("running with lxml.etree") except ImportError: - try: - # cElementTree from Python 2.5+ - import xml.etree.cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree on Python 2.5+") - except ImportError: - try: - # ElementTree from Python 2.5+ - import xml.etree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree on Python 2.5+") - except ImportError: - try: - # normal cElementTree install - import cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree") - except ImportError: - try: - # normal ElementTree install - import elementtree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree") - except ImportError: - raise ImportError("Failed to import ElementTree from any known place") - -def parsexml_(*args, **kwargs): - if (XMLParser_import_library == XMLParser_import_lxml and - 'parser' not in 
kwargs): + from xml.etree import ElementTree as etree_ + + +Validate_simpletypes_ = True +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str + + +def parsexml_(infile, parser=None, **kwargs): + if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. - kwargs['parser'] = etree_.ETCompatXMLParser() - doc = etree_.parse(*args, **kwargs) + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + doc = etree_.parse(infile, parser=parser, **kwargs) return doc +def parsexmlstring_(instring, parser=None, **kwargs): + if parser is None: + # Use the lxml ElementTree compatible parser so that, e.g., + # we ignore comments. + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + element = etree_.fromstring(instring, parser=parser, **kwargs) + return element + +# +# Namespace prefix definition table (and other attributes, too) +# +# The module generatedsnamespaces, if it is importable, must contain +# a dictionary named GeneratedsNamespaceDefs. This Python dictionary +# should map element type names (strings) to XML schema namespace prefix +# definitions. The export method for any class for which there is +# a namespace prefix definition, will export that definition in the +# XML representation of that element. See the export method of +# any generated element type class for a example of the use of this +# table. 
+# A sample table is: +# +# # File: generatedsnamespaces.py +# +# GenerateDSNamespaceDefs = { +# "ElementtypeA": "http://www.xxx.com/namespaceA", +# "ElementtypeB": "http://www.xxx.com/namespaceB", +# } +# + +try: + from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_ +except ImportError: + GenerateDSNamespaceDefs_ = {} + # -# User methods +# The root super-class for element type classes # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class @@ -89,67 +116,273 @@ def parsexml_(*args, **kwargs): try: from generatedssuper import GeneratedsSuper -except ImportError, exp: - +except ImportError as exp: + class GeneratedsSuper(object): + tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$') + class _FixedOffsetTZ(datetime_.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime_.timedelta(minutes=offset) + self.__name = name + def utcoffset(self, dt): + return self.__offset + def tzname(self, dt): + return self.__name + def dst(self, dt): + return None def gds_format_string(self, input_data, input_name=''): return input_data - def gds_validate_string(self, input_data, node, input_name=''): + def gds_validate_string(self, input_data, node=None, input_name=''): + if not input_data: + return '' + else: + return input_data + def gds_format_base64(self, input_data, input_name=''): + return base64.b64encode(input_data) + def gds_validate_base64(self, input_data, node=None, input_name=''): return input_data def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data - def gds_validate_integer(self, input_data, node, input_name=''): + def gds_validate_integer(self, input_data, node=None, input_name=''): return input_data def gds_format_integer_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_integer_list(self, input_data, node, input_name=''): + return '%s' % ' 
'.join(input_data) + def gds_validate_integer_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + int(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of integers') - return input_data + return values def gds_format_float(self, input_data, input_name=''): - return '%f' % input_data - def gds_validate_float(self, input_data, node, input_name=''): + return ('%.15f' % input_data).rstrip('0') + def gds_validate_float(self, input_data, node=None, input_name=''): return input_data def gds_format_float_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_float_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_float_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of floats') - return input_data + return values def gds_format_double(self, input_data, input_name=''): return '%e' % input_data - def gds_validate_double(self, input_data, node, input_name=''): + def gds_validate_double(self, input_data, node=None, input_name=''): return input_data def gds_format_double_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_double_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_double_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of doubles') - return input_data + return values def gds_format_boolean(self, input_data, input_name=''): - return 
'%s' % input_data - def gds_validate_boolean(self, input_data, node, input_name=''): + return ('%s' % input_data).lower() + def gds_validate_boolean(self, input_data, node=None, input_name=''): return input_data def gds_format_boolean_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_boolean_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_boolean_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: if value not in ('true', '1', 'false', '0', ): - raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")') + raise_parse_error( + node, + 'Requires sequence of booleans ' + '("true", "1", "false", "0")') + return values + def gds_validate_datetime(self, input_data, node=None, input_name=''): + return input_data + def gds_format_datetime(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + @classmethod + def gds_parse_datetime(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = 
input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + time_parts = input_data.split('.') + if len(time_parts) > 1: + micro_seconds = int(float('0.' + time_parts[1]) * 1000000) + input_data = '%s.%s' % (time_parts[0], micro_seconds, ) + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt + def gds_validate_date(self, input_data, node=None, input_name=''): return input_data + def gds_format_date(self, input_data, input_name=''): + _svalue = '%04d-%02d-%02d' % ( + input_data.year, + input_data.month, + input_data.day, + ) + try: + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format( + hours, minutes) + except AttributeError: + pass + return _svalue + @classmethod + def gds_parse_date(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + dt = 
datetime_.datetime.strptime(input_data, '%Y-%m-%d') + dt = dt.replace(tzinfo=tz) + return dt.date() + def gds_validate_time(self, input_data, node=None, input_name=''): + return input_data + def gds_format_time(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%02d:%02d:%02d' % ( + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%02d:%02d:%02d.%s' % ( + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + def gds_validate_simple_patterns(self, patterns, target): + # pat is a list of lists of strings/patterns. 
We should: + # - AND the outer elements + # - OR the inner elements + found1 = True + for patterns1 in patterns: + found2 = False + for patterns2 in patterns1: + if re_.search(patterns2, target) is not None: + found2 = True + break + if not found2: + found1 = False + break + return found1 + @classmethod + def gds_parse_time(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + if len(input_data.split('.')) > 1: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt.time() def gds_str_lower(self, instring): return instring.lower() def get_path_(self, node): @@ -180,6 +413,38 @@ def get_class_obj_(self, node, default_class=None): return class_obj1 def gds_build_any(self, node, type_name=None): return None + @classmethod + def gds_reverse_node_mapping(cls, mapping): + return dict(((v, k) for k, v in mapping.iteritems())) + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring + @staticmethod + def convert_unicode(instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result + def __eq__(self, other): + if type(self) != type(other): + return False + return self.__dict__ == other.__dict__ + def __ne__(self, other): + return not self.__eq__(other) + + def 
getSubclassFromModule_(module, class_): + '''Get the subclass of a class from a specific module.''' + name = class_.__name__ + 'Sub' + if hasattr(module, name): + return getattr(module, name) + else: + return None # @@ -205,29 +470,50 @@ def gds_build_any(self, node, type_name=None): Tag_pattern_ = re_.compile(r'({.*})?(.*)') String_cleanup_pat_ = re_.compile(r"[\n\r\s]+") Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)') +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +# Change this to redirect the generated superclass module to use a +# specific subclass module. +CurrentSubclassModule_ = None # # Support/utility functions. # + def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write(' ') + def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." if not inStr: return '' - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') @@ -240,6 +526,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -255,6 +542,7 @@ def quote_python(inStr): else: return '"""%s"""' % s1 + def get_all_text_(node): if node.text is not None: text = node.text @@ -265,6 +553,7 @@ def get_all_text_(node): text += child.tail return text + def 
find_attr_value_(attr_name, node): attrs = node.attrib attr_parts = attr_name.split(':') @@ -282,11 +571,9 @@ def find_attr_value_(attr_name, node): class GDSParseError(Exception): pass + def raise_parse_error(node, msg): - if XMLParser_import_library == XMLParser_import_lxml: - msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) - else: - msg = '%s (element %s)' % (msg, node.tag, ) + msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) raise GDSParseError(msg) @@ -305,6 +592,7 @@ class MixedContainer: TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + TypeBase64 = 8 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type @@ -318,49 +606,104 @@ def getValue(self): return self.value def getName(self): return self.name - def export(self, outfile, level, name, namespace, pretty_print=True): + def export(self, outfile, level, name, namespace, + pretty_print=True): if self.category == MixedContainer.CategoryText: # Prevent exporting empty content as empty lines. 
- if self.value.strip(): + if self.value.strip(): outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace, name, pretty_print) + self.value.export( + outfile, level, namespace, name, + pretty_print=pretty_print) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) + outfile.write('<%s>%s' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) + outfile.write('<%s>%d' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) + outfile.write('<%s>%f' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeBase64: + outfile.write('<%s>%s' % ( + self.name, + base64.b64encode(self.value), + self.name)) + def to_etree(self, element): + if self.category == MixedContainer.CategoryText: + # Prevent exporting empty content as empty lines. 
+ if self.value.strip(): + if len(element) > 0: + if element[-1].tail is None: + element[-1].tail = self.value + else: + element[-1].tail += self.value + else: + if element.text is None: + element.text = self.value + else: + element.text += self.value + elif self.category == MixedContainer.CategorySimple: + subelement = etree_.SubElement( + element, '%s' % self.name) + subelement.text = self.to_etree_simple() + else: # category == MixedContainer.CategoryComplex + self.value.to_etree(element) + def to_etree_simple(self): + if self.content_type == MixedContainer.TypeString: + text = self.value + elif (self.content_type == MixedContainer.TypeInteger or + self.content_type == MixedContainer.TypeBoolean): + text = '%d' % self.value + elif (self.content_type == MixedContainer.TypeFloat or + self.content_type == MixedContainer.TypeDecimal): + text = '%f' % self.value elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) + text = '%g' % self.value + elif self.content_type == MixedContainer.TypeBase64: + text = '%s' % base64.b64encode(self.value) + return text def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \ - 
(self.category, self.content_type, self.name,)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s",\n' % ( + self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class MemberSpec_(object): - def __init__(self, name='', data_type='', container=0): + def __init__(self, name='', data_type='', container=0, + optional=0, child_attrs=None, choice=None): self.name = name self.data_type = data_type self.container = container + self.child_attrs = child_attrs + self.choice = choice + self.optional = optional def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -375,6 +718,13 @@ def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container + def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs + def get_child_attrs(self): return self.child_attrs + def set_choice(self, choice): self.choice = choice + def get_choice(self): return self.choice + def set_optional(self, optional): self.optional = optional + def get_optional(self): return self.optional + def _cast(typ, value): if typ is None or value is None: @@ -385,6 +735,7 @@ def _cast(typ, value): # Data representation classes. 
# + class profile(GeneratedsSuper): """The profile element can be used to specify the absolute profile file pathname relative to a mounted CF FileSystem.The filename @@ -395,105 +746,110 @@ class profile(GeneratedsSuper): values are “SAD”, “SPD”, “DCD”, and “DMD”.""" subclass = None superclass = None - def __init__(self, type_=None, filename=None): - self.type_ = _cast(None, type_) + def __init__(self, filename=None, type_=None): + self.original_tagname_ = None self.filename = _cast(None, filename) - pass + self.type_ = _cast(None, type_) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, profile) + if subclass is not None: + return subclass(*args_, **kwargs_) if profile.subclass: return profile.subclass(*args_, **kwargs_) else: return profile(*args_, **kwargs_) factory = staticmethod(factory) - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - typeProp = property(get_type, set_type) def get_filename(self): return self.filename def set_filename(self, filename): self.filename = filename filenameProp = property(get_filename, set_filename) + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + typeProp = property(get_type, set_type) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='profile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('profile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, 
name_='profile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='profile', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='profile'): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) if self.filename is not None and 'filename' not in already_processed: - already_processed.append('filename') - outfile.write(' filename=%s' % (self.gds_format_string(quote_attrib(self.filename).encode(ExternalEncoding), input_name='filename'), )) - def exportChildren(self, outfile, level, namespace_='', name_='profile', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='profile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('filename') + outfile.write(' filename=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.filename), input_name='filename')), )) if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.filename is not None and 'filename' not in already_processed: - already_processed.append('filename') - showIndent(outfile, level) - outfile.write('filename = "%s",\n' % (self.filename,)) - def exportLiteralChildren(self, outfile, level, 
name_): + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) + def exportChildren(self, outfile, level, namespace_='', name_='profile', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('type', node) - if value is not None and 'type' not in already_processed: - already_processed.append('type') - self.type_ = value value = find_attr_value_('filename', node) if value is not None and 'filename' not in already_processed: - already_processed.append('filename') + already_processed.add('filename') self.filename = value + value = find_attr_value_('type', node) + if value is not None and 'type' not in already_processed: + already_processed.add('type') + self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class profile +GDSClassesMapping = { + 'profile': profile, +} + + USAGE_TEXT = """ Usage: python .py [ -s ] """ + def usage(): - print USAGE_TEXT + print(USAGE_TEXT) sys.exit(1) def get_root_tag(node): tag = Tag_pattern_.match(node.tag).groups()[-1] - rootClass = globals().get(tag) + rootClass = GDSClassesMapping.get(tag) + if rootClass is None: + rootClass = globals().get(tag) return tag, rootClass -def parse(inFileName): - doc = parsexml_(inFileName) +def parse(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -503,16 +859,18 @@ def parse(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_=rootTag, -## namespacedef_='', -## pretty_print=True) +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='', +## pretty_print=True) return rootObj -def parseString(inString): - from StringIO import StringIO - doc = parsexml_(StringIO(inString)) +def parseEtree(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -522,14 +880,47 @@ def parseString(inString): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_="profile", -## namespacedef_='') + mapping = {} + rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) + reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) +## if not silence: +## content = etree_.tostring( +## rootElement, pretty_print=True, +## xml_declaration=True, encoding="utf-8") +## sys.stdout.write(content) +## sys.stdout.write('\n') + return rootObj, rootElement, mapping, reverse_mapping + + +def parseString(inString, silence=False): + '''Parse a string, create the object tree, and export it. + + Arguments: + - inString -- A string. This XML fragment should not start + with an XML declaration containing an encoding. + - silence -- A boolean. If False, export the object. + Returns -- The root object in the tree. + ''' + parser = None + rootNode= parsexmlstring_(inString, parser) + rootTag, rootClass = get_root_tag(rootNode) + if rootClass is None: + rootTag = 'profile' + rootClass = profile + rootObj = rootClass.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. 
+## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='') return rootObj -def parseLiteral(inFileName): - doc = parsexml_(inFileName) +def parseLiteral(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -539,11 +930,12 @@ def parseLiteral(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('#from profile import *\n\n') -## sys.stdout.write('import profile as model_\n\n') -## sys.stdout.write('rootObj = model_.rootTag(\n') -## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) -## sys.stdout.write(')\n') +## if not silence: +## sys.stdout.write('#from profile import *\n\n') +## sys.stdout.write('import profile as model_\n\n') +## sys.stdout.write('rootObj = model_.rootClass(\n') +## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) +## sys.stdout.write(')\n') return rootObj @@ -562,4 +954,4 @@ def main(): __all__ = [ "profile" - ] +] diff --git a/redhawk/src/base/framework/python/ossie/parsers/sad.py b/redhawk/src/base/framework/python/ossie/parsers/sad.py index 4b5e073e7..47cd6d4f2 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/sad.py +++ b/redhawk/src/base/framework/python/ossie/parsers/sad.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -# +# -*- coding: utf-8 -*- + # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # @@ -18,70 +18,97 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. -# # -# Generated Thu Sep 12 14:49:31 2013 by generateDS.py version 2.7c. +# Generated Mon Jul 30 12:29:35 2018 by generateDS.py version 2.29.14. 
+# Python 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] +# +# Command line options: +# ('-f', '') +# ('--silence', '') +# ('-m', '') +# ('-o', 'ossie/parsers/sad.py') +# +# Command line arguments: +# ../../../xml/xsd/sad.xsd +# +# Command line: +# /usr/bin/generateDS.py -f --silence -m -o "ossie/parsers/sad.py" ../../../xml/xsd/sad.xsd +# +# Current working directory (os.getcwd()): +# python # import sys -import getopt import re as re_ - -etree_ = None -Verbose_import_ = False -( XMLParser_import_none, XMLParser_import_lxml, - XMLParser_import_elementtree - ) = range(3) -XMLParser_import_library = None +import base64 +import datetime as datetime_ +import warnings as warnings_ try: - # lxml from lxml import etree as etree_ - XMLParser_import_library = XMLParser_import_lxml - if Verbose_import_: - print("running with lxml.etree") except ImportError: - try: - # cElementTree from Python 2.5+ - import xml.etree.cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree on Python 2.5+") - except ImportError: - try: - # ElementTree from Python 2.5+ - import xml.etree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree on Python 2.5+") - except ImportError: - try: - # normal cElementTree install - import cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree") - except ImportError: - try: - # normal ElementTree install - import elementtree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree") - except ImportError: - raise ImportError("Failed to import ElementTree from any known place") - -def parsexml_(*args, **kwargs): - if (XMLParser_import_library == XMLParser_import_lxml and - 'parser' not in kwargs): + from 
xml.etree import ElementTree as etree_ + + +Validate_simpletypes_ = True +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str + + +def parsexml_(infile, parser=None, **kwargs): + if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. - kwargs['parser'] = etree_.ETCompatXMLParser() - doc = etree_.parse(*args, **kwargs) + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + doc = etree_.parse(infile, parser=parser, **kwargs) return doc +def parsexmlstring_(instring, parser=None, **kwargs): + if parser is None: + # Use the lxml ElementTree compatible parser so that, e.g., + # we ignore comments. + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + element = etree_.fromstring(instring, parser=parser, **kwargs) + return element + +# +# Namespace prefix definition table (and other attributes, too) +# +# The module generatedsnamespaces, if it is importable, must contain +# a dictionary named GeneratedsNamespaceDefs. This Python dictionary +# should map element type names (strings) to XML schema namespace prefix +# definitions. The export method for any class for which there is +# a namespace prefix definition, will export that definition in the +# XML representation of that element. See the export method of +# any generated element type class for a example of the use of this +# table. 
+# A sample table is: +# +# # File: generatedsnamespaces.py +# +# GenerateDSNamespaceDefs = { +# "ElementtypeA": "http://www.xxx.com/namespaceA", +# "ElementtypeB": "http://www.xxx.com/namespaceB", +# } # -# User methods + +try: + from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_ +except ImportError: + GenerateDSNamespaceDefs_ = {} + +# +# The root super-class for element type classes # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class @@ -89,67 +116,273 @@ def parsexml_(*args, **kwargs): try: from generatedssuper import GeneratedsSuper -except ImportError, exp: - +except ImportError as exp: + class GeneratedsSuper(object): + tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$') + class _FixedOffsetTZ(datetime_.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime_.timedelta(minutes=offset) + self.__name = name + def utcoffset(self, dt): + return self.__offset + def tzname(self, dt): + return self.__name + def dst(self, dt): + return None def gds_format_string(self, input_data, input_name=''): return input_data - def gds_validate_string(self, input_data, node, input_name=''): + def gds_validate_string(self, input_data, node=None, input_name=''): + if not input_data: + return '' + else: + return input_data + def gds_format_base64(self, input_data, input_name=''): + return base64.b64encode(input_data) + def gds_validate_base64(self, input_data, node=None, input_name=''): return input_data def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data - def gds_validate_integer(self, input_data, node, input_name=''): + def gds_validate_integer(self, input_data, node=None, input_name=''): return input_data def gds_format_integer_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_integer_list(self, input_data, node, input_name=''): + return '%s' % ' 
'.join(input_data) + def gds_validate_integer_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + int(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of integers') - return input_data + return values def gds_format_float(self, input_data, input_name=''): - return '%f' % input_data - def gds_validate_float(self, input_data, node, input_name=''): + return ('%.15f' % input_data).rstrip('0') + def gds_validate_float(self, input_data, node=None, input_name=''): return input_data def gds_format_float_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_float_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_float_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of floats') - return input_data + return values def gds_format_double(self, input_data, input_name=''): return '%e' % input_data - def gds_validate_double(self, input_data, node, input_name=''): + def gds_validate_double(self, input_data, node=None, input_name=''): return input_data def gds_format_double_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_double_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_double_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of doubles') - return input_data + return values def gds_format_boolean(self, input_data, input_name=''): - return 
'%s' % input_data - def gds_validate_boolean(self, input_data, node, input_name=''): + return ('%s' % input_data).lower() + def gds_validate_boolean(self, input_data, node=None, input_name=''): return input_data def gds_format_boolean_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_boolean_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_boolean_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: if value not in ('true', '1', 'false', '0', ): - raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")') + raise_parse_error( + node, + 'Requires sequence of booleans ' + '("true", "1", "false", "0")') + return values + def gds_validate_datetime(self, input_data, node=None, input_name=''): + return input_data + def gds_format_datetime(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + @classmethod + def gds_parse_datetime(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = 
input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + time_parts = input_data.split('.') + if len(time_parts) > 1: + micro_seconds = int(float('0.' + time_parts[1]) * 1000000) + input_data = '%s.%s' % (time_parts[0], micro_seconds, ) + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt + def gds_validate_date(self, input_data, node=None, input_name=''): + return input_data + def gds_format_date(self, input_data, input_name=''): + _svalue = '%04d-%02d-%02d' % ( + input_data.year, + input_data.month, + input_data.day, + ) + try: + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format( + hours, minutes) + except AttributeError: + pass + return _svalue + @classmethod + def gds_parse_date(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + dt = 
datetime_.datetime.strptime(input_data, '%Y-%m-%d') + dt = dt.replace(tzinfo=tz) + return dt.date() + def gds_validate_time(self, input_data, node=None, input_name=''): return input_data + def gds_format_time(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%02d:%02d:%02d' % ( + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%02d:%02d:%02d.%s' % ( + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + def gds_validate_simple_patterns(self, patterns, target): + # pat is a list of lists of strings/patterns. 
We should: + # - AND the outer elements + # - OR the inner elements + found1 = True + for patterns1 in patterns: + found2 = False + for patterns2 in patterns1: + if re_.search(patterns2, target) is not None: + found2 = True + break + if not found2: + found1 = False + break + return found1 + @classmethod + def gds_parse_time(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + if len(input_data.split('.')) > 1: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt.time() def gds_str_lower(self, instring): return instring.lower() def get_path_(self, node): @@ -180,6 +413,38 @@ def get_class_obj_(self, node, default_class=None): return class_obj1 def gds_build_any(self, node, type_name=None): return None + @classmethod + def gds_reverse_node_mapping(cls, mapping): + return dict(((v, k) for k, v in mapping.iteritems())) + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring + @staticmethod + def convert_unicode(instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result + def __eq__(self, other): + if type(self) != type(other): + return False + return self.__dict__ == other.__dict__ + def __ne__(self, other): + return not self.__eq__(other) + + def 
getSubclassFromModule_(module, class_): + '''Get the subclass of a class from a specific module.''' + name = class_.__name__ + 'Sub' + if hasattr(module, name): + return getattr(module, name) + else: + return None # @@ -205,29 +470,50 @@ def gds_build_any(self, node, type_name=None): Tag_pattern_ = re_.compile(r'({.*})?(.*)') String_cleanup_pat_ = re_.compile(r"[\n\r\s]+") Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)') +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +# Change this to redirect the generated superclass module to use a +# specific subclass module. +CurrentSubclassModule_ = None # # Support/utility functions. # + def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write(' ') + def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." if not inStr: return '' - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') @@ -240,6 +526,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -255,6 +542,7 @@ def quote_python(inStr): else: return '"""%s"""' % s1 + def get_all_text_(node): if node.text is not None: text = node.text @@ -265,6 +553,7 @@ def get_all_text_(node): text += child.tail return text + def 
find_attr_value_(attr_name, node): attrs = node.attrib attr_parts = attr_name.split(':') @@ -282,11 +571,9 @@ def find_attr_value_(attr_name, node): class GDSParseError(Exception): pass + def raise_parse_error(node, msg): - if XMLParser_import_library == XMLParser_import_lxml: - msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) - else: - msg = '%s (element %s)' % (msg, node.tag, ) + msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) raise GDSParseError(msg) @@ -305,6 +592,7 @@ class MixedContainer: TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + TypeBase64 = 8 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type @@ -318,49 +606,104 @@ def getValue(self): return self.value def getName(self): return self.name - def export(self, outfile, level, name, namespace, pretty_print=True): + def export(self, outfile, level, name, namespace, + pretty_print=True): if self.category == MixedContainer.CategoryText: # Prevent exporting empty content as empty lines. 
- if self.value.strip(): + if self.value.strip(): outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace, name, pretty_print) + self.value.export( + outfile, level, namespace, name, + pretty_print=pretty_print) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) + outfile.write('<%s>%s' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) + outfile.write('<%s>%d' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) + outfile.write('<%s>%f' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeBase64: + outfile.write('<%s>%s' % ( + self.name, + base64.b64encode(self.value), + self.name)) + def to_etree(self, element): + if self.category == MixedContainer.CategoryText: + # Prevent exporting empty content as empty lines. 
+ if self.value.strip(): + if len(element) > 0: + if element[-1].tail is None: + element[-1].tail = self.value + else: + element[-1].tail += self.value + else: + if element.text is None: + element.text = self.value + else: + element.text += self.value + elif self.category == MixedContainer.CategorySimple: + subelement = etree_.SubElement( + element, '%s' % self.name) + subelement.text = self.to_etree_simple() + else: # category == MixedContainer.CategoryComplex + self.value.to_etree(element) + def to_etree_simple(self): + if self.content_type == MixedContainer.TypeString: + text = self.value + elif (self.content_type == MixedContainer.TypeInteger or + self.content_type == MixedContainer.TypeBoolean): + text = '%d' % self.value + elif (self.content_type == MixedContainer.TypeFloat or + self.content_type == MixedContainer.TypeDecimal): + text = '%f' % self.value elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) + text = '%g' % self.value + elif self.content_type == MixedContainer.TypeBase64: + text = '%s' % base64.b64encode(self.value) + return text def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \ - 
(self.category, self.content_type, self.name,)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s",\n' % ( + self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class MemberSpec_(object): - def __init__(self, name='', data_type='', container=0): + def __init__(self, name='', data_type='', container=0, + optional=0, child_attrs=None, choice=None): self.name = name self.data_type = data_type self.container = container + self.child_attrs = child_attrs + self.choice = choice + self.optional = optional def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -375,6 +718,13 @@ def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container + def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs + def get_child_attrs(self): return self.child_attrs + def set_choice(self, choice): self.choice = choice + def get_choice(self): return self.choice + def set_optional(self, optional): self.optional = optional + def get_optional(self): return self.optional + def _cast(typ, value): if typ is None or value is None: @@ -385,13 +735,15 @@ def _cast(typ, value): # Data representation classes. 
# + class softwareassembly(GeneratedsSuper): subclass = None superclass = None - def __init__(self, version=None, id_=None, name=None, description=None, componentfiles=None, partitioning=None, assemblycontroller=None, connections=None, externalports=None, externalproperties=None, usesdevicedependencies=None): - self.version = _cast(None, version) + def __init__(self, id_=None, name=None, version=None, description=None, componentfiles=None, partitioning=None, assemblycontroller=None, connections=None, externalports=None, externalproperties=None, options=None, usesdevicedependencies=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.name = _cast(None, name) + self.version = _cast(None, version) self.description = description self.componentfiles = componentfiles self.partitioning = partitioning @@ -399,11 +751,17 @@ def __init__(self, version=None, id_=None, name=None, description=None, componen self.connections = connections self.externalports = externalports self.externalproperties = externalproperties + self.options = options if usesdevicedependencies is None: self.usesdevicedependencies = [] else: self.usesdevicedependencies = usesdevicedependencies def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, softwareassembly) + if subclass is not None: + return subclass(*args_, **kwargs_) if softwareassembly.subclass: return softwareassembly.subclass(*args_, **kwargs_) else: @@ -430,46 +788,70 @@ def set_externalports(self, externalports): self.externalports = externalports def get_externalproperties(self): return self.externalproperties def set_externalproperties(self, externalproperties): self.externalproperties = externalproperties externalpropertiesProp = property(get_externalproperties, set_externalproperties) + def get_options(self): return self.options + def set_options(self, options): self.options = options + optionsProp = property(get_options, set_options) def 
get_usesdevicedependencies(self): return self.usesdevicedependencies def set_usesdevicedependencies(self, usesdevicedependencies): self.usesdevicedependencies = usesdevicedependencies def add_usesdevicedependencies(self, value): self.usesdevicedependencies.append(value) - def insert_usesdevicedependencies(self, index, value): self.usesdevicedependencies[index] = value + def insert_usesdevicedependencies_at(self, index, value): self.usesdevicedependencies.insert(index, value) + def replace_usesdevicedependencies_at(self, index, value): self.usesdevicedependencies[index] = value usesdevicedependenciesProp = property(get_usesdevicedependencies, set_usesdevicedependencies) - def get_version(self): return self.version - def set_version(self, version): self.version = version - versionProp = property(get_version, set_version) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def get_version(self): return self.version + def set_version(self, version): self.version = version + versionProp = property(get_version, set_version) + def hasContent_(self): + if ( + self.description is not None or + self.componentfiles is not None or + self.partitioning is not None or + self.assemblycontroller is not None or + self.connections is not None or + self.externalports is not None or + self.externalproperties is not None or + self.options is not None or + self.usesdevicedependencies + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='softwareassembly', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('softwareassembly') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = 
self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='softwareassembly') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='softwareassembly', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='softwareassembly'): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) + if self.version is not None and 'version' not in already_processed: + already_processed.add('version') + outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')), )) def 
exportChildren(self, outfile, level, namespace_='', name_='softwareassembly', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -477,7 +859,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='softwareassembly' eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.componentfiles is not None: self.componentfiles.export(outfile, level, namespace_, name_='componentfiles', pretty_print=pretty_print) if self.partitioning is not None: @@ -490,110 +872,30 @@ def exportChildren(self, outfile, level, namespace_='', name_='softwareassembly' self.externalports.export(outfile, level, namespace_, name_='externalports', pretty_print=pretty_print) if self.externalproperties is not None: self.externalproperties.export(outfile, level, namespace_, name_='externalproperties', pretty_print=pretty_print) + if self.options is not None: + self.options.export(outfile, level, namespace_, name_='options', pretty_print=pretty_print) for usesdevicedependencies_ in self.usesdevicedependencies: usesdevicedependencies_.export(outfile, level, namespace_, name_='usesdevicedependencies', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.componentfiles is not None or - self.partitioning is not None or - self.assemblycontroller is not None or - self.connections is not None or - self.externalports is not None or - self.externalproperties is not None or - self.usesdevicedependencies - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='softwareassembly'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if 
self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.componentfiles is not None: - showIndent(outfile, level) - outfile.write('componentfiles=model_.componentfiles(\n') - self.componentfiles.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.partitioning is not None: - showIndent(outfile, level) - outfile.write('partitioning=model_.partitioning(\n') - self.partitioning.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.assemblycontroller is not None: - showIndent(outfile, level) - outfile.write('assemblycontroller=model_.assemblycontroller(\n') - self.assemblycontroller.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.connections is not None: - showIndent(outfile, level) - outfile.write('connections=model_.connections(\n') - self.connections.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.externalports is not None: - showIndent(outfile, level) - outfile.write('externalports=model_.externalports(\n') - self.externalports.exportLiteral(outfile, level) 
- showIndent(outfile, level) - outfile.write('),\n') - if self.externalproperties is not None: - showIndent(outfile, level) - outfile.write('externalproperties=model_.externalproperties(\n') - self.externalproperties.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('usesdevicedependencies=[\n') - level += 1 - for usesdevicedependencies_ in self.usesdevicedependencies: - showIndent(outfile, level) - outfile.write('model_.usesdevicedependencies(\n') - usesdevicedependencies_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('version', node) - if value is not None and 'version' not in already_processed: - already_processed.append('version') - self.version = value value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value + value = find_attr_value_('version', node) + if value is not None and 'version' not in already_processed: + already_processed.add('version') + self.version = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': description_ = child_.text @@ -602,31 +904,43 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentfiles': 
obj_ = componentfiles.factory() obj_.build(child_) - self.set_componentfiles(obj_) + self.componentfiles = obj_ + obj_.original_tagname_ = 'componentfiles' elif nodeName_ == 'partitioning': obj_ = partitioning.factory() obj_.build(child_) - self.set_partitioning(obj_) + self.partitioning = obj_ + obj_.original_tagname_ = 'partitioning' elif nodeName_ == 'assemblycontroller': obj_ = assemblycontroller.factory() obj_.build(child_) - self.set_assemblycontroller(obj_) + self.assemblycontroller = obj_ + obj_.original_tagname_ = 'assemblycontroller' elif nodeName_ == 'connections': obj_ = connections.factory() obj_.build(child_) - self.set_connections(obj_) + self.connections = obj_ + obj_.original_tagname_ = 'connections' elif nodeName_ == 'externalports': obj_ = externalports.factory() obj_.build(child_) - self.set_externalports(obj_) + self.externalports = obj_ + obj_.original_tagname_ = 'externalports' elif nodeName_ == 'externalproperties': obj_ = externalproperties.factory() obj_.build(child_) - self.set_externalproperties(obj_) + self.externalproperties = obj_ + obj_.original_tagname_ = 'externalproperties' + elif nodeName_ == 'options': + obj_ = options.factory() + obj_.build(child_) + self.options = obj_ + obj_.original_tagname_ = 'options' elif nodeName_ == 'usesdevicedependencies': obj_ = usesdevicedependencies.factory() obj_.build(child_) self.usesdevicedependencies.append(obj_) + obj_.original_tagname_ = 'usesdevicedependencies' # end class softwareassembly @@ -634,11 +948,17 @@ class componentfiles(GeneratedsSuper): subclass = None superclass = None def __init__(self, componentfile=None): + self.original_tagname_ = None if componentfile is None: self.componentfile = [] else: self.componentfile = componentfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentfiles) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentfiles.subclass: return 
componentfiles.subclass(*args_, **kwargs_) else: @@ -647,20 +967,33 @@ def factory(*args_, **kwargs_): def get_componentfile(self): return self.componentfile def set_componentfile(self, componentfile): self.componentfile = componentfile def add_componentfile(self, value): self.componentfile.append(value) - def insert_componentfile(self, index, value): self.componentfile[index] = value + def insert_componentfile_at(self, index, value): self.componentfile.insert(index, value) + def replace_componentfile_at(self, index, value): self.componentfile[index] = value componentfileProp = property(get_componentfile, set_componentfile) + def hasContent_(self): + if ( + self.componentfile + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentfiles', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentfiles') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentfiles') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentfiles', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -674,38 +1007,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentfiles', eol_ = '' for componentfile_ in self.componentfile: componentfile_.export(outfile, level, namespace_, name_='componentfile', pretty_print=pretty_print) - def 
hasContent_(self): - if ( - self.componentfile - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentfiles'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('componentfile=[\n') - level += 1 - for componentfile_ in self.componentfile: - showIndent(outfile, level) - outfile.write('model_.componentfile(\n') - componentfile_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -713,17 +1021,24 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = componentfile.factory() obj_.build(child_) self.componentfile.append(obj_) + obj_.original_tagname_ = 'componentfile' # end class componentfiles class componentfile(GeneratedsSuper): subclass = None superclass = None - def __init__(self, type_=None, id_=None, localfile=None): - self.type_ = _cast(None, type_) + def __init__(self, id_=None, type_=None, localfile=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) + self.type_ = _cast(None, type_) self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentfile) + if subclass is not 
None: + return subclass(*args_, **kwargs_) if componentfile.subclass: return componentfile.subclass(*args_, **kwargs_) else: @@ -732,35 +1047,47 @@ def factory(*args_, **kwargs_): def get_localfile(self): return self.localfile def set_localfile(self, localfile): self.localfile = localfile localfileProp = property(get_localfile, set_localfile) + def get_id(self): return self.id_ + def set_id(self, id_): self.id_ = id_ + idProp = property(get_id, set_id) def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) - def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id - idProp = property(get_id, set_id) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentfile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentfile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentfile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentfile', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentfile'): - if self.type_ is not None and 'type_' not in already_processed: - 
already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.type_ is not None and 'type_' not in already_processed: + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentfile', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -768,53 +1095,28 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentfile', f eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentfile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - 
outfile.write('localfile=model_.localfile(\n') - self.localfile.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('type', node) - if value is not None and 'type' not in already_processed: - already_processed.append('type') - self.type_ = value value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value + value = find_attr_value_('type', node) + if value is not None and 'type' not in already_processed: + already_processed.add('type') + self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localfile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class componentfile @@ -822,9 +1124,14 @@ class localfile(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, localfile) + if subclass is not None: + return subclass(*args_, **kwargs_) if localfile.subclass: return localfile.subclass(*args_, **kwargs_) else: @@ -833,55 +1140,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, 
namespace_='', name_='localfile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('localfile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='localfile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='localfile', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='localfile'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='localfile', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='localfile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - 
showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -892,6 +1194,7 @@ class partitioning(GeneratedsSuper): subclass = None superclass = None def __init__(self, componentplacement=None, hostcollocation=None): + self.original_tagname_ = None if componentplacement is None: self.componentplacement = [] else: @@ -901,6 +1204,11 @@ def __init__(self, componentplacement=None, hostcollocation=None): else: self.hostcollocation = hostcollocation def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, partitioning) + if subclass is not None: + return subclass(*args_, **kwargs_) if partitioning.subclass: return partitioning.subclass(*args_, **kwargs_) else: @@ -909,25 +1217,40 @@ def factory(*args_, **kwargs_): def get_componentplacement(self): return self.componentplacement def set_componentplacement(self, componentplacement): self.componentplacement = componentplacement def add_componentplacement(self, value): self.componentplacement.append(value) - def insert_componentplacement(self, index, value): self.componentplacement[index] = value + def insert_componentplacement_at(self, index, value): self.componentplacement.insert(index, value) + def replace_componentplacement_at(self, index, value): 
self.componentplacement[index] = value componentplacementProp = property(get_componentplacement, set_componentplacement) def get_hostcollocation(self): return self.hostcollocation def set_hostcollocation(self, hostcollocation): self.hostcollocation = hostcollocation def add_hostcollocation(self, value): self.hostcollocation.append(value) - def insert_hostcollocation(self, index, value): self.hostcollocation[index] = value + def insert_hostcollocation_at(self, index, value): self.hostcollocation.insert(index, value) + def replace_hostcollocation_at(self, index, value): self.hostcollocation[index] = value hostcollocationProp = property(get_hostcollocation, set_hostcollocation) + def hasContent_(self): + if ( + self.componentplacement or + self.hostcollocation + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='partitioning', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('partitioning') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='partitioning') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='partitioning', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -943,51 +1266,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='partitioning', fr componentplacement_.export(outfile, level, namespace_, name_='componentplacement', 
pretty_print=pretty_print) for hostcollocation_ in self.hostcollocation: hostcollocation_.export(outfile, level, namespace_, name_='hostcollocation', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.componentplacement or - self.hostcollocation - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='partitioning'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('componentplacement=[\n') - level += 1 - for componentplacement_ in self.componentplacement: - showIndent(outfile, level) - outfile.write('model_.componentplacement(\n') - componentplacement_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('hostcollocation=[\n') - level += 1 - for hostcollocation_ in self.hostcollocation: - showIndent(outfile, level) - outfile.write('model_.hostcollocation(\n') - hostcollocation_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -995,10 +1280,12 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = componentplacement.factory() obj_.build(child_) 
self.componentplacement.append(obj_) + obj_.original_tagname_ = 'componentplacement' elif nodeName_ == 'hostcollocation': obj_ = hostcollocation.factory() obj_.build(child_) self.hostcollocation.append(obj_) + obj_.original_tagname_ = 'hostcollocation' # end class partitioning @@ -1006,12 +1293,18 @@ class componentplacement(GeneratedsSuper): subclass = None superclass = None def __init__(self, componentfileref=None, componentinstantiation=None): + self.original_tagname_ = None self.componentfileref = componentfileref if componentinstantiation is None: self.componentinstantiation = [] else: self.componentinstantiation = componentinstantiation def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentplacement) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentplacement.subclass: return componentplacement.subclass(*args_, **kwargs_) else: @@ -1023,20 +1316,34 @@ def set_componentfileref(self, componentfileref): self.componentfileref = compon def get_componentinstantiation(self): return self.componentinstantiation def set_componentinstantiation(self, componentinstantiation): self.componentinstantiation = componentinstantiation def add_componentinstantiation(self, value): self.componentinstantiation.append(value) - def insert_componentinstantiation(self, index, value): self.componentinstantiation[index] = value + def insert_componentinstantiation_at(self, index, value): self.componentinstantiation.insert(index, value) + def replace_componentinstantiation_at(self, index, value): self.componentinstantiation[index] = value componentinstantiationProp = property(get_componentinstantiation, set_componentinstantiation) + def hasContent_(self): + if ( + self.componentfileref is not None or + self.componentinstantiation + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentplacement', namespacedef_='', 
pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentplacement') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentplacement') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentplacement', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1052,56 +1359,26 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentplacemen self.componentfileref.export(outfile, level, namespace_, name_='componentfileref', pretty_print=pretty_print) for componentinstantiation_ in self.componentinstantiation: componentinstantiation_.export(outfile, level, namespace_, name_='componentinstantiation', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.componentfileref is not None or - self.componentinstantiation - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentplacement'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.componentfileref is not None: - showIndent(outfile, level) - outfile.write('componentfileref=model_.componentfileref(\n') - self.componentfileref.exportLiteral(outfile, 
level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('componentinstantiation=[\n') - level += 1 - for componentinstantiation_ in self.componentinstantiation: - showIndent(outfile, level) - outfile.write('model_.componentinstantiation(\n') - componentinstantiation_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'componentfileref': obj_ = componentfileref.factory() obj_.build(child_) - self.set_componentfileref(obj_) + self.componentfileref = obj_ + obj_.original_tagname_ = 'componentfileref' elif nodeName_ == 'componentinstantiation': obj_ = componentinstantiation.factory() obj_.build(child_) self.componentinstantiation.append(obj_) + obj_.original_tagname_ = 'componentinstantiation' # end class componentplacement @@ -1109,9 +1386,14 @@ class componentfileref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentfileref) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentfileref.subclass: return componentfileref.subclass(*args_, **kwargs_) else: @@ -1120,55 +1402,50 @@ def factory(*args_, **kwargs_): def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, 
set_refid) - def export(self, outfile, level, namespace_='', name_='componentfileref', namespacedef_='', pretty_print=True): - if pretty_print: - eol_ = '\n' - else: + def hasContent_(self): + if ( + + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='componentfileref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentfileref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentfileref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentfileref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentfileref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentfileref', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, 
name_='componentfileref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1178,13 +1455,22 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class componentinstantiation(GeneratedsSuper): subclass = None superclass = None - def __init__(self, id_=None, startorder=None, usagename=None, componentproperties=None, findcomponent=None): + def __init__(self, id_=None, startorder=None, usagename=None, componentproperties=None, affinity=None, loggingconfig=None, findcomponent=None, devicerequires=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.startorder = _cast(None, startorder) self.usagename = usagename self.componentproperties = componentproperties + self.affinity = affinity + self.loggingconfig = loggingconfig self.findcomponent = findcomponent + self.devicerequires = devicerequires def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + 
CurrentSubclassModule_, componentinstantiation) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentinstantiation.subclass: return componentinstantiation.subclass(*args_, **kwargs_) else: @@ -1196,38 +1482,64 @@ def set_usagename(self, usagename): self.usagename = usagename def get_componentproperties(self): return self.componentproperties def set_componentproperties(self, componentproperties): self.componentproperties = componentproperties componentpropertiesProp = property(get_componentproperties, set_componentproperties) + def get_affinity(self): return self.affinity + def set_affinity(self, affinity): self.affinity = affinity + affinityProp = property(get_affinity, set_affinity) + def get_loggingconfig(self): return self.loggingconfig + def set_loggingconfig(self, loggingconfig): self.loggingconfig = loggingconfig + loggingconfigProp = property(get_loggingconfig, set_loggingconfig) def get_findcomponent(self): return self.findcomponent def set_findcomponent(self, findcomponent): self.findcomponent = findcomponent findcomponentProp = property(get_findcomponent, set_findcomponent) + def get_devicerequires(self): return self.devicerequires + def set_devicerequires(self, devicerequires): self.devicerequires = devicerequires + devicerequiresProp = property(get_devicerequires, set_devicerequires) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_startorder(self): return self.startorder def set_startorder(self, startorder): self.startorder = startorder startorderProp = property(get_startorder, set_startorder) + def hasContent_(self): + if ( + self.usagename is not None or + self.componentproperties is not None or + self.affinity is not None or + self.loggingconfig is not None or + self.findcomponent is not None or + self.devicerequires is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', 
name_='componentinstantiation', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentinstantiation') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentinstantiation') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentinstantiation', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentinstantiation'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) if self.startorder is not None and 'startorder' not in already_processed: - already_processed.append('startorder') - outfile.write(' startorder=%s' % (self.gds_format_string(quote_attrib(self.startorder).encode(ExternalEncoding), input_name='startorder'), )) + already_processed.add('startorder') + outfile.write(' startorder=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.startorder), input_name='startorder')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentinstantiation', 
fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1235,63 +1547,32 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentinstanti eol_ = '' if self.usagename is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%susagename>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.usagename).encode(ExternalEncoding), input_name='usagename'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.usagename), input_name='usagename')), eol_)) if self.componentproperties is not None: self.componentproperties.export(outfile, level, namespace_, name_='componentproperties', pretty_print=pretty_print) + if self.affinity is not None: + self.affinity.export(outfile, level, namespace_, name_='affinity', pretty_print=pretty_print) + if self.loggingconfig is not None: + self.loggingconfig.export(outfile, level, namespace_, name_='loggingconfig', pretty_print=pretty_print) if self.findcomponent is not None: self.findcomponent.export(outfile, level, namespace_, name_='findcomponent', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.usagename is not None or - self.componentproperties is not None or - self.findcomponent is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentinstantiation'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.startorder is not None and 'startorder' not in already_processed: - already_processed.append('startorder') - showIndent(outfile, level) - outfile.write('startorder = "%s",\n' % (self.startorder,)) - def 
exportLiteralChildren(self, outfile, level, name_): - if self.usagename is not None: - showIndent(outfile, level) - outfile.write('usagename=%s,\n' % quote_python(self.usagename).encode(ExternalEncoding)) - if self.componentproperties is not None: - showIndent(outfile, level) - outfile.write('componentproperties=model_.componentproperties(\n') - self.componentproperties.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findcomponent is not None: - showIndent(outfile, level) - outfile.write('findcomponent=model_.findcomponent(\n') - self.findcomponent.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') + if self.devicerequires is not None: + self.devicerequires.export(outfile, level, namespace_, name_='devicerequires', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value value = find_attr_value_('startorder', node) if value is not None and 'startorder' not in already_processed: - already_processed.append('startorder') + already_processed.add('startorder') self.startorder = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'usagename': @@ -1301,18 +1582,111 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentproperties': obj_ = componentproperties.factory() obj_.build(child_) - self.set_componentproperties(obj_) + self.componentproperties = obj_ + obj_.original_tagname_ = 'componentproperties' + elif nodeName_ == 
'affinity': + obj_ = affinity.factory() + obj_.build(child_) + self.affinity = obj_ + obj_.original_tagname_ = 'affinity' + elif nodeName_ == 'loggingconfig': + obj_ = loggingconfig.factory() + obj_.build(child_) + self.loggingconfig = obj_ + obj_.original_tagname_ = 'loggingconfig' elif nodeName_ == 'findcomponent': obj_ = findcomponent.factory() obj_.build(child_) - self.set_findcomponent(obj_) + self.findcomponent = obj_ + obj_.original_tagname_ = 'findcomponent' + elif nodeName_ == 'devicerequires': + obj_ = devicerequires.factory() + obj_.build(child_) + self.devicerequires = obj_ + obj_.original_tagname_ = 'devicerequires' # end class componentinstantiation -class componentproperties(GeneratedsSuper): +class loggingconfig(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, level=None, valueOf_=None): + self.original_tagname_ = None + self.level = _cast(None, level) + self.value = valueOf_ + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, loggingconfig) + if subclass is not None: + return subclass(*args_, **kwargs_) + if loggingconfig.subclass: + return loggingconfig.subclass(*args_, **kwargs_) + else: + return loggingconfig(*args_, **kwargs_) + factory = staticmethod(factory) + def get_level(self): return self.level + def set_level(self, level): self.level = level + levelProp = property(get_level, set_level) + def get_value(self): return self.value + def set_value(self, valueOf_): self.value = valueOf_ + def hasContent_(self): + if ( + (1 if type(self.value ) in [int,float] else self.value) + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='loggingconfig', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('loggingconfig') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if 
self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='loggingconfig') + if self.hasContent_(): + outfile.write('>') + outfile.write(self.convert_unicode(self.value)) + self.exportChildren(outfile, level + 1, namespace_='', name_='loggingconfig', pretty_print=pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='loggingconfig'): + if self.level is not None and 'level' not in already_processed: + already_processed.add('level') + outfile.write(' level=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.level), input_name='level')), )) + def exportChildren(self, outfile, level, namespace_='', name_='loggingconfig', fromsubclass_=False, pretty_print=True): + pass + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + self.value = get_all_text_(node) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + value = find_attr_value_('level', node) + if value is not None and 'level' not in already_processed: + already_processed.add('level') + self.level = value + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + pass +# end class loggingconfig + + +class affinity(GeneratedsSuper): subclass = None superclass = None def __init__(self, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): + self.original_tagname_ = None if simpleref is None: self.simpleref = [] else: @@ -1330,50 +1704,74 @@ def __init__(self, simpleref=None, 
simplesequenceref=None, structref=None, struc else: self.structsequenceref = structsequenceref def factory(*args_, **kwargs_): - if componentproperties.subclass: - return componentproperties.subclass(*args_, **kwargs_) - else: - return componentproperties(*args_, **kwargs_) + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, affinity) + if subclass is not None: + return subclass(*args_, **kwargs_) + if affinity.subclass: + return affinity.subclass(*args_, **kwargs_) + else: + return affinity(*args_, **kwargs_) factory = staticmethod(factory) def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) def get_simplesequenceref(self): return self.simplesequenceref def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref def add_simplesequenceref(self, value): self.simplesequenceref.append(value) - def insert_simplesequenceref(self, index, value): self.simplesequenceref[index] = value + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) def get_structref(self): return self.structref def set_structref(self, structref): self.structref = structref def add_structref(self, value): self.structref.append(value) - def insert_structref(self, index, value): self.structref[index] = value + def insert_structref_at(self, index, value): self.structref.insert(index, value) + def 
replace_structref_at(self, index, value): self.structref[index] = value structrefProp = property(get_structref, set_structref) def get_structsequenceref(self): return self.structsequenceref def set_structsequenceref(self, structsequenceref): self.structsequenceref = structsequenceref def add_structsequenceref(self, value): self.structsequenceref.append(value) - def insert_structsequenceref(self, index, value): self.structsequenceref[index] = value + def insert_structsequenceref_at(self, index, value): self.structsequenceref.insert(index, value) + def replace_structsequenceref_at(self, index, value): self.structsequenceref[index] = value structsequencerefProp = property(get_structsequenceref, set_structsequenceref) - def export(self, outfile, level, namespace_='', name_='componentproperties', namespacedef_='', pretty_print=True): + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref or + self.structref or + self.structsequenceref + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='affinity', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('affinity') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] - self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentproperties') + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='affinity') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='affinity', pretty_print=pretty_print) 
showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) - def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentproperties'): + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='affinity'): pass - def exportChildren(self, outfile, level, namespace_='', name_='componentproperties', fromsubclass_=False, pretty_print=True): + def exportChildren(self, outfile, level, namespace_='', name_='affinity', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: @@ -1386,77 +1784,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentproperti structref_.export(outfile, level, namespace_, name_='structref', pretty_print=pretty_print) for structsequenceref_ in self.structsequenceref: structsequenceref_.export(outfile, level, namespace_, name_='structsequenceref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref or - self.simplesequenceref or - self.structref or - self.structsequenceref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentproperties'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simplesequenceref=[\n') - level += 1 - for simplesequenceref_ in self.simplesequenceref: - 
showIndent(outfile, level) - outfile.write('model_.simplesequenceref(\n') - simplesequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structref=[\n') - level += 1 - for structref_ in self.structref: - showIndent(outfile, level) - outfile.write('model_.structref(\n') - structref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structsequenceref=[\n') - level += 1 - for structsequenceref_ in self.structsequenceref: - showIndent(outfile, level) - outfile.write('model_.structsequenceref(\n') - structsequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -1464,264 +1798,557 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' elif nodeName_ == 'simplesequenceref': obj_ = simplesequenceref.factory() obj_.build(child_) self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' elif nodeName_ == 'structref': obj_ = structref.factory() obj_.build(child_) self.structref.append(obj_) + obj_.original_tagname_ = 'structref' elif nodeName_ == 'structsequenceref': obj_ = structsequenceref.factory() 
obj_.build(child_) self.structsequenceref.append(obj_) -# end class componentproperties + obj_.original_tagname_ = 'structsequenceref' +# end class affinity -class findcomponent(GeneratedsSuper): +class devicerequires(GeneratedsSuper): subclass = None superclass = None - def __init__(self, componentresourcefactoryref=None, namingservice=None): - self.componentresourcefactoryref = componentresourcefactoryref - self.namingservice = namingservice - def factory(*args_, **kwargs_): - if findcomponent.subclass: - return findcomponent.subclass(*args_, **kwargs_) + def __init__(self, requires=None): + self.original_tagname_ = None + if requires is None: + self.requires = [] else: - return findcomponent(*args_, **kwargs_) + self.requires = requires + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, devicerequires) + if subclass is not None: + return subclass(*args_, **kwargs_) + if devicerequires.subclass: + return devicerequires.subclass(*args_, **kwargs_) + else: + return devicerequires(*args_, **kwargs_) factory = staticmethod(factory) - def get_componentresourcefactoryref(self): return self.componentresourcefactoryref - def set_componentresourcefactoryref(self, componentresourcefactoryref): self.componentresourcefactoryref = componentresourcefactoryref - componentresourcefactoryrefProp = property(get_componentresourcefactoryref, set_componentresourcefactoryref) - def get_namingservice(self): return self.namingservice - def set_namingservice(self, namingservice): self.namingservice = namingservice - namingserviceProp = property(get_namingservice, set_namingservice) - def export(self, outfile, level, namespace_='', name_='findcomponent', namespacedef_='', pretty_print=True): + def get_requires(self): return self.requires + def set_requires(self, requires): self.requires = requires + def add_requires(self, value): self.requires.append(value) + def insert_requires_at(self, index, value): 
self.requires.insert(index, value) + def replace_requires_at(self, index, value): self.requires[index] = value + requiresProp = property(get_requires, set_requires) + def hasContent_(self): + if ( + self.requires + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='devicerequires', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('devicerequires') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] - self.exportAttributes(outfile, level, already_processed, namespace_, name_='findcomponent') + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='devicerequires') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='devicerequires', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) - def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='findcomponent'): + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='devicerequires'): pass - def exportChildren(self, outfile, level, namespace_='', name_='findcomponent', fromsubclass_=False, pretty_print=True): + def exportChildren(self, outfile, level, namespace_='', name_='devicerequires', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' - if self.componentresourcefactoryref is not None: - self.componentresourcefactoryref.export(outfile, level, 
namespace_, name_='componentresourcefactoryref', pretty_print=pretty_print) - if self.namingservice is not None: - self.namingservice.export(outfile, level, namespace_, name_='namingservice', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.componentresourcefactoryref is not None or - self.namingservice is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='findcomponent'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.componentresourcefactoryref is not None: - showIndent(outfile, level) - outfile.write('componentresourcefactoryref=model_.componentresourcefactoryref(\n') - self.componentresourcefactoryref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.namingservice is not None: - showIndent(outfile, level) - outfile.write('namingservice=model_.namingservice(\n') - self.namingservice.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') + for requires_ in self.requires: + requires_.export(outfile, level, namespace_, name_='requires', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): - if nodeName_ == 'componentresourcefactoryref': - obj_ = componentresourcefactoryref.factory() - obj_.build(child_) - self.set_componentresourcefactoryref(obj_) - elif nodeName_ == 'namingservice': - obj_ 
= namingservice.factory() + if nodeName_ == 'requires': + obj_ = idvalue.factory() obj_.build(child_) - self.set_namingservice(obj_) -# end class findcomponent + self.requires.append(obj_) + obj_.original_tagname_ = 'requires' +# end class devicerequires -class componentresourcefactoryref(GeneratedsSuper): +class idvalue(GeneratedsSuper): subclass = None superclass = None - def __init__(self, refid=None, resourcefactoryproperties=None): - self.refid = _cast(None, refid) - self.resourcefactoryproperties = resourcefactoryproperties + def __init__(self, id_=None, value=None): + self.original_tagname_ = None + self.id = _cast(None, id_) + self.value = _cast(None, value) def factory(*args_, **kwargs_): - if componentresourcefactoryref.subclass: - return componentresourcefactoryref.subclass(*args_, **kwargs_) - else: - return componentresourcefactoryref(*args_, **kwargs_) + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, idvalue) + if subclass is not None: + return subclass(*args_, **kwargs_) + if idvalue.subclass: + return idvalue.subclass(*args_, **kwargs_) + else: + return idvalue(*args_, **kwargs_) factory = staticmethod(factory) - def get_resourcefactoryproperties(self): return self.resourcefactoryproperties - def set_resourcefactoryproperties(self, resourcefactoryproperties): self.resourcefactoryproperties = resourcefactoryproperties - resourcefactorypropertiesProp = property(get_resourcefactoryproperties, set_resourcefactoryproperties) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - refidProp = property(get_refid, set_refid) - def export(self, outfile, level, namespace_='', name_='componentresourcefactoryref', namespacedef_='', pretty_print=True): + def get_id(self): return self.id + def set_id(self, id_): self.id = id_ + idProp = property(get_id, set_id) + def get_value(self): return self.value + def set_value(self, value): self.value = value + valueProp = 
property(get_value, set_value) + def hasContent_(self): + if ( + + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='idvalue', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('idvalue') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] - self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentresourcefactoryref') + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='idvalue') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) - showIndent(outfile, level, pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='idvalue', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) - def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentresourcefactoryref'): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='componentresourcefactoryref', fromsubclass_=False, pretty_print=True): - if pretty_print: - eol_ = '\n' - else: - eol_ = '' - if self.resourcefactoryproperties is not None: - self.resourcefactoryproperties.export(outfile, level, namespace_, name_='resourcefactoryproperties', pretty_print=pretty_print) - def hasContent_(self): - if ( - 
self.resourcefactoryproperties is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentresourcefactoryref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.resourcefactoryproperties is not None: - showIndent(outfile, level) - outfile.write('resourcefactoryproperties=model_.resourcefactoryproperties(\n') - self.resourcefactoryproperties.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='idvalue'): + if self.id is not None and 'id' not in already_processed: + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id), input_name='id')), )) + if self.value is not None and 'value' not in already_processed: + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) + def exportChildren(self, outfile, level, namespace_='', name_='idvalue', fromsubclass_=False, pretty_print=True): + pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('refid', node) - if value is not None and 'refid' not in already_processed: 
- already_processed.append('refid') - self.refid = value + value = find_attr_value_('id', node) + if value is not None and 'id' not in already_processed: + already_processed.add('id') + self.id = value + value = find_attr_value_('value', node) + if value is not None and 'value' not in already_processed: + already_processed.add('value') + self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): - if nodeName_ == 'resourcefactoryproperties': - obj_ = resourcefactoryproperties.factory() - obj_.build(child_) - self.set_resourcefactoryproperties(obj_) -# end class componentresourcefactoryref + pass +# end class idvalue -class devicethatloadedthiscomponentref(GeneratedsSuper): +class componentproperties(GeneratedsSuper): subclass = None superclass = None - def __init__(self, refid=None): - self.refid = _cast(None, refid) - pass - def factory(*args_, **kwargs_): - if devicethatloadedthiscomponentref.subclass: - return devicethatloadedthiscomponentref.subclass(*args_, **kwargs_) - else: - return devicethatloadedthiscomponentref(*args_, **kwargs_) - factory = staticmethod(factory) - def get_refid(self): return self.refid - def set_refid(self, refid): self.refid = refid - refidProp = property(get_refid, set_refid) - def export(self, outfile, level, namespace_='', name_='devicethatloadedthiscomponentref', namespacedef_='', pretty_print=True): - if pretty_print: - eol_ = '\n' + def __init__(self, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): + self.original_tagname_ = None + if simpleref is None: + self.simpleref = [] else: - eol_ = '' + self.simpleref = simpleref + if simplesequenceref is None: + self.simplesequenceref = [] + else: + self.simplesequenceref = simplesequenceref + if structref is None: + self.structref = [] + else: + self.structref = structref + if structsequenceref is None: + self.structsequenceref = [] + else: + self.structsequenceref = structsequenceref + def factory(*args_, **kwargs_): + if 
CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentproperties) + if subclass is not None: + return subclass(*args_, **kwargs_) + if componentproperties.subclass: + return componentproperties.subclass(*args_, **kwargs_) + else: + return componentproperties(*args_, **kwargs_) + factory = staticmethod(factory) + def get_simpleref(self): return self.simpleref + def set_simpleref(self, simpleref): self.simpleref = simpleref + def add_simpleref(self, value): self.simpleref.append(value) + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value + simplerefProp = property(get_simpleref, set_simpleref) + def get_simplesequenceref(self): return self.simplesequenceref + def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref + def add_simplesequenceref(self, value): self.simplesequenceref.append(value) + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value + simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) + def get_structref(self): return self.structref + def set_structref(self, structref): self.structref = structref + def add_structref(self, value): self.structref.append(value) + def insert_structref_at(self, index, value): self.structref.insert(index, value) + def replace_structref_at(self, index, value): self.structref[index] = value + structrefProp = property(get_structref, set_structref) + def get_structsequenceref(self): return self.structsequenceref + def set_structsequenceref(self, structsequenceref): self.structsequenceref = structsequenceref + def add_structsequenceref(self, value): self.structsequenceref.append(value) + def insert_structsequenceref_at(self, index, value): 
self.structsequenceref.insert(index, value) + def replace_structsequenceref_at(self, index, value): self.structsequenceref[index] = value + structsequencerefProp = property(get_structsequenceref, set_structsequenceref) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref or + self.structref or + self.structsequenceref + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='componentproperties', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentproperties') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] - self.exportAttributes(outfile, level, already_processed, namespace_, name_='devicethatloadedthiscomponentref') + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentproperties') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentproperties', pretty_print=pretty_print) + showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) - def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='devicethatloadedthiscomponentref'): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', 
name_='devicethatloadedthiscomponentref', fromsubclass_=False, pretty_print=True): + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentproperties'): pass + def exportChildren(self, outfile, level, namespace_='', name_='componentproperties', fromsubclass_=False, pretty_print=True): + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + for simpleref_ in self.simpleref: + simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) + for simplesequenceref_ in self.simplesequenceref: + simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) + for structref_ in self.structref: + structref_.export(outfile, level, namespace_, name_='structref', pretty_print=pretty_print) + for structsequenceref_ in self.structsequenceref: + structsequenceref_.export(outfile, level, namespace_, name_='structsequenceref', pretty_print=pretty_print) + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + pass + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'simpleref': + obj_ = simpleref.factory() + obj_.build(child_) + self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' + elif nodeName_ == 'simplesequenceref': + obj_ = simplesequenceref.factory() + obj_.build(child_) + self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' + elif nodeName_ == 'structref': + obj_ = structref.factory() + obj_.build(child_) + self.structref.append(obj_) + obj_.original_tagname_ = 'structref' + elif nodeName_ == 'structsequenceref': + obj_ = structsequenceref.factory() + obj_.build(child_) + self.structsequenceref.append(obj_) + 
obj_.original_tagname_ = 'structsequenceref' +# end class componentproperties + + +class findcomponent(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, componentresourcefactoryref=None, namingservice=None): + self.original_tagname_ = None + self.componentresourcefactoryref = componentresourcefactoryref + self.namingservice = namingservice + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, findcomponent) + if subclass is not None: + return subclass(*args_, **kwargs_) + if findcomponent.subclass: + return findcomponent.subclass(*args_, **kwargs_) + else: + return findcomponent(*args_, **kwargs_) + factory = staticmethod(factory) + def get_componentresourcefactoryref(self): return self.componentresourcefactoryref + def set_componentresourcefactoryref(self, componentresourcefactoryref): self.componentresourcefactoryref = componentresourcefactoryref + componentresourcefactoryrefProp = property(get_componentresourcefactoryref, set_componentresourcefactoryref) + def get_namingservice(self): return self.namingservice + def set_namingservice(self, namingservice): self.namingservice = namingservice + namingserviceProp = property(get_namingservice, set_namingservice) def hasContent_(self): if ( + self.componentresourcefactoryref is not None or + self.namingservice is not None + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='findcomponent', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('findcomponent') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + 
self.exportAttributes(outfile, level, already_processed, namespace_, name_='findcomponent') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='findcomponent', pretty_print=pretty_print) + showIndent(outfile, level, pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='findcomponent'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='findcomponent', fromsubclass_=False, pretty_print=True): + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.componentresourcefactoryref is not None: + self.componentresourcefactoryref.export(outfile, level, namespace_, name_='componentresourcefactoryref', pretty_print=pretty_print) + if self.namingservice is not None: + self.namingservice.export(outfile, level, namespace_, name_='namingservice', pretty_print=pretty_print) + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + pass + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'componentresourcefactoryref': + obj_ = componentresourcefactoryref.factory() + obj_.build(child_) + self.componentresourcefactoryref = obj_ + obj_.original_tagname_ = 'componentresourcefactoryref' + elif nodeName_ == 'namingservice': + obj_ = namingservice.factory() + obj_.build(child_) + self.namingservice = obj_ + obj_.original_tagname_ = 'namingservice' +# end class findcomponent - ): + +class componentresourcefactoryref(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None, resourcefactoryproperties=None): + self.original_tagname_ = None 
+ self.refid = _cast(None, refid) + self.resourcefactoryproperties = resourcefactoryproperties + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentresourcefactoryref) + if subclass is not None: + return subclass(*args_, **kwargs_) + if componentresourcefactoryref.subclass: + return componentresourcefactoryref.subclass(*args_, **kwargs_) + else: + return componentresourcefactoryref(*args_, **kwargs_) + factory = staticmethod(factory) + def get_resourcefactoryproperties(self): return self.resourcefactoryproperties + def set_resourcefactoryproperties(self, resourcefactoryproperties): self.resourcefactoryproperties = resourcefactoryproperties + resourcefactorypropertiesProp = property(get_resourcefactoryproperties, set_resourcefactoryproperties) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.resourcefactoryproperties is not None + ): return True else: return False - def exportLiteral(self, outfile, level, name_='devicethatloadedthiscomponentref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) + def export(self, outfile, level, namespace_='', name_='componentresourcefactoryref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentresourcefactoryref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentresourcefactoryref') if self.hasContent_(): - self.exportLiteralChildren(outfile, level, 
name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentresourcefactoryref', pretty_print=pretty_print) + showIndent(outfile, level, pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentresourcefactoryref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) + def exportChildren(self, outfile, level, namespace_='', name_='componentresourcefactoryref', fromsubclass_=False, pretty_print=True): + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.resourcefactoryproperties is not None: + self.resourcefactoryproperties.export(outfile, level, namespace_, name_='resourcefactoryproperties', pretty_print=pretty_print) + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + value = find_attr_value_('refid', node) + if value is not None and 'refid' not in already_processed: + already_processed.add('refid') + self.refid = value + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'resourcefactoryproperties': + obj_ = resourcefactoryproperties.factory() + obj_.build(child_) + self.resourcefactoryproperties = obj_ + obj_.original_tagname_ = 
'resourcefactoryproperties' +# end class componentresourcefactoryref + + +class devicethatloadedthiscomponentref(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None): + self.original_tagname_ = None + self.refid = _cast(None, refid) + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, devicethatloadedthiscomponentref) + if subclass is not None: + return subclass(*args_, **kwargs_) + if devicethatloadedthiscomponentref.subclass: + return devicethatloadedthiscomponentref.subclass(*args_, **kwargs_) + else: + return devicethatloadedthiscomponentref(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='devicethatloadedthiscomponentref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('devicethatloadedthiscomponentref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='devicethatloadedthiscomponentref') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='devicethatloadedthiscomponentref', pretty_print=pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', 
name_='devicethatloadedthiscomponentref'): + if self.refid is not None and 'refid' not in already_processed: + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) + def exportChildren(self, outfile, level, namespace_='', name_='devicethatloadedthiscomponentref', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1731,83 +2358,79 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class deviceusedbythiscomponentref(GeneratedsSuper): subclass = None superclass = None - def __init__(self, usesrefid=None, refid=None): - self.usesrefid = _cast(None, usesrefid) + def __init__(self, refid=None, usesrefid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass + self.usesrefid = _cast(None, usesrefid) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, deviceusedbythiscomponentref) + if subclass is not None: + return subclass(*args_, **kwargs_) if deviceusedbythiscomponentref.subclass: return deviceusedbythiscomponentref.subclass(*args_, **kwargs_) else: return deviceusedbythiscomponentref(*args_, **kwargs_) factory = staticmethod(factory) - def get_usesrefid(self): return self.usesrefid - def set_usesrefid(self, usesrefid): self.usesrefid = usesrefid 
- usesrefidProp = property(get_usesrefid, set_usesrefid) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def get_usesrefid(self): return self.usesrefid + def set_usesrefid(self, usesrefid): self.usesrefid = usesrefid + usesrefidProp = property(get_usesrefid, set_usesrefid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='deviceusedbythiscomponentref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('deviceusedbythiscomponentref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='deviceusedbythiscomponentref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='deviceusedbythiscomponentref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='deviceusedbythiscomponentref'): - if self.usesrefid is not None and 'usesrefid' not in already_processed: - already_processed.append('usesrefid') - outfile.write(' usesrefid=%s' % (self.gds_format_string(quote_attrib(self.usesrefid).encode(ExternalEncoding), input_name='usesrefid'), )) if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % 
(self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='deviceusedbythiscomponentref', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='deviceusedbythiscomponentref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) if self.usesrefid is not None and 'usesrefid' not in already_processed: - already_processed.append('usesrefid') - showIndent(outfile, level) - outfile.write('usesrefid = "%s",\n' % (self.usesrefid,)) - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('usesrefid') + outfile.write(' usesrefid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.usesrefid), input_name='usesrefid')), )) + def exportChildren(self, outfile, level, namespace_='', name_='deviceusedbythiscomponentref', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('usesrefid', node) - if value is not None and 'usesrefid' not in already_processed: - 
already_processed.append('usesrefid') - self.usesrefid = value value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value + value = find_attr_value_('usesrefid', node) + if value is not None and 'usesrefid' not in already_processed: + already_processed.add('usesrefid') + self.usesrefid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class deviceusedbythiscomponentref @@ -1817,9 +2440,14 @@ class deviceusedbyapplication(GeneratedsSuper): subclass = None superclass = None def __init__(self, usesrefid=None): + self.original_tagname_ = None self.usesrefid = _cast(None, usesrefid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, deviceusedbyapplication) + if subclass is not None: + return subclass(*args_, **kwargs_) if deviceusedbyapplication.subclass: return deviceusedbyapplication.subclass(*args_, **kwargs_) else: @@ -1828,55 +2456,50 @@ def factory(*args_, **kwargs_): def get_usesrefid(self): return self.usesrefid def set_usesrefid(self, usesrefid): self.usesrefid = usesrefid usesrefidProp = property(get_usesrefid, set_usesrefid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='deviceusedbyapplication', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('deviceusedbyapplication') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, 
already_processed, namespace_, name_='deviceusedbyapplication') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='deviceusedbyapplication', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='deviceusedbyapplication'): if self.usesrefid is not None and 'usesrefid' not in already_processed: - already_processed.append('usesrefid') - outfile.write(' usesrefid=%s' % (self.gds_format_string(quote_attrib(self.usesrefid).encode(ExternalEncoding), input_name='usesrefid'), )) + already_processed.add('usesrefid') + outfile.write(' usesrefid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.usesrefid), input_name='usesrefid')), )) def exportChildren(self, outfile, level, namespace_='', name_='deviceusedbyapplication', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='deviceusedbyapplication'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.usesrefid is not None and 'usesrefid' not in already_processed: - already_processed.append('usesrefid') - showIndent(outfile, level) - outfile.write('usesrefid = "%s",\n' % (self.usesrefid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, 
nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('usesrefid', node) if value is not None and 'usesrefid' not in already_processed: - already_processed.append('usesrefid') + already_processed.add('usesrefid') self.usesrefid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1887,6 +2510,7 @@ class resourcefactoryproperties(GeneratedsSuper): subclass = None superclass = None def __init__(self, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): + self.original_tagname_ = None if simpleref is None: self.simpleref = [] else: @@ -1904,6 +2528,11 @@ def __init__(self, simpleref=None, simplesequenceref=None, structref=None, struc else: self.structsequenceref = structsequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, resourcefactoryproperties) + if subclass is not None: + return subclass(*args_, **kwargs_) if resourcefactoryproperties.subclass: return resourcefactoryproperties.subclass(*args_, **kwargs_) else: @@ -1912,35 +2541,54 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) def get_simplesequenceref(self): return self.simplesequenceref def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref def add_simplesequenceref(self, value): self.simplesequenceref.append(value) - def insert_simplesequenceref(self, index, value): self.simplesequenceref[index] = value + def 
insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) def get_structref(self): return self.structref def set_structref(self, structref): self.structref = structref def add_structref(self, value): self.structref.append(value) - def insert_structref(self, index, value): self.structref[index] = value + def insert_structref_at(self, index, value): self.structref.insert(index, value) + def replace_structref_at(self, index, value): self.structref[index] = value structrefProp = property(get_structref, set_structref) def get_structsequenceref(self): return self.structsequenceref def set_structsequenceref(self, structsequenceref): self.structsequenceref = structsequenceref def add_structsequenceref(self, value): self.structsequenceref.append(value) - def insert_structsequenceref(self, index, value): self.structsequenceref[index] = value + def insert_structsequenceref_at(self, index, value): self.structsequenceref.insert(index, value) + def replace_structsequenceref_at(self, index, value): self.structsequenceref[index] = value structsequencerefProp = property(get_structsequenceref, set_structsequenceref) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref or + self.structref or + self.structsequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='resourcefactoryproperties', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('resourcefactoryproperties') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + 
namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='resourcefactoryproperties') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='resourcefactoryproperties', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1960,77 +2608,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='resourcefactorypr structref_.export(outfile, level, namespace_, name_='structref', pretty_print=pretty_print) for structsequenceref_ in self.structsequenceref: structsequenceref_.export(outfile, level, namespace_, name_='structsequenceref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref or - self.simplesequenceref or - self.structref or - self.structsequenceref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='resourcefactoryproperties'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simplesequenceref=[\n') - level += 1 - for simplesequenceref_ in self.simplesequenceref: - showIndent(outfile, level) - outfile.write('model_.simplesequenceref(\n') - 
simplesequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structref=[\n') - level += 1 - for structref_ in self.structref: - showIndent(outfile, level) - outfile.write('model_.structref(\n') - structref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structsequenceref=[\n') - level += 1 - for structsequenceref_ in self.structsequenceref: - showIndent(outfile, level) - outfile.write('model_.structsequenceref(\n') - structsequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -2038,18 +2622,22 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' elif nodeName_ == 'simplesequenceref': obj_ = simplesequenceref.factory() obj_.build(child_) self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' elif nodeName_ == 'structref': obj_ = structref.factory() obj_.build(child_) self.structref.append(obj_) + obj_.original_tagname_ = 'structref' elif nodeName_ == 'structsequenceref': obj_ = structsequenceref.factory() obj_.build(child_) self.structsequenceref.append(obj_) + obj_.original_tagname_ = 
'structsequenceref' # end class resourcefactoryproperties @@ -2057,10 +2645,15 @@ class simpleref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, value=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.value = _cast(None, value) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simpleref) + if subclass is not None: + return subclass(*args_, **kwargs_) if simpleref.subclass: return simpleref.subclass(*args_, **kwargs_) else: @@ -2072,66 +2665,57 @@ def set_refid(self, refid): self.refid = refid def get_value(self): return self.value def set_value(self, value): self.value = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='simpleref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simpleref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simpleref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simpleref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simpleref'): if self.refid is not None and 'refid' not in already_processed: - 
already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), )) + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) def exportChildren(self, outfile, level, namespace_='', name_='simpleref', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='simpleref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - showIndent(outfile, level) - outfile.write('value = "%s",\n' % (self.value,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', 
node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value value = find_attr_value_('value', node) if value is not None and 'value' not in already_processed: - already_processed.append('value') + already_processed.add('value') self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2142,9 +2726,15 @@ class simplesequenceref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, values=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.values = values def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simplesequenceref) + if subclass is not None: + return subclass(*args_, **kwargs_) if simplesequenceref.subclass: return simplesequenceref.subclass(*args_, **kwargs_) else: @@ -2156,26 +2746,38 @@ def set_values(self, values): self.values = values def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.values is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='simplesequenceref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simplesequenceref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simplesequenceref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - 
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simplesequenceref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simplesequenceref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='simplesequenceref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2183,58 +2785,47 @@ def exportChildren(self, outfile, level, namespace_='', name_='simplesequenceref eol_ = '' if self.values is not None: self.values.export(outfile, level, namespace_, name_='values', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.values is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='simplesequenceref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.values is not None: - showIndent(outfile, level) - outfile.write('values=model_.values(\n') - self.values.exportLiteral(outfile, level) - 
showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'values': obj_ = values.factory() obj_.build(child_) - self.set_values(obj_) + self.values = obj_ + obj_.original_tagname_ = 'values' # end class simplesequenceref class structref(GeneratedsSuper): subclass = None superclass = None - def __init__(self, refid=None, simpleref=None): + def __init__(self, refid=None, simpleref=None, simplesequenceref=None): + self.original_tagname_ = None self.refid = _cast(None, refid) if simpleref is None: self.simpleref = [] else: self.simpleref = simpleref + if simplesequenceref is None: + self.simplesequenceref = [] + else: + self.simplesequenceref = simplesequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structref) + if subclass is not None: + return subclass(*args_, **kwargs_) if structref.subclass: return structref.subclass(*args_, **kwargs_) else: @@ -2243,31 +2834,51 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): 
self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) + def get_simplesequenceref(self): return self.simplesequenceref + def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref + def add_simplesequenceref(self, value): self.simplesequenceref.append(value) + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value + simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='structref'): if self.refid is not None and 'refid' not in 
already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='structref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2275,51 +2886,31 @@ def exportChildren(self, outfile, level, namespace_='', name_='structref', froms eol_ = '' for simpleref_ in self.simpleref: simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + for simplesequenceref_ in self.simplesequenceref: + simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, 
already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'simpleref': obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' + elif nodeName_ == 'simplesequenceref': + obj_ = simplesequenceref.factory() + obj_.build(child_) + self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' # end class structref @@ -2327,12 +2918,18 @@ class structsequenceref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, structvalue=None): + self.original_tagname_ = None self.refid = _cast(None, refid) if structvalue is None: self.structvalue = [] else: self.structvalue = structvalue def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structsequenceref) + if subclass is not None: + return subclass(*args_, **kwargs_) if structsequenceref.subclass: return structsequenceref.subclass(*args_, **kwargs_) else: @@ -2341,31 +2938,44 @@ def factory(*args_, **kwargs_): def get_structvalue(self): return self.structvalue def set_structvalue(self, structvalue): self.structvalue = structvalue def add_structvalue(self, value): self.structvalue.append(value) - def insert_structvalue(self, index, value): self.structvalue[index] = value + def insert_structvalue_at(self, index, value): self.structvalue.insert(index, value) + def replace_structvalue_at(self, index, value): self.structvalue[index] = value structvalueProp = property(get_structvalue, set_structvalue) def get_refid(self): return 
self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.structvalue + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structsequenceref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structsequenceref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structsequenceref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structsequenceref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='structsequenceref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='structsequenceref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2373,63 +2983,46 @@ def exportChildren(self, outfile, level, namespace_='', name_='structsequenceref eol_ = '' for 
structvalue_ in self.structvalue: structvalue_.export(outfile, level, namespace_, name_='structvalue', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.structvalue - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structsequenceref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('structvalue=[\n') - level += 1 - for structvalue_ in self.structvalue: - showIndent(outfile, level) - outfile.write('model_.structvalue(\n') - structvalue_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'structvalue': obj_ = structvalue.factory() obj_.build(child_) self.structvalue.append(obj_) + obj_.original_tagname_ = 'structvalue' # end class structsequenceref class structvalue(GeneratedsSuper): subclass = None superclass = None - def __init__(self, 
simpleref=None): + def __init__(self, simpleref=None, simplesequenceref=None): + self.original_tagname_ = None if simpleref is None: self.simpleref = [] else: self.simpleref = simpleref + if simplesequenceref is None: + self.simplesequenceref = [] + else: + self.simplesequenceref = simplesequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structvalue) + if subclass is not None: + return subclass(*args_, **kwargs_) if structvalue.subclass: return structvalue.subclass(*args_, **kwargs_) else: @@ -2438,20 +3031,40 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) + def get_simplesequenceref(self): return self.simplesequenceref + def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref + def add_simplesequenceref(self, value): self.simplesequenceref.append(value) + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value + simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structvalue', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structvalue') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if 
pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structvalue') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structvalue', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2465,38 +3078,15 @@ def exportChildren(self, outfile, level, namespace_='', name_='structvalue', fro eol_ = '' for simpleref_ in self.simpleref: simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structvalue'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + for simplesequenceref_ in self.simplesequenceref: + simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + 
already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -2504,6 +3094,12 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' + elif nodeName_ == 'simplesequenceref': + obj_ = simplesequenceref.factory() + obj_.build(child_) + self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' # end class structvalue @@ -2511,11 +3107,17 @@ class values(GeneratedsSuper): subclass = None superclass = None def __init__(self, value=None): + self.original_tagname_ = None if value is None: self.value = [] else: self.value = value def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, values) + if subclass is not None: + return subclass(*args_, **kwargs_) if values.subclass: return values.subclass(*args_, **kwargs_) else: @@ -2524,20 +3126,33 @@ def factory(*args_, **kwargs_): def get_value(self): return self.value def set_value(self, value): self.value = value def add_value(self, value): self.value.append(value) - def insert_value(self, index, value): self.value[index] = value + def insert_value_at(self, index, value): self.value.insert(index, value) + def replace_value_at(self, index, value): self.value[index] = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + self.value + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='values', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('values') + if imported_ns_def_ is not None: + 
namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='values') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='values', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2551,36 +3166,14 @@ def exportChildren(self, outfile, level, namespace_='', name_='values', fromsubc eol_ = '' for value_ in self.value: showIndent(outfile, level, pretty_print) - outfile.write('<%svalue>%s%s' % (namespace_, self.gds_format_string(quote_xml(value_).encode(ExternalEncoding), input_name='value'), namespace_, eol_)) - def hasContent_(self): - if ( - self.value - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='values'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('value=[\n') - level += 1 - for value_ in self.value: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(value_).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(value_), input_name='value')), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + 
already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -2595,9 +3188,14 @@ class componentinstantiationref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentinstantiationref) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentinstantiationref.subclass: return componentinstantiationref.subclass(*args_, **kwargs_) else: @@ -2606,55 +3204,50 @@ def factory(*args_, **kwargs_): def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentinstantiationref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentinstantiationref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentinstantiationref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + 
self.exportChildren(outfile, level + 1, namespace_='', name_='componentinstantiationref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentinstantiationref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentinstantiationref', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentinstantiationref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value 
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2665,10 +3258,16 @@ class findby(GeneratedsSuper): subclass = None superclass = None def __init__(self, namingservice=None, stringifiedobjectref=None, domainfinder=None): + self.original_tagname_ = None self.namingservice = namingservice self.stringifiedobjectref = stringifiedobjectref self.domainfinder = domainfinder def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, findby) + if subclass is not None: + return subclass(*args_, **kwargs_) if findby.subclass: return findby.subclass(*args_, **kwargs_) else: @@ -2683,18 +3282,32 @@ def set_stringifiedobjectref(self, stringifiedobjectref): self.stringifiedobject def get_domainfinder(self): return self.domainfinder def set_domainfinder(self, domainfinder): self.domainfinder = domainfinder domainfinderProp = property(get_domainfinder, set_domainfinder) + def hasContent_(self): + if ( + self.namingservice is not None or + self.stringifiedobjectref is not None or + self.domainfinder is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='findby', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('findby') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='findby') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='findby', 
pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -2710,53 +3323,24 @@ def exportChildren(self, outfile, level, namespace_='', name_='findby', fromsubc self.namingservice.export(outfile, level, namespace_, name_='namingservice', pretty_print=pretty_print) if self.stringifiedobjectref is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sstringifiedobjectref>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.stringifiedobjectref).encode(ExternalEncoding), input_name='stringifiedobjectref'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.stringifiedobjectref), input_name='stringifiedobjectref')), eol_)) if self.domainfinder is not None: self.domainfinder.export(outfile, level, namespace_, name_='domainfinder', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.namingservice is not None or - self.stringifiedobjectref is not None or - self.domainfinder is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='findby'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.namingservice is not None: - showIndent(outfile, level) - outfile.write('namingservice=model_.namingservice(\n') - self.namingservice.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.stringifiedobjectref is not None: - showIndent(outfile, level) - outfile.write('stringifiedobjectref=%s,\n' % quote_python(self.stringifiedobjectref).encode(ExternalEncoding)) - if self.domainfinder is not None: - showIndent(outfile, level) - outfile.write('domainfinder=model_.domainfinder(\n') - 
self.domainfinder.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'namingservice': obj_ = namingservice.factory() obj_.build(child_) - self.set_namingservice(obj_) + self.namingservice = obj_ + obj_.original_tagname_ = 'namingservice' elif nodeName_ == 'stringifiedobjectref': stringifiedobjectref_ = child_.text stringifiedobjectref_ = self.gds_validate_string(stringifiedobjectref_, node, 'stringifiedobjectref') @@ -2764,7 +3348,8 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'domainfinder': obj_ = domainfinder.factory() obj_.build(child_) - self.set_domainfinder(obj_) + self.domainfinder = obj_ + obj_.original_tagname_ = 'domainfinder' # end class findby @@ -2772,9 +3357,14 @@ class namingservice(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, namingservice) + if subclass is not None: + return subclass(*args_, **kwargs_) if namingservice.subclass: return namingservice.subclass(*args_, **kwargs_) else: @@ -2783,55 +3373,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='namingservice', 
namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('namingservice') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='namingservice') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='namingservice', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='namingservice'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='namingservice', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='namingservice'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, 
level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2842,10 +3427,15 @@ class domainfinder(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, name=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, domainfinder) + if subclass is not None: + return subclass(*args_, **kwargs_) if domainfinder.subclass: return domainfinder.subclass(*args_, **kwargs_) else: @@ -2857,191 +3447,520 @@ def set_type(self, type_): self.type_ = type_ def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='domainfinder', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('domainfinder') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' 
+ namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='domainfinder') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='domainfinder', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='domainfinder'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='domainfinder', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='domainfinder'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - 
outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class domainfinder -class hostcollocation(GeneratedsSuper): +class hostcollocationcp(GeneratedsSuper): subclass = None superclass = None - def __init__(self, id_=None, name=None, componentplacement=None): - self.id_ = _cast(None, id_) - self.name = _cast(None, name) + def __init__(self, componentplacement=None, extensiontype_=None): + self.original_tagname_ = None if componentplacement is None: self.componentplacement = [] else: self.componentplacement = componentplacement + self.extensiontype_ = extensiontype_ def factory(*args_, **kwargs_): - if hostcollocation.subclass: - return hostcollocation.subclass(*args_, **kwargs_) - else: - return hostcollocation(*args_, **kwargs_) + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, hostcollocationcp) + if subclass is not None: + return subclass(*args_, **kwargs_) + if hostcollocationcp.subclass: + return 
hostcollocationcp.subclass(*args_, **kwargs_) + else: + return hostcollocationcp(*args_, **kwargs_) factory = staticmethod(factory) def get_componentplacement(self): return self.componentplacement def set_componentplacement(self, componentplacement): self.componentplacement = componentplacement def add_componentplacement(self, value): self.componentplacement.append(value) - def insert_componentplacement(self, index, value): self.componentplacement[index] = value + def insert_componentplacement_at(self, index, value): self.componentplacement.insert(index, value) + def replace_componentplacement_at(self, index, value): self.componentplacement[index] = value componentplacementProp = property(get_componentplacement, set_componentplacement) + def get_extensiontype_(self): return self.extensiontype_ + def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_ + def hasContent_(self): + if ( + self.componentplacement + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='hostcollocationcp', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('hostcollocationcp') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='hostcollocationcp') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='hostcollocationcp', pretty_print=pretty_print) + showIndent(outfile, level, pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, 
already_processed, namespace_='', name_='hostcollocationcp'): + if self.extensiontype_ is not None and 'xsi:type' not in already_processed: + already_processed.add('xsi:type') + outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"') + outfile.write(' xsi:type="%s"' % self.extensiontype_) + pass + def exportChildren(self, outfile, level, namespace_='', name_='hostcollocationcp', fromsubclass_=False, pretty_print=True): + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + for componentplacement_ in self.componentplacement: + componentplacement_.export(outfile, level, namespace_, name_='componentplacement', pretty_print=pretty_print) + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + value = find_attr_value_('xsi:type', node) + if value is not None and 'xsi:type' not in already_processed: + already_processed.add('xsi:type') + self.extensiontype_ = value + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'componentplacement': + obj_ = componentplacement.factory() + obj_.build(child_) + self.componentplacement.append(obj_) + obj_.original_tagname_ = 'componentplacement' +# end class hostcollocationcp + + +class hostcollocationcpud(hostcollocationcp): + subclass = None + superclass = hostcollocationcp + def __init__(self, componentplacement=None, usesdeviceref=None, extensiontype_=None): + self.original_tagname_ = None + super(hostcollocationcpud, self).__init__(componentplacement, extensiontype_, ) + if usesdeviceref is None: + self.usesdeviceref = [] + else: + self.usesdeviceref = usesdeviceref + self.extensiontype_ = extensiontype_ + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + 
CurrentSubclassModule_, hostcollocationcpud) + if subclass is not None: + return subclass(*args_, **kwargs_) + if hostcollocationcpud.subclass: + return hostcollocationcpud.subclass(*args_, **kwargs_) + else: + return hostcollocationcpud(*args_, **kwargs_) + factory = staticmethod(factory) + def get_usesdeviceref(self): return self.usesdeviceref + def set_usesdeviceref(self, usesdeviceref): self.usesdeviceref = usesdeviceref + def add_usesdeviceref(self, value): self.usesdeviceref.append(value) + def insert_usesdeviceref_at(self, index, value): self.usesdeviceref.insert(index, value) + def replace_usesdeviceref_at(self, index, value): self.usesdeviceref[index] = value + usesdevicerefProp = property(get_usesdeviceref, set_usesdeviceref) + def get_extensiontype_(self): return self.extensiontype_ + def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_ + def hasContent_(self): + if ( + self.usesdeviceref or + super(hostcollocationcpud, self).hasContent_() + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='hostcollocationcpud', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('hostcollocationcpud') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='hostcollocationcpud') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='hostcollocationcpud', pretty_print=pretty_print) + showIndent(outfile, level, pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) 
+ def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='hostcollocationcpud'): + super(hostcollocationcpud, self).exportAttributes(outfile, level, already_processed, namespace_, name_='hostcollocationcpud') + if self.extensiontype_ is not None and 'xsi:type' not in already_processed: + already_processed.add('xsi:type') + outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"') + outfile.write(' xsi:type="%s"' % self.extensiontype_) + def exportChildren(self, outfile, level, namespace_='', name_='hostcollocationcpud', fromsubclass_=False, pretty_print=True): + super(hostcollocationcpud, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print) + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + for usesdeviceref_ in self.usesdeviceref: + usesdeviceref_.export(outfile, level, namespace_, name_='usesdeviceref', pretty_print=pretty_print) + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + value = find_attr_value_('xsi:type', node) + if value is not None and 'xsi:type' not in already_processed: + already_processed.add('xsi:type') + self.extensiontype_ = value + super(hostcollocationcpud, self).buildAttributes(node, attrs, already_processed) + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'usesdeviceref': + obj_ = usesdeviceref.factory() + obj_.build(child_) + self.usesdeviceref.append(obj_) + obj_.original_tagname_ = 'usesdeviceref' + super(hostcollocationcpud, self).buildChildren(child_, node, nodeName_, True) +# end class hostcollocationcpud + + +class hostcollocation(hostcollocationcpud): + subclass = None + superclass = hostcollocationcpud + def __init__(self, componentplacement=None, 
usesdeviceref=None, id_=None, name=None, reservation=None): + self.original_tagname_ = None + super(hostcollocation, self).__init__(componentplacement, usesdeviceref, ) + self.id_ = _cast(None, id_) + self.name = _cast(None, name) + if reservation is None: + self.reservation = [] + else: + self.reservation = reservation + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, hostcollocation) + if subclass is not None: + return subclass(*args_, **kwargs_) + if hostcollocation.subclass: + return hostcollocation.subclass(*args_, **kwargs_) + else: + return hostcollocation(*args_, **kwargs_) + factory = staticmethod(factory) + def get_reservation(self): return self.reservation + def set_reservation(self, reservation): self.reservation = reservation + def add_reservation(self, value): self.reservation.append(value) + def insert_reservation_at(self, index, value): self.reservation.insert(index, value) + def replace_reservation_at(self, index, value): self.reservation[index] = value + reservationProp = property(get_reservation, set_reservation) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + self.reservation or + super(hostcollocation, self).hasContent_() + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='hostcollocation', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('hostcollocation') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, 
namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='hostcollocation') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='hostcollocation', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='hostcollocation'): + super(hostcollocation, self).exportAttributes(outfile, level, already_processed, namespace_, name_='hostcollocation') if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='hostcollocation', fromsubclass_=False, pretty_print=True): + super(hostcollocation, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print) if pretty_print: eol_ = '\n' else: eol_ = '' - for componentplacement_ in self.componentplacement: - componentplacement_.export(outfile, level, namespace_, name_='componentplacement', pretty_print=pretty_print) - def hasContent_(self): 
- if ( - self.componentplacement - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='hostcollocation'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('componentplacement=[\n') - level += 1 - for componentplacement_ in self.componentplacement: - showIndent(outfile, level) - outfile.write('model_.componentplacement(\n') - componentplacement_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + for reservation_ in self.reservation: + reservation_.export(outfile, level, namespace_, name_='reservation', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = 
value + super(hostcollocation, self).buildAttributes(node, attrs, already_processed) + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'reservation': + obj_ = reservation.factory() + obj_.build(child_) + self.reservation.append(obj_) + obj_.original_tagname_ = 'reservation' + super(hostcollocation, self).buildChildren(child_, node, nodeName_, True) +# end class hostcollocation + + +class reservation(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, kind=None, value=None): + self.original_tagname_ = None + self.kind = _cast(None, kind) + self.value = _cast(None, value) + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, reservation) + if subclass is not None: + return subclass(*args_, **kwargs_) + if reservation.subclass: + return reservation.subclass(*args_, **kwargs_) + else: + return reservation(*args_, **kwargs_) + factory = staticmethod(factory) + def get_kind(self): return self.kind + def set_kind(self, kind): self.kind = kind + kindProp = property(get_kind, set_kind) + def get_value(self): return self.value + def set_value(self, value): self.value = value + valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='reservation', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('reservation') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='reservation') + if 
self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='reservation', pretty_print=pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='reservation'): + if self.kind is not None and 'kind' not in already_processed: + already_processed.add('kind') + outfile.write(' kind=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.kind), input_name='kind')), )) + if self.value is not None and 'value' not in already_processed: + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) + def exportChildren(self, outfile, level, namespace_='', name_='reservation', fromsubclass_=False, pretty_print=True): + pass + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + value = find_attr_value_('kind', node) + if value is not None and 'kind' not in already_processed: + already_processed.add('kind') + self.kind = value + value = find_attr_value_('value', node) + if value is not None and 'value' not in already_processed: + already_processed.add('value') + self.value = value + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + pass +# end class reservation + + +class usesdeviceref(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, refid=None): + self.original_tagname_ = None + self.refid = _cast(None, refid) + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, usesdeviceref) + if subclass is not 
None: + return subclass(*args_, **kwargs_) + if usesdeviceref.subclass: + return usesdeviceref.subclass(*args_, **kwargs_) + else: + return usesdeviceref(*args_, **kwargs_) + factory = staticmethod(factory) + def get_refid(self): return self.refid + def set_refid(self, refid): self.refid = refid + refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='usesdeviceref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('usesdeviceref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='usesdeviceref') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='usesdeviceref', pretty_print=pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='usesdeviceref'): + if self.refid is not None and 'refid' not in already_processed: + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) + def exportChildren(self, outfile, level, namespace_='', name_='usesdeviceref', fromsubclass_=False, pretty_print=True): + pass + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, 
nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + value = find_attr_value_('refid', node) + if value is not None and 'refid' not in already_processed: + already_processed.add('refid') + self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): - if nodeName_ == 'componentplacement': - obj_ = componentplacement.factory() - obj_.build(child_) - self.componentplacement.append(obj_) -# end class hostcollocation + pass +# end class usesdeviceref class assemblycontroller(GeneratedsSuper): subclass = None superclass = None def __init__(self, componentinstantiationref=None): + self.original_tagname_ = None self.componentinstantiationref = componentinstantiationref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, assemblycontroller) + if subclass is not None: + return subclass(*args_, **kwargs_) if assemblycontroller.subclass: return assemblycontroller.subclass(*args_, **kwargs_) else: @@ -3050,18 +3969,30 @@ def factory(*args_, **kwargs_): def get_componentinstantiationref(self): return self.componentinstantiationref def set_componentinstantiationref(self, componentinstantiationref): self.componentinstantiationref = componentinstantiationref componentinstantiationrefProp = property(get_componentinstantiationref, set_componentinstantiationref) + def hasContent_(self): + if ( + self.componentinstantiationref is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='assemblycontroller', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('assemblycontroller') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, 
namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='assemblycontroller') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='assemblycontroller', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3075,39 +4006,21 @@ def exportChildren(self, outfile, level, namespace_='', name_='assemblycontrolle eol_ = '' if self.componentinstantiationref is not None: self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.componentinstantiationref is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='assemblycontroller'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.componentinstantiationref is not None: - showIndent(outfile, level) - outfile.write('componentinstantiationref=model_.componentinstantiationref(\n') - self.componentinstantiationref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, 
fromsubclass_=False): if nodeName_ == 'componentinstantiationref': obj_ = componentinstantiationref.factory() obj_.build(child_) - self.set_componentinstantiationref(obj_) + self.componentinstantiationref = obj_ + obj_.original_tagname_ = 'componentinstantiationref' # end class assemblycontroller @@ -3115,11 +4028,17 @@ class connections(GeneratedsSuper): subclass = None superclass = None def __init__(self, connectinterface=None): + self.original_tagname_ = None if connectinterface is None: self.connectinterface = [] else: self.connectinterface = connectinterface def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, connections) + if subclass is not None: + return subclass(*args_, **kwargs_) if connections.subclass: return connections.subclass(*args_, **kwargs_) else: @@ -3128,20 +4047,33 @@ def factory(*args_, **kwargs_): def get_connectinterface(self): return self.connectinterface def set_connectinterface(self, connectinterface): self.connectinterface = connectinterface def add_connectinterface(self, value): self.connectinterface.append(value) - def insert_connectinterface(self, index, value): self.connectinterface[index] = value + def insert_connectinterface_at(self, index, value): self.connectinterface.insert(index, value) + def replace_connectinterface_at(self, index, value): self.connectinterface[index] = value connectinterfaceProp = property(get_connectinterface, set_connectinterface) + def hasContent_(self): + if ( + self.connectinterface + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='connections', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('connections') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) 
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='connections') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='connections', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3155,38 +4087,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='connections', fro eol_ = '' for connectinterface_ in self.connectinterface: connectinterface_.export(outfile, level, namespace_, name_='connectinterface', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.connectinterface - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='connections'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('connectinterface=[\n') - level += 1 - for connectinterface_ in self.connectinterface: - showIndent(outfile, level) - outfile.write('model_.connectinterface(\n') - connectinterface_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, 
attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3194,6 +4101,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = connectinterface.factory() obj_.build(child_) self.connectinterface.append(obj_) + obj_.original_tagname_ = 'connectinterface' # end class connections @@ -3201,12 +4109,18 @@ class connectinterface(GeneratedsSuper): subclass = None superclass = None def __init__(self, id_=None, usesport=None, providesport=None, componentsupportedinterface=None, findby=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.usesport = usesport self.providesport = providesport self.componentsupportedinterface = componentsupportedinterface self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, connectinterface) + if subclass is not None: + return subclass(*args_, **kwargs_) if connectinterface.subclass: return connectinterface.subclass(*args_, **kwargs_) else: @@ -3225,28 +4139,43 @@ def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) + def hasContent_(self): + if ( + self.usesport is not None or + self.providesport is not None or + self.componentsupportedinterface is not None or + self.findby is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='connectinterface', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('connectinterface') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, 
pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='connectinterface') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='connectinterface', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='connectinterface'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) def exportChildren(self, outfile, level, namespace_='', name_='connectinterface', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -3260,78 +4189,39 @@ def exportChildren(self, outfile, level, namespace_='', name_='connectinterface' self.componentsupportedinterface.export(outfile, level, namespace_, name_='componentsupportedinterface', pretty_print=pretty_print) if self.findby is not None: self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.usesport is not None or - self.providesport is not None or - self.componentsupportedinterface is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='connectinterface'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - 
self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.usesport is not None: - showIndent(outfile, level) - outfile.write('usesport=model_.usesport(\n') - self.usesport.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.providesport is not None: - showIndent(outfile, level) - outfile.write('providesport=model_.providesport(\n') - self.providesport.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.componentsupportedinterface is not None: - showIndent(outfile, level) - outfile.write('componentsupportedinterface=model_.componentsupportedinterface(\n') - self.componentsupportedinterface.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'usesport': obj_ = usesport.factory() obj_.build(child_) - self.set_usesport(obj_) + self.usesport = obj_ + 
obj_.original_tagname_ = 'usesport' elif nodeName_ == 'providesport': obj_ = providesport.factory() obj_.build(child_) - self.set_providesport(obj_) + self.providesport = obj_ + obj_.original_tagname_ = 'providesport' elif nodeName_ == 'componentsupportedinterface': obj_ = componentsupportedinterface.factory() obj_.build(child_) - self.set_componentsupportedinterface(obj_) + self.componentsupportedinterface = obj_ + obj_.original_tagname_ = 'componentsupportedinterface' elif nodeName_ == 'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class connectinterface @@ -3339,6 +4229,7 @@ class usesport(GeneratedsSuper): subclass = None superclass = None def __init__(self, usesidentifier=None, componentinstantiationref=None, devicethatloadedthiscomponentref=None, deviceusedbythiscomponentref=None, deviceusedbyapplication=None, findby=None): + self.original_tagname_ = None self.usesidentifier = usesidentifier self.componentinstantiationref = componentinstantiationref self.devicethatloadedthiscomponentref = devicethatloadedthiscomponentref @@ -3346,6 +4237,11 @@ def __init__(self, usesidentifier=None, componentinstantiationref=None, deviceth self.deviceusedbyapplication = deviceusedbyapplication self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, usesport) + if subclass is not None: + return subclass(*args_, **kwargs_) if usesport.subclass: return usesport.subclass(*args_, **kwargs_) else: @@ -3369,18 +4265,35 @@ def set_deviceusedbyapplication(self, deviceusedbyapplication): self.deviceusedb def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) + def hasContent_(self): + if ( + self.usesidentifier is not None or + self.componentinstantiationref is not None or + self.devicethatloadedthiscomponentref 
is not None or + self.deviceusedbythiscomponentref is not None or + self.deviceusedbyapplication is not None or + self.findby is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='usesport', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('usesport') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='usesport') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='usesport', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3394,7 +4307,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='usesport', fromsu eol_ = '' if self.usesidentifier is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%susesidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.usesidentifier).encode(ExternalEncoding), input_name='usesidentifier'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.usesidentifier), input_name='usesidentifier')), eol_)) if self.componentinstantiationref is not None: self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) if self.devicethatloadedthiscomponentref is not None: @@ -3405,64 +4318,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='usesport', fromsu 
self.deviceusedbyapplication.export(outfile, level, namespace_, name_='deviceusedbyapplication', pretty_print=pretty_print) if self.findby is not None: self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.usesidentifier is not None or - self.componentinstantiationref is not None or - self.devicethatloadedthiscomponentref is not None or - self.deviceusedbythiscomponentref is not None or - self.deviceusedbyapplication is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='usesport'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.usesidentifier is not None: - showIndent(outfile, level) - outfile.write('usesidentifier=%s,\n' % quote_python(self.usesidentifier).encode(ExternalEncoding)) - if self.componentinstantiationref is not None: - showIndent(outfile, level) - outfile.write('componentinstantiationref=model_.componentinstantiationref(\n') - self.componentinstantiationref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.devicethatloadedthiscomponentref is not None: - showIndent(outfile, level) - outfile.write('devicethatloadedthiscomponentref=model_.devicethatloadedthiscomponentref(\n') - self.devicethatloadedthiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deviceusedbythiscomponentref is not None: - showIndent(outfile, level) - outfile.write('deviceusedbythiscomponentref=model_.deviceusedbythiscomponentref(\n') - self.deviceusedbythiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deviceusedbyapplication 
is not None: - showIndent(outfile, level) - outfile.write('deviceusedbyapplication=model_.deviceusedbyapplication(\n') - self.deviceusedbyapplication.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3473,23 +4335,28 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentinstantiationref': obj_ = componentinstantiationref.factory() obj_.build(child_) - self.set_componentinstantiationref(obj_) + self.componentinstantiationref = obj_ + obj_.original_tagname_ = 'componentinstantiationref' elif nodeName_ == 'devicethatloadedthiscomponentref': obj_ = devicethatloadedthiscomponentref.factory() obj_.build(child_) - self.set_devicethatloadedthiscomponentref(obj_) + self.devicethatloadedthiscomponentref = obj_ + obj_.original_tagname_ = 'devicethatloadedthiscomponentref' elif nodeName_ == 'deviceusedbythiscomponentref': obj_ = deviceusedbythiscomponentref.factory() obj_.build(child_) - self.set_deviceusedbythiscomponentref(obj_) + self.deviceusedbythiscomponentref = obj_ + obj_.original_tagname_ = 'deviceusedbythiscomponentref' elif nodeName_ == 'deviceusedbyapplication': obj_ = deviceusedbyapplication.factory() obj_.build(child_) - self.set_deviceusedbyapplication(obj_) + self.deviceusedbyapplication = obj_ + obj_.original_tagname_ = 'deviceusedbyapplication' elif nodeName_ == 
'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class usesport @@ -3497,6 +4364,7 @@ class providesport(GeneratedsSuper): subclass = None superclass = None def __init__(self, providesidentifier=None, componentinstantiationref=None, devicethatloadedthiscomponentref=None, deviceusedbythiscomponentref=None, deviceusedbyapplication=None, findby=None): + self.original_tagname_ = None self.providesidentifier = providesidentifier self.componentinstantiationref = componentinstantiationref self.devicethatloadedthiscomponentref = devicethatloadedthiscomponentref @@ -3504,6 +4372,11 @@ def __init__(self, providesidentifier=None, componentinstantiationref=None, devi self.deviceusedbyapplication = deviceusedbyapplication self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, providesport) + if subclass is not None: + return subclass(*args_, **kwargs_) if providesport.subclass: return providesport.subclass(*args_, **kwargs_) else: @@ -3527,18 +4400,35 @@ def set_deviceusedbyapplication(self, deviceusedbyapplication): self.deviceusedb def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) + def hasContent_(self): + if ( + self.providesidentifier is not None or + self.componentinstantiationref is not None or + self.devicethatloadedthiscomponentref is not None or + self.deviceusedbythiscomponentref is not None or + self.deviceusedbyapplication is not None or + self.findby is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='providesport', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('providesport') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + 
if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='providesport') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='providesport', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3552,7 +4442,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='providesport', fr eol_ = '' if self.providesidentifier is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sprovidesidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.providesidentifier).encode(ExternalEncoding), input_name='providesidentifier'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.providesidentifier), input_name='providesidentifier')), eol_)) if self.componentinstantiationref is not None: self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) if self.devicethatloadedthiscomponentref is not None: @@ -3563,64 +4453,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='providesport', fr self.deviceusedbyapplication.export(outfile, level, namespace_, name_='deviceusedbyapplication', pretty_print=pretty_print) if self.findby is not None: self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.providesidentifier is not None or - self.componentinstantiationref is not None or - self.devicethatloadedthiscomponentref is not None or - 
self.deviceusedbythiscomponentref is not None or - self.deviceusedbyapplication is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='providesport'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.providesidentifier is not None: - showIndent(outfile, level) - outfile.write('providesidentifier=%s,\n' % quote_python(self.providesidentifier).encode(ExternalEncoding)) - if self.componentinstantiationref is not None: - showIndent(outfile, level) - outfile.write('componentinstantiationref=model_.componentinstantiationref(\n') - self.componentinstantiationref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.devicethatloadedthiscomponentref is not None: - showIndent(outfile, level) - outfile.write('devicethatloadedthiscomponentref=model_.devicethatloadedthiscomponentref(\n') - self.devicethatloadedthiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deviceusedbythiscomponentref is not None: - showIndent(outfile, level) - outfile.write('deviceusedbythiscomponentref=model_.deviceusedbythiscomponentref(\n') - self.deviceusedbythiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deviceusedbyapplication is not None: - showIndent(outfile, level) - outfile.write('deviceusedbyapplication=model_.deviceusedbyapplication(\n') - self.deviceusedbyapplication.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, 
level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3631,23 +4470,28 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentinstantiationref': obj_ = componentinstantiationref.factory() obj_.build(child_) - self.set_componentinstantiationref(obj_) + self.componentinstantiationref = obj_ + obj_.original_tagname_ = 'componentinstantiationref' elif nodeName_ == 'devicethatloadedthiscomponentref': obj_ = devicethatloadedthiscomponentref.factory() obj_.build(child_) - self.set_devicethatloadedthiscomponentref(obj_) + self.devicethatloadedthiscomponentref = obj_ + obj_.original_tagname_ = 'devicethatloadedthiscomponentref' elif nodeName_ == 'deviceusedbythiscomponentref': obj_ = deviceusedbythiscomponentref.factory() obj_.build(child_) - self.set_deviceusedbythiscomponentref(obj_) + self.deviceusedbythiscomponentref = obj_ + obj_.original_tagname_ = 'deviceusedbythiscomponentref' elif nodeName_ == 'deviceusedbyapplication': obj_ = deviceusedbyapplication.factory() obj_.build(child_) - self.set_deviceusedbyapplication(obj_) + self.deviceusedbyapplication = obj_ + obj_.original_tagname_ = 'deviceusedbyapplication' elif nodeName_ == 'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class providesport @@ -3655,6 +4499,7 @@ class componentsupportedinterface(GeneratedsSuper): subclass = None superclass = None def __init__(self, supportedidentifier=None, componentinstantiationref=None, devicethatloadedthiscomponentref=None, 
deviceusedbythiscomponentref=None, deviceusedbyapplication=None, findby=None): + self.original_tagname_ = None self.supportedidentifier = supportedidentifier self.componentinstantiationref = componentinstantiationref self.devicethatloadedthiscomponentref = devicethatloadedthiscomponentref @@ -3662,6 +4507,11 @@ def __init__(self, supportedidentifier=None, componentinstantiationref=None, dev self.deviceusedbyapplication = deviceusedbyapplication self.findby = findby def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentsupportedinterface) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentsupportedinterface.subclass: return componentsupportedinterface.subclass(*args_, **kwargs_) else: @@ -3685,18 +4535,35 @@ def set_deviceusedbyapplication(self, deviceusedbyapplication): self.deviceusedb def get_findby(self): return self.findby def set_findby(self, findby): self.findby = findby findbyProp = property(get_findby, set_findby) + def hasContent_(self): + if ( + self.supportedidentifier is not None or + self.componentinstantiationref is not None or + self.devicethatloadedthiscomponentref is not None or + self.deviceusedbythiscomponentref is not None or + self.deviceusedbyapplication is not None or + self.findby is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentsupportedinterface', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentsupportedinterface') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() 
self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentsupportedinterface') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentsupportedinterface', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3710,7 +4577,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentsupporte eol_ = '' if self.supportedidentifier is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%ssupportedidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.supportedidentifier).encode(ExternalEncoding), input_name='supportedidentifier'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.supportedidentifier), input_name='supportedidentifier')), eol_)) if self.componentinstantiationref is not None: self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) if self.devicethatloadedthiscomponentref is not None: @@ -3721,64 +4588,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentsupporte self.deviceusedbyapplication.export(outfile, level, namespace_, name_='deviceusedbyapplication', pretty_print=pretty_print) if self.findby is not None: self.findby.export(outfile, level, namespace_, name_='findby', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.supportedidentifier is not None or - self.componentinstantiationref is not None or - self.devicethatloadedthiscomponentref is not None or - self.deviceusedbythiscomponentref is not None or - self.deviceusedbyapplication is not None or - self.findby is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, 
name_='componentsupportedinterface'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.supportedidentifier is not None: - showIndent(outfile, level) - outfile.write('supportedidentifier=%s,\n' % quote_python(self.supportedidentifier).encode(ExternalEncoding)) - if self.componentinstantiationref is not None: - showIndent(outfile, level) - outfile.write('componentinstantiationref=model_.componentinstantiationref(\n') - self.componentinstantiationref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.devicethatloadedthiscomponentref is not None: - showIndent(outfile, level) - outfile.write('devicethatloadedthiscomponentref=model_.devicethatloadedthiscomponentref(\n') - self.devicethatloadedthiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deviceusedbythiscomponentref is not None: - showIndent(outfile, level) - outfile.write('deviceusedbythiscomponentref=model_.deviceusedbythiscomponentref(\n') - self.deviceusedbythiscomponentref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.deviceusedbyapplication is not None: - showIndent(outfile, level) - outfile.write('deviceusedbyapplication=model_.deviceusedbyapplication(\n') - self.deviceusedbyapplication.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.findby is not None: - showIndent(outfile, level) - outfile.write('findby=model_.findby(\n') - self.findby.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, 
already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3789,23 +4605,28 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentinstantiationref': obj_ = componentinstantiationref.factory() obj_.build(child_) - self.set_componentinstantiationref(obj_) + self.componentinstantiationref = obj_ + obj_.original_tagname_ = 'componentinstantiationref' elif nodeName_ == 'devicethatloadedthiscomponentref': obj_ = devicethatloadedthiscomponentref.factory() obj_.build(child_) - self.set_devicethatloadedthiscomponentref(obj_) + self.devicethatloadedthiscomponentref = obj_ + obj_.original_tagname_ = 'devicethatloadedthiscomponentref' elif nodeName_ == 'deviceusedbythiscomponentref': obj_ = deviceusedbythiscomponentref.factory() obj_.build(child_) - self.set_deviceusedbythiscomponentref(obj_) + self.deviceusedbythiscomponentref = obj_ + obj_.original_tagname_ = 'deviceusedbythiscomponentref' elif nodeName_ == 'deviceusedbyapplication': obj_ = deviceusedbyapplication.factory() obj_.build(child_) - self.set_deviceusedbyapplication(obj_) + self.deviceusedbyapplication = obj_ + obj_.original_tagname_ = 'deviceusedbyapplication' elif nodeName_ == 'findby': obj_ = findby.factory() obj_.build(child_) - self.set_findby(obj_) + self.findby = obj_ + obj_.original_tagname_ = 'findby' # end class componentsupportedinterface @@ -3813,11 +4634,17 @@ class externalports(GeneratedsSuper): subclass = None superclass = None def __init__(self, port=None): + self.original_tagname_ = None if port is None: self.port = [] else: self.port = port def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, externalports) + if subclass is not None: + 
return subclass(*args_, **kwargs_) if externalports.subclass: return externalports.subclass(*args_, **kwargs_) else: @@ -3826,20 +4653,33 @@ def factory(*args_, **kwargs_): def get_port(self): return self.port def set_port(self, port): self.port = port def add_port(self, value): self.port.append(value) - def insert_port(self, index, value): self.port[index] = value + def insert_port_at(self, index, value): self.port.insert(index, value) + def replace_port_at(self, index, value): self.port[index] = value portProp = property(get_port, set_port) + def hasContent_(self): + if ( + self.port + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='externalports', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('externalports') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='externalports') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='externalports', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3853,38 +4693,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='externalports', f eol_ = '' for port_ in self.port: port_.export(outfile, level, namespace_, name_='port', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.port - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='externalports'): - 
level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('port=[\n') - level += 1 - for port_ in self.port: - showIndent(outfile, level) - outfile.write('model_.port(\n') - port_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3892,6 +4707,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = port.factory() obj_.build(child_) self.port.append(obj_) + obj_.original_tagname_ = 'port' # end class externalports @@ -3899,6 +4715,7 @@ class port(GeneratedsSuper): subclass = None superclass = None def __init__(self, externalname=None, description=None, usesidentifier=None, providesidentifier=None, supportedidentifier=None, componentinstantiationref=None): + self.original_tagname_ = None self.externalname = _cast(None, externalname) self.description = description self.usesidentifier = usesidentifier @@ -3906,6 +4723,11 @@ def __init__(self, externalname=None, description=None, usesidentifier=None, pro self.supportedidentifier = supportedidentifier self.componentinstantiationref = componentinstantiationref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, port) + if 
subclass is not None: + return subclass(*args_, **kwargs_) if port.subclass: return port.subclass(*args_, **kwargs_) else: @@ -3929,94 +4751,72 @@ def set_componentinstantiationref(self, componentinstantiationref): self.compone def get_externalname(self): return self.externalname def set_externalname(self, externalname): self.externalname = externalname externalnameProp = property(get_externalname, set_externalname) + def hasContent_(self): + if ( + self.description is not None or + self.usesidentifier is not None or + self.providesidentifier is not None or + self.supportedidentifier is not None or + self.componentinstantiationref is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='port', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('port') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='port') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='port', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: - outfile.write('/>%s' % (eol_, )) - def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='port'): - if self.externalname is not None and 'externalname' not in already_processed: - already_processed.append('externalname') - outfile.write(' externalname=%s' % 
(self.gds_format_string(quote_attrib(self.externalname).encode(ExternalEncoding), input_name='externalname'), )) - def exportChildren(self, outfile, level, namespace_='', name_='port', fromsubclass_=False, pretty_print=True): - if pretty_print: - eol_ = '\n' - else: - eol_ = '' - if self.description is not None: - showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) - if self.usesidentifier is not None: - showIndent(outfile, level, pretty_print) - outfile.write('<%susesidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.usesidentifier).encode(ExternalEncoding), input_name='usesidentifier'), namespace_, eol_)) - if self.providesidentifier is not None: - showIndent(outfile, level, pretty_print) - outfile.write('<%sprovidesidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.providesidentifier).encode(ExternalEncoding), input_name='providesidentifier'), namespace_, eol_)) - if self.supportedidentifier is not None: - showIndent(outfile, level, pretty_print) - outfile.write('<%ssupportedidentifier>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.supportedidentifier).encode(ExternalEncoding), input_name='supportedidentifier'), namespace_, eol_)) - if self.componentinstantiationref is not None: - self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.usesidentifier is not None or - self.providesidentifier is not None or - self.supportedidentifier is not None or - self.componentinstantiationref is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='port'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - 
self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='port'): if self.externalname is not None and 'externalname' not in already_processed: - already_processed.append('externalname') - showIndent(outfile, level) - outfile.write('externalname = "%s",\n' % (self.externalname,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('externalname') + outfile.write(' externalname=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.externalname), input_name='externalname')), )) + def exportChildren(self, outfile, level, namespace_='', name_='port', fromsubclass_=False, pretty_print=True): + if pretty_print: + eol_ = '\n' + else: + eol_ = '' if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) + showIndent(outfile, level, pretty_print) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.usesidentifier is not None: - showIndent(outfile, level) - outfile.write('usesidentifier=%s,\n' % quote_python(self.usesidentifier).encode(ExternalEncoding)) + showIndent(outfile, level, pretty_print) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.usesidentifier), input_name='usesidentifier')), eol_)) if self.providesidentifier is not None: - showIndent(outfile, level) - outfile.write('providesidentifier=%s,\n' % quote_python(self.providesidentifier).encode(ExternalEncoding)) + showIndent(outfile, level, pretty_print) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.providesidentifier), input_name='providesidentifier')), eol_)) if self.supportedidentifier is not None: - showIndent(outfile, level) - 
outfile.write('supportedidentifier=%s,\n' % quote_python(self.supportedidentifier).encode(ExternalEncoding)) + showIndent(outfile, level, pretty_print) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.supportedidentifier), input_name='supportedidentifier')), eol_)) if self.componentinstantiationref is not None: - showIndent(outfile, level) - outfile.write('componentinstantiationref=model_.componentinstantiationref(\n') - self.componentinstantiationref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') + self.componentinstantiationref.export(outfile, level, namespace_, name_='componentinstantiationref', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('externalname', node) if value is not None and 'externalname' not in already_processed: - already_processed.append('externalname') + already_processed.add('externalname') self.externalname = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': @@ -4038,7 +4838,8 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentinstantiationref': obj_ = componentinstantiationref.factory() obj_.build(child_) - self.set_componentinstantiationref(obj_) + self.componentinstantiationref = obj_ + obj_.original_tagname_ = 'componentinstantiationref' # end class port @@ -4046,11 +4847,17 @@ class externalproperties(GeneratedsSuper): subclass = None superclass = None def __init__(self, property=None): + self.original_tagname_ = None if property is None: self.property = [] else: self.property = property def factory(*args_, **kwargs_): 
+ if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, externalproperties) + if subclass is not None: + return subclass(*args_, **kwargs_) if externalproperties.subclass: return externalproperties.subclass(*args_, **kwargs_) else: @@ -4059,20 +4866,33 @@ def factory(*args_, **kwargs_): def get_property(self): return self.property def set_property(self, property): self.property = property def add_property(self, value): self.property.append(value) - def insert_property(self, index, value): self.property[index] = value + def insert_property_at(self, index, value): self.property.insert(index, value) + def replace_property_at(self, index, value): self.property[index] = value propertyProp = property(get_property, set_property) + def hasContent_(self): + if ( + self.property + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='externalproperties', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('externalproperties') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='externalproperties') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='externalproperties', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -4086,38 +4906,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='externalpropertie eol_ = '' for 
property_ in self.property: property_.export(outfile, level, namespace_, name_='property', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.property - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='externalproperties'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('property=[\n') - level += 1 - for property_ in self.property: - showIndent(outfile, level) - outfile.write('model_.property(\n') - property_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -4125,118 +4920,279 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = property.factory() obj_.build(child_) self.property.append(obj_) + obj_.original_tagname_ = 'property' # end class externalproperties class property(GeneratedsSuper): subclass = None superclass = None - def __init__(self, propid=None, externalpropid=None, comprefid=None): + def __init__(self, comprefid=None, propid=None, externalpropid=None): + self.original_tagname_ = None + self.comprefid = _cast(None, comprefid) self.propid = _cast(None, propid) self.externalpropid = _cast(None, externalpropid) - self.comprefid = _cast(None, comprefid) - pass def 
factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, property) + if subclass is not None: + return subclass(*args_, **kwargs_) if property.subclass: return property.subclass(*args_, **kwargs_) else: return property(*args_, **kwargs_) factory = staticmethod(factory) + def get_comprefid(self): return self.comprefid + def set_comprefid(self, comprefid): self.comprefid = comprefid + comprefidProp = property(get_comprefid, set_comprefid) def get_propid(self): return self.propid def set_propid(self, propid): self.propid = propid propidProp = property(get_propid, set_propid) def get_externalpropid(self): return self.externalpropid def set_externalpropid(self, externalpropid): self.externalpropid = externalpropid externalpropidProp = property(get_externalpropid, set_externalpropid) - def get_comprefid(self): return self.comprefid - def set_comprefid(self, comprefid): self.comprefid = comprefid - comprefidProp = property(get_comprefid, set_comprefid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='property', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('property') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='property') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='property', pretty_print=pretty_print) 
outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='property'): + if self.comprefid is not None and 'comprefid' not in already_processed: + already_processed.add('comprefid') + outfile.write(' comprefid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.comprefid), input_name='comprefid')), )) if self.propid is not None and 'propid' not in already_processed: - already_processed.append('propid') - outfile.write(' propid=%s' % (self.gds_format_string(quote_attrib(self.propid).encode(ExternalEncoding), input_name='propid'), )) + already_processed.add('propid') + outfile.write(' propid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.propid), input_name='propid')), )) if self.externalpropid is not None and 'externalpropid' not in already_processed: - already_processed.append('externalpropid') - outfile.write(' externalpropid=%s' % (self.gds_format_string(quote_attrib(self.externalpropid).encode(ExternalEncoding), input_name='externalpropid'), )) - if self.comprefid is not None and 'comprefid' not in already_processed: - already_processed.append('comprefid') - outfile.write(' comprefid=%s' % (self.gds_format_string(quote_attrib(self.comprefid).encode(ExternalEncoding), input_name='comprefid'), )) + already_processed.add('externalpropid') + outfile.write(' externalpropid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.externalpropid), input_name='externalpropid')), )) def exportChildren(self, outfile, level, namespace_='', name_='property', fromsubclass_=False, pretty_print=True): pass + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + value = 
find_attr_value_('comprefid', node) + if value is not None and 'comprefid' not in already_processed: + already_processed.add('comprefid') + self.comprefid = value + value = find_attr_value_('propid', node) + if value is not None and 'propid' not in already_processed: + already_processed.add('propid') + self.propid = value + value = find_attr_value_('externalpropid', node) + if value is not None and 'externalpropid' not in already_processed: + already_processed.add('externalpropid') + self.externalpropid = value + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + pass +# end class property + + +class options(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, option=None): + self.original_tagname_ = None + if option is None: + self.option = [] + else: + self.option = option + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, options) + if subclass is not None: + return subclass(*args_, **kwargs_) + if options.subclass: + return options.subclass(*args_, **kwargs_) + else: + return options(*args_, **kwargs_) + factory = staticmethod(factory) + def get_option(self): return self.option + def set_option(self, option): self.option = option + def add_option(self, value): self.option.append(value) + def insert_option_at(self, index, value): self.option.insert(index, value) + def replace_option_at(self, index, value): self.option[index] = value + optionProp = property(get_option, set_option) + def hasContent_(self): + if ( + self.option + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='options', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('options') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + 
showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='options') + if self.hasContent_(): + outfile.write('>%s' % (eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='options', pretty_print=pretty_print) + showIndent(outfile, level, pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='options'): + pass + def exportChildren(self, outfile, level, namespace_='', name_='options', fromsubclass_=False, pretty_print=True): + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + for option_ in self.option: + option_.export(outfile, level, namespace_, name_='option', pretty_print=pretty_print) + def build(self, node): + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) + for child in node: + nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] + self.buildChildren(child, node, nodeName_) + return self + def buildAttributes(self, node, attrs, already_processed): + pass + def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): + if nodeName_ == 'option': + obj_ = option.factory() + obj_.build(child_) + self.option.append(obj_) + obj_.original_tagname_ = 'option' +# end class options + + +class option(GeneratedsSuper): + subclass = None + superclass = None + def __init__(self, name=None, value=None): + self.original_tagname_ = None + self.name = _cast(None, name) + self.value = _cast(None, value) + def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, option) + if subclass is not None: + return subclass(*args_, **kwargs_) + if option.subclass: + return option.subclass(*args_, **kwargs_) + else: + 
return option(*args_, **kwargs_) + factory = staticmethod(factory) + def get_name(self): return self.name + def set_name(self, name): self.name = name + nameProp = property(get_name, set_name) + def get_value(self): return self.value + def set_value(self, value): self.value = value + valueProp = property(get_value, set_value) def hasContent_(self): if ( - ): + ): return True else: return False - def exportLiteral(self, outfile, level, name_='property'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) + def export(self, outfile, level, namespace_='', name_='option', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('option') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ + if pretty_print: + eol_ = '\n' + else: + eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ + showIndent(outfile, level, pretty_print) + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='option') if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.propid is not None and 'propid' not in already_processed: - already_processed.append('propid') - showIndent(outfile, level) - outfile.write('propid = "%s",\n' % (self.propid,)) - if self.externalpropid is not None and 'externalpropid' not in already_processed: - already_processed.append('externalpropid') - showIndent(outfile, level) - outfile.write('externalpropid = "%s",\n' % (self.externalpropid,)) - if self.comprefid is not None and 'comprefid' not in already_processed: - already_processed.append('comprefid') - showIndent(outfile, level) - outfile.write('comprefid = "%s",\n' % (self.comprefid,)) - def exportLiteralChildren(self, outfile, level, name_): + outfile.write('>%s' % 
(eol_, )) + self.exportChildren(outfile, level + 1, namespace_='', name_='option', pretty_print=pretty_print) + outfile.write('%s' % (namespace_, name_, eol_)) + else: + outfile.write('/>%s' % (eol_, )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='option'): + if self.name is not None and 'name' not in already_processed: + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) + if self.value is not None and 'value' not in already_processed: + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) + def exportChildren(self, outfile, level, namespace_='', name_='option', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('propid', node) - if value is not None and 'propid' not in already_processed: - already_processed.append('propid') - self.propid = value - value = find_attr_value_('externalpropid', node) - if value is not None and 'externalpropid' not in already_processed: - already_processed.append('externalpropid') - self.externalpropid = value - value = find_attr_value_('comprefid', node) - if value is not None and 'comprefid' not in already_processed: - already_processed.append('comprefid') - self.comprefid = value + value = find_attr_value_('name', node) + if value is not None and 'name' not in already_processed: + already_processed.add('name') + self.name = value + value = find_attr_value_('value', node) + if value is not None and 'value' not in already_processed: + 
already_processed.add('value') + self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass -# end class property +# end class option class usesdevicedependencies(GeneratedsSuper): subclass = None superclass = None def __init__(self, usesdevice=None): + self.original_tagname_ = None if usesdevice is None: self.usesdevice = [] else: self.usesdevice = usesdevice def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, usesdevicedependencies) + if subclass is not None: + return subclass(*args_, **kwargs_) if usesdevicedependencies.subclass: return usesdevicedependencies.subclass(*args_, **kwargs_) else: @@ -4245,20 +5201,33 @@ def factory(*args_, **kwargs_): def get_usesdevice(self): return self.usesdevice def set_usesdevice(self, usesdevice): self.usesdevice = usesdevice def add_usesdevice(self, value): self.usesdevice.append(value) - def insert_usesdevice(self, index, value): self.usesdevice[index] = value + def insert_usesdevice_at(self, index, value): self.usesdevice.insert(index, value) + def replace_usesdevice_at(self, index, value): self.usesdevice[index] = value usesdeviceProp = property(get_usesdevice, set_usesdevice) + def hasContent_(self): + if ( + self.usesdevice + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='usesdevicedependencies', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('usesdevicedependencies') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, 
name_='usesdevicedependencies') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='usesdevicedependencies', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -4272,38 +5241,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='usesdevicedepende eol_ = '' for usesdevice_ in self.usesdevice: usesdevice_.export(outfile, level, namespace_, name_='usesdevice', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.usesdevice - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='usesdevicedependencies'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('usesdevice=[\n') - level += 1 - for usesdevice_ in self.usesdevice: - showIndent(outfile, level) - outfile.write('model_.usesdevice(\n') - usesdevice_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -4311,15 +5255,17 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = usesdevice.factory() 
obj_.build(child_) self.usesdevice.append(obj_) + obj_.original_tagname_ = 'usesdevice' # end class usesdevicedependencies class usesdevice(GeneratedsSuper): subclass = None superclass = None - def __init__(self, type_=None, id_=None, propertyref=None, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): - self.type_ = _cast(None, type_) + def __init__(self, id_=None, type_=None, propertyref=None, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) + self.type_ = _cast(None, type_) if propertyref is None: self.propertyref = [] else: @@ -4341,6 +5287,11 @@ def __init__(self, type_=None, id_=None, propertyref=None, simpleref=None, simpl else: self.structsequenceref = structsequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, usesdevice) + if subclass is not None: + return subclass(*args_, **kwargs_) if usesdevice.subclass: return usesdevice.subclass(*args_, **kwargs_) else: @@ -4349,57 +5300,78 @@ def factory(*args_, **kwargs_): def get_propertyref(self): return self.propertyref def set_propertyref(self, propertyref): self.propertyref = propertyref def add_propertyref(self, value): self.propertyref.append(value) - def insert_propertyref(self, index, value): self.propertyref[index] = value + def insert_propertyref_at(self, index, value): self.propertyref.insert(index, value) + def replace_propertyref_at(self, index, value): self.propertyref[index] = value propertyrefProp = property(get_propertyref, set_propertyref) def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def 
replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) def get_simplesequenceref(self): return self.simplesequenceref def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref def add_simplesequenceref(self, value): self.simplesequenceref.append(value) - def insert_simplesequenceref(self, index, value): self.simplesequenceref[index] = value + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) def get_structref(self): return self.structref def set_structref(self, structref): self.structref = structref def add_structref(self, value): self.structref.append(value) - def insert_structref(self, index, value): self.structref[index] = value + def insert_structref_at(self, index, value): self.structref.insert(index, value) + def replace_structref_at(self, index, value): self.structref[index] = value structrefProp = property(get_structref, set_structref) def get_structsequenceref(self): return self.structsequenceref def set_structsequenceref(self, structsequenceref): self.structsequenceref = structsequenceref def add_structsequenceref(self, value): self.structsequenceref.append(value) - def insert_structsequenceref(self, index, value): self.structsequenceref[index] = value + def insert_structsequenceref_at(self, index, value): self.structsequenceref.insert(index, value) + def replace_structsequenceref_at(self, index, value): self.structsequenceref[index] = value structsequencerefProp = property(get_structsequenceref, set_structsequenceref) + def get_id(self): return self.id_ + def set_id(self, id_): self.id_ = id_ + idProp = property(get_id, set_id) def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = 
property(get_type, set_type) - def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id - idProp = property(get_id, set_id) + def hasContent_(self): + if ( + self.propertyref or + self.simpleref or + self.simplesequenceref or + self.structref or + self.structsequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='usesdevice', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('usesdevice') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='usesdevice') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='usesdevice', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='usesdevice'): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), 
input_name='id')), )) + if self.type_ is not None and 'type_' not in already_processed: + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='usesdevice', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -4415,127 +5387,48 @@ def exportChildren(self, outfile, level, namespace_='', name_='usesdevice', from structref_.export(outfile, level, namespace_, name_='structref', pretty_print=pretty_print) for structsequenceref_ in self.structsequenceref: structsequenceref_.export(outfile, level, namespace_, name_='structsequenceref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.propertyref or - self.simpleref or - self.simplesequenceref or - self.structref or - self.structsequenceref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='usesdevice'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('propertyref=[\n') - level += 1 - for propertyref_ in self.propertyref: - showIndent(outfile, level) - outfile.write('model_.propertyref(\n') - propertyref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - 
showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simplesequenceref=[\n') - level += 1 - for simplesequenceref_ in self.simplesequenceref: - showIndent(outfile, level) - outfile.write('model_.simplesequenceref(\n') - simplesequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structref=[\n') - level += 1 - for structref_ in self.structref: - showIndent(outfile, level) - outfile.write('model_.structref(\n') - structref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structsequenceref=[\n') - level += 1 - for structsequenceref_ in self.structsequenceref: - showIndent(outfile, level) - outfile.write('model_.structsequenceref(\n') - structsequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('type', node) - if value is not None and 'type' not in already_processed: - already_processed.append('type') - self.type_ = value value = find_attr_value_('id', node) if value is not 
None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value + value = find_attr_value_('type', node) + if value is not None and 'type' not in already_processed: + already_processed.add('type') + self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'propertyref': obj_ = propertyref.factory() obj_.build(child_) self.propertyref.append(obj_) + obj_.original_tagname_ = 'propertyref' elif nodeName_ == 'simpleref': obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' elif nodeName_ == 'simplesequenceref': obj_ = simplesequenceref.factory() obj_.build(child_) self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' elif nodeName_ == 'structref': obj_ = structref.factory() obj_.build(child_) self.structref.append(obj_) + obj_.original_tagname_ = 'structref' elif nodeName_ == 'structsequenceref': obj_ = structsequenceref.factory() obj_.build(child_) self.structsequenceref.append(obj_) + obj_.original_tagname_ = 'structsequenceref' # end class usesdevice @@ -4543,10 +5436,15 @@ class propertyref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, value=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.value = _cast(None, value) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, propertyref) + if subclass is not None: + return subclass(*args_, **kwargs_) if propertyref.subclass: return propertyref.subclass(*args_, **kwargs_) else: @@ -4558,89 +5456,89 @@ def set_refid(self, refid): self.refid = refid def get_value(self): return self.value def set_value(self, value): self.value = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, 
namespace_='', name_='propertyref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('propertyref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertyref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='propertyref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propertyref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), )) + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) def exportChildren(self, outfile, level, namespace_='', name_='propertyref', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return 
False - def exportLiteral(self, outfile, level, name_='propertyref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - showIndent(outfile, level) - outfile.write('value = "%s",\n' % (self.value,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value value = find_attr_value_('value', node) if value is not None and 'value' not in already_processed: - already_processed.append('value') + already_processed.add('value') self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class propertyref +GDSClassesMapping = { + 'softwareassembly': softwareassembly, +} + + USAGE_TEXT = """ Usage: python .py [ -s ] """ + def usage(): - print USAGE_TEXT + print(USAGE_TEXT) sys.exit(1) def get_root_tag(node): tag = Tag_pattern_.match(node.tag).groups()[-1] - rootClass = globals().get(tag) + rootClass = GDSClassesMapping.get(tag) + if rootClass is None: + rootClass = globals().get(tag) return tag, rootClass -def parse(inFileName): - doc = 
parsexml_(inFileName) +def parse(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -4650,16 +5548,18 @@ def parse(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_=rootTag, -## namespacedef_='', -## pretty_print=True) +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='', +## pretty_print=True) return rootObj -def parseString(inString): - from StringIO import StringIO - doc = parsexml_(StringIO(inString)) +def parseEtree(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -4669,14 +5569,47 @@ def parseString(inString): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_="softwareassembly", -## namespacedef_='') + mapping = {} + rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) + reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) +## if not silence: +## content = etree_.tostring( +## rootElement, pretty_print=True, +## xml_declaration=True, encoding="utf-8") +## sys.stdout.write(content) +## sys.stdout.write('\n') + return rootObj, rootElement, mapping, reverse_mapping + + +def parseString(inString, silence=False): + '''Parse a string, create the object tree, and export it. + + Arguments: + - inString -- A string. This XML fragment should not start + with an XML declaration containing an encoding. + - silence -- A boolean. If False, export the object. + Returns -- The root object in the tree. 
+ ''' + parser = None + rootNode= parsexmlstring_(inString, parser) + rootTag, rootClass = get_root_tag(rootNode) + if rootClass is None: + rootTag = 'softwareassembly' + rootClass = softwareassembly + rootObj = rootClass.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='') return rootObj -def parseLiteral(inFileName): - doc = parsexml_(inFileName) +def parseLiteral(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -4686,11 +5619,12 @@ def parseLiteral(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('#from sad import *\n\n') -## sys.stdout.write('import sad as model_\n\n') -## sys.stdout.write('rootObj = model_.rootTag(\n') -## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) -## sys.stdout.write(')\n') +## if not silence: +## sys.stdout.write('#from sad import *\n\n') +## sys.stdout.write('import sad as model_\n\n') +## sys.stdout.write('rootObj = model_.rootClass(\n') +## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) +## sys.stdout.write(')\n') return rootObj @@ -4708,6 +5642,7 @@ def main(): __all__ = [ + "affinity", "assemblycontroller", "componentfile", "componentfileref", @@ -4720,6 +5655,7 @@ def main(): "componentsupportedinterface", "connectinterface", "connections", + "devicerequires", "devicethatloadedthiscomponentref", "deviceusedbyapplication", "deviceusedbythiscomponentref", @@ -4729,13 +5665,20 @@ def main(): "findby", "findcomponent", "hostcollocation", + "hostcollocationcp", + "hostcollocationcpud", + "idvalue", "localfile", + "loggingconfig", "namingservice", + "option", + "options", "partitioning", "port", "property", "propertyref", "providesport", + "reservation", 
"resourcefactoryproperties", "simpleref", "simplesequenceref", @@ -4745,6 +5688,7 @@ def main(): "structvalue", "usesdevice", "usesdevicedependencies", + "usesdeviceref", "usesport", "values" - ] +] diff --git a/redhawk/src/base/framework/python/ossie/parsers/scd.py b/redhawk/src/base/framework/python/ossie/parsers/scd.py index 28d2c022f..ddf37dc84 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/scd.py +++ b/redhawk/src/base/framework/python/ossie/parsers/scd.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -# +# -*- coding: utf-8 -*- + # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # @@ -18,70 +18,97 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. -# # -# Generated Thu Sep 12 14:49:31 2013 by generateDS.py version 2.7c. +# Generated Mon Jul 30 12:29:35 2018 by generateDS.py version 2.29.14. 
+# Python 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] +# +# Command line options: +# ('-f', '') +# ('--silence', '') +# ('-m', '') +# ('-o', 'ossie/parsers/scd.py') +# +# Command line arguments: +# ../../../xml/xsd/scd.xsd +# +# Command line: +# /usr/bin/generateDS.py -f --silence -m -o "ossie/parsers/scd.py" ../../../xml/xsd/scd.xsd +# +# Current working directory (os.getcwd()): +# python # import sys -import getopt import re as re_ - -etree_ = None -Verbose_import_ = False -( XMLParser_import_none, XMLParser_import_lxml, - XMLParser_import_elementtree - ) = range(3) -XMLParser_import_library = None +import base64 +import datetime as datetime_ +import warnings as warnings_ try: - # lxml from lxml import etree as etree_ - XMLParser_import_library = XMLParser_import_lxml - if Verbose_import_: - print("running with lxml.etree") except ImportError: - try: - # cElementTree from Python 2.5+ - import xml.etree.cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree on Python 2.5+") - except ImportError: - try: - # ElementTree from Python 2.5+ - import xml.etree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree on Python 2.5+") - except ImportError: - try: - # normal cElementTree install - import cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree") - except ImportError: - try: - # normal ElementTree install - import elementtree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree") - except ImportError: - raise ImportError("Failed to import ElementTree from any known place") - -def parsexml_(*args, **kwargs): - if (XMLParser_import_library == XMLParser_import_lxml and - 'parser' not in kwargs): + from 
xml.etree import ElementTree as etree_ + + +Validate_simpletypes_ = True +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str + + +def parsexml_(infile, parser=None, **kwargs): + if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. - kwargs['parser'] = etree_.ETCompatXMLParser() - doc = etree_.parse(*args, **kwargs) + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + doc = etree_.parse(infile, parser=parser, **kwargs) return doc +def parsexmlstring_(instring, parser=None, **kwargs): + if parser is None: + # Use the lxml ElementTree compatible parser so that, e.g., + # we ignore comments. + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + element = etree_.fromstring(instring, parser=parser, **kwargs) + return element + +# +# Namespace prefix definition table (and other attributes, too) # -# User methods +# The module generatedsnamespaces, if it is importable, must contain +# a dictionary named GeneratedsNamespaceDefs. This Python dictionary +# should map element type names (strings) to XML schema namespace prefix +# definitions. The export method for any class for which there is +# a namespace prefix definition, will export that definition in the +# XML representation of that element. See the export method of +# any generated element type class for a example of the use of this +# table. 
+# A sample table is: +# +# # File: generatedsnamespaces.py +# +# GenerateDSNamespaceDefs = { +# "ElementtypeA": "http://www.xxx.com/namespaceA", +# "ElementtypeB": "http://www.xxx.com/namespaceB", +# } +# + +try: + from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_ +except ImportError: + GenerateDSNamespaceDefs_ = {} + +# +# The root super-class for element type classes # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class @@ -89,67 +116,273 @@ def parsexml_(*args, **kwargs): try: from generatedssuper import GeneratedsSuper -except ImportError, exp: - +except ImportError as exp: + class GeneratedsSuper(object): + tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$') + class _FixedOffsetTZ(datetime_.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime_.timedelta(minutes=offset) + self.__name = name + def utcoffset(self, dt): + return self.__offset + def tzname(self, dt): + return self.__name + def dst(self, dt): + return None def gds_format_string(self, input_data, input_name=''): return input_data - def gds_validate_string(self, input_data, node, input_name=''): + def gds_validate_string(self, input_data, node=None, input_name=''): + if not input_data: + return '' + else: + return input_data + def gds_format_base64(self, input_data, input_name=''): + return base64.b64encode(input_data) + def gds_validate_base64(self, input_data, node=None, input_name=''): return input_data def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data - def gds_validate_integer(self, input_data, node, input_name=''): + def gds_validate_integer(self, input_data, node=None, input_name=''): return input_data def gds_format_integer_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_integer_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def 
gds_validate_integer_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + int(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of integers') - return input_data + return values def gds_format_float(self, input_data, input_name=''): - return '%f' % input_data - def gds_validate_float(self, input_data, node, input_name=''): + return ('%.15f' % input_data).rstrip('0') + def gds_validate_float(self, input_data, node=None, input_name=''): return input_data def gds_format_float_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_float_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_float_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of floats') - return input_data + return values def gds_format_double(self, input_data, input_name=''): return '%e' % input_data - def gds_validate_double(self, input_data, node, input_name=''): + def gds_validate_double(self, input_data, node=None, input_name=''): return input_data def gds_format_double_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_double_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_double_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of doubles') - return input_data + return values def gds_format_boolean(self, input_data, input_name=''): - return '%s' % input_data - def 
gds_validate_boolean(self, input_data, node, input_name=''): + return ('%s' % input_data).lower() + def gds_validate_boolean(self, input_data, node=None, input_name=''): return input_data def gds_format_boolean_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_boolean_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_boolean_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: if value not in ('true', '1', 'false', '0', ): - raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")') + raise_parse_error( + node, + 'Requires sequence of booleans ' + '("true", "1", "false", "0")') + return values + def gds_validate_datetime(self, input_data, node=None, input_name=''): + return input_data + def gds_format_datetime(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + @classmethod + def gds_parse_datetime(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: 
+ results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + time_parts = input_data.split('.') + if len(time_parts) > 1: + micro_seconds = int(float('0.' + time_parts[1]) * 1000000) + input_data = '%s.%s' % (time_parts[0], micro_seconds, ) + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt + def gds_validate_date(self, input_data, node=None, input_name=''): + return input_data + def gds_format_date(self, input_data, input_name=''): + _svalue = '%04d-%02d-%02d' % ( + input_data.year, + input_data.month, + input_data.day, + ) + try: + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format( + hours, minutes) + except AttributeError: + pass + return _svalue + @classmethod + def gds_parse_date(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + dt = datetime_.datetime.strptime(input_data, 
'%Y-%m-%d') + dt = dt.replace(tzinfo=tz) + return dt.date() + def gds_validate_time(self, input_data, node=None, input_name=''): return input_data + def gds_format_time(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%02d:%02d:%02d' % ( + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%02d:%02d:%02d.%s' % ( + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + def gds_validate_simple_patterns(self, patterns, target): + # pat is a list of lists of strings/patterns. 
We should: + # - AND the outer elements + # - OR the inner elements + found1 = True + for patterns1 in patterns: + found2 = False + for patterns2 in patterns1: + if re_.search(patterns2, target) is not None: + found2 = True + break + if not found2: + found1 = False + break + return found1 + @classmethod + def gds_parse_time(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + if len(input_data.split('.')) > 1: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt.time() def gds_str_lower(self, instring): return instring.lower() def get_path_(self, node): @@ -180,6 +413,38 @@ def get_class_obj_(self, node, default_class=None): return class_obj1 def gds_build_any(self, node, type_name=None): return None + @classmethod + def gds_reverse_node_mapping(cls, mapping): + return dict(((v, k) for k, v in mapping.iteritems())) + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring + @staticmethod + def convert_unicode(instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result + def __eq__(self, other): + if type(self) != type(other): + return False + return self.__dict__ == other.__dict__ + def __ne__(self, other): + return not self.__eq__(other) + + def 
getSubclassFromModule_(module, class_): + '''Get the subclass of a class from a specific module.''' + name = class_.__name__ + 'Sub' + if hasattr(module, name): + return getattr(module, name) + else: + return None # @@ -205,29 +470,50 @@ def gds_build_any(self, node, type_name=None): Tag_pattern_ = re_.compile(r'({.*})?(.*)') String_cleanup_pat_ = re_.compile(r"[\n\r\s]+") Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)') +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +# Change this to redirect the generated superclass module to use a +# specific subclass module. +CurrentSubclassModule_ = None # # Support/utility functions. # + def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write(' ') + def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." if not inStr: return '' - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') @@ -240,6 +526,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -255,6 +542,7 @@ def quote_python(inStr): else: return '"""%s"""' % s1 + def get_all_text_(node): if node.text is not None: text = node.text @@ -265,6 +553,7 @@ def get_all_text_(node): text += child.tail return text + def 
find_attr_value_(attr_name, node): attrs = node.attrib attr_parts = attr_name.split(':') @@ -282,11 +571,9 @@ def find_attr_value_(attr_name, node): class GDSParseError(Exception): pass + def raise_parse_error(node, msg): - if XMLParser_import_library == XMLParser_import_lxml: - msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) - else: - msg = '%s (element %s)' % (msg, node.tag, ) + msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) raise GDSParseError(msg) @@ -305,6 +592,7 @@ class MixedContainer: TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + TypeBase64 = 8 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type @@ -318,49 +606,104 @@ def getValue(self): return self.value def getName(self): return self.name - def export(self, outfile, level, name, namespace, pretty_print=True): + def export(self, outfile, level, name, namespace, + pretty_print=True): if self.category == MixedContainer.CategoryText: # Prevent exporting empty content as empty lines. 
- if self.value.strip(): + if self.value.strip(): outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace, name, pretty_print) + self.value.export( + outfile, level, namespace, name, + pretty_print=pretty_print) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) + outfile.write('<%s>%s' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) + outfile.write('<%s>%d' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) + outfile.write('<%s>%f' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) + outfile.write('<%s>%g' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeBase64: + outfile.write('<%s>%s' % ( + self.name, + base64.b64encode(self.value), + self.name)) + def to_etree(self, element): + if self.category == MixedContainer.CategoryText: + # Prevent exporting empty content as empty lines. 
+ if self.value.strip(): + if len(element) > 0: + if element[-1].tail is None: + element[-1].tail = self.value + else: + element[-1].tail += self.value + else: + if element.text is None: + element.text = self.value + else: + element.text += self.value + elif self.category == MixedContainer.CategorySimple: + subelement = etree_.SubElement( + element, '%s' % self.name) + subelement.text = self.to_etree_simple() + else: # category == MixedContainer.CategoryComplex + self.value.to_etree(element) + def to_etree_simple(self): + if self.content_type == MixedContainer.TypeString: + text = self.value + elif (self.content_type == MixedContainer.TypeInteger or + self.content_type == MixedContainer.TypeBoolean): + text = '%d' % self.value + elif (self.content_type == MixedContainer.TypeFloat or + self.content_type == MixedContainer.TypeDecimal): + text = '%f' % self.value + elif self.content_type == MixedContainer.TypeDouble: + text = '%g' % self.value + elif self.content_type == MixedContainer.TypeBase64: + text = '%s' % base64.b64encode(self.value) + return text def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \ - (self.category, self.content_type, self.name,)) + outfile.write( + 
'model_.MixedContainer(%d, %d, "%s",\n' % ( + self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class MemberSpec_(object): - def __init__(self, name='', data_type='', container=0): + def __init__(self, name='', data_type='', container=0, + optional=0, child_attrs=None, choice=None): self.name = name self.data_type = data_type self.container = container + self.child_attrs = child_attrs + self.choice = choice + self.optional = optional def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -375,6 +718,13 @@ def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container + def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs + def get_child_attrs(self): return self.child_attrs + def set_choice(self, choice): self.choice = choice + def get_choice(self): return self.choice + def set_optional(self, optional): self.optional = optional + def get_optional(self): return self.optional + def _cast(typ, value): if typ is None or value is None: @@ -385,10 +735,12 @@ def _cast(typ, value): # Data representation classes. 
# + class softwarecomponent(GeneratedsSuper): subclass = None superclass = None def __init__(self, corbaversion=None, componentrepid=None, componenttype=None, componentfeatures=None, interfaces=None, propertyfile=None): + self.original_tagname_ = None self.corbaversion = corbaversion self.componentrepid = componentrepid self.componenttype = componenttype @@ -396,6 +748,11 @@ def __init__(self, corbaversion=None, componentrepid=None, componenttype=None, c self.interfaces = interfaces self.propertyfile = propertyfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, softwarecomponent) + if subclass is not None: + return subclass(*args_, **kwargs_) if softwarecomponent.subclass: return softwarecomponent.subclass(*args_, **kwargs_) else: @@ -419,18 +776,35 @@ def set_interfaces(self, interfaces): self.interfaces = interfaces def get_propertyfile(self): return self.propertyfile def set_propertyfile(self, propertyfile): self.propertyfile = propertyfile propertyfileProp = property(get_propertyfile, set_propertyfile) + def hasContent_(self): + if ( + self.corbaversion is not None or + self.componentrepid is not None or + self.componenttype is not None or + self.componentfeatures is not None or + self.interfaces is not None or + self.propertyfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='softwarecomponent', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('softwarecomponent') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, 
already_processed, namespace_, name_='softwarecomponent') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='softwarecomponent', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -444,73 +818,25 @@ def exportChildren(self, outfile, level, namespace_='', name_='softwarecomponent eol_ = '' if self.corbaversion is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%scorbaversion>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.corbaversion).encode(ExternalEncoding), input_name='corbaversion'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.corbaversion), input_name='corbaversion')), eol_)) if self.componentrepid is not None: self.componentrepid.export(outfile, level, namespace_, name_='componentrepid', pretty_print=pretty_print) if self.componenttype is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%scomponenttype>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.componenttype).encode(ExternalEncoding), input_name='componenttype'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.componenttype), input_name='componenttype')), eol_)) if self.componentfeatures is not None: self.componentfeatures.export(outfile, level, namespace_, name_='componentfeatures', pretty_print=pretty_print) if self.interfaces is not None: self.interfaces.export(outfile, level, namespace_, name_='interfaces', pretty_print=pretty_print) if self.propertyfile is not None: self.propertyfile.export(outfile, level, namespace_, name_='propertyfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.corbaversion is not None or - self.componentrepid is not None or - self.componenttype is not None or - 
self.componentfeatures is not None or - self.interfaces is not None or - self.propertyfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='softwarecomponent'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.corbaversion is not None: - showIndent(outfile, level) - outfile.write('corbaversion=%s,\n' % quote_python(self.corbaversion).encode(ExternalEncoding)) - if self.componentrepid is not None: - showIndent(outfile, level) - outfile.write('componentrepid=model_.componentRepId(\n') - self.componentrepid.exportLiteral(outfile, level, name_='componentrepid') - showIndent(outfile, level) - outfile.write('),\n') - if self.componenttype is not None: - showIndent(outfile, level) - outfile.write('componenttype=%s,\n' % quote_python(self.componenttype).encode(ExternalEncoding)) - if self.componentfeatures is not None: - showIndent(outfile, level) - outfile.write('componentfeatures=model_.componentFeatures(\n') - self.componentfeatures.exportLiteral(outfile, level, name_='componentfeatures') - showIndent(outfile, level) - outfile.write('),\n') - if self.interfaces is not None: - showIndent(outfile, level) - outfile.write('interfaces=model_.interfaces(\n') - self.interfaces.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.propertyfile is not None: - showIndent(outfile, level) - outfile.write('propertyfile=model_.propertyFile(\n') - self.propertyfile.exportLiteral(outfile, level, name_='propertyfile') - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: 
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -521,7 +847,8 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentrepid': obj_ = componentRepId.factory() obj_.build(child_) - self.set_componentrepid(obj_) + self.componentrepid = obj_ + obj_.original_tagname_ = 'componentrepid' elif nodeName_ == 'componenttype': componenttype_ = child_.text componenttype_ = self.gds_validate_string(componenttype_, node, 'componenttype') @@ -529,15 +856,18 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'componentfeatures': obj_ = componentFeatures.factory() obj_.build(child_) - self.set_componentfeatures(obj_) + self.componentfeatures = obj_ + obj_.original_tagname_ = 'componentfeatures' elif nodeName_ == 'interfaces': obj_ = interfaces.factory() obj_.build(child_) - self.set_interfaces(obj_) + self.interfaces = obj_ + obj_.original_tagname_ = 'interfaces' elif nodeName_ == 'propertyfile': obj_ = propertyFile.factory() obj_.build(child_) - self.set_propertyfile(obj_) + self.propertyfile = obj_ + obj_.original_tagname_ = 'propertyfile' # end class softwarecomponent @@ -545,9 +875,15 @@ class propertyFile(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, localfile=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, propertyFile) + if subclass is not None: + return subclass(*args_, **kwargs_) if propertyFile.subclass: return propertyFile.subclass(*args_, **kwargs_) else: @@ -559,26 +895,38 @@ def set_localfile(self, localfile): self.localfile = localfile def get_type(self): return 
self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='propertyFile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('propertyFile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertyFile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='propertyFile', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propertyFile'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='propertyFile', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -586,45 +934,24 @@ def exportChildren(self, outfile, level, namespace_='', name_='propertyFile', fr eol_ = '' if self.localfile is not None: 
self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='propertyFile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localFile(\n') - self.localfile.exportLiteral(outfile, level, name_='localfile') - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localFile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class propertyFile @@ -632,9 +959,14 @@ class localFile(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if 
CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, localFile) + if subclass is not None: + return subclass(*args_, **kwargs_) if localFile.subclass: return localFile.subclass(*args_, **kwargs_) else: @@ -643,55 +975,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='localFile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('localFile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='localFile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='localFile', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='localFile'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', 
name_='localFile', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='localFile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -702,9 +1029,14 @@ class componentRepId(GeneratedsSuper): subclass = None superclass = None def __init__(self, repid=None): + self.original_tagname_ = None self.repid = _cast(None, repid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentRepId) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentRepId.subclass: return componentRepId.subclass(*args_, **kwargs_) else: @@ -713,55 +1045,50 @@ def factory(*args_, **kwargs_): def get_repid(self): return self.repid def set_repid(self, repid): self.repid = repid repidProp = property(get_repid, set_repid) + def hasContent_(self): + if ( + + ): 
+ return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentRepId', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentRepId') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentRepId') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentRepId', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='componentRepId'): if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - outfile.write(' repid=%s' % (self.gds_format_string(quote_attrib(self.repid).encode(ExternalEncoding), input_name='repid'), )) + already_processed.add('repid') + outfile.write(' repid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.repid), input_name='repid')), )) def exportChildren(self, outfile, level, namespace_='', name_='componentRepId', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentRepId'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, 
name_): - if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - showIndent(outfile, level) - outfile.write('repid = "%s",\n' % (self.repid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('repid', node) if value is not None and 'repid' not in already_processed: - already_processed.append('repid') + already_processed.add('repid') self.repid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -772,12 +1099,18 @@ class componentFeatures(GeneratedsSuper): subclass = None superclass = None def __init__(self, supportsinterface=None, ports=None): + self.original_tagname_ = None if supportsinterface is None: self.supportsinterface = [] else: self.supportsinterface = supportsinterface self.ports = ports def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, componentFeatures) + if subclass is not None: + return subclass(*args_, **kwargs_) if componentFeatures.subclass: return componentFeatures.subclass(*args_, **kwargs_) else: @@ -786,23 +1119,37 @@ def factory(*args_, **kwargs_): def get_supportsinterface(self): return self.supportsinterface def set_supportsinterface(self, supportsinterface): self.supportsinterface = supportsinterface def add_supportsinterface(self, value): self.supportsinterface.append(value) - def insert_supportsinterface(self, index, value): self.supportsinterface[index] = value + def insert_supportsinterface_at(self, index, value): self.supportsinterface.insert(index, value) + def 
replace_supportsinterface_at(self, index, value): self.supportsinterface[index] = value supportsinterfaceProp = property(get_supportsinterface, set_supportsinterface) def get_ports(self): return self.ports def set_ports(self, ports): self.ports = ports portsProp = property(get_ports, set_ports) + def hasContent_(self): + if ( + self.supportsinterface or + self.ports is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='componentFeatures', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('componentFeatures') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='componentFeatures') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='componentFeatures', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -818,45 +1165,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='componentFeatures supportsinterface_.export(outfile, level, namespace_, name_='supportsinterface', pretty_print=pretty_print) if self.ports is not None: self.ports.export(outfile, level, namespace_, name_='ports', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.supportsinterface or - self.ports is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='componentFeatures'): - level += 1 - 
self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('supportsinterface=[\n') - level += 1 - for supportsinterface_ in self.supportsinterface: - showIndent(outfile, level) - outfile.write('model_.supportsInterface(\n') - supportsinterface_.exportLiteral(outfile, level, name_='supportsInterface') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.ports is not None: - showIndent(outfile, level) - outfile.write('ports=model_.ports(\n') - self.ports.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -864,93 +1179,91 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = supportsInterface.factory() obj_.build(child_) self.supportsinterface.append(obj_) + obj_.original_tagname_ = 'supportsinterface' elif nodeName_ == 'ports': obj_ = ports.factory() obj_.build(child_) - self.set_ports(obj_) + self.ports = obj_ + obj_.original_tagname_ = 'ports' # end class componentFeatures class supportsInterface(GeneratedsSuper): subclass = None superclass = None - def __init__(self, supportsname=None, repid=None): - self.supportsname = _cast(None, supportsname) + def __init__(self, repid=None, supportsname=None): + self.original_tagname_ = None self.repid = _cast(None, 
repid) - pass + self.supportsname = _cast(None, supportsname) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, supportsInterface) + if subclass is not None: + return subclass(*args_, **kwargs_) if supportsInterface.subclass: return supportsInterface.subclass(*args_, **kwargs_) else: return supportsInterface(*args_, **kwargs_) factory = staticmethod(factory) - def get_supportsname(self): return self.supportsname - def set_supportsname(self, supportsname): self.supportsname = supportsname - supportsnameProp = property(get_supportsname, set_supportsname) def get_repid(self): return self.repid def set_repid(self, repid): self.repid = repid repidProp = property(get_repid, set_repid) + def get_supportsname(self): return self.supportsname + def set_supportsname(self, supportsname): self.supportsname = supportsname + supportsnameProp = property(get_supportsname, set_supportsname) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='supportsInterface', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('supportsInterface') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='supportsInterface') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='supportsInterface', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, 
eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='supportsInterface'): - if self.supportsname is not None and 'supportsname' not in already_processed: - already_processed.append('supportsname') - outfile.write(' supportsname=%s' % (self.gds_format_string(quote_attrib(self.supportsname).encode(ExternalEncoding), input_name='supportsname'), )) if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - outfile.write(' repid=%s' % (self.gds_format_string(quote_attrib(self.repid).encode(ExternalEncoding), input_name='repid'), )) - def exportChildren(self, outfile, level, namespace_='', name_='supportsInterface', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='supportsInterface'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('repid') + outfile.write(' repid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.repid), input_name='repid')), )) if self.supportsname is not None and 'supportsname' not in already_processed: - already_processed.append('supportsname') - showIndent(outfile, level) - outfile.write('supportsname = "%s",\n' % (self.supportsname,)) - if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - showIndent(outfile, level) - outfile.write('repid = "%s",\n' % (self.repid,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('supportsname') + outfile.write(' supportsname=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.supportsname), input_name='supportsname')), )) + def exportChildren(self, outfile, level, 
namespace_='', name_='supportsInterface', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('supportsname', node) - if value is not None and 'supportsname' not in already_processed: - already_processed.append('supportsname') - self.supportsname = value value = find_attr_value_('repid', node) if value is not None and 'repid' not in already_processed: - already_processed.append('repid') + already_processed.add('repid') self.repid = value + value = find_attr_value_('supportsname', node) + if value is not None and 'supportsname' not in already_processed: + already_processed.add('supportsname') + self.supportsname = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class supportsInterface @@ -960,6 +1273,7 @@ class ports(GeneratedsSuper): subclass = None superclass = None def __init__(self, provides=None, uses=None): + self.original_tagname_ = None if provides is None: self.provides = [] else: @@ -969,6 +1283,11 @@ def __init__(self, provides=None, uses=None): else: self.uses = uses def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, ports) + if subclass is not None: + return subclass(*args_, **kwargs_) if ports.subclass: return ports.subclass(*args_, **kwargs_) else: @@ -977,25 +1296,40 @@ def factory(*args_, **kwargs_): def get_provides(self): return self.provides def set_provides(self, provides): self.provides = provides def add_provides(self, value): self.provides.append(value) - def insert_provides(self, index, value): self.provides[index] = value + def insert_provides_at(self, 
index, value): self.provides.insert(index, value) + def replace_provides_at(self, index, value): self.provides[index] = value providesProp = property(get_provides, set_provides) def get_uses(self): return self.uses def set_uses(self, uses): self.uses = uses def add_uses(self, value): self.uses.append(value) - def insert_uses(self, index, value): self.uses[index] = value + def insert_uses_at(self, index, value): self.uses.insert(index, value) + def replace_uses_at(self, index, value): self.uses[index] = value usesProp = property(get_uses, set_uses) + def hasContent_(self): + if ( + self.provides or + self.uses + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='ports', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('ports') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='ports') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='ports', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1011,51 +1345,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='ports', fromsubcl provides_.export(outfile, level, namespace_, name_='provides', pretty_print=pretty_print) for uses_ in self.uses: uses_.export(outfile, level, namespace_, name_='uses', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.provides or - self.uses - ): - return True 
- else: - return False - def exportLiteral(self, outfile, level, name_='ports'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('provides=[\n') - level += 1 - for provides_ in self.provides: - showIndent(outfile, level) - outfile.write('model_.provides(\n') - provides_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('uses=[\n') - level += 1 - for uses_ in self.uses: - showIndent(outfile, level) - outfile.write('model_.uses(\n') - uses_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -1063,25 +1359,33 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = provides.factory() obj_.build(child_) self.provides.append(obj_) + obj_.original_tagname_ = 'provides' elif nodeName_ == 'uses': obj_ = uses.factory() obj_.build(child_) self.uses.append(obj_) + obj_.original_tagname_ = 'uses' # end class ports class provides(GeneratedsSuper): subclass = None superclass = None - def __init__(self, providesname=None, repid=None, porttype=None, description=None): - self.providesname = _cast(None, providesname) + 
def __init__(self, repid=None, providesname=None, description=None, porttype=None): + self.original_tagname_ = None self.repid = _cast(None, repid) + self.providesname = _cast(None, providesname) + self.description = description if porttype is None: self.porttype = [] else: self.porttype = porttype - self.description = description def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, provides) + if subclass is not None: + return subclass(*args_, **kwargs_) if provides.subclass: return provides.subclass(*args_, **kwargs_) else: @@ -1093,37 +1397,51 @@ def set_description(self, description): self.description = description def get_porttype(self): return self.porttype def set_porttype(self, porttype): self.porttype = porttype def add_porttype(self, value): self.porttype.append(value) - def insert_porttype(self, index, value): self.porttype[index] = value + def insert_porttype_at(self, index, value): self.porttype.insert(index, value) + def replace_porttype_at(self, index, value): self.porttype[index] = value porttypeProp = property(get_porttype, set_porttype) - def get_providesname(self): return self.providesname - def set_providesname(self, providesname): self.providesname = providesname - providesnameProp = property(get_providesname, set_providesname) def get_repid(self): return self.repid def set_repid(self, repid): self.repid = repid repidProp = property(get_repid, set_repid) + def get_providesname(self): return self.providesname + def set_providesname(self, providesname): self.providesname = providesname + providesnameProp = property(get_providesname, set_providesname) + def hasContent_(self): + if ( + self.description is not None or + self.porttype + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='provides', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('provides') + if imported_ns_def_ is not 
None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='provides') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='provides', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='provides'): - if self.providesname is not None and 'providesname' not in already_processed: - already_processed.append('providesname') - outfile.write(' providesname=%s' % (self.gds_format_string(quote_attrib(self.providesname).encode(ExternalEncoding), input_name='providesname'), )) if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - outfile.write(' repid=%s' % (self.gds_format_string(quote_attrib(self.repid).encode(ExternalEncoding), input_name='repid'), )) + already_processed.add('repid') + outfile.write(' repid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.repid), input_name='repid')), )) + if self.providesname is not None and 'providesname' not in already_processed: + already_processed.add('providesname') + outfile.write(' providesname=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.providesname), input_name='providesname')), )) def exportChildren(self, outfile, level, namespace_='', name_='provides', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1131,85 +1449,56 @@ def 
exportChildren(self, outfile, level, namespace_='', name_='provides', fromsu eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) for porttype_ in self.porttype: porttype_.export(outfile, level, namespace_, name_='porttype', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.porttype - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='provides'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.providesname is not None and 'providesname' not in already_processed: - already_processed.append('providesname') - showIndent(outfile, level) - outfile.write('providesname = "%s",\n' % (self.providesname,)) - if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - showIndent(outfile, level) - outfile.write('repid = "%s",\n' % (self.repid,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('porttype=[\n') - level += 1 - for porttype_ in self.porttype: - showIndent(outfile, level) - outfile.write('model_.portType(\n') - porttype_.exportLiteral(outfile, level, name_='portType') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - 
self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('providesname', node) - if value is not None and 'providesname' not in already_processed: - already_processed.append('providesname') - self.providesname = value value = find_attr_value_('repid', node) if value is not None and 'repid' not in already_processed: - already_processed.append('repid') + already_processed.add('repid') self.repid = value + value = find_attr_value_('providesname', node) + if value is not None and 'providesname' not in already_processed: + already_processed.add('providesname') + self.providesname = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': description_ = child_.text description_ = self.gds_validate_string(description_, node, 'description') self.description = description_ - if nodeName_ == 'porttype': + elif nodeName_ == 'porttype': obj_ = portType.factory() obj_.build(child_) self.porttype.append(obj_) + obj_.original_tagname_ = 'porttype' # end class provides class uses(GeneratedsSuper): subclass = None superclass = None - def __init__(self, usesname=None, repid=None, porttype=None, description=None): - self.usesname = _cast(None, usesname) + def __init__(self, repid=None, usesname=None, description=None, porttype=None): + self.original_tagname_ = None self.repid = _cast(None, repid) + self.usesname = _cast(None, usesname) + self.description = description if porttype is None: self.porttype = [] else: self.porttype = porttype - self.description = description def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, uses) + if subclass is not None: + return 
subclass(*args_, **kwargs_) if uses.subclass: return uses.subclass(*args_, **kwargs_) else: @@ -1221,37 +1510,51 @@ def set_description(self, description): self.description = description def get_porttype(self): return self.porttype def set_porttype(self, porttype): self.porttype = porttype def add_porttype(self, value): self.porttype.append(value) - def insert_porttype(self, index, value): self.porttype[index] = value + def insert_porttype_at(self, index, value): self.porttype.insert(index, value) + def replace_porttype_at(self, index, value): self.porttype[index] = value porttypeProp = property(get_porttype, set_porttype) - def get_usesname(self): return self.usesname - def set_usesname(self, usesname): self.usesname = usesname - usesnameProp = property(get_usesname, set_usesname) def get_repid(self): return self.repid def set_repid(self, repid): self.repid = repid repidProp = property(get_repid, set_repid) + def get_usesname(self): return self.usesname + def set_usesname(self, usesname): self.usesname = usesname + usesnameProp = property(get_usesname, set_usesname) + def hasContent_(self): + if ( + self.description is not None or + self.porttype + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='uses', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('uses') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='uses') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, 
level + 1, namespace_='', name_='uses', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='uses'): - if self.usesname is not None and 'usesname' not in already_processed: - already_processed.append('usesname') - outfile.write(' usesname=%s' % (self.gds_format_string(quote_attrib(self.usesname).encode(ExternalEncoding), input_name='usesname'), )) if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - outfile.write(' repid=%s' % (self.gds_format_string(quote_attrib(self.repid).encode(ExternalEncoding), input_name='repid'), )) + already_processed.add('repid') + outfile.write(' repid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.repid), input_name='repid')), )) + if self.usesname is not None and 'usesname' not in already_processed: + already_processed.add('usesname') + outfile.write(' usesname=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.usesname), input_name='usesname')), )) def exportChildren(self, outfile, level, namespace_='', name_='uses', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1259,70 +1562,35 @@ def exportChildren(self, outfile, level, namespace_='', name_='uses', fromsubcla eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) for porttype_ in self.porttype: porttype_.export(outfile, level, namespace_, name_='porttype', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.porttype - ): 
- return True - else: - return False - def exportLiteral(self, outfile, level, name_='uses'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.usesname is not None and 'usesname' not in already_processed: - already_processed.append('usesname') - showIndent(outfile, level) - outfile.write('usesname = "%s",\n' % (self.usesname,)) - if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - showIndent(outfile, level) - outfile.write('repid = "%s",\n' % (self.repid,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('porttype=[\n') - level += 1 - for porttype_ in self.porttype: - showIndent(outfile, level) - outfile.write('model_.portType(\n') - porttype_.exportLiteral(outfile, level, name_='portType') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('usesname', node) - if value is not None and 'usesname' not in already_processed: - already_processed.append('usesname') - self.usesname = value value = find_attr_value_('repid', node) if value is not None and 'repid' not in already_processed: - already_processed.append('repid') + already_processed.add('repid') self.repid = value + value = 
find_attr_value_('usesname', node) + if value is not None and 'usesname' not in already_processed: + already_processed.add('usesname') + self.usesname = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': description_ = child_.text description_ = self.gds_validate_string(description_, node, 'description') self.description = description_ - if nodeName_ == 'porttype': + elif nodeName_ == 'porttype': obj_ = portType.factory() obj_.build(child_) self.porttype.append(obj_) + obj_.original_tagname_ = 'porttype' # end class uses @@ -1330,9 +1598,14 @@ class portType(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, portType) + if subclass is not None: + return subclass(*args_, **kwargs_) if portType.subclass: return portType.subclass(*args_, **kwargs_) else: @@ -1341,55 +1614,50 @@ def factory(*args_, **kwargs_): def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='portType', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('portType') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='portType') if self.hasContent_(): outfile.write('>%s' % (eol_, )) 
- self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='portType', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='portType'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='portType', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='portType'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') 
self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1400,11 +1668,17 @@ class interfaces(GeneratedsSuper): subclass = None superclass = None def __init__(self, interface=None): + self.original_tagname_ = None if interface is None: self.interface = [] else: self.interface = interface def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, interfaces) + if subclass is not None: + return subclass(*args_, **kwargs_) if interfaces.subclass: return interfaces.subclass(*args_, **kwargs_) else: @@ -1413,20 +1687,33 @@ def factory(*args_, **kwargs_): def get_interface(self): return self.interface def set_interface(self, interface): self.interface = interface def add_interface(self, value): self.interface.append(value) - def insert_interface(self, index, value): self.interface[index] = value + def insert_interface_at(self, index, value): self.interface.insert(index, value) + def replace_interface_at(self, index, value): self.interface[index] = value interfaceProp = property(get_interface, set_interface) + def hasContent_(self): + if ( + self.interface + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='interfaces', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('interfaces') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='interfaces') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, 
pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='interfaces', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -1440,38 +1727,13 @@ def exportChildren(self, outfile, level, namespace_='', name_='interfaces', from eol_ = '' for interface_ in self.interface: interface_.export(outfile, level, namespace_, name_='interface', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.interface - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='interfaces'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('interface=[\n') - level += 1 - for interface_ in self.interface: - showIndent(outfile, level) - outfile.write('model_.interface(\n') - interface_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -1479,20 +1741,27 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = interface.factory() obj_.build(child_) self.interface.append(obj_) + obj_.original_tagname_ = 'interface' # end class interfaces class interface(GeneratedsSuper): subclass = None superclass = None - def __init__(self, 
name=None, repid=None, inheritsinterface=None): - self.name = _cast(None, name) + def __init__(self, repid=None, name=None, inheritsinterface=None): + self.original_tagname_ = None self.repid = _cast(None, repid) + self.name = _cast(None, name) if inheritsinterface is None: self.inheritsinterface = [] else: self.inheritsinterface = inheritsinterface def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, interface) + if subclass is not None: + return subclass(*args_, **kwargs_) if interface.subclass: return interface.subclass(*args_, **kwargs_) else: @@ -1501,37 +1770,50 @@ def factory(*args_, **kwargs_): def get_inheritsinterface(self): return self.inheritsinterface def set_inheritsinterface(self, inheritsinterface): self.inheritsinterface = inheritsinterface def add_inheritsinterface(self, value): self.inheritsinterface.append(value) - def insert_inheritsinterface(self, index, value): self.inheritsinterface[index] = value + def insert_inheritsinterface_at(self, index, value): self.inheritsinterface.insert(index, value) + def replace_inheritsinterface_at(self, index, value): self.inheritsinterface[index] = value inheritsinterfaceProp = property(get_inheritsinterface, set_inheritsinterface) - def get_name(self): return self.name - def set_name(self, name): self.name = name - nameProp = property(get_name, set_name) def get_repid(self): return self.repid def set_repid(self, repid): self.repid = repid repidProp = property(get_repid, set_repid) + def get_name(self): return self.name + def set_name(self, name): self.name = name + nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + self.inheritsinterface + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='interface', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('interface') + if imported_ns_def_ is not None: + namespacedef_ = 
imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='interface') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='interface', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='interface'): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - outfile.write(' repid=%s' % (self.gds_format_string(quote_attrib(self.repid).encode(ExternalEncoding), input_name='repid'), )) + already_processed.add('repid') + outfile.write(' repid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.repid), input_name='repid')), )) + if self.name is not None and 'name' not in already_processed: + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='interface', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1539,59 +1821,28 @@ def exportChildren(self, outfile, level, namespace_='', name_='interface', froms eol_ = '' for inheritsinterface_ in 
self.inheritsinterface: inheritsinterface_.export(outfile, level, namespace_, name_='inheritsinterface', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.inheritsinterface - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='interface'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - showIndent(outfile, level) - outfile.write('repid = "%s",\n' % (self.repid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('inheritsinterface=[\n') - level += 1 - for inheritsinterface_ in self.inheritsinterface: - showIndent(outfile, level) - outfile.write('model_.inheritsInterface(\n') - inheritsinterface_.exportLiteral(outfile, level, name_='inheritsInterface') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('name', node) - if value is not None and 'name' not in already_processed: - already_processed.append('name') - self.name = value value = find_attr_value_('repid', node) if value is not None and 'repid' not in already_processed: - already_processed.append('repid') + 
already_processed.add('repid') self.repid = value + value = find_attr_value_('name', node) + if value is not None and 'name' not in already_processed: + already_processed.add('name') + self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'inheritsinterface': obj_ = inheritsInterface.factory() obj_.build(child_) self.inheritsinterface.append(obj_) + obj_.original_tagname_ = 'inheritsinterface' # end class interface @@ -1599,9 +1850,14 @@ class inheritsInterface(GeneratedsSuper): subclass = None superclass = None def __init__(self, repid=None): + self.original_tagname_ = None self.repid = _cast(None, repid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, inheritsInterface) + if subclass is not None: + return subclass(*args_, **kwargs_) if inheritsInterface.subclass: return inheritsInterface.subclass(*args_, **kwargs_) else: @@ -1610,78 +1866,82 @@ def factory(*args_, **kwargs_): def get_repid(self): return self.repid def set_repid(self, repid): self.repid = repid repidProp = property(get_repid, set_repid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='inheritsInterface', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('inheritsInterface') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='inheritsInterface') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, 
namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='inheritsInterface', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='inheritsInterface'): if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - outfile.write(' repid=%s' % (self.gds_format_string(quote_attrib(self.repid).encode(ExternalEncoding), input_name='repid'), )) + already_processed.add('repid') + outfile.write(' repid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.repid), input_name='repid')), )) def exportChildren(self, outfile, level, namespace_='', name_='inheritsInterface', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='inheritsInterface'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.repid is not None and 'repid' not in already_processed: - already_processed.append('repid') - showIndent(outfile, level) - outfile.write('repid = "%s",\n' % (self.repid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('repid', node) if value is not None and 'repid' not in already_processed: - already_processed.append('repid') + already_processed.add('repid') 
self.repid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class inheritsInterface +GDSClassesMapping = { + 'softwarecomponent': softwarecomponent, +} + + USAGE_TEXT = """ Usage: python .py [ -s ] """ + def usage(): - print USAGE_TEXT + print(USAGE_TEXT) sys.exit(1) def get_root_tag(node): tag = Tag_pattern_.match(node.tag).groups()[-1] - rootClass = globals().get(tag) + rootClass = GDSClassesMapping.get(tag) + if rootClass is None: + rootClass = globals().get(tag) return tag, rootClass -def parse(inFileName): - doc = parsexml_(inFileName) +def parse(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1691,16 +1951,18 @@ def parse(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_=rootTag, -## namespacedef_='', -## pretty_print=True) +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='', +## pretty_print=True) return rootObj -def parseString(inString): - from StringIO import StringIO - doc = parsexml_(StringIO(inString)) +def parseEtree(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1710,14 +1972,47 @@ def parseString(inString): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_="softwarecomponent", -## namespacedef_='') + mapping = {} + rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) + reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) +## if not silence: +## content = etree_.tostring( +## rootElement, pretty_print=True, +## xml_declaration=True, encoding="utf-8") +## sys.stdout.write(content) +## sys.stdout.write('\n') + return rootObj, rootElement, mapping, reverse_mapping + + +def parseString(inString, silence=False): + '''Parse a string, create the object tree, and export it. + + Arguments: + - inString -- A string. This XML fragment should not start + with an XML declaration containing an encoding. + - silence -- A boolean. If False, export the object. + Returns -- The root object in the tree. + ''' + parser = None + rootNode= parsexmlstring_(inString, parser) + rootTag, rootClass = get_root_tag(rootNode) + if rootClass is None: + rootTag = 'softwarecomponent' + rootClass = softwarecomponent + rootObj = rootClass.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='') return rootObj -def parseLiteral(inFileName): - doc = parsexml_(inFileName) +def parseLiteral(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: @@ -1727,11 +2022,12 @@ def parseLiteral(inFileName): rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None -## sys.stdout.write('#from scd import *\n\n') -## sys.stdout.write('import scd as model_\n\n') -## sys.stdout.write('rootObj = model_.rootTag(\n') -## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) -## sys.stdout.write(')\n') +## if not silence: +## sys.stdout.write('#from scd import *\n\n') +## sys.stdout.write('import scd as model_\n\n') +## sys.stdout.write('rootObj = model_.rootClass(\n') +## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) +## sys.stdout.write(')\n') return rootObj @@ -1762,4 +2058,4 @@ def main(): "softwarecomponent", "supportsInterface", "uses" - ] +] diff --git a/redhawk/src/base/framework/python/ossie/parsers/spd.py b/redhawk/src/base/framework/python/ossie/parsers/spd.py index 8575ac478..3cfb92394 100644 --- a/redhawk/src/base/framework/python/ossie/parsers/spd.py +++ b/redhawk/src/base/framework/python/ossie/parsers/spd.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- -# +# -*- coding: utf-8 -*- + # This file is protected by Copyright. Please refer to the COPYRIGHT file # distributed with this source distribution. # @@ -18,70 +18,97 @@ # # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. -# # -# Generated Thu Sep 12 14:49:31 2013 by generateDS.py version 2.7c. +# Generated Mon Jul 30 12:29:35 2018 by generateDS.py version 2.29.14. 
+# Python 2.7.5 (default, Nov 6 2016, 00:28:07) [GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] +# +# Command line options: +# ('-f', '') +# ('--silence', '') +# ('-m', '') +# ('-o', 'ossie/parsers/spd.py') +# +# Command line arguments: +# ../../../xml/xsd/spd.xsd +# +# Command line: +# /usr/bin/generateDS.py -f --silence -m -o "ossie/parsers/spd.py" ../../../xml/xsd/spd.xsd +# +# Current working directory (os.getcwd()): +# python # import sys -import getopt import re as re_ - -etree_ = None -Verbose_import_ = False -( XMLParser_import_none, XMLParser_import_lxml, - XMLParser_import_elementtree - ) = range(3) -XMLParser_import_library = None +import base64 +import datetime as datetime_ +import warnings as warnings_ try: - # lxml from lxml import etree as etree_ - XMLParser_import_library = XMLParser_import_lxml - if Verbose_import_: - print("running with lxml.etree") except ImportError: - try: - # cElementTree from Python 2.5+ - import xml.etree.cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree on Python 2.5+") - except ImportError: - try: - # ElementTree from Python 2.5+ - import xml.etree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree on Python 2.5+") - except ImportError: - try: - # normal cElementTree install - import cElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with cElementTree") - except ImportError: - try: - # normal ElementTree install - import elementtree.ElementTree as etree_ - XMLParser_import_library = XMLParser_import_elementtree - if Verbose_import_: - print("running with ElementTree") - except ImportError: - raise ImportError("Failed to import ElementTree from any known place") - -def parsexml_(*args, **kwargs): - if (XMLParser_import_library == XMLParser_import_lxml and - 'parser' not in kwargs): + from 
xml.etree import ElementTree as etree_ + + +Validate_simpletypes_ = True +if sys.version_info[0] == 2: + BaseStrType_ = basestring +else: + BaseStrType_ = str + + +def parsexml_(infile, parser=None, **kwargs): + if parser is None: # Use the lxml ElementTree compatible parser so that, e.g., # we ignore comments. - kwargs['parser'] = etree_.ETCompatXMLParser() - doc = etree_.parse(*args, **kwargs) + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + doc = etree_.parse(infile, parser=parser, **kwargs) return doc +def parsexmlstring_(instring, parser=None, **kwargs): + if parser is None: + # Use the lxml ElementTree compatible parser so that, e.g., + # we ignore comments. + try: + parser = etree_.ETCompatXMLParser() + except AttributeError: + # fallback to xml.etree + parser = etree_.XMLParser() + element = etree_.fromstring(instring, parser=parser, **kwargs) + return element + +# +# Namespace prefix definition table (and other attributes, too) +# +# The module generatedsnamespaces, if it is importable, must contain +# a dictionary named GeneratedsNamespaceDefs. This Python dictionary +# should map element type names (strings) to XML schema namespace prefix +# definitions. The export method for any class for which there is +# a namespace prefix definition, will export that definition in the +# XML representation of that element. See the export method of +# any generated element type class for a example of the use of this +# table. 
+# A sample table is: +# +# # File: generatedsnamespaces.py +# +# GenerateDSNamespaceDefs = { +# "ElementtypeA": "http://www.xxx.com/namespaceA", +# "ElementtypeB": "http://www.xxx.com/namespaceB", +# } # -# User methods + +try: + from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_ +except ImportError: + GenerateDSNamespaceDefs_ = {} + +# +# The root super-class for element type classes # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class @@ -89,67 +116,273 @@ def parsexml_(*args, **kwargs): try: from generatedssuper import GeneratedsSuper -except ImportError, exp: - +except ImportError as exp: + class GeneratedsSuper(object): + tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$') + class _FixedOffsetTZ(datetime_.tzinfo): + def __init__(self, offset, name): + self.__offset = datetime_.timedelta(minutes=offset) + self.__name = name + def utcoffset(self, dt): + return self.__offset + def tzname(self, dt): + return self.__name + def dst(self, dt): + return None def gds_format_string(self, input_data, input_name=''): return input_data - def gds_validate_string(self, input_data, node, input_name=''): + def gds_validate_string(self, input_data, node=None, input_name=''): + if not input_data: + return '' + else: + return input_data + def gds_format_base64(self, input_data, input_name=''): + return base64.b64encode(input_data) + def gds_validate_base64(self, input_data, node=None, input_name=''): return input_data def gds_format_integer(self, input_data, input_name=''): return '%d' % input_data - def gds_validate_integer(self, input_data, node, input_name=''): + def gds_validate_integer(self, input_data, node=None, input_name=''): return input_data def gds_format_integer_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_integer_list(self, input_data, node, input_name=''): + return '%s' % ' 
'.join(input_data) + def gds_validate_integer_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + int(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of integers') - return input_data + return values def gds_format_float(self, input_data, input_name=''): - return '%f' % input_data - def gds_validate_float(self, input_data, node, input_name=''): + return ('%.15f' % input_data).rstrip('0') + def gds_validate_float(self, input_data, node=None, input_name=''): return input_data def gds_format_float_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_float_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_float_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of floats') - return input_data + return values def gds_format_double(self, input_data, input_name=''): return '%e' % input_data - def gds_validate_double(self, input_data, node, input_name=''): + def gds_validate_double(self, input_data, node=None, input_name=''): return input_data def gds_format_double_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_double_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_double_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: try: - fvalue = float(value) - except (TypeError, ValueError), exp: + float(value) + except (TypeError, ValueError): raise_parse_error(node, 'Requires sequence of doubles') - return input_data + return values def gds_format_boolean(self, input_data, input_name=''): - return 
'%s' % input_data - def gds_validate_boolean(self, input_data, node, input_name=''): + return ('%s' % input_data).lower() + def gds_validate_boolean(self, input_data, node=None, input_name=''): return input_data def gds_format_boolean_list(self, input_data, input_name=''): - return '%s' % input_data - def gds_validate_boolean_list(self, input_data, node, input_name=''): + return '%s' % ' '.join(input_data) + def gds_validate_boolean_list( + self, input_data, node=None, input_name=''): values = input_data.split() for value in values: if value not in ('true', '1', 'false', '0', ): - raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")') + raise_parse_error( + node, + 'Requires sequence of booleans ' + '("true", "1", "false", "0")') + return values + def gds_validate_datetime(self, input_data, node=None, input_name=''): + return input_data + def gds_format_datetime(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % ( + input_data.year, + input_data.month, + input_data.day, + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + @classmethod + def gds_parse_datetime(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = 
input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + time_parts = input_data.split('.') + if len(time_parts) > 1: + micro_seconds = int(float('0.' + time_parts[1]) * 1000000) + input_data = '%s.%s' % (time_parts[0], micro_seconds, ) + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime( + input_data, '%Y-%m-%dT%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt + def gds_validate_date(self, input_data, node=None, input_name=''): + return input_data + def gds_format_date(self, input_data, input_name=''): + _svalue = '%04d-%02d-%02d' % ( + input_data.year, + input_data.month, + input_data.day, + ) + try: + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format( + hours, minutes) + except AttributeError: + pass + return _svalue + @classmethod + def gds_parse_date(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + dt = 
datetime_.datetime.strptime(input_data, '%Y-%m-%d') + dt = dt.replace(tzinfo=tz) + return dt.date() + def gds_validate_time(self, input_data, node=None, input_name=''): return input_data + def gds_format_time(self, input_data, input_name=''): + if input_data.microsecond == 0: + _svalue = '%02d:%02d:%02d' % ( + input_data.hour, + input_data.minute, + input_data.second, + ) + else: + _svalue = '%02d:%02d:%02d.%s' % ( + input_data.hour, + input_data.minute, + input_data.second, + ('%f' % (float(input_data.microsecond) / 1000000))[2:], + ) + if input_data.tzinfo is not None: + tzoff = input_data.tzinfo.utcoffset(input_data) + if tzoff is not None: + total_seconds = tzoff.seconds + (86400 * tzoff.days) + if total_seconds == 0: + _svalue += 'Z' + else: + if total_seconds < 0: + _svalue += '-' + total_seconds *= -1 + else: + _svalue += '+' + hours = total_seconds // 3600 + minutes = (total_seconds - (hours * 3600)) // 60 + _svalue += '{0:02d}:{1:02d}'.format(hours, minutes) + return _svalue + def gds_validate_simple_patterns(self, patterns, target): + # pat is a list of lists of strings/patterns. 
We should: + # - AND the outer elements + # - OR the inner elements + found1 = True + for patterns1 in patterns: + found2 = False + for patterns2 in patterns1: + if re_.search(patterns2, target) is not None: + found2 = True + break + if not found2: + found1 = False + break + return found1 + @classmethod + def gds_parse_time(cls, input_data): + tz = None + if input_data[-1] == 'Z': + tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC') + input_data = input_data[:-1] + else: + results = GeneratedsSuper.tzoff_pattern.search(input_data) + if results is not None: + tzoff_parts = results.group(2).split(':') + tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1]) + if results.group(1) == '-': + tzoff *= -1 + tz = GeneratedsSuper._FixedOffsetTZ( + tzoff, results.group(0)) + input_data = input_data[:-6] + if len(input_data.split('.')) > 1: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f') + else: + dt = datetime_.datetime.strptime(input_data, '%H:%M:%S') + dt = dt.replace(tzinfo=tz) + return dt.time() def gds_str_lower(self, instring): return instring.lower() def get_path_(self, node): @@ -180,6 +413,38 @@ def get_class_obj_(self, node, default_class=None): return class_obj1 def gds_build_any(self, node, type_name=None): return None + @classmethod + def gds_reverse_node_mapping(cls, mapping): + return dict(((v, k) for k, v in mapping.iteritems())) + @staticmethod + def gds_encode(instring): + if sys.version_info[0] == 2: + return instring.encode(ExternalEncoding) + else: + return instring + @staticmethod + def convert_unicode(instring): + if isinstance(instring, str): + result = quote_xml(instring) + elif sys.version_info[0] == 2 and isinstance(instring, unicode): + result = quote_xml(instring).encode('utf8') + else: + result = GeneratedsSuper.gds_encode(str(instring)) + return result + def __eq__(self, other): + if type(self) != type(other): + return False + return self.__dict__ == other.__dict__ + def __ne__(self, other): + return not self.__eq__(other) + + def 
getSubclassFromModule_(module, class_): + '''Get the subclass of a class from a specific module.''' + name = class_.__name__ + 'Sub' + if hasattr(module, name): + return getattr(module, name) + else: + return None # @@ -205,29 +470,50 @@ def gds_build_any(self, node, type_name=None): Tag_pattern_ = re_.compile(r'({.*})?(.*)') String_cleanup_pat_ = re_.compile(r"[\n\r\s]+") Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)') +CDATA_pattern_ = re_.compile(r"", re_.DOTALL) + +# Change this to redirect the generated superclass module to use a +# specific subclass module. +CurrentSubclassModule_ = None # # Support/utility functions. # + def showIndent(outfile, level, pretty_print=True): if pretty_print: for idx in range(level): outfile.write(' ') + def quote_xml(inStr): + "Escape markup chars, but do not modify CDATA sections." if not inStr: return '' - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) - s1 = s1.replace('&', '&') + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) + s2 = '' + pos = 0 + matchobjects = CDATA_pattern_.finditer(s1) + for mo in matchobjects: + s3 = s1[pos:mo.start()] + s2 += quote_xml_aux(s3) + s2 += s1[mo.start():mo.end()] + pos = mo.end() + s3 = s1[pos:] + s2 += quote_xml_aux(s3) + return s2 + + +def quote_xml_aux(inStr): + s1 = inStr.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 + def quote_attrib(inStr): - s1 = (isinstance(inStr, basestring) and inStr or - '%s' % inStr) + s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') @@ -240,6 +526,7 @@ def quote_attrib(inStr): s1 = '"%s"' % s1 return s1 + def quote_python(inStr): s1 = inStr if s1.find("'") == -1: @@ -255,6 +542,7 @@ def quote_python(inStr): else: return '"""%s"""' % s1 + def get_all_text_(node): if node.text is not None: text = node.text @@ -265,6 +553,7 @@ def get_all_text_(node): text += child.tail return text + def 
find_attr_value_(attr_name, node): attrs = node.attrib attr_parts = attr_name.split(':') @@ -282,11 +571,9 @@ def find_attr_value_(attr_name, node): class GDSParseError(Exception): pass + def raise_parse_error(node, msg): - if XMLParser_import_library == XMLParser_import_lxml: - msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) - else: - msg = '%s (element %s)' % (msg, node.tag, ) + msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, ) raise GDSParseError(msg) @@ -305,6 +592,7 @@ class MixedContainer: TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 + TypeBase64 = 8 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type @@ -318,49 +606,104 @@ def getValue(self): return self.value def getName(self): return self.name - def export(self, outfile, level, name, namespace, pretty_print=True): + def export(self, outfile, level, name, namespace, + pretty_print=True): if self.category == MixedContainer.CategoryText: # Prevent exporting empty content as empty lines. 
- if self.value.strip(): + if self.value.strip(): outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex - self.value.export(outfile, level, namespace, name, pretty_print) + self.value.export( + outfile, level, namespace, name, + pretty_print=pretty_print) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: - outfile.write('<%s>%s' % (self.name, self.value, self.name)) + outfile.write('<%s>%s' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: - outfile.write('<%s>%d' % (self.name, self.value, self.name)) + outfile.write('<%s>%d' % ( + self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: - outfile.write('<%s>%f' % (self.name, self.value, self.name)) + outfile.write('<%s>%f' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeDouble: + outfile.write('<%s>%g' % ( + self.name, self.value, self.name)) + elif self.content_type == MixedContainer.TypeBase64: + outfile.write('<%s>%s' % ( + self.name, + base64.b64encode(self.value), + self.name)) + def to_etree(self, element): + if self.category == MixedContainer.CategoryText: + # Prevent exporting empty content as empty lines. 
+ if self.value.strip(): + if len(element) > 0: + if element[-1].tail is None: + element[-1].tail = self.value + else: + element[-1].tail += self.value + else: + if element.text is None: + element.text = self.value + else: + element.text += self.value + elif self.category == MixedContainer.CategorySimple: + subelement = etree_.SubElement( + element, '%s' % self.name) + subelement.text = self.to_etree_simple() + else: # category == MixedContainer.CategoryComplex + self.value.to_etree(element) + def to_etree_simple(self): + if self.content_type == MixedContainer.TypeString: + text = self.value + elif (self.content_type == MixedContainer.TypeInteger or + self.content_type == MixedContainer.TypeBoolean): + text = '%d' % self.value + elif (self.content_type == MixedContainer.TypeFloat or + self.content_type == MixedContainer.TypeDecimal): + text = '%f' % self.value elif self.content_type == MixedContainer.TypeDouble: - outfile.write('<%s>%g' % (self.name, self.value, self.name)) + text = '%g' % self.value + elif self.content_type == MixedContainer.TypeBase64: + text = '%s' % base64.b64encode(self.value) + return text def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \ - (self.category, self.content_type, self.name, self.value)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % ( + self.category, self.content_type, + self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) - outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \ - 
(self.category, self.content_type, self.name,)) + outfile.write( + 'model_.MixedContainer(%d, %d, "%s",\n' % ( + self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class MemberSpec_(object): - def __init__(self, name='', data_type='', container=0): + def __init__(self, name='', data_type='', container=0, + optional=0, child_attrs=None, choice=None): self.name = name self.data_type = data_type self.container = container + self.child_attrs = child_attrs + self.choice = choice + self.optional = optional def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type @@ -375,6 +718,13 @@ def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container + def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs + def get_child_attrs(self): return self.child_attrs + def set_choice(self, choice): self.choice = choice + def get_choice(self): return self.choice + def set_optional(self, optional): self.optional = optional + def get_optional(self): return self.optional + def _cast(typ, value): if typ is None or value is None: @@ -385,14 +735,16 @@ def _cast(typ, value): # Data representation classes. 
# + class softPkg(GeneratedsSuper): subclass = None superclass = None - def __init__(self, version=None, type_=None, id_=None, name=None, title=None, author=None, description=None, propertyfile=None, descriptor=None, implementation=None, usesdevice=None): - self.version = _cast(None, version) - self.type_ = _cast(None, type_) + def __init__(self, id_=None, name=None, type_=None, version=None, title=None, author=None, description=None, propertyfile=None, descriptor=None, implementation=None, usesdevice=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.name = _cast(None, name) + self.type_ = _cast(None, type_) + self.version = _cast(None, version) self.title = title if author is None: self.author = [] @@ -410,6 +762,11 @@ def __init__(self, version=None, type_=None, id_=None, name=None, title=None, au else: self.usesdevice = usesdevice def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, softPkg) + if subclass is not None: + return subclass(*args_, **kwargs_) if softPkg.subclass: return softPkg.subclass(*args_, **kwargs_) else: @@ -421,7 +778,8 @@ def set_title(self, title): self.title = title def get_author(self): return self.author def set_author(self, author): self.author = author def add_author(self, value): self.author.append(value) - def insert_author(self, index, value): self.author[index] = value + def insert_author_at(self, index, value): self.author.insert(index, value) + def replace_author_at(self, index, value): self.author[index] = value authorProp = property(get_author, set_author) def get_description(self): return self.description def set_description(self, description): self.description = description @@ -435,67 +793,87 @@ def set_descriptor(self, descriptor): self.descriptor = descriptor def get_implementation(self): return self.implementation def set_implementation(self, implementation): self.implementation = implementation def 
add_implementation(self, value): self.implementation.append(value) - def insert_implementation(self, index, value): self.implementation[index] = value + def insert_implementation_at(self, index, value): self.implementation.insert(index, value) + def replace_implementation_at(self, index, value): self.implementation[index] = value implementationProp = property(get_implementation, set_implementation) def get_usesdevice(self): return self.usesdevice def set_usesdevice(self, usesdevice): self.usesdevice = usesdevice def add_usesdevice(self, value): self.usesdevice.append(value) - def insert_usesdevice(self, index, value): self.usesdevice[index] = value + def insert_usesdevice_at(self, index, value): self.usesdevice.insert(index, value) + def replace_usesdevice_at(self, index, value): self.usesdevice[index] = value usesdeviceProp = property(get_usesdevice, set_usesdevice) - def get_version(self): return self.version - def set_version(self, version): self.version = version - versionProp = property(get_version, set_version) - def get_type(self): return self.type_ - def set_type(self, type_): self.type_ = type_ - typeProp = property(get_type, set_type) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) - def export(self, outfile, level, namespace_='', name_='softPkg', namespacedef_='', pretty_print=True): + def get_type(self): return self.type_ + def set_type(self, type_): self.type_ = type_ + typeProp = property(get_type, set_type) + def get_version(self): return self.version + def set_version(self, version): self.version = version + versionProp = property(get_version, set_version) + def hasContent_(self): + if ( + self.title is not None or + self.author or + self.description is not None or + self.propertyfile is not None or + self.descriptor is not None or + 
self.implementation or + self.usesdevice + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='softpkg', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('softpkg') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) - outfile.write('<%s%s%s' % (namespace_, 'softpkg', namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] - self.exportAttributes(outfile, level, already_processed, namespace_, name_='softPkg') + outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='softpkg') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='softpkg', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) - outfile.write('%s' % (namespace_, 'softpkg', eol_)) + outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) - def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='softPkg'): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='softpkg'): 
if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - def exportChildren(self, outfile, level, namespace_='', name_='softPkg', fromsubclass_=False, pretty_print=True): + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) + if self.type_ is not None and 'type_' not in already_processed: + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) + if self.version is not None and 'version' not in already_processed: + already_processed.add('version') + outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')), )) + def exportChildren(self, outfile, level, namespace_='', name_='softpkg', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.title is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%stitle>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.title), input_name='title')), eol_)) for author_ in self.author: author_.export(outfile, level, namespace_, name_='author', pretty_print=pretty_print) if self.description is not None: showIndent(outfile, level, pretty_print) - 
outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.propertyfile is not None: self.propertyfile.export(outfile, level, namespace_, name_='propertyfile', pretty_print=pretty_print) if self.descriptor is not None: @@ -504,118 +882,30 @@ def exportChildren(self, outfile, level, namespace_='', name_='softPkg', fromsub implementation_.export(outfile, level, namespace_, name_='implementation', pretty_print=pretty_print) for usesdevice_ in self.usesdevice: usesdevice_.export(outfile, level, namespace_, name_='usesdevice', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.title is not None or - self.author or - self.description is not None or - self.propertyfile is not None or - self.descriptor is not None or - self.implementation or - self.usesdevice - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='softPkg'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') 
- showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.title is not None: - showIndent(outfile, level) - outfile.write('title=%s,\n' % quote_python(self.title).encode(ExternalEncoding)) - showIndent(outfile, level) - outfile.write('author=[\n') - level += 1 - for author_ in self.author: - showIndent(outfile, level) - outfile.write('model_.author(\n') - author_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.propertyfile is not None: - showIndent(outfile, level) - outfile.write('propertyfile=model_.propertyFile(\n') - self.propertyfile.exportLiteral(outfile, level, name_='propertyfile') - showIndent(outfile, level) - outfile.write('),\n') - if self.descriptor is not None: - showIndent(outfile, level) - outfile.write('descriptor=model_.descriptor(\n') - self.descriptor.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('implementation=[\n') - level += 1 - for implementation_ in self.implementation: - showIndent(outfile, level) - outfile.write('model_.implementation(\n') - implementation_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('usesdevice=[\n') - level += 1 - for usesdevice_ in self.usesdevice: - showIndent(outfile, level) - outfile.write('model_.usesDevice(\n') - usesdevice_.exportLiteral(outfile, level, name_='usesDevice') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - 
self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('version', node) - if value is not None and 'version' not in already_processed: - already_processed.append('version') - self.version = value - value = find_attr_value_('type', node) - if value is not None and 'type' not in already_processed: - already_processed.append('type') - self.type_ = value value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value + value = find_attr_value_('type', node) + if value is not None and 'type' not in already_processed: + already_processed.add('type') + self.type_ = value + value = find_attr_value_('version', node) + if value is not None and 'version' not in already_processed: + already_processed.add('version') + self.version = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'title': title_ = child_.text @@ -625,6 +915,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = author.factory() obj_.build(child_) self.author.append(obj_) + obj_.original_tagname_ = 'author' elif nodeName_ == 'description': description_ = child_.text description_ = self.gds_validate_string(description_, node, 'description') @@ -632,19 +923,23 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'propertyfile': obj_ = propertyFile.factory() obj_.build(child_) - self.set_propertyfile(obj_) + self.propertyfile = 
obj_ + obj_.original_tagname_ = 'propertyfile' elif nodeName_ == 'descriptor': obj_ = descriptor.factory() obj_.build(child_) - self.set_descriptor(obj_) + self.descriptor = obj_ + obj_.original_tagname_ = 'descriptor' elif nodeName_ == 'implementation': obj_ = implementation.factory() obj_.build(child_) self.implementation.append(obj_) + obj_.original_tagname_ = 'implementation' elif nodeName_ == 'usesdevice': obj_ = usesDevice.factory() obj_.build(child_) self.usesdevice.append(obj_) + obj_.original_tagname_ = 'usesdevice' # end class softPkg @@ -652,9 +947,14 @@ class localFile(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, localFile) + if subclass is not None: + return subclass(*args_, **kwargs_) if localFile.subclass: return localFile.subclass(*args_, **kwargs_) else: @@ -663,55 +963,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='localFile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('localFile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='localFile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level 
+ 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='localFile', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='localFile'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='localFile', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='localFile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, 
child_, node, nodeName_, fromsubclass_=False): pass @@ -722,9 +1017,15 @@ class propertyFile(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, localfile=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, propertyFile) + if subclass is not None: + return subclass(*args_, **kwargs_) if propertyFile.subclass: return propertyFile.subclass(*args_, **kwargs_) else: @@ -736,26 +1037,38 @@ def set_localfile(self, localfile): self.localfile = localfile def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='propertyFile', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('propertyFile') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertyFile') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='propertyFile', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', 
name_='propertyFile'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='propertyFile', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -763,45 +1076,24 @@ def exportChildren(self, outfile, level, namespace_='', name_='propertyFile', fr eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='propertyFile'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localFile(\n') - self.localfile.exportLiteral(outfile, level, name_='localfile') - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, 
already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localFile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class propertyFile @@ -809,6 +1101,7 @@ class author(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None, company=None, webpage=None): + self.original_tagname_ = None if name is None: self.name = [] else: @@ -816,6 +1109,11 @@ def __init__(self, name=None, company=None, webpage=None): self.company = company self.webpage = webpage def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, author) + if subclass is not None: + return subclass(*args_, **kwargs_) if author.subclass: return author.subclass(*args_, **kwargs_) else: @@ -824,7 +1122,8 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name def add_name(self, value): self.name.append(value) - def insert_name(self, index, value): self.name[index] = value + def insert_name_at(self, index, value): self.name.insert(index, value) + def replace_name_at(self, index, value): self.name[index] = value nameProp = property(get_name, set_name) def get_company(self): return self.company def set_company(self, company): self.company = company @@ -832,18 +1131,32 @@ def set_company(self, company): self.company = company def get_webpage(self): return self.webpage def set_webpage(self, webpage): self.webpage = webpage webpageProp = property(get_webpage, set_webpage) + def hasContent_(self): + if ( + self.name or + self.company is not None or + self.webpage is not None + ): + return True + else: + return False def export(self, 
outfile, level, namespace_='', name_='author', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('author') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='author') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='author', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -857,50 +1170,20 @@ def exportChildren(self, outfile, level, namespace_='', name_='author', fromsubc eol_ = '' for name_ in self.name: showIndent(outfile, level, pretty_print) - outfile.write('<%sname>%s%s' % (namespace_, self.gds_format_string(quote_xml(name_).encode(ExternalEncoding), input_name='name'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(name_), input_name='name')), eol_)) if self.company is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%scompany>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.company).encode(ExternalEncoding), input_name='company'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.company), input_name='company')), eol_)) if self.webpage is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%swebpage>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.webpage).encode(ExternalEncoding), input_name='webpage'), namespace_, eol_)) - def hasContent_(self): 
- if ( - self.name or - self.company is not None or - self.webpage is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='author'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('name=[\n') - level += 1 - for name_ in self.name: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(name_).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - if self.company is not None: - showIndent(outfile, level) - outfile.write('company=%s,\n' % quote_python(self.company).encode(ExternalEncoding)) - if self.webpage is not None: - showIndent(outfile, level) - outfile.write('webpage=%s,\n' % quote_python(self.webpage).encode(ExternalEncoding)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.webpage), input_name='webpage')), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -923,9 +1206,15 @@ class descriptor(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None, localfile=None): + self.original_tagname_ = None self.name = _cast(None, name) self.localfile = localfile def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, descriptor) + if subclass is not None: + return 
subclass(*args_, **kwargs_) if descriptor.subclass: return descriptor.subclass(*args_, **kwargs_) else: @@ -937,26 +1226,38 @@ def set_localfile(self, localfile): self.localfile = localfile def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + self.localfile is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='descriptor', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('descriptor') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='descriptor') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='descriptor', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='descriptor'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='descriptor', fromsubclass_=False, 
pretty_print=True): if pretty_print: eol_ = '\n' @@ -964,45 +1265,24 @@ def exportChildren(self, outfile, level, namespace_='', name_='descriptor', from eol_ = '' if self.localfile is not None: self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='descriptor'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localFile(\n') - self.localfile.exportLiteral(outfile, level, name_='localfile') - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localFile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' # end class descriptor @@ -1010,6 +1290,7 @@ class implementation(GeneratedsSuper): 
subclass = None superclass = None def __init__(self, id_=None, aepcompliance='aep_compliant', description=None, propertyfile=None, code=None, compiler=None, programminglanguage=None, humanlanguage=None, runtime=None, os=None, processor=None, dependency=None, usesdevice=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) self.aepcompliance = _cast(None, aepcompliance) self.description = description @@ -1036,6 +1317,11 @@ def __init__(self, id_=None, aepcompliance='aep_compliant', description=None, pr else: self.usesdevice = usesdevice def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, implementation) + if subclass is not None: + return subclass(*args_, **kwargs_) if implementation.subclass: return implementation.subclass(*args_, **kwargs_) else: @@ -1065,52 +1351,78 @@ def set_runtime(self, runtime): self.runtime = runtime def get_os(self): return self.os def set_os(self, os): self.os = os def add_os(self, value): self.os.append(value) - def insert_os(self, index, value): self.os[index] = value + def insert_os_at(self, index, value): self.os.insert(index, value) + def replace_os_at(self, index, value): self.os[index] = value osProp = property(get_os, set_os) def get_processor(self): return self.processor def set_processor(self, processor): self.processor = processor def add_processor(self, value): self.processor.append(value) - def insert_processor(self, index, value): self.processor[index] = value + def insert_processor_at(self, index, value): self.processor.insert(index, value) + def replace_processor_at(self, index, value): self.processor[index] = value processorProp = property(get_processor, set_processor) def get_dependency(self): return self.dependency def set_dependency(self, dependency): self.dependency = dependency def add_dependency(self, value): self.dependency.append(value) - def insert_dependency(self, index, value): self.dependency[index] = value + def 
insert_dependency_at(self, index, value): self.dependency.insert(index, value) + def replace_dependency_at(self, index, value): self.dependency[index] = value dependencyProp = property(get_dependency, set_dependency) def get_usesdevice(self): return self.usesdevice def set_usesdevice(self, usesdevice): self.usesdevice = usesdevice def add_usesdevice(self, value): self.usesdevice.append(value) - def insert_usesdevice(self, index, value): self.usesdevice[index] = value + def insert_usesdevice_at(self, index, value): self.usesdevice.insert(index, value) + def replace_usesdevice_at(self, index, value): self.usesdevice[index] = value usesdeviceProp = property(get_usesdevice, set_usesdevice) def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id + def set_id(self, id_): self.id_ = id_ idProp = property(get_id, set_id) def get_aepcompliance(self): return self.aepcompliance def set_aepcompliance(self, aepcompliance): self.aepcompliance = aepcompliance aepcomplianceProp = property(get_aepcompliance, set_aepcompliance) + def hasContent_(self): + if ( + self.description is not None or + self.propertyfile is not None or + self.code is not None or + self.compiler is not None or + self.programminglanguage is not None or + self.humanlanguage is not None or + self.runtime is not None or + self.os or + self.processor or + self.dependency or + self.usesdevice + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='implementation', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('implementation') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() 
self.exportAttributes(outfile, level, already_processed, namespace_, name_='implementation') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='implementation', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='implementation'): if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) - if self.aepcompliance is not None and 'aepcompliance' not in already_processed: - already_processed.append('aepcompliance') - outfile.write(' aepcompliance=%s' % (self.gds_format_string(quote_attrib(self.aepcompliance).encode(ExternalEncoding), input_name='aepcompliance'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.aepcompliance != "aep_compliant" and 'aepcompliance' not in already_processed: + already_processed.add('aepcompliance') + outfile.write(' aepcompliance=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.aepcompliance), input_name='aepcompliance')), )) def exportChildren(self, outfile, level, namespace_='', name_='implementation', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1118,7 +1430,7 @@ def exportChildren(self, outfile, level, namespace_='', name_='implementation', eol_ = '' if self.description is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sdescription>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_)) + 
outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.description), input_name='description')), eol_)) if self.propertyfile is not None: self.propertyfile.export(outfile, level, namespace_, name_='propertyfile', pretty_print=pretty_print) if self.code is not None: @@ -1139,138 +1451,21 @@ def exportChildren(self, outfile, level, namespace_='', name_='implementation', dependency_.export(outfile, level, namespace_, name_='dependency', pretty_print=pretty_print) for usesdevice_ in self.usesdevice: usesdevice_.export(outfile, level, namespace_, name_='usesdevice', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.description is not None or - self.propertyfile is not None or - self.code is not None or - self.compiler is not None or - self.programminglanguage is not None or - self.humanlanguage is not None or - self.runtime is not None or - self.os or - self.processor or - self.dependency or - self.usesdevice - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='implementation'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - if self.aepcompliance is not None and 'aepcompliance' not in already_processed: - already_processed.append('aepcompliance') - showIndent(outfile, level) - outfile.write('aepcompliance = "%s",\n' % (self.aepcompliance,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.description is not None: - showIndent(outfile, level) - outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding)) - if self.propertyfile is not None: - showIndent(outfile, level) - 
outfile.write('propertyfile=model_.propertyFile(\n') - self.propertyfile.exportLiteral(outfile, level, name_='propertyfile') - showIndent(outfile, level) - outfile.write('),\n') - if self.code is not None: - showIndent(outfile, level) - outfile.write('code=model_.code(\n') - self.code.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.compiler is not None: - showIndent(outfile, level) - outfile.write('compiler=model_.compiler(\n') - self.compiler.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.programminglanguage is not None: - showIndent(outfile, level) - outfile.write('programminglanguage=model_.programmingLanguage(\n') - self.programminglanguage.exportLiteral(outfile, level, name_='programminglanguage') - showIndent(outfile, level) - outfile.write('),\n') - if self.humanlanguage is not None: - showIndent(outfile, level) - outfile.write('humanlanguage=model_.humanLanguage(\n') - self.humanlanguage.exportLiteral(outfile, level, name_='humanlanguage') - showIndent(outfile, level) - outfile.write('),\n') - if self.runtime is not None: - showIndent(outfile, level) - outfile.write('runtime=model_.runtime(\n') - self.runtime.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - showIndent(outfile, level) - outfile.write('os=[\n') - level += 1 - for os_ in self.os: - showIndent(outfile, level) - outfile.write('model_.os(\n') - os_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('processor=[\n') - level += 1 - for processor_ in self.processor: - showIndent(outfile, level) - outfile.write('model_.processor(\n') - processor_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - 
outfile.write('dependency=[\n') - level += 1 - for dependency_ in self.dependency: - showIndent(outfile, level) - outfile.write('model_.dependency(\n') - dependency_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('usesdevice=[\n') - level += 1 - for usesdevice_ in self.usesdevice: - showIndent(outfile, level) - outfile.write('model_.usesDevice(\n') - usesdevice_.exportLiteral(outfile, level, name_='usesDevice') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value value = find_attr_value_('aepcompliance', node) if value is not None and 'aepcompliance' not in already_processed: - already_processed.append('aepcompliance') + already_processed.add('aepcompliance') self.aepcompliance = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'description': @@ -1280,43 +1475,53 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): elif nodeName_ == 'propertyfile': obj_ = propertyFile.factory() obj_.build(child_) - self.set_propertyfile(obj_) + self.propertyfile = obj_ + obj_.original_tagname_ = 'propertyfile' elif nodeName_ == 'code': obj_ = code.factory() obj_.build(child_) - self.set_code(obj_) + self.code = obj_ + obj_.original_tagname_ = 'code' elif nodeName_ == 'compiler': obj_ = 
compiler.factory() obj_.build(child_) - self.set_compiler(obj_) + self.compiler = obj_ + obj_.original_tagname_ = 'compiler' elif nodeName_ == 'programminglanguage': obj_ = programmingLanguage.factory() obj_.build(child_) - self.set_programminglanguage(obj_) + self.programminglanguage = obj_ + obj_.original_tagname_ = 'programminglanguage' elif nodeName_ == 'humanlanguage': obj_ = humanLanguage.factory() obj_.build(child_) - self.set_humanlanguage(obj_) + self.humanlanguage = obj_ + obj_.original_tagname_ = 'humanlanguage' elif nodeName_ == 'runtime': obj_ = runtime.factory() obj_.build(child_) - self.set_runtime(obj_) + self.runtime = obj_ + obj_.original_tagname_ = 'runtime' elif nodeName_ == 'os': obj_ = os.factory() obj_.build(child_) self.os.append(obj_) + obj_.original_tagname_ = 'os' elif nodeName_ == 'processor': obj_ = processor.factory() obj_.build(child_) self.processor.append(obj_) + obj_.original_tagname_ = 'processor' elif nodeName_ == 'dependency': obj_ = dependency.factory() obj_.build(child_) self.dependency.append(obj_) + obj_.original_tagname_ = 'dependency' elif nodeName_ == 'usesdevice': obj_ = usesDevice.factory() obj_.build(child_) self.usesdevice.append(obj_) + obj_.original_tagname_ = 'usesdevice' # end class implementation @@ -1324,12 +1529,18 @@ class code(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, localfile=None, entrypoint=None, stacksize=None, priority=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.localfile = localfile self.entrypoint = entrypoint self.stacksize = stacksize self.priority = priority def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, code) + if subclass is not None: + return subclass(*args_, **kwargs_) if code.subclass: return code.subclass(*args_, **kwargs_) else: @@ -1352,26 +1563,50 @@ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, 
set_type) def validate_codeFileType(self, value): # Validate type codeFileType, a restriction on xs:NMTOKEN. - pass + if value is not None and Validate_simpletypes_: + value = str(value) + enumerations = ['Executable', 'KernelModule', 'SharedLibrary', 'Driver'] + enumeration_respectee = False + for enum in enumerations: + if value == enum: + enumeration_respectee = True + break + if not enumeration_respectee: + warnings_.warn('Value "%(value)s" does not match xsd enumeration restriction on codeFileType' % {"value" : value.encode("utf-8")} ) + def hasContent_(self): + if ( + self.localfile is not None or + self.entrypoint is not None or + self.stacksize is not None or + self.priority is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='code', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('code') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='code') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='code', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='code'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') + already_processed.add('type_') outfile.write(' type=%s' % 
(quote_attrib(self.type_), )) def exportChildren(self, outfile, level, namespace_='', name_='code', fromsubclass_=False, pretty_print=True): if pretty_print: @@ -1382,65 +1617,32 @@ def exportChildren(self, outfile, level, namespace_='', name_='code', fromsubcla self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) if self.entrypoint is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sentrypoint>%s%s' % (namespace_, self.gds_format_string(quote_xml(self.entrypoint).encode(ExternalEncoding), input_name='entrypoint'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.entrypoint), input_name='entrypoint')), eol_)) if self.stacksize is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%sstacksize>%s%s' % (namespace_, self.gds_format_integer(self.stacksize, input_name='stacksize'), namespace_, eol_)) + outfile.write('%s%s' % (self.gds_format_integer(self.stacksize, input_name='stacksize'), eol_)) if self.priority is not None: showIndent(outfile, level, pretty_print) - outfile.write('<%spriority>%s%s' % (namespace_, self.gds_format_integer(self.priority, input_name='priority'), namespace_, eol_)) - def hasContent_(self): - if ( - self.localfile is not None or - self.entrypoint is not None or - self.stacksize is not None or - self.priority is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='code'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - 
showIndent(outfile, level) - outfile.write('localfile=model_.localFile(\n') - self.localfile.exportLiteral(outfile, level, name_='localfile') - showIndent(outfile, level) - outfile.write('),\n') - if self.entrypoint is not None: - showIndent(outfile, level) - outfile.write('entrypoint=%s,\n' % quote_python(self.entrypoint).encode(ExternalEncoding)) - if self.stacksize is not None: - showIndent(outfile, level) - outfile.write('stacksize=%d,\n' % self.stacksize) - if self.priority is not None: - showIndent(outfile, level) - outfile.write('priority=%d,\n' % self.priority) + outfile.write('%s%s' % (self.gds_format_integer(self.priority, input_name='priority'), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value self.validate_codeFileType(self.type_) # validate type codeFileType def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localFile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' elif nodeName_ == 'entrypoint': entrypoint_ = child_.text entrypoint_ = self.gds_validate_string(entrypoint_, node, 'entrypoint') @@ -1449,7 +1651,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): sval_ = child_.text try: ival_ = int(sval_) - except (TypeError, ValueError), exp: + except (TypeError, ValueError) as exp: raise_parse_error(child_, 'requires integer: %s' % exp) ival_ = self.gds_validate_integer(ival_, node, 'stacksize') self.stacksize = ival_ @@ 
-1457,7 +1659,7 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): sval_ = child_.text try: ival_ = int(sval_) - except (TypeError, ValueError), exp: + except (TypeError, ValueError) as exp: raise_parse_error(child_, 'requires integer: %s' % exp) ival_ = self.gds_validate_integer(ival_, node, 'priority') self.priority = ival_ @@ -1467,83 +1669,79 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class compiler(GeneratedsSuper): subclass = None superclass = None - def __init__(self, version=None, name=None): - self.version = _cast(None, version) + def __init__(self, name=None, version=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass + self.version = _cast(None, version) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, compiler) + if subclass is not None: + return subclass(*args_, **kwargs_) if compiler.subclass: return compiler.subclass(*args_, **kwargs_) else: return compiler(*args_, **kwargs_) factory = staticmethod(factory) - def get_version(self): return self.version - def set_version(self, version): self.version = version - versionProp = property(get_version, set_version) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def get_version(self): return self.version + def set_version(self, version): self.version = version + versionProp = property(get_version, set_version) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='compiler', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('compiler') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) 
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='compiler') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='compiler', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='compiler'): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - def exportChildren(self, outfile, level, namespace_='', name_='compiler', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='compiler'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - if self.name is not None and 'name' not 
in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('version') + outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')), )) + def exportChildren(self, outfile, level, namespace_='', name_='compiler', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('version', node) - if value is not None and 'version' not in already_processed: - already_processed.append('version') - self.version = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value + value = find_attr_value_('version', node) + if value is not None and 'version' not in already_processed: + already_processed.add('version') + self.version = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class compiler @@ -1552,83 +1750,79 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class programmingLanguage(GeneratedsSuper): subclass = None superclass = None - def __init__(self, version=None, name=None): - self.version = _cast(None, version) + def __init__(self, name=None, version=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass + self.version = _cast(None, version) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, 
programmingLanguage) + if subclass is not None: + return subclass(*args_, **kwargs_) if programmingLanguage.subclass: return programmingLanguage.subclass(*args_, **kwargs_) else: return programmingLanguage(*args_, **kwargs_) factory = staticmethod(factory) - def get_version(self): return self.version - def set_version(self, version): self.version = version - versionProp = property(get_version, set_version) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def get_version(self): return self.version + def set_version(self, version): self.version = version + versionProp = property(get_version, set_version) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='programmingLanguage', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('programmingLanguage') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='programmingLanguage') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='programmingLanguage', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='programmingLanguage'): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - outfile.write(' 
version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - def exportChildren(self, outfile, level, namespace_='', name_='programmingLanguage', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='programmingLanguage'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('version') + outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')), )) + def exportChildren(self, outfile, level, namespace_='', name_='programmingLanguage', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + 
return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('version', node) - if value is not None and 'version' not in already_processed: - already_processed.append('version') - self.version = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value + value = find_attr_value_('version', node) + if value is not None and 'version' not in already_processed: + already_processed.add('version') + self.version = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class programmingLanguage @@ -1638,9 +1832,14 @@ class humanLanguage(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, humanLanguage) + if subclass is not None: + return subclass(*args_, **kwargs_) if humanLanguage.subclass: return humanLanguage.subclass(*args_, **kwargs_) else: @@ -1649,55 +1848,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='humanLanguage', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('humanLanguage') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() 
self.exportAttributes(outfile, level, already_processed, namespace_, name_='humanLanguage') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='humanLanguage', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='humanLanguage'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='humanLanguage', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='humanLanguage'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = 
find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1707,83 +1901,79 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class os(GeneratedsSuper): subclass = None superclass = None - def __init__(self, version=None, name=None): - self.version = _cast(None, version) + def __init__(self, name=None, version=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass + self.version = _cast(None, version) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, os) + if subclass is not None: + return subclass(*args_, **kwargs_) if os.subclass: return os.subclass(*args_, **kwargs_) else: return os(*args_, **kwargs_) factory = staticmethod(factory) - def get_version(self): return self.version - def set_version(self, version): self.version = version - versionProp = property(get_version, set_version) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def get_version(self): return self.version + def set_version(self, version): self.version = version + versionProp = property(get_version, set_version) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='os', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('os') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = 
set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='os') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='os', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='os'): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - def exportChildren(self, outfile, level, namespace_='', name_='os', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='os'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, 
outfile, level, name_): + already_processed.add('version') + outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')), )) + def exportChildren(self, outfile, level, namespace_='', name_='os', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('version', node) - if value is not None and 'version' not in already_processed: - already_processed.append('version') - self.version = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value + value = find_attr_value_('version', node) + if value is not None and 'version' not in already_processed: + already_processed.add('version') + self.version = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class os @@ -1793,9 +1983,14 @@ class processor(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, processor) + if subclass is not None: + return subclass(*args_, **kwargs_) if processor.subclass: return processor.subclass(*args_, **kwargs_) else: @@ -1804,55 +1999,50 @@ def factory(*args_, **kwargs_): def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def hasContent_(self): + if ( + + ): + return True + else: + return False def 
export(self, outfile, level, namespace_='', name_='processor', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('processor') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='processor') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='processor', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='processor'): if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) def exportChildren(self, outfile, level, namespace_='', name_='processor', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='processor'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.name is not None and 'name' not in already_processed: - 
already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -1863,6 +2053,7 @@ class dependency(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, softpkgref=None, propertyref=None, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): + self.original_tagname_ = None self.type_ = _cast(None, type_) self.softpkgref = softpkgref self.propertyref = propertyref @@ -1871,6 +2062,11 @@ def __init__(self, type_=None, softpkgref=None, propertyref=None, simpleref=None self.structref = structref self.structsequenceref = structsequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, dependency) + if subclass is not None: + return subclass(*args_, **kwargs_) if dependency.subclass: return dependency.subclass(*args_, **kwargs_) else: @@ -1897,26 +2093,43 @@ def set_structsequenceref(self, structsequenceref): self.structsequenceref = str def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) + def hasContent_(self): + if ( + self.softpkgref is not None or + self.propertyref is not None or + self.simpleref is not None or + self.simplesequenceref is not 
None or + self.structref is not None or + self.structsequenceref is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='dependency', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('dependency') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='dependency') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='dependency', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='dependency'): if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='dependency', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -1934,183 +2147,128 @@ def exportChildren(self, outfile, level, namespace_='', name_='dependency', from self.structref.export(outfile, level, namespace_, name_='structref', pretty_print=pretty_print) if self.structsequenceref is not None: 
self.structsequenceref.export(outfile, level, namespace_, name_='structsequenceref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.softpkgref is not None or - self.propertyref is not None or - self.simpleref is not None or - self.simplesequenceref is not None or - self.structref is not None or - self.structsequenceref is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='dependency'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.softpkgref is not None: - showIndent(outfile, level) - outfile.write('softpkgref=model_.softPkgRef(\n') - self.softpkgref.exportLiteral(outfile, level, name_='softpkgref') - showIndent(outfile, level) - outfile.write('),\n') - if self.propertyref is not None: - showIndent(outfile, level) - outfile.write('propertyref=model_.propertyRef(\n') - self.propertyref.exportLiteral(outfile, level, name_='propertyref') - showIndent(outfile, level) - outfile.write('),\n') - if self.simpleref is not None: - showIndent(outfile, level) - outfile.write('simpleref=model_.simpleref(\n') - self.simpleref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.simplesequenceref is not None: - showIndent(outfile, level) - outfile.write('simplesequenceref=model_.simplesequenceref(\n') - self.simplesequenceref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.structref is not None: - showIndent(outfile, level) - outfile.write('structref=model_.structref(\n') - 
self.structref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - if self.structsequenceref is not None: - showIndent(outfile, level) - outfile.write('structsequenceref=model_.structsequenceref(\n') - self.structsequenceref.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('type', node) if value is not None and 'type' not in already_processed: - already_processed.append('type') + already_processed.add('type') self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'softpkgref': obj_ = softPkgRef.factory() obj_.build(child_) - self.set_softpkgref(obj_) + self.softpkgref = obj_ + obj_.original_tagname_ = 'softpkgref' elif nodeName_ == 'propertyref': obj_ = propertyRef.factory() obj_.build(child_) - self.set_propertyref(obj_) + self.propertyref = obj_ + obj_.original_tagname_ = 'propertyref' elif nodeName_ == 'simpleref': obj_ = simpleref.factory() obj_.build(child_) - self.set_simpleref(obj_) + self.simpleref = obj_ + obj_.original_tagname_ = 'simpleref' elif nodeName_ == 'simplesequenceref': obj_ = simplesequenceref.factory() obj_.build(child_) - self.set_simplesequenceref(obj_) + self.simplesequenceref = obj_ + obj_.original_tagname_ = 'simplesequenceref' elif nodeName_ == 'structref': obj_ = structref.factory() obj_.build(child_) - self.set_structref(obj_) + self.structref = obj_ + obj_.original_tagname_ = 'structref' elif nodeName_ == 'structsequenceref': obj_ = structsequenceref.factory() obj_.build(child_) - self.set_structsequenceref(obj_) + self.structsequenceref = obj_ + 
obj_.original_tagname_ = 'structsequenceref' # end class dependency class runtime(GeneratedsSuper): subclass = None superclass = None - def __init__(self, version=None, name=None): - self.version = _cast(None, version) + def __init__(self, name=None, version=None): + self.original_tagname_ = None self.name = _cast(None, name) - pass + self.version = _cast(None, version) def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, runtime) + if subclass is not None: + return subclass(*args_, **kwargs_) if runtime.subclass: return runtime.subclass(*args_, **kwargs_) else: return runtime(*args_, **kwargs_) factory = staticmethod(factory) - def get_version(self): return self.version - def set_version(self, version): self.version = version - versionProp = property(get_version, set_version) def get_name(self): return self.name def set_name(self, name): self.name = name nameProp = property(get_name, set_name) + def get_version(self): return self.version + def set_version(self, version): self.version = version + versionProp = property(get_version, set_version) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='runtime', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('runtime') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='runtime') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + 
self.exportChildren(outfile, level + 1, namespace_='', name_='runtime', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='runtime'): - if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) - def exportChildren(self, outfile, level, namespace_='', name_='runtime', fromsubclass_=False, pretty_print=True): - pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='runtime'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): + already_processed.add('name') + outfile.write(' name=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.name), input_name='name')), )) if self.version is not None and 'version' not in already_processed: - already_processed.append('version') - showIndent(outfile, level) - outfile.write('version = "%s",\n' % (self.version,)) - if self.name is not None and 'name' not in already_processed: - already_processed.append('name') - showIndent(outfile, level) - outfile.write('name = "%s",\n' % (self.name,)) - def exportLiteralChildren(self, outfile, level, name_): + already_processed.add('version') + outfile.write(' version=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.version), input_name='version')), )) + def exportChildren(self, 
outfile, level, namespace_='', name_='runtime', fromsubclass_=False, pretty_print=True): pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('version', node) - if value is not None and 'version' not in already_processed: - already_processed.append('version') - self.version = value value = find_attr_value_('name', node) if value is not None and 'name' not in already_processed: - already_processed.append('name') + already_processed.add('name') self.name = value + value = find_attr_value_('version', node) + if value is not None and 'version' not in already_processed: + already_processed.add('version') + self.version = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass # end class runtime @@ -2120,10 +2278,15 @@ class propertyRef(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, value=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.value = _cast(None, value) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, propertyRef) + if subclass is not None: + return subclass(*args_, **kwargs_) if propertyRef.subclass: return propertyRef.subclass(*args_, **kwargs_) else: @@ -2135,66 +2298,57 @@ def set_refid(self, refid): self.refid = refid def get_value(self): return self.value def set_value(self, value): self.value = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='propertyRef', namespacedef_='', pretty_print=True): + imported_ns_def_ = 
GenerateDSNamespaceDefs_.get('propertyRef') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='propertyRef') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='propertyRef', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='propertyRef'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), )) + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) def exportChildren(self, outfile, level, namespace_='', name_='propertyRef', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='propertyRef'): - level += 1 - 
self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - showIndent(outfile, level) - outfile.write('value = "%s",\n' % (self.value,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value value = find_attr_value_('value', node) if value is not None and 'value' not in already_processed: - already_processed.append('value') + already_processed.add('value') self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2205,9 +2359,15 @@ class softPkgRef(GeneratedsSuper): subclass = None superclass = None def __init__(self, localfile=None, implref=None): + self.original_tagname_ = None self.localfile = localfile self.implref = implref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, softPkgRef) + if subclass is not None: + return subclass(*args_, **kwargs_) if softPkgRef.subclass: return softPkgRef.subclass(*args_, **kwargs_) else: @@ -2219,25 +2379,38 @@ def set_localfile(self, 
localfile): self.localfile = localfile def get_implref(self): return self.implref def set_implref(self, implref): self.implref = implref implrefProp = property(get_implref, set_implref) - def export(self, outfile, level, namespace_='', name_='softPkgRef', namespacedef_='', pretty_print=True): + def hasContent_(self): + if ( + self.localfile is not None or + self.implref is not None + ): + return True + else: + return False + def export(self, outfile, level, namespace_='', name_='softpkgref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('softpkgref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] - self.exportAttributes(outfile, level, already_processed, namespace_, name_='softPkgRef') + already_processed = set() + self.exportAttributes(outfile, level, already_processed, namespace_, name_='softpkgref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='softpkgref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) - def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='softPkgRef'): + def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='softpkgref'): pass - def exportChildren(self, outfile, level, namespace_='', name_='softPkgRef', fromsubclass_=False, pretty_print=True): + def exportChildren(self, outfile, level, namespace_='', name_='softpkgref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = 
'\n' else: @@ -2246,50 +2419,26 @@ def exportChildren(self, outfile, level, namespace_='', name_='softPkgRef', from self.localfile.export(outfile, level, namespace_, name_='localfile', pretty_print=pretty_print) if self.implref is not None: self.implref.export(outfile, level, namespace_, name_='implref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.localfile is not None or - self.implref is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='softPkgRef'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - if self.localfile is not None: - showIndent(outfile, level) - outfile.write('localfile=model_.localFile(\n') - self.localfile.exportLiteral(outfile, level, name_='localfile') - showIndent(outfile, level) - outfile.write('),\n') - if self.implref is not None: - showIndent(outfile, level) - outfile.write('implref=model_.implRef(\n') - self.implref.exportLiteral(outfile, level, name_='implref') - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'localfile': obj_ = localFile.factory() obj_.build(child_) - self.set_localfile(obj_) + self.localfile = obj_ + obj_.original_tagname_ = 'localfile' elif nodeName_ == 'implref': obj_ = implRef.factory() obj_.build(child_) - self.set_implref(obj_) + self.implref = obj_ + 
obj_.original_tagname_ = 'implref' # end class softPkgRef @@ -2297,9 +2446,14 @@ class implRef(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None): + self.original_tagname_ = None self.refid = _cast(None, refid) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, implRef) + if subclass is not None: + return subclass(*args_, **kwargs_) if implRef.subclass: return implRef.subclass(*args_, **kwargs_) else: @@ -2308,55 +2462,50 @@ def factory(*args_, **kwargs_): def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='implRef', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('implRef') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='implRef') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='implRef', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='implRef'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % 
(self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='implRef', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='implRef'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2366,9 +2515,10 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): class usesDevice(GeneratedsSuper): subclass = None superclass = None - def __init__(self, type_=None, id_=None, propertyref=None, simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): - self.type_ = _cast(None, type_) + def __init__(self, id_=None, type_=None, propertyref=None, 
simpleref=None, simplesequenceref=None, structref=None, structsequenceref=None): + self.original_tagname_ = None self.id_ = _cast(None, id_) + self.type_ = _cast(None, type_) if propertyref is None: self.propertyref = [] else: @@ -2390,6 +2540,11 @@ def __init__(self, type_=None, id_=None, propertyref=None, simpleref=None, simpl else: self.structsequenceref = structsequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, usesDevice) + if subclass is not None: + return subclass(*args_, **kwargs_) if usesDevice.subclass: return usesDevice.subclass(*args_, **kwargs_) else: @@ -2398,57 +2553,78 @@ def factory(*args_, **kwargs_): def get_propertyref(self): return self.propertyref def set_propertyref(self, propertyref): self.propertyref = propertyref def add_propertyref(self, value): self.propertyref.append(value) - def insert_propertyref(self, index, value): self.propertyref[index] = value + def insert_propertyref_at(self, index, value): self.propertyref.insert(index, value) + def replace_propertyref_at(self, index, value): self.propertyref[index] = value propertyrefProp = property(get_propertyref, set_propertyref) def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) def get_simplesequenceref(self): return self.simplesequenceref def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref def add_simplesequenceref(self, value): self.simplesequenceref.append(value) - def insert_simplesequenceref(self, index, value): self.simplesequenceref[index] = 
value + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) def get_structref(self): return self.structref def set_structref(self, structref): self.structref = structref def add_structref(self, value): self.structref.append(value) - def insert_structref(self, index, value): self.structref[index] = value + def insert_structref_at(self, index, value): self.structref.insert(index, value) + def replace_structref_at(self, index, value): self.structref[index] = value structrefProp = property(get_structref, set_structref) def get_structsequenceref(self): return self.structsequenceref def set_structsequenceref(self, structsequenceref): self.structsequenceref = structsequenceref def add_structsequenceref(self, value): self.structsequenceref.append(value) - def insert_structsequenceref(self, index, value): self.structsequenceref[index] = value + def insert_structsequenceref_at(self, index, value): self.structsequenceref.insert(index, value) + def replace_structsequenceref_at(self, index, value): self.structsequenceref[index] = value structsequencerefProp = property(get_structsequenceref, set_structsequenceref) + def get_id(self): return self.id_ + def set_id(self, id_): self.id_ = id_ + idProp = property(get_id, set_id) def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ typeProp = property(get_type, set_type) - def get_id(self): return self.id_ - def set_id(self, id): self.id_ = id - idProp = property(get_id, set_id) + def hasContent_(self): + if ( + self.propertyref or + self.simpleref or + self.simplesequenceref or + self.structref or + self.structsequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='usesDevice', namespacedef_='', pretty_print=True): + imported_ns_def_ = 
GenerateDSNamespaceDefs_.get('usesDevice') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='usesDevice') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='usesDevice', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='usesDevice'): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), )) if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id_).encode(ExternalEncoding), input_name='id'), )) + already_processed.add('id') + outfile.write(' id=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.id_), input_name='id')), )) + if self.type_ is not None and 'type_' not in already_processed: + already_processed.add('type_') + outfile.write(' type=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.type_), input_name='type')), )) def exportChildren(self, outfile, level, namespace_='', name_='usesDevice', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2464,127 +2640,48 @@ def exportChildren(self, outfile, level, 
namespace_='', name_='usesDevice', from structref_.export(outfile, level, namespace_, name_='structref', pretty_print=pretty_print) for structsequenceref_ in self.structsequenceref: structsequenceref_.export(outfile, level, namespace_, name_='structsequenceref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.propertyref or - self.simpleref or - self.simplesequenceref or - self.structref or - self.structsequenceref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='usesDevice'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.type_ is not None and 'type_' not in already_processed: - already_processed.append('type_') - showIndent(outfile, level) - outfile.write('type_ = "%s",\n' % (self.type_,)) - if self.id_ is not None and 'id' not in already_processed: - already_processed.append('id') - showIndent(outfile, level) - outfile.write('id = "%s",\n' % (self.id_,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('propertyref=[\n') - level += 1 - for propertyref_ in self.propertyref: - showIndent(outfile, level) - outfile.write('model_.propertyRef(\n') - propertyref_.exportLiteral(outfile, level, name_='propertyRef') - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('simplesequenceref=[\n') - level += 1 - for simplesequenceref_ 
in self.simplesequenceref: - showIndent(outfile, level) - outfile.write('model_.simplesequenceref(\n') - simplesequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structref=[\n') - level += 1 - for structref_ in self.structref: - showIndent(outfile, level) - outfile.write('model_.structref(\n') - structref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') - showIndent(outfile, level) - outfile.write('structsequenceref=[\n') - level += 1 - for structsequenceref_ in self.structsequenceref: - showIndent(outfile, level) - outfile.write('model_.structsequenceref(\n') - structsequenceref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): - value = find_attr_value_('type', node) - if value is not None and 'type' not in already_processed: - already_processed.append('type') - self.type_ = value value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: - already_processed.append('id') + already_processed.add('id') self.id_ = value + value = find_attr_value_('type', node) + if value is not None and 'type' not in already_processed: + already_processed.add('type') + self.type_ = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'propertyref': obj_ = propertyRef.factory() obj_.build(child_) 
self.propertyref.append(obj_) + obj_.original_tagname_ = 'propertyref' elif nodeName_ == 'simpleref': obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' elif nodeName_ == 'simplesequenceref': obj_ = simplesequenceref.factory() obj_.build(child_) self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' elif nodeName_ == 'structref': obj_ = structref.factory() obj_.build(child_) self.structref.append(obj_) + obj_.original_tagname_ = 'structref' elif nodeName_ == 'structsequenceref': obj_ = structsequenceref.factory() obj_.build(child_) self.structsequenceref.append(obj_) + obj_.original_tagname_ = 'structsequenceref' # end class usesDevice @@ -2592,10 +2689,15 @@ class simpleref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, value=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.value = _cast(None, value) - pass def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simpleref) + if subclass is not None: + return subclass(*args_, **kwargs_) if simpleref.subclass: return simpleref.subclass(*args_, **kwargs_) else: @@ -2607,66 +2709,57 @@ def set_refid(self, refid): self.refid = refid def get_value(self): return self.value def set_value(self, value): self.value = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='simpleref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simpleref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' 
+ namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simpleref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simpleref', pretty_print=pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simpleref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), )) + already_processed.add('value') + outfile.write(' value=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.value), input_name='value')), )) def exportChildren(self, outfile, level, namespace_='', name_='simpleref', fromsubclass_=False, pretty_print=True): pass - def hasContent_(self): - if ( - - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='simpleref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - 
outfile.write('refid = "%s",\n' % (self.refid,)) - if self.value is not None and 'value' not in already_processed: - already_processed.append('value') - showIndent(outfile, level) - outfile.write('value = "%s",\n' % (self.value,)) - def exportLiteralChildren(self, outfile, level, name_): - pass def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value value = find_attr_value_('value', node) if value is not None and 'value' not in already_processed: - already_processed.append('value') + already_processed.add('value') self.value = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): pass @@ -2677,9 +2770,15 @@ class simplesequenceref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, values=None): + self.original_tagname_ = None self.refid = _cast(None, refid) self.values = values def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, simplesequenceref) + if subclass is not None: + return subclass(*args_, **kwargs_) if simplesequenceref.subclass: return simplesequenceref.subclass(*args_, **kwargs_) else: @@ -2691,26 +2790,38 @@ def set_values(self, values): self.values = values def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.values is not None + ): + return True + else: + return False def export(self, outfile, level, namespace_='', 
name_='simplesequenceref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('simplesequenceref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='simplesequenceref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='simplesequenceref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='simplesequenceref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='simplesequenceref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2718,58 +2829,47 @@ def exportChildren(self, outfile, level, namespace_='', name_='simplesequenceref eol_ = '' if self.values is not None: self.values.export(outfile, level, namespace_, name_='values', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.values is not None - ): - return True - else: - return False - def exportLiteral(self, outfile, 
level, name_='simplesequenceref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - if self.values is not None: - showIndent(outfile, level) - outfile.write('values=model_.values(\n') - self.values.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'values': obj_ = values.factory() obj_.build(child_) - self.set_values(obj_) + self.values = obj_ + obj_.original_tagname_ = 'values' # end class simplesequenceref class structref(GeneratedsSuper): subclass = None superclass = None - def __init__(self, refid=None, simpleref=None): + def __init__(self, refid=None, simpleref=None, simplesequenceref=None): + self.original_tagname_ = None self.refid = _cast(None, refid) if simpleref is None: self.simpleref = [] else: self.simpleref = simpleref + if simplesequenceref is None: + self.simplesequenceref = [] + else: + self.simplesequenceref = simplesequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not 
None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structref) + if subclass is not None: + return subclass(*args_, **kwargs_) if structref.subclass: return structref.subclass(*args_, **kwargs_) else: @@ -2778,31 +2878,51 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) + def get_simplesequenceref(self): return self.simplesequenceref + def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref + def add_simplesequenceref(self, value): self.simplesequenceref.append(value) + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value + simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - 
already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='structref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='structref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2810,51 +2930,31 @@ def exportChildren(self, outfile, level, namespace_='', name_='structref', froms eol_ = '' for simpleref_ in self.simpleref: simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): 
- showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + for simplesequenceref_ in self.simplesequenceref: + simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'simpleref': obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' + elif nodeName_ == 'simplesequenceref': + obj_ = simplesequenceref.factory() + obj_.build(child_) + self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' # end class structref @@ -2862,12 +2962,18 @@ class structsequenceref(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, structvalue=None): + self.original_tagname_ = None self.refid = _cast(None, refid) if structvalue is None: self.structvalue = [] else: self.structvalue = structvalue def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structsequenceref) + if subclass is not None: + return subclass(*args_, **kwargs_) if 
structsequenceref.subclass: return structsequenceref.subclass(*args_, **kwargs_) else: @@ -2876,31 +2982,44 @@ def factory(*args_, **kwargs_): def get_structvalue(self): return self.structvalue def set_structvalue(self, structvalue): self.structvalue = structvalue def add_structvalue(self, value): self.structvalue.append(value) - def insert_structvalue(self, index, value): self.structvalue[index] = value + def insert_structvalue_at(self, index, value): self.structvalue.insert(index, value) + def replace_structvalue_at(self, index, value): self.structvalue[index] = value structvalueProp = property(get_structvalue, set_structvalue) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid refidProp = property(get_refid, set_refid) + def hasContent_(self): + if ( + self.structvalue + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structsequenceref', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structsequenceref') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structsequenceref') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structsequenceref', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespace_='', 
name_='structsequenceref'): if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - outfile.write(' refid=%s' % (self.gds_format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) + already_processed.add('refid') + outfile.write(' refid=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.refid), input_name='refid')), )) def exportChildren(self, outfile, level, namespace_='', name_='structsequenceref', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' @@ -2908,63 +3027,46 @@ def exportChildren(self, outfile, level, namespace_='', name_='structsequenceref eol_ = '' for structvalue_ in self.structvalue: structvalue_.export(outfile, level, namespace_, name_='structvalue', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.structvalue - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structsequenceref'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - if self.refid is not None and 'refid' not in already_processed: - already_processed.append('refid') - showIndent(outfile, level) - outfile.write('refid = "%s",\n' % (self.refid,)) - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('structvalue=[\n') - level += 1 - for structvalue_ in self.structvalue: - showIndent(outfile, level) - outfile.write('model_.structvalue(\n') - structvalue_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = 
Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('refid', node) if value is not None and 'refid' not in already_processed: - already_processed.append('refid') + already_processed.add('refid') self.refid = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'structvalue': obj_ = structvalue.factory() obj_.build(child_) self.structvalue.append(obj_) + obj_.original_tagname_ = 'structvalue' # end class structsequenceref class structvalue(GeneratedsSuper): subclass = None superclass = None - def __init__(self, simpleref=None): + def __init__(self, simpleref=None, simplesequenceref=None): + self.original_tagname_ = None if simpleref is None: self.simpleref = [] else: self.simpleref = simpleref + if simplesequenceref is None: + self.simplesequenceref = [] + else: + self.simplesequenceref = simplesequenceref def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, structvalue) + if subclass is not None: + return subclass(*args_, **kwargs_) if structvalue.subclass: return structvalue.subclass(*args_, **kwargs_) else: @@ -2973,20 +3075,40 @@ def factory(*args_, **kwargs_): def get_simpleref(self): return self.simpleref def set_simpleref(self, simpleref): self.simpleref = simpleref def add_simpleref(self, value): self.simpleref.append(value) - def insert_simpleref(self, index, value): self.simpleref[index] = value + def insert_simpleref_at(self, index, value): self.simpleref.insert(index, value) + def replace_simpleref_at(self, index, value): self.simpleref[index] = value simplerefProp = property(get_simpleref, set_simpleref) + def get_simplesequenceref(self): return self.simplesequenceref + def set_simplesequenceref(self, simplesequenceref): self.simplesequenceref = simplesequenceref + def add_simplesequenceref(self, 
value): self.simplesequenceref.append(value) + def insert_simplesequenceref_at(self, index, value): self.simplesequenceref.insert(index, value) + def replace_simplesequenceref_at(self, index, value): self.simplesequenceref[index] = value + simplesequencerefProp = property(get_simplesequenceref, set_simplesequenceref) + def hasContent_(self): + if ( + self.simpleref or + self.simplesequenceref + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='structvalue', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('structvalue') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='structvalue') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='structvalue', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3000,38 +3122,15 @@ def exportChildren(self, outfile, level, namespace_='', name_='structvalue', fro eol_ = '' for simpleref_ in self.simpleref: simpleref_.export(outfile, level, namespace_, name_='simpleref', pretty_print=pretty_print) - def hasContent_(self): - if ( - self.simpleref - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='structvalue'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, 
outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('simpleref=[\n') - level += 1 - for simpleref_ in self.simpleref: - showIndent(outfile, level) - outfile.write('model_.simpleref(\n') - simpleref_.exportLiteral(outfile, level) - showIndent(outfile, level) - outfile.write('),\n') - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + for simplesequenceref_ in self.simplesequenceref: + simplesequenceref_.export(outfile, level, namespace_, name_='simplesequenceref', pretty_print=pretty_print) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3039,6 +3138,12 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): obj_ = simpleref.factory() obj_.build(child_) self.simpleref.append(obj_) + obj_.original_tagname_ = 'simpleref' + elif nodeName_ == 'simplesequenceref': + obj_ = simplesequenceref.factory() + obj_.build(child_) + self.simplesequenceref.append(obj_) + obj_.original_tagname_ = 'simplesequenceref' # end class structvalue @@ -3046,11 +3151,17 @@ class values(GeneratedsSuper): subclass = None superclass = None def __init__(self, value=None): + self.original_tagname_ = None if value is None: self.value = [] else: self.value = value def factory(*args_, **kwargs_): + if CurrentSubclassModule_ is not None: + subclass = getSubclassFromModule_( + CurrentSubclassModule_, values) + if subclass is not None: + return subclass(*args_, **kwargs_) if values.subclass: return values.subclass(*args_, **kwargs_) else: @@ -3059,20 +3170,33 @@ def factory(*args_, **kwargs_): def 
get_value(self): return self.value def set_value(self, value): self.value = value def add_value(self, value): self.value.append(value) - def insert_value(self, index, value): self.value[index] = value + def insert_value_at(self, index, value): self.value.insert(index, value) + def replace_value_at(self, index, value): self.value[index] = value valueProp = property(get_value, set_value) + def hasContent_(self): + if ( + self.value + ): + return True + else: + return False def export(self, outfile, level, namespace_='', name_='values', namespacedef_='', pretty_print=True): + imported_ns_def_ = GenerateDSNamespaceDefs_.get('values') + if imported_ns_def_ is not None: + namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' + if self.original_tagname_ is not None: + name_ = self.original_tagname_ showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) - already_processed = [] + already_processed = set() self.exportAttributes(outfile, level, already_processed, namespace_, name_='values') if self.hasContent_(): outfile.write('>%s' % (eol_, )) - self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print) + self.exportChildren(outfile, level + 1, namespace_='', name_='values', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('%s' % (namespace_, name_, eol_)) else: @@ -3086,36 +3210,14 @@ def exportChildren(self, outfile, level, namespace_='', name_='values', fromsubc eol_ = '' for value_ in self.value: showIndent(outfile, level, pretty_print) - outfile.write('<%svalue>%s%s' % (namespace_, self.gds_format_string(quote_xml(value_).encode(ExternalEncoding), input_name='value'), namespace_, eol_)) - def hasContent_(self): - if ( - self.value - ): - return True - else: - return False - def exportLiteral(self, outfile, level, name_='values'): - level += 1 - self.exportLiteralAttributes(outfile, level, [], name_) - if 
self.hasContent_(): - self.exportLiteralChildren(outfile, level, name_) - def exportLiteralAttributes(self, outfile, level, already_processed, name_): - pass - def exportLiteralChildren(self, outfile, level, name_): - showIndent(outfile, level) - outfile.write('value=[\n') - level += 1 - for value_ in self.value: - showIndent(outfile, level) - outfile.write('%s,\n' % quote_python(value_).encode(ExternalEncoding)) - level -= 1 - showIndent(outfile, level) - outfile.write('],\n') + outfile.write('%s%s' % (self.gds_encode(self.gds_format_string(quote_xml(value_), input_name='value')), eol_)) def build(self, node): - self.buildAttributes(node, node.attrib, []) + already_processed = set() + self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) + return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): @@ -3126,73 +3228,118 @@ def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): # end class values +GDSClassesMapping = { + 'softpkg': softPkg, +} + + USAGE_TEXT = """ Usage: python .py [ -s ] """ + def usage(): - print USAGE_TEXT + print(USAGE_TEXT) sys.exit(1) def get_root_tag(node): tag = Tag_pattern_.match(node.tag).groups()[-1] - rootClass = globals().get(tag) + rootClass = GDSClassesMapping.get(tag) + if rootClass is None: + rootClass = globals().get(tag) return tag, rootClass -def parse(inFileName): - doc = parsexml_(inFileName) +def parse(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: - rootTag = 'softPkg' + rootTag = 'softpkg' rootClass = softPkg rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. 
doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_=rootTag, -## namespacedef_='', -## pretty_print=True) +## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='', +## pretty_print=True) return rootObj -def parseString(inString): - from StringIO import StringIO - doc = parsexml_(StringIO(inString)) +def parseEtree(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: - rootTag = 'softPkg' + rootTag = 'softpkg' rootClass = softPkg rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('\n') -## rootObj.export(sys.stdout, 0, name_="softPkg", -## namespacedef_='') + mapping = {} + rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping) + reverse_mapping = rootObj.gds_reverse_node_mapping(mapping) +## if not silence: +## content = etree_.tostring( +## rootElement, pretty_print=True, +## xml_declaration=True, encoding="utf-8") +## sys.stdout.write(content) +## sys.stdout.write('\n') + return rootObj, rootElement, mapping, reverse_mapping + + +def parseString(inString, silence=False): + '''Parse a string, create the object tree, and export it. + + Arguments: + - inString -- A string. This XML fragment should not start + with an XML declaration containing an encoding. + - silence -- A boolean. If False, export the object. + Returns -- The root object in the tree. + ''' + parser = None + rootNode= parsexmlstring_(inString, parser) + rootTag, rootClass = get_root_tag(rootNode) + if rootClass is None: + rootTag = 'softpkg' + rootClass = softPkg + rootObj = rootClass.factory() + rootObj.build(rootNode) + # Enable Python to collect the space used by the DOM. 
+## if not silence: +## sys.stdout.write('\n') +## rootObj.export( +## sys.stdout, 0, name_=rootTag, +## namespacedef_='') return rootObj -def parseLiteral(inFileName): - doc = parsexml_(inFileName) +def parseLiteral(inFileName, silence=False): + parser = None + doc = parsexml_(inFileName, parser) rootNode = doc.getroot() rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: - rootTag = 'softPkg' + rootTag = 'softpkg' rootClass = softPkg rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None -## sys.stdout.write('#from spd import *\n\n') -## sys.stdout.write('import spd as model_\n\n') -## sys.stdout.write('rootObj = model_.rootTag(\n') -## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) -## sys.stdout.write(')\n') +## if not silence: +## sys.stdout.write('#from spd import *\n\n') +## sys.stdout.write('import spd as model_\n\n') +## sys.stdout.write('rootObj = model_.rootClass(\n') +## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag) +## sys.stdout.write(')\n') return rootObj @@ -3227,11 +3374,11 @@ def main(): "runtime", "simpleref", "simplesequenceref", - "softPkg", - "softPkgRef", + "softpkg", + "softpkgref", "structref", "structsequenceref", "structvalue", "usesDevice", "values" - ] +] diff --git a/redhawk/src/base/framework/python/ossie/properties.py b/redhawk/src/base/framework/python/ossie/properties.py index 41278b2e0..64e422c22 100644 --- a/redhawk/src/base/framework/python/ossie/properties.py +++ b/redhawk/src/base/framework/python/ossie/properties.py @@ -33,6 +33,7 @@ import types import struct import inspect +from ossie.utils import rhtime # numpy types to Corba Type codes __NP_ALT_MAP = { @@ -128,7 +129,12 @@ CF._tc_complexULongLong, CF.complexULongLong, long, - CF._tc_complexULongLongSeq) + CF._tc_complexULongLongSeq), + 'utctime': (CF.UTCTime, + CF._tc_UTCTime, + CF.UTCTime, + CF.UTCTime, + CF._tc_UTCTimeSequence) } _SCA_TYPES = [ @@ -136,9 +142,18 @@ 'objref', 
'octet', 'string', 'ulong', 'ushort', 'longlong', 'ulonglong', 'complexFloat', 'complexBoolean', 'complexULong', 'complexShort', 'complexOctet', 'complexChar', 'complexUShort', 'complexDouble', - 'complexLong', 'complexLongLong', 'complexULongLong' + 'complexLong', 'complexLongLong', 'complexULongLong', 'utctime' ] +def getTypeMap(): + return __TYPE_MAP + +def getTypeNameFromTC(_tc): + for _key in __TYPE_MAP: + if __TYPE_MAP[_key][1] == _tc: + return _key + return None + def getPyType(type_, alt_map=None): if alt_map: try: @@ -174,11 +189,11 @@ def _toPyComplex(data, type_): CF.complexType(real=A, imag=B) to complex(A,B) ''' - if type(data) == type({}): + if isinstance(data, dict): real = data["real"] imag = data["imag"] newdata = complex(real, imag) - elif type(data) == complex: + elif isinstance(data, complex): newdata = complex(data) else: # assume CF::complexType @@ -195,7 +210,7 @@ def to_pyvalue(data, type_,alt_py_tc=None): pytype = getPyType(type_,alt_py_tc) - if type(data) != pytype: + if not isinstance(data, pytype): # Handle boolean strings as a special case if type_ == "boolean" and type(data) in (str, unicode): try: @@ -212,8 +227,10 @@ def to_pyvalue(data, type_,alt_py_tc=None): data = pytype(data,0) else: data = pytype(data) + elif pytype == CF.UTCTime: + data = pytype(data['tcstatus'], data['twsec'], data['tfsec']) else: - data = pytype(data) + data = pytype(data) return data def to_xmlvalue(data, type_): @@ -249,6 +266,8 @@ def to_xmlvalue(data, type_): v=retval elif type_ == "boolean": v = str(bool(data)).lower() + elif type_ == "utctime": + v = rhtime.toString(data) elif type_ == "string": # Remove quotes added by repr v = v[1:-1] return v @@ -257,7 +276,7 @@ def _convertComplexToCFComplex(data, type_): cfComplexType = getCFType(type_) memberType = getMemberType(type_) - if type(data) == type({}): + if isinstance(data, dict): #if the data is already in the form of a CF:complex, recast real = data["real"] imag = data["imag"] @@ -281,6 +300,10 @@ 
def to_tc_value(data, type_, alt_map=None): # get the CF typecode tc = getTypeCode(type_) return CORBA.Any(tc, data) + elif type_.find('utctime') == 0: + tc = getTypeCode(type_) + _any = CORBA.Any(tc, data) + return CORBA.Any(tc, data) elif alt_map and alt_map.has_key(type_): pytype, tc = alt_map[type_] if tc == None: @@ -299,7 +322,6 @@ def to_tc_value(data, type_, alt_map=None): # Convert to the correct Python using alternate mapping data = to_pyvalue(data, type_, alt_map) return CORBA.Any(tc, data) - elif __TYPE_MAP.has_key(type_): # If the typecode is known, use that pytype, tc = __TYPE_MAP[type_] @@ -737,8 +759,11 @@ def inBounds(self, value): # The value is good if both real and imag members fall within # the bounds. - goodValue = boundChecker.inBounds(value["real"]) and boundChecker.inBounds(value["imag"]) - + if type(value) == dict: + goodValue = boundChecker.inBounds(value["real"]) and boundChecker.inBounds(value["imag"]) + else: + goodValue = boundChecker.inBounds(value.real) and boundChecker.inBounds(value.imag) + return goodValue def checkValue(self, value, obj): diff --git a/redhawk/src/base/framework/python/ossie/resource.py b/redhawk/src/base/framework/python/ossie/resource.py index 312bfa088..27df95c40 100644 --- a/redhawk/src/base/framework/python/ossie/resource.py +++ b/redhawk/src/base/framework/python/ossie/resource.py @@ -57,6 +57,15 @@ def _getCallback(obj, methodName): else: return None + +def _makeTime(status=-1, wsec=0, fsec=0): + _time = CF.UTCTime(status, wsec, fsec) + if status == -1: + _time.tcstatus = 1 + ts = time.time() + _time.twsec = int(ts) + _time.tfsec = ts-int(ts) + return _time class PropertyAttributeMixIn: """Include this MixIn with your Device if you want your properties to @@ -128,6 +137,17 @@ def isValid(self, portobj): # Uses ports are always CF.Port objects return (portobj != None) and (portobj._is_a(self.CF_PORT_REPID)) +class PortCallError(Exception): + def __init__(self, message='', connections={}): + _message = 
message + if len(connections) > 0: + _message += 'Connections available: ' + for key in connections: + _message += key+', ' + if len(connections) > 0: + _message = _message[:-2] + super(PortCallError, self).__init__(_message) + class providesport(_port): def isValid(self, portobj): """Validate that portobj is the expected type to get returned by PortSupplier.getPort() based on the repid.""" @@ -139,8 +159,7 @@ def isValid(self, portobj): # Support Classes for handling PropertyChange Listeners # class _PCL_Except(Exception): - def __init__(self,msg=""): - self.message = msg + pass # Monitoring thread... class _PropertyChangeThread(ThreadedComponent): @@ -175,7 +194,8 @@ def notify( self, prec, props ): evt = CF.PropertyChangeListener.PropertyChangeEvent( str(uuid.uuid1()), prec.regId, prec.rscId, - props) + props, + _makeTime()) if self.pub: self.pub.push( evt ) except: @@ -204,7 +224,8 @@ def notify( self, prec, props ): evt = CF.PropertyChangeListener.PropertyChangeEvent( str(uuid.uuid1()), prec.regId, prec.rscId, - props) + props, + _makeTime()) if self.listener: self.listener.propertyChange( evt ) except: @@ -291,7 +312,7 @@ def callback(self, id_, oldvalue, newvalue ): self.props[id_].recordChanged() class Resource(object): - def __init__(self, identifier, execparams, propertydefs=(), loggerName=None): + def __init__(self, identifier, execparams, propertydefs=(), loggerName=None, baseLogName=None): """propertydefs is a iterable of tuples that contain (propid, propname, type, mode, defaultvalue, units, action, kinds) @@ -322,15 +343,22 @@ def __init__(self, identifier, execparams, propertydefs=(), loggerName=None): self.loggingMacros = ossie.logger.GetDefaultMacros() ossie.logger.ResolveHostInfo( self.loggingMacros ) self.loggingCtx = None - self.loggingURL=None + self.loggingURL = None + self._origLevelSet = False if loggerName == None: self._logid = execparams.get("NAME_BINDING", self._id ) self._logid = self._logid.rsplit("_", 1)[0] else: self._logid = 
loggerName + if baseLogName == None: + baseLogName = execparams.get("NAME_BINDING", self._id ) self._logid = self._logid.replace(":","_") self._log = logging.getLogger(self._logid) + self._baseLog = logging.getLogger(baseLogName) + self._resourceLog = self._baseLog.getChildLogger('Resource', ossie.utils.log4py.SYSTEM_LOGS) + self._portSupplierLog = self._baseLog.getChildLogger('PortSupplier', ossie.utils.log4py.SYSTEM_LOGS) + self._propertySetLog = self._baseLog.getChildLogger('PropertySet', ossie.utils.log4py.SYSTEM_LOGS) loglevel = self._log.getEffectiveLevel() if loglevel == logging.NOTSET: self.logLevel = logging.INFO @@ -380,7 +408,7 @@ def __init_monitors(self): ######################################### # CF::Resource def start(self): - self._log.trace("start()") + self._resourceLog.trace("start()") # Check all ports for a startPort() method, and call it if one exists for portdef in self.__ports.itervalues(): port = portdef.__get__(self) @@ -389,7 +417,7 @@ def start(self): self._started = True def stop(self): - self._log.trace("stop()") + self._resourceLog.trace("stop()") # Check all ports for a stopPort() method, and call it if one exists for portdef in self.__ports.itervalues(): port = portdef.__get__(self) @@ -409,21 +437,21 @@ def _get_softwareProfile(self): ######################################### # CF::LifeCycle def initialize(self): - self._log.trace("initialize()") + self._resourceLog.trace("initialize()") if not self.__initialized: self.__initialized = True try: self.constructor() self.__init_monitors() except Exception, exc: - self._log.error("initialize(): %s", str(exc)) + self._resourceLog.error("initialize(): %s", str(exc)) raise CF.LifeCycle.InitializeError([str(exc)]) def constructor(self): pass def releaseObject(self): - self._log.trace("releaseObject()") + self._resourceLog.trace("releaseObject()") self.stopPropertyChangeMonitor() # disable logging that uses EventChannels ossie.logger.SetEventChannelManager(None) @@ -439,26 +467,26 @@ def 
getPort(self, name): """The default behavior of getPort() will automatically return ports as defined by 'usesport' and 'providesport' static class attributes.""" - self._log.trace("getPort(%s)", name) + self._portSupplierLog.trace("getPort(%s)", name) try: portdef = self.__ports[name] except KeyError: - self._log.warning("getPort() could not find port %s", name) + self._portSupplierLog.warning("getPort() could not find port %s", name) raise CF.PortSupplier.UnknownPort() else: portobj = portdef.__get__(self) if portobj == None: - self._log.warning("component did not implement port %s",name) + self._portSupplierLog.warning("component did not implement port %s",name) raise CF.PortSupplier.UnknownPort() port = portobj._this() if not portdef.isValid(port): - self._log.warning("getPort() for %s did match required repid", name) - self._log.trace("getPort() --> %s", port) + self._portSupplierLog.warning("getPort() for %s did match required repid", name) + self._portSupplierLog.trace("getPort() --> %s", port) return port def getPortSet(self): """Return list of ports for this Resource""" - self._log.trace("getPortSet()") + self._portSupplierLog.trace("getPortSet()") portList = [] for name, portdef in self.__ports.iteritems(): obj_ptr = self.getPort(name) @@ -466,12 +494,12 @@ def getPortSet(self): description = portdef.__doc__ direction = '' if isinstance(portdef, usesport): - direction = 'Uses' + direction = CF.PortSet.DIRECTION_USES elif isinstance(portdef, providesport): if repid == 'IDL:ExtendedEvent/MessageEvent:1.0': - direction = 'Bidir' + direction = CF.PortSet.DIRECTION_BIDIR else: - direction = 'Provides' + direction = CF.PortSet.DIRECTION_PROVIDES info = CF.PortSet.PortInfoType(obj_ptr, name, repid, description, direction) portList.append(info) @@ -488,11 +516,14 @@ def __loadPorts(self): # Common resource logging API # return logger assigned to resource - def getLogger(self): - return self._log + def getLogger(self, logid=None): + if logid != None: + return 
self._log + else: + return logging.getLogger(logid) # return a named logger - def getLogger(self, logid, assignToResource=False): + def getNamedLogger(self, logid, assignToResource=False): newLogger=logging.getLogger(logid) if assignToResource: self._logid = logid @@ -511,21 +542,6 @@ def setResourceContext(self, rscCtx ): rscCtx.apply(self.loggingMacros ) self.loggingCtx = rscCtx - def setLoggingContext(self, rscCtx ): - # apply resource context to macro definitions - if rscCtx: - rscCtx.apply(self.loggingMacros ) - self.loggingCtx = rscCtx - - elif self.loggingCtx : - self.loggingCtx.apply(self.loggingMacros ) - - # load logging configuration url to resource - self.setLogConfigURL( self.loggingURL ) - - # apply logging level - self.setLogLevel( self._logid, self.loglevel ) - # # set the logging context for the resouce. Use the contents of # of the url as the logging configuration data. If the @@ -572,6 +588,7 @@ def saveLoggingContext(self, logcfg_url, oldstyle_loglevel, rscCtx ): cfg_data=ossie.logger.GetConfigFileContents( logcfg_url ) if cfg_data and len(cfg_data) > 0 : self.logConfig = ossie.logger.ExpandMacros( cfg_data, self.loggingMacros ) + self.logConfig = ossie.logger.ConfigureWithContext( self.logConfig, self.loggingMacros ) except: pass @@ -589,20 +606,25 @@ def saveLoggingContext(self, logcfg_url, oldstyle_loglevel, rscCtx ): # assign an event channel manager to the logging library ossie.logger.SetEventChannelManager( self._ecm ) + if not self._origLevelSet: + self._origLevelSet = True; + self._origLogCfgURL = logcfg_url; + self._origLogLevel = oldstyle_loglevel; + self._origCtx = rscCtx; def setLogListenerCallback(self, logListenerCB ): self.logListenerCallback=logListenerCB - + def addPropertyChangeListener(self, id, callback): self._props.addChangeListener(callback, id) ######################################### # CF::LogConfiguration def _get_log_level(self): - if self._log: - lvl = ossie.logger.ConvertLog4ToCFLevel( self._log.getEffectiveLevel()) 
+ if self._baseLog: + lvl = ossie.logger.ConvertLog4ToCFLevel( self._baseLog.getEffectiveLevel()) if lvl != self._logLevel: - self.logLevel = self._log.getEffectiveLevel() + self.logLevel = self._baseLog.getEffectiveLevel() self._logLevel = lvl return self._logLevel @@ -623,15 +645,17 @@ def log_level(self, newLogLevel=None ): ossie.logger.SetLogLevel( self._logid, newLogLevel ) self._logLevel = newLogLevel self.logLevel = ossie.logger.ConvertToLog4Level( newLogLevel ) + self._baseLog.setLevel(self.logLevel) def setLogLevel(self, logid, newLogLevel ): + if not self._baseLog.isLoggerInHierarchy(logid): + raise CF.UnknownIdentifier() + if ossie.logger.SupportedCFLevel(newLogLevel) == False: return if self.logListenerCallback and callable(self.logListenerCallback.logLevelChanged): - #self._logLevel = newLogLevel - #self.logLevel = ossie.logger.ConvertToLog4Level( newLogLevel ) self.logListenerCallback.logLevelChanged(logid, newLogLevel) else: ossie.logger.SetLogLevel( logid, newLogLevel ) @@ -639,6 +663,26 @@ def setLogLevel(self, logid, newLogLevel ): self._logLevel = newLogLevel self.logLevel = ossie.logger.ConvertToLog4Level( newLogLevel ) + def getLogLevel(self, logid ): + if not self._baseLog.isLoggerInHierarchy(logid): + raise CF.UnknownIdentifier() + + lvl = ossie.logger.ConvertLog4ToCFLevel(logging.getLogger(logid).getEffectiveLevel()) + return lvl + + def getNamedLoggers(self): + return self._baseLog.getCurrentLoggers() + + def resetLog(self): + for _logid in self._baseLog.getCurrentLoggers(): + if _logid == self._baseLog.name: + ossie.logger.SetLogLevel(_logid, -1) + _b_id = _logid + while _b_id != self._baseLog.name: + ossie.logger.SetLogLevel(_b_id, -1) + _b_id = logging.getLogger(_b_id).parent.name + self.setLoggingContext(self._origLogCfgURL, self._origLogLevel, self._origCtx) + def getLogConfig(self): return self.logConfig @@ -683,8 +727,10 @@ def retrieve_records_from_date(self, howMany, from_timetStamp ): # CF::TestableObject def runTest(self, testid, 
properties): """Override this function to provide the desired behavior.""" - self._log.trace("runTest()") + self._resourceLog.trace("runTest()") raise CF.TestableObject.UnknownTest() + + _propertyQueryTimestamp = 'QUERY_TIMESTAMP' ######################################### # CF::PropertySet @@ -692,7 +738,7 @@ def query(self, configProperties): self.propertySetAccess.acquire() # If the list is empty, get all props if configProperties == []: - self._log.trace("query all properties") + self._propertySetLog.trace("query all properties") try: rv = [] for propid in self._props.keys(): @@ -700,7 +746,7 @@ def query(self, configProperties): try: value = self._props.query(propid) except Exception, e: - self._log.error('Failed to query %s: %s', propid, e) + self._propertySetLog.error('Failed to query %s: %s', propid, e) value = any.to_any(None) prp = self._props.getPropDef(propid) if type(prp) == ossie.properties.struct_property: @@ -721,18 +767,22 @@ def query(self, configProperties): except: self.propertySetAccess.release() raise + #rv.append(CF.DataType(self._propertyQueryTimestamp, any.to_any(_makeTime()))) # otherwise get only the requested ones else: - self._log.trace("query %s properties", len(configProperties)) + self._propertySetLog.trace("query %s properties", len(configProperties)) try: unknownProperties = [] for prop in configProperties: + if prop.id == self._propertyQueryTimestamp: + prop.value = any.to_any(_makeTime()) + continue if self._props.has_id(prop.id) and self._props.isQueryable(prop.id): try: prop.value = self._props.query(prop.id) except Exception, e: - self._log.error('Failed to query %s: %s', prop.id, e) + self._propertySetLog.error('Failed to query %s: %s', prop.id, e) prp = self._props.getPropDef(prop.id) if type(prp) == ossie.properties.struct_property: newvalval = [] @@ -748,26 +798,23 @@ def query(self, configProperties): newvalval.append(v) prop.value = CORBA.Any(prop.value.typecode(), newvalval) else: - self._log.warning("property %s cannot 
be queried. valid Id: %s", - prop.id, self._props.has_id(prop.id)) unknownProperties.append(prop) except: self.propertySetAccess.release() raise if len(unknownProperties) > 0: - self._log.warning("query called with invalid properties %s", unknownProperties) self.propertySetAccess.release() raise CF.UnknownProperties(unknownProperties) rv = configProperties self.propertySetAccess.release() - self._log.trace("query -> %s properties", len(rv)) + self._propertySetLog.trace("query -> %s properties", len(rv)) return rv def initializeProperties(self, ctorProps): - self._log.trace("initializeProperties(%s)", ctorProps) + self._propertySetLog.trace("initializeProperties(%s)", ctorProps) with self.propertySetAccess: # Disallow multiple calls @@ -783,28 +830,28 @@ def initializeProperties(self, ctorProps): # run configure on property.. disable callback feature self._props.construct(prop.id, prop.value) except ValueError, e: - self._log.warning("Invalid value provided to construct for property %s %s", prop.id, e) + self._propertySetLog.warning("Invalid value provided to construct for property %s %s", prop.id, e) notSet.append(prop) else: - self._log.warning("Tried to construct non-existent, readonly, or property with action not equal to external %s", prop.id) + self._propertySetLog.warning("Tried to construct non-existent, readonly, or property with action not equal to external %s", prop.id) notSet.append(prop) except Exception, e: - self._log.exception("Unexpected exception.") + self._propertySetLog.exception("Unexpected exception.") notSet.append(prop) if notSet: if len(notSet) < len(ctorProps): - self._log.warning("Property initialization failed with partial configuration, %s", notSet) + self._propertySetLog.warning("Property initialization failed with partial configuration, %s", notSet) raise CF.PropertySet.PartialConfiguration(notSet) else: - self._log.warning("Property initialization failed with invalid configuration, %s", notSet) + self._propertySetLog.warning("Property 
initialization failed with invalid configuration, %s", notSet) raise CF.PropertySet.InvalidConfiguration("Failure", notSet) - self._log.trace("initializeProperties(%s)", ctorProps) + self._propertySetLog.trace("initializeProperties(%s)", ctorProps) def configure(self, configProperties): - self._log.trace("configure(%s)", configProperties) + self._propertySetLog.trace("configure(%s)", configProperties) self.propertySetAccess.acquire() notSet = [] error_message = '' @@ -814,29 +861,29 @@ def configure(self, configProperties): try: self._props.configure(prop.id, prop.value) except Exception, e: - self._log.warning("Invalid value provided to configure for property %s: %s", prop.id, e) + self._propertySetLog.warning("Invalid value provided to configure for property %s: %s", prop.id, e) notSet.append(prop) else: - self._log.warning("Tried to configure non-existent, readonly, or property with action not equal to external %s", prop.id) + self._propertySetLog.warning("Tried to configure non-existent, readonly, or property with action not equal to external %s", prop.id) notSet.append(prop) except Exception, e: error_message += str(e) - self._log.exception("Unexpected exception.") + self._propertySetLog.exception("Unexpected exception.") notSet.append(prop) if len(notSet) > 0 and len(notSet) < len(configProperties): self.propertySetAccess.release() - self._log.warning("Configure failed with partial configuration, %s", notSet) + self._propertySetLog.warning("Configure failed with partial configuration, %s", notSet) raise CF.PropertySet.PartialConfiguration(notSet) elif len(notSet) > 0 and len(notSet) >= len(configProperties): self.propertySetAccess.release() - self._log.warning("Configure failed with invalid configuration, %s", notSet) + self._propertySetLog.warning("Configure failed with invalid configuration, %s", notSet) raise CF.PropertySet.InvalidConfiguration("Failure: "+error_message, notSet) self.propertySetAccess.release() - self._log.trace("configure(%s)", 
configProperties) + self._propertySetLog.trace("configure(%s)", configProperties) def registerPropertyListener(self, listener, prop_ids, interval): - self._log.trace("registerPropertyListener(%s)", prop_ids) + self._propertySetLog.trace("registerPropertyListener(%s)", prop_ids) self.propertySetAccess.acquire() @@ -845,12 +892,12 @@ def registerPropertyListener(self, listener, prop_ids, interval): # If the list is empty, get all props if prop_ids == []: - self._log.trace("registering all properties") + self._propertySetLog.trace("registering all properties") for propid in self._props.keys(): if self._props.has_id(propid) and self._props.isQueryable(propid): props[propid] = _PCL_Monitor() else: - self._log.trace("registering %s properties", len(prop_ids)) + self._propertySetLog.trace("registering %s properties", len(prop_ids)) for pid in prop_ids: if self._props.has_id(pid) and self._props.isQueryable(pid): props[pid] = _PCL_Monitor() @@ -859,7 +906,7 @@ def registerPropertyListener(self, listener, prop_ids, interval): unknownProperties.append(CF.DataType(pid,value)) if len(unknownProperties) > 0: - self._log.warning("registerPropertyListener, called with invalid properties %s", unknownProperties) + self._propertySetLog.warning("registerPropertyListener, called with invalid properties %s", unknownProperties) self.propertySetAccess.release() raise CF.UnknownProperties(unknownProperties) @@ -867,21 +914,21 @@ def registerPropertyListener(self, listener, prop_ids, interval): pcl = None is_ec = False try: - self._log.debug("registerPropertyListener, Registering Event Channel...") + self._propertySetLog.debug("registerPropertyListener, Registering Event Channel...") pcl = _EC_PropertyChangeListener(listener) is_ec = True except: pcl = None - self._log.debug("registerPropertyListener, Try for PropertyChangeListener next...") + self._propertySetLog.debug("registerPropertyListener, Try for PropertyChangeListener next...") if is_ec == False: try: - 
self._log.debug("registerPropertyListener, Trying PropertyChangeListener interface...") + self._propertySetLog.debug("registerPropertyListener, Trying PropertyChangeListener interface...") pcl = _INF_PropertyChangeListener(listener) except: #print traceback.format_exc() pcl = None - self._log.warning("registerPropertyListener, Caller provided invalid registrant.") + self._propertySetLog.warning("registerPropertyListener, Caller provided invalid registrant.") self.propertySetAccess.release() raise CF.InvalidObjectReference("registerPropertyListener, Caller provided invalid registrant.") @@ -891,25 +938,25 @@ def registerPropertyListener(self, listener, prop_ids, interval): reg_id = rec.regId self._props.addChangeListener( rec.callback ) self._propChangeRegistry[reg_id] = rec - self._log.debug( "registerPropertyListener .. reg_id/interval: " + str(reg_id) + "/" + str(rec.reportInterval) + " callback: "+ str(self._propChangeRegistry[reg_id].callback)) + self._propertySetLog.debug( "registerPropertyListener .. reg_id/interval: " + str(reg_id) + "/" + str(rec.reportInterval) + " callback: "+ str(self._propChangeRegistry[reg_id].callback)) # start if self._propChangeThread and self._propChangeThread.isRunning() == False : - self._log.debug( "registerPropertyListener Starting PROPERTY CHANGE THREAD ... resource/reg_id: " + str(self._name) + "/" + str(reg_id) ) + self._propertySetLog.debug( "registerPropertyListener Starting PROPERTY CHANGE THREAD ... 
resource/reg_id: " + str(self._name) + "/" + str(reg_id) ) self._propChangeThread.startThread() self.propertySetAccess.release() return reg_id def unregisterPropertyListener(self, reg_id ): - self._log.debug("unregisterPropertyListener") + self._propertySetLog.debug("unregisterPropertyListener") self.propertySetAccess.acquire() if reg_id in self._propChangeRegistry: try: - self._log.debug("unregisterPropertyListener - Remove registration " + str(reg_id) ) + self._propertySetLog.debug("unregisterPropertyListener - Remove registration " + str(reg_id) ) self._props.removeChangeListener( self._propChangeRegistry[reg_id].callback ) del self._propChangeRegistry[reg_id] - self._log.debug("unregisterPropertyListener - Removed registration " + str(reg_id) ) + self._propertySetLog.debug("unregisterPropertyListener - Removed registration " + str(reg_id) ) except: pass @@ -918,9 +965,9 @@ def unregisterPropertyListener(self, reg_id ): self.stopPropertyChangeMonitor() self.propertySetAccess.release() - self._log.trace("unregisterPropertyListener") + self._propertySetLog.trace("unregisterPropertyListener") else: - self._log.trace("unregisterPropertyListener - end") + self._propertySetLog.trace("unregisterPropertyListener - end") self.propertySetAccess.release() raise CF.InvalidIdentifier() @@ -931,25 +978,25 @@ def startPropertyChangeMonitor(self): def stopPropertyChangeMonitor(self): if self._propChangeThread: - self._log.debug( "Stopping PROPERTY CHANGE THREAD ... ") + self._propertySetLog.debug( "Stopping PROPERTY CHANGE THREAD ... ") self._propChangeThread.stopThread() def _propertyChangeServiceFunction(self): - self._log.debug("_propertyChangeServiceFunction - START") + self._propertySetLog.debug("_propertyChangeServiceFunction - START") delay = 0.0 now = time.time() self.propertySetAccess.acquire() try: - self._log.debug("_propertyChangeServiceFunction - checking registry.... 
" + str(len(self._propChangeRegistry))) + self._propertySetLog.debug("_propertyChangeServiceFunction - checking registry.... " + str(len(self._propChangeRegistry))) for regid,rec in self._propChangeRegistry.iteritems(): # process changes for each property for k,p in rec.props.iteritems(): - self._log.debug("_propertyChangeServiceFunction - prop/set. " + str(k) + "/" + str(p.isSet())) + self._propertySetLog.debug("_propertyChangeServiceFunction - prop/set. " + str(k) + "/" + str(p.isSet())) try: if self._propMonitors[k].isChanged(): p.recordChanged() - self._log.debug("_propertyChangeServiceFunction - prop/changed. " + str(k) + "/" + str(self._propMonitors[k].isChanged())) + self._propertySetLog.debug("_propertyChangeServiceFunction - prop/changed. " + str(k) + "/" + str(self._propMonitors[k].isChanged())) except Exception, e: pass @@ -961,7 +1008,7 @@ def _propertyChangeServiceFunction(self): idx=0 # process changes for each property for k,p in rec.props.iteritems(): - self._log.debug("_propertyChangeServiceFunction - prop/set. " + str(k) + "/" + str(p.isSet())) + self._propertySetLog.debug("_propertyChangeServiceFunction - prop/set. 
" + str(k) + "/" + str(p.isSet())) if p.isChanged() == True : try: value = self._props.query(k) @@ -972,9 +1019,9 @@ def _propertyChangeServiceFunction(self): if len(rpt_props) > 0 and rec.pcl: try: - self._log.debug("_propertyChangeServiceFunction - sending notification reg_id:" + str( regid )) + self._propertySetLog.debug("_propertyChangeServiceFunction - sending notification reg_id:" + str( regid )) if rec.pcl.notify( rec, rpt_props ) != 0: - self._log.error("Publishing changes to PropertyChangeListener FAILED, reg_id:" + str( regid )) + self._propertySetLog.error("Publishing changes to PropertyChangeListener FAILED, reg_id:" + str( regid )) except: pass @@ -983,7 +1030,7 @@ def _propertyChangeServiceFunction(self): if delay == 0.0 : delay = dur if dur > 0 : delay = min( delay, dur ) - self._log.debug("_propertyChangeServiceFunction - delay :" + str( delay )) + self._propertySetLog.debug("_propertyChangeServiceFunction - delay :" + str( delay )) except: pass @@ -992,11 +1039,11 @@ def _propertyChangeServiceFunction(self): for k,mon in self._propMonitors.iteritems(): mon.reset() - self._log.debug("_propertyChangeServiceFunction - adjust thread delay :" + str( delay )) + self._propertySetLog.debug("_propertyChangeServiceFunction - adjust thread delay :" + str( delay )) if delay > 0 : self._propChangeThread.setThreadDelay(delay) self.propertySetAccess.release() - self._log.debug("_propertyChangeServiceFunction - STOP") + self._propertySetLog.debug("_propertyChangeServiceFunction - STOP") return -1 @@ -1166,9 +1213,6 @@ def _getOptions(classtype): print "They are provided as arguments pairs ID VALUE, for example:" print " %s INT_PARAM 5 STR_PARAM ABCDED" % sys.argv[0] print - print "Options:" - print " -i,--interactive Run the component in interactive test mode" - print print classtype.__doc__ sys.exit(2) except getopt.GetoptError: @@ -1178,9 +1222,6 @@ def _getOptions(classtype): print "They are provided as arguments pairs ID VALUE, for example:" print " %s 
INT_PARAM 5 STR_PARAM ABCDED" % sys.argv[0] print - print "Options:" - print " -i,--interactive Run the component in interactive test mode" - print print classtype.__doc__ sys.exit(2) return opts, args @@ -1191,14 +1232,9 @@ def setupSignalHandlers(): signal.signal(signal.SIGTERM, __exit_handler) def _getInteractive(opts): - """ - If opts contains '-1' or '--interactive', return True, otherwise - return False. - """ - interactive = False for opt, unused in opts: - if opt == "-i" or opt == "--interactive": + if opt == '-i' or opt == '--interactive': interactive = True return interactive @@ -1213,6 +1249,9 @@ def parseCommandLineArgs(componentclass): def start_component(componentclass, interactive_callback=None, thread_policy=None, loggerName=None): execparams, interactive = parseCommandLineArgs(componentclass) + if interactive: + print "Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain" + sys.exit(-1) name_binding="NOT SET" setupSignalHandlers() orb = None @@ -1254,6 +1293,17 @@ def start_component(componentclass, interactive_callback=None, thread_policy=Non except: pass + try: + obj = orb.string_to_object(execparams['NAMING_CONTEXT_IOR']) + applicationRegistrar = obj._narrow(CF.ApplicationRegistrar) + _app = containers.ApplicationContainer(applicationRegistrar._get_app()) + name = applicationRegistrar._get_app()._get_name() + tpath = dpath + if dpath[0] == '/': + tpath=dpath[1:] + dpath = tpath.split('/')[0]+"/"+name + except: + pass ## sets up logging during component startup ctx = ossie.logger.ComponentCtx( name = name_binding, diff --git a/redhawk/src/base/framework/python/ossie/service.py b/redhawk/src/base/framework/python/ossie/service.py index a71377e82..1ebce4616 100644 --- a/redhawk/src/base/framework/python/ossie/service.py +++ b/redhawk/src/base/framework/python/ossie/service.py @@ -65,6 +65,7 @@ def initLogging(self, svc_name, loggerName=None): self._logid = loggerName 
self._logid = self._logid.replace(":","_") self._log = logging.getLogger(self._logid) + self._baseLog = logging.getLogger(svc_name) loglevel = self._log.getEffectiveLevel() if loglevel == logging.NOTSET: self.logLevel = logging.INFO @@ -163,6 +164,10 @@ def start_service(serviceclass, thread_policy=None): import signal import getopt + if len(sys.argv) == 2: + if sys.argv[1] == '-i': + print "Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain" + sys.exit(-1) try: # IMPORTANT YOU CANNOT USE gnu_getopt OR OptionParser # because they will treat execparams with negative number diff --git a/redhawk/src/base/framework/python/ossie/threadedcomponent.py b/redhawk/src/base/framework/python/ossie/threadedcomponent.py index c345de846..d9496d3d7 100644 --- a/redhawk/src/base/framework/python/ossie/threadedcomponent.py +++ b/redhawk/src/base/framework/python/ossie/threadedcomponent.py @@ -20,6 +20,7 @@ import threading import time +import os # Limit exported symbols to official API; ProcessThread is exported for # backwards-compatibility @@ -47,10 +48,13 @@ def run(self): while not self.stop_signal.isSet(): try: state = self.target() - except Exception, e: - if hasattr(self.target.__self__,'_log'): - self.target.__self__._log.error("Exception detected in process function: "+str(e)) - raise + except Exception as exc: + log = getattr(self.target.__self__, '_baseLog', None) + if log: + log.fatal("Unhandled exception in process function: %s", exc, exc_info=True) + # Terminate the process on unhandled exceptions (sys.exit only + # exits the current thread, not the whole process) + os._exit(-1) if state == FINISH: return elif state == NOOP: diff --git a/redhawk/src/base/framework/python/ossie/utils/allocations/__init__.py b/redhawk/src/base/framework/python/ossie/utils/allocations/__init__.py new file mode 100644 index 000000000..18d89a780 --- /dev/null +++ 
b/redhawk/src/base/framework/python/ossie/utils/allocations/__init__.py @@ -0,0 +1,27 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +# This is a stub file to provide the 'redhawk' namespace +# Python files generated for new IDLs will be added under this namespace +# e.g. 'redhawk.mynamespace' + +from helpers import * + +#__all__ = ['WrongInputType', 'MissingProperty', 'changeType', 'getType'] diff --git a/redhawk/src/base/framework/python/ossie/utils/allocations/helpers.py b/redhawk/src/base/framework/python/ossie/utils/allocations/helpers.py new file mode 100644 index 000000000..e541c655f --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/allocations/helpers.py @@ -0,0 +1,244 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +from ossie.cf import CF +import copy +from ossie import properties +import ossie.parsers.prf +from omniORB import tcInternal, CORBA, any + +class WrongInputType(Exception): + pass + +class BadValue(Exception): + pass + +class MissingProperty(Exception): + pass + +def tvCode(name): + if name == 'short': + return tcInternal.tv_short + elif name == 'long': + return tcInternal.tv_long + elif name == 'ushort': + return tcInternal.tv_ushort + elif name == 'ulong': + return tcInternal.tv_ulong + elif name == 'float': + return tcInternal.tv_float + elif name == 'double': + return tcInternal.tv_double + elif name == 'boolean': + return tcInternal.tv_boolean + elif name == 'char': + return tcInternal.tv_char + elif name == 'octet': + return tcInternal.tv_octet + elif name == 'string': + return tcInternal.tv_string + elif name == 'longlong': + return tcInternal.tv_longlong + elif name == 'ulonglong': + return tcInternal.tv_ulonglong + elif name == 'double': + return tcInternal.tv_longdouble + elif name == 'wchar': + return tcInternal.tv_wchar + raise BadValue('Could not find the sequence code for '+name) + +def createRequest(requestId=None, props={}, pools=[], devices=[], sourceId=''): + return CF.AllocationManager.AllocationRequestType(requestId, props, pools, devices, sourceId) + +def allTypes(): + retval = [] + for _t in properties.getTypeMap(): + retval.append(_t) + return retval + +def __typeCheck(props): + if not isinstance(props, list): + raise WrongInputType('The argument "props" must be a list of properties (i.e.: CF.DataType). 
Use properties.props_from_dict') + if len(props) == 0: + raise MissingProperty('The list of properties must be greater than zero') + for prop in props: + if not isinstance(prop, CF.DataType): + raise WrongInputType('The argument "props" must be a list of properties (i.e.: CF.DataType). Use properties.props_from_dict') + +def __getProp(props, prop_id): + for prop in props: + if prop.id == prop_id: + return prop + raise MissingProperty('Could not find the property with id: '+str(prop_id)) + +def __getPropIdx(props, prop_id): + idx = 0 + for prop in props: + if prop.id == prop_id: + return idx + idx = idx + 1 + raise MissingProperty('Could not find the property with id: '+str(prop_id)) + +def getType(props, prop_id, typeCheck=True): + ''' + return the property type for the element in the props sequence with id of prop_id + ''' + if typeCheck: + __typeCheck(props) + prop = __getProp(props, prop_id) + return properties.getTypeNameFromTC(prop.value._t) + +def setType(props, prop_id, _type, typeCheck=True): + ''' + change the property type for the element in the props sequence with id of prop_id + This method returns a copy of props (does not modify the props input) + ''' + if typeCheck: + __typeCheck(props) + if not properties.getTypeMap().has_key(_type): + raise BadValue('Type "'+_type+'" does not exist') + if _type == getType(props, prop_id, False): + return props + prop_idx = __getPropIdx(props, prop_id) + ret_props = copy.deepcopy(props) + if props[prop_idx].value._t._k == CORBA.tk_sequence: + ret_props[prop_idx].value._t._d = (props[prop_idx].value._t._d[0], tvCode(_type), props[prop_idx].value._t._d[2]) + else: + ret_props[prop_idx].value = properties.to_tc_value(props[prop_idx].value,_type) + return ret_props + +def matchTypes(props, prop_ids=[], prf=None): + ''' + match the property type for the element in the props sequence with id of prop_ids to the type specified in the prf file specified + This method returns a copy of props (does not modify the props input) 
+ + + if prop_ids is not length 0, only those properties are updated + if prop_ids is length 0, all matching properties (props vs prf xml string) are matched + + if prf file is one of the following: + - a Python open file handle; e.g.: fp = open('/data/rh/sdrroot/dom/foo.prf.xml','r') + - a CF::File object; e.g.: fp=fileMgr.open('/foo.prf.xml',True) + - a local filename; e.g.: '/data/rh/sdrroot/dom/foo.prf.xml' + ''' + __typeCheck(props) + + prfcontents='' + if prf: + if isinstance(prf, CF._objref_File): + _len = len(prfcontents) + while True: + prfcontents += prf.read(10000) + if _len == len(prfcontents): + break + _len = len(prfcontents) + prf.close() + elif isinstance(prf, file): + prfcontents = prf.read() + prf.close() + elif isinstance(prf, str): + fp = open(prf, 'r') + prfcontents = fp.read() + fp.close() + else: + raise WrongInputType('The prf argument must be: a string, a file object, or a CF.File object') + + parsedPrf = ossie.parsers.prf.parseString(prfcontents) + classes = {} + + structs = parsedPrf.get_struct()# + [_prop.get_struct() for _prop in parsedPrf.get_structsequence()] + structsseq = [_prop.get_struct() for _prop in parsedPrf.get_structsequence()] + for _prop in parsedPrf.get_struct(): + name = _prop.id_ + if _prop.name: + name = _prop.name + clazz = properties.xml_to_class(_prop) + classes[name] = clazz + for _prop in parsedPrf.get_structsequence(): + name = _prop.id_ + if _prop.name: + name = _prop.name + clazz = properties.xml_to_class(_prop.struct) + # mark as structseq by setting to length 1 list + classes[name] = [clazz] + for _prop in parsedPrf.get_simple(): + name = _prop.id_ + if _prop.name: + name = _prop.name + classes[name] = _prop + for _prop in parsedPrf.get_simplesequence(): + name = _prop.id_ + if _prop.name: + name = _prop.name + classes[name] = _prop + + if len(prop_ids) != 0: + for prop_id in prop_ids: + if not isinstance(prop_id, str): + raise WrongInputType('prop_id must be either a string or None') + if not 
classes.has_key(prop_id): + raise MissingProperty(prop_id+' is not defined in the given reference prf file') + props = setType(props, prop_id, classes[prop_id].type_, typeCheck=False) + return props + + for _prop in props: + if not classes.has_key(_prop.id): + raise MissingProperty('props contains property '+_prop.id+', but the given reference prf file does not have it defined') + if isinstance(classes[_prop.id], ossie.parsers.prf.simple) or isinstance(classes[_prop.id], ossie.parsers.prf.simpleSequence): + props = setType(props, _prop.id, classes[_prop.id].type_, typeCheck=False) + else: + if isinstance(classes[_prop.id], list): + _idx = __getPropIdx(props, _prop.id) + item_idx = 0 + for item in props[_idx].value._v: + for field in classes[_prop.id][0].__fields: + props[_idx].value._v[item_idx]._v = setType(props[_idx].value._v[item_idx]._v, field.id_, field.type_, typeCheck=False) + item_idx += 1 + else: + _idx = __getPropIdx(props, _prop.id) + for field in classes[_prop.id].__fields: + props[_idx].value._v = setType(props[_idx].value._v, field.id_, field.type_, typeCheck=False) + + return props + +def createProps(prop_dict, prf=None): + props = [] + for _key in prop_dict: + if isinstance(prop_dict[_key],dict): + vals = [] + for _subkey in prop_dict[_key]: + vals.append(CF.DataType(id=_subkey, value=any.to_any(prop_dict[_key][_subkey]))) + props.append(CF.DataType(id=_key, value = any.to_any(vals))) + elif isinstance(prop_dict[_key],list): + if len(prop_dict[_key]) == 0: + props.append(CF.DataType(id=_key, value=any.to_any(prop_dict[_key]))) + elif isinstance(prop_dict[_key][0],dict): + vals = [] + for _item in prop_dict[_key]: + subval = [] + for _subkey in _item: + subval.append(CF.DataType(id=_subkey, value=any.to_any(_item[_subkey]))) + vals.append(any.to_any(subval)) + props.append(CF.DataType(id=_key, value = any.to_any(vals))) + else: + props.append(CF.DataType(id=_key, value=any.to_any(prop_dict[_key]))) + else: + props.append(CF.DataType(id=_key, 
value=any.to_any(prop_dict[_key]))) + if prf: + props = matchTypes(props, prf=prf) + return props diff --git a/redhawk/src/base/framework/python/ossie/utils/bluefile/bluefile_helpers.py b/redhawk/src/base/framework/python/ossie/utils/bluefile/bluefile_helpers.py index c98a2199d..dc67caec9 100644 --- a/redhawk/src/base/framework/python/ossie/utils/bluefile/bluefile_helpers.py +++ b/redhawk/src/base/framework/python/ossie/utils/bluefile/bluefile_helpers.py @@ -18,22 +18,18 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # - -import numpy -from numpy import * -import platform -import bluefile - -from ossie.cf import CF -import ossie.properties -from ossie.cf import CF, CF__POA -from new import classobj -import array -import ossie.utils.bulkio.bulkio_helpers as bulkio_helpers import os import threading import time -import logging +import numpy +from new import classobj + +from ossie.cf import CF, CF__POA +import ossie.properties +from ossie.utils.bulkio import bulkio_helpers +from ossie.utils.log4py import logging + +import bluefile try: import bulkio from bulkio.bulkioInterfaces import BULKIO, BULKIO__POA @@ -43,8 +39,6 @@ logging.basicConfig() log = logging.getLogger(__name__) -arch = platform.machine() - # BLUE files use an epoch of Jan 1, 1950, while REDHAWK uses the Unix epoch # (Jan 1, 1970); REDHAWK_EPOCH_J1950 is the Unix epoch as a J1950 time REDHAWK_EPOCH_J1950 = 631152000.0 @@ -237,7 +231,7 @@ class BlueFileReader(object): Simple class used to send data to a port from an X-Midas file. It uses the header to generate a SRI. """ - def __init__(self, porttype): + def __init__(self, porttype, throttle=False): """ Instantiates a new object and generates a default StreamSRI. 
The porttype parameter corresponds to the type of data contained in the @@ -255,6 +249,7 @@ def __init__(self, porttype): 0.001, 1, 0, "sampleStream", True, []) self.port_lock = threading.Lock() + self._throttle=throttle self.done = False def connectPort(self, connection, connectionId): @@ -290,8 +285,13 @@ def pushSRI(self, H): def pushPacket(self, data, T, EOS, streamID): if self.refreshSRI: self.pushSRI(self.defaultStreamSRI) + self.port_lock.acquire() - try: + + if self._throttle: + time.sleep(len(data)*self.defaultStreamSRI.xdelta/2.0) + + try: try: for connId, port in self.outPorts.items(): if port != None: port.pushPacket(data, T, EOS, streamID) @@ -302,6 +302,9 @@ def pushPacket(self, data, T, EOS, streamID): finally: self.port_lock.release() + if self._throttle: + time.sleep(len(data)*self.defaultStreamSRI.xdelta/2.0) + def getPort(self): """ Returns a Port object of the type CF__POA.Port. @@ -354,9 +357,9 @@ def run(self, infile, pktsize=1024, streamID=None): # For complex float/double, get a view of the data as the scalar type # instead of the complex type if hdr['format'] == 'CF': - data = data.view(float32) + data = data.view(numpy.float32) elif hdr['format'] == 'CD': - data = data.view(float64) + data = data.view(numpy.float64) sz = len(data) self.done = False @@ -503,6 +506,8 @@ def pushSRI(self, H): data_format = data_format + 'D' elif self.port_type == BULKIO__POA.dataChar: data_format = data_format + 'B' + elif self.port_type == BULKIO__POA.dataOctet: + data_format = data_format + 'B' elif self.port_type == BULKIO__POA.dataUlong: data_format = data_format + 'L' elif self.port_type == BULKIO__POA.dataLong: diff --git a/redhawk/src/base/framework/python/ossie/utils/bulkio/bulkio_data_helpers.py b/redhawk/src/base/framework/python/ossie/utils/bulkio/bulkio_data_helpers.py index a1b9a82be..91f5bd5b3 100644 --- a/redhawk/src/base/framework/python/ossie/utils/bulkio/bulkio_data_helpers.py +++ 
b/redhawk/src/base/framework/python/ossie/utils/bulkio/bulkio_data_helpers.py @@ -723,7 +723,7 @@ class FileSource(object): """ Simple class used to push data into a port from a given array of data. """ - def __init__(self, porttype, byteswap=False, usesPortTypeDict=None): + def __init__(self, porttype, byteswap=False, usesPortTypeDict=None, throttle=False): """ Instantiates a new object and generates a default StreamSRI. The porttype parameter corresponds to the type of data contained in the @@ -780,6 +780,7 @@ def __init__(self, porttype, byteswap=False, usesPortTypeDict=None): self.connectionNormalization = {} self.connectionTranslation = {} self.usesPortTypeDict = usesPortTypeDict + self._throttle=throttle self.refreshSRI = False # Create default SRI self.sri=bulkio_helpers.defaultSRI @@ -864,6 +865,9 @@ def pushPacket(self, data, T, EOS, streamID): if EOS: # This deals with subsequent pushes with the same SRI self.refreshSRI = True + if self._throttle: + time.sleep(len(data)*self.sri.xdelta/2.0) + self.port_lock.acquire() try: try: @@ -892,6 +896,10 @@ def pushPacket(self, data, T, EOS, streamID): finally: self.port_lock.release() + if self._throttle: + time.sleep(len(data)*self.sri.xdelta/2.0) + + def getPort(self): """ Returns a Port object of the type CF__POA.Port. 
diff --git a/redhawk/src/base/framework/python/ossie/utils/bulkio/bulkio_helpers.py b/redhawk/src/base/framework/python/ossie/utils/bulkio/bulkio_helpers.py index 8122d9b0c..2684fb8ae 100644 --- a/redhawk/src/base/framework/python/ossie/utils/bulkio/bulkio_helpers.py +++ b/redhawk/src/base/framework/python/ossie/utils/bulkio/bulkio_helpers.py @@ -175,7 +175,12 @@ def formatData(dataSet, portRef=None, BULKIOtype=None): else: raise BadParamException("Must specify either a portRef or a dataType, but not both") - dataType = portIDL.split('/')[1].split(':')[0] + # Given the repo ID, get the fully-qualified interface name (in between the + # colons) and take just the last part, removing any module name(s) + dataType = portIDL.split(':')[1].split('/')[-1] + if dataType.endswith('Ext'): + # Treat extended interfaces as their base interface + dataType = dataType[:-3] dataSetType = type(dataSet) validSetTypes = [str, list, tuple] diff --git a/redhawk/src/base/framework/python/ossie/utils/docstring.py b/redhawk/src/base/framework/python/ossie/utils/docstring.py new file mode 100644 index 000000000..30682729c --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/docstring.py @@ -0,0 +1,76 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. 
If not, see http://www.gnu.org/licenses/. +# + +class inherit_doc(object): + """ + Decorator to allow an object to inherit a docstring from another. If the + decorated object has a docstring, it is appended to the parent docstring, + with its indentation normalized. + """ + def __init__(self, obj): + self.doc = obj.__doc__ + + def _get_indent(self, doc): + """ + Returns the lowest indent level of the docstring. + """ + indent = None + for line in doc.split('\n'): + line_indent = self._get_line_indent(line) + if indent is None: + indent = line_indent + else: + indent = min(indent, line_indent) + if indent == 0: + return indent + return 0 + + def _get_line_indent(self, line): + for pos in xrange(len(line)): + if not line[pos].isspace(): + return pos + return 0 + + def _do_normalize(self, doc, indent): + """ + Generator function to perform line-by-line normalization of + indentation. + """ + source_indent = self._get_indent(doc) + for line in doc.split('\n'): + if line: + line = ' '*indent + line[source_indent:] + yield line + + def _normalize_indent(self, doc, indent): + """ + Normalizes the indentation of a docstring to the given base + indentation level. 
+ """ + return '\n'.join(self._do_normalize(doc, indent)) + + def __call__(self, obj): + if self.doc is not None: + indent = self._get_indent(self.doc) + original_doc = obj.__doc__ + obj.__doc__ = self.doc + if original_doc is not None: + obj.__doc__ += self._normalize_indent(original_doc, indent) + return obj diff --git a/redhawk/src/base/framework/python/ossie/utils/formatting.py b/redhawk/src/base/framework/python/ossie/utils/formatting.py index 389bb44a6..019754f48 100644 --- a/redhawk/src/base/framework/python/ossie/utils/formatting.py +++ b/redhawk/src/base/framework/python/ossie/utils/formatting.py @@ -33,6 +33,8 @@ def __init__(self, *headers): self._lines = [] self._limits = [-1] * self.columns self._enable_header = True + self._parent = None + self._children = [] @property def columns(self): @@ -41,6 +43,12 @@ def columns(self): def enable_header(self, state): self._enable_header = state + def set_parent_table(self, parent): + self._parent = parent + + def add_child(self, child): + self._children = child + def limit_column(self, index, length): self._limits[index] = length diff --git a/redhawk/src/base/framework/python/ossie/utils/log4py/__init__.py b/redhawk/src/base/framework/python/ossie/utils/log4py/__init__.py index 99747c945..0e942b083 100644 --- a/redhawk/src/base/framework/python/ossie/utils/log4py/__init__.py +++ b/redhawk/src/base/framework/python/ossie/utils/log4py/__init__.py @@ -40,12 +40,17 @@ import logging -# Define a TRACE logging level. +# Define additional logging levels logging.TRACE = 5 -logging.addLevelName(logging.TRACE, "TRACE") +logging.ALL = 1 logging.OFF = logging.FATAL+1 +logging.addLevelName(logging.TRACE, "TRACE") +logging.addLevelName(logging.ALL, "ALL") logging.addLevelName(logging.OFF, "OFF") +USER_LOGS = 'user' +SYSTEM_LOGS = 'system' + # Add a free-standing trace method. 
def _trace(msg, *args, **kw): logging.log(logging.TRACE, msg, *args, **kw) @@ -56,10 +61,62 @@ def _trace(msg, *args, **kw): # Extend logging class to add a "trace" method, and "getChild" if necessary. LoggerBase = logging.getLoggerClass() class RedhawkLogger(LoggerBase): + def __init__(self, name, level=0): + LoggerBase.__init__(self, name, level) + self._loggers = [name] + self._rh_parent = None + _ECM = None def trace(self, msg, *args, **kw): self.log(logging.TRACE, msg, *args, **kw) + def getCurrentLoggers(self): + return self._loggers + + def _setParent(self, parent): + self._rh_parent = parent + + def _addChildLogger(self, logname): + self._loggers.append(logname) + if self._rh_parent: + self._rh_parent._addChildLogger(logname) + + def isLoggerInHierarchy(self, search_name): + for _name in self._loggers: + _idx = _name.find(self.name); + if (_idx == 0): + if len(_name) > len(self.name): + if _name[len(self.name)] != '.': + continue + if _name.find(search_name) != 0: + continue + if len(_name) > len(search_name): + if (len(search_name) != 0) and (_name[len(search_name)] != '.'): + continue + return True + return False + + def getChildLogger(self, logname, ns=USER_LOGS): + _full_name = '' + _ns = ns + if _ns == 'user': + if '.' in self.name: + _ns = '' + if _ns and ((_ns != USER_LOGS) or ((_ns == USER_LOGS) and (not ('.'+USER_LOGS+'.' in self.name)))): + _full_name = self.name + '.' + _ns + '.' + logname + else: + _full_name = self.name + '.' 
+ logname + if not _full_name in self._loggers: + self._loggers.append(_full_name) + if self._rh_parent: + self._rh_parent._addChildLogger(_full_name) + retval = logging.getLogger(_full_name) + try: + retval._setParent(self) + except: + pass + return logging.getLogger(_full_name) + @staticmethod def SetEventChannelManager(ECM): from ossie.utils.log4py.appenders import RH_LogEventAppender @@ -67,7 +124,6 @@ def SetEventChannelManager(ECM): RH_LogEventAppender.ECM = ECM for app in logging._handlerList: if isinstance(app, RH_LogEventAppender ): - #print "RedhawkLogger....setEventChannelManager, " + str(ECM) app.setEventChannelManager(ECM) if not hasattr(LoggerBase, 'getChild'): @@ -75,6 +131,6 @@ def getChild(self, suffix): return logging.getLogger(self.name + '.' + suffix) -del LoggerBase +#del LoggerBase logging.setLoggerClass(RedhawkLogger) diff --git a/redhawk/src/base/framework/python/ossie/utils/log4py/config.py b/redhawk/src/base/framework/python/ossie/utils/log4py/config.py index 40cb59cb3..c05167db8 100644 --- a/redhawk/src/base/framework/python/ossie/utils/log4py/config.py +++ b/redhawk/src/base/framework/python/ossie/utils/log4py/config.py @@ -163,7 +163,7 @@ def fileConfig(f, category=None): return _config(props,category) -def _config(props, category=None, disable_existing_loggers=1): +def _config(props, category=None, disable_existing_loggers=False): logging.shutdown() # critical section diff --git a/redhawk/src/base/framework/python/ossie/utils/log_helpers.py b/redhawk/src/base/framework/python/ossie/utils/log_helpers.py new file mode 100644 index 000000000..fc3c969a3 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/log_helpers.py @@ -0,0 +1,44 @@ + +from ossie.cf import CF + +def codeToString(_code): + if _code == CF.LogLevels.OFF: + return 'OFF' + elif _code == CF.LogLevels.FATAL: + return 'FATAL' + elif _code == CF.LogLevels.ERROR: + return 'ERROR' + elif _code == CF.LogLevels.WARN: + return 'WARN' + elif _code == CF.LogLevels.INFO: + 
return 'INFO' + elif _code == CF.LogLevels.DEBUG: + return 'DEBUG' + elif _code == CF.LogLevels.TRACE: + return 'TRACE' + elif _code == CF.LogLevels.ALL: + return 'ALL' + else: + raise Exception("Invalid code") + +def stringToCode(_codeString): + _newString = _codeString.upper() + if _newString == 'OFF': + return CF.LogLevels.OFF + elif _newString == 'FATAL': + return CF.LogLevels.FATAL + elif _newString == 'ERROR': + return CF.LogLevels.ERROR + elif _newString == 'WARN': + return CF.LogLevels.WARN + elif _newString == 'INFO': + return CF.LogLevels.INFO + elif _newString == 'DEBUG': + return CF.LogLevels.DEBUG + elif _newString == 'TRACE': + return CF.LogLevels.TRACE + elif _newString == 'ALL': + return CF.LogLevels.ALL + else: + raise Exception("Invalid code string. Options: 'OFF', 'FATAL', 'ERROR', 'WARN', 'INFO', 'DEBUG', 'TRACE', 'ALL'") + diff --git a/redhawk/src/base/framework/python/ossie/utils/model/__init__.py b/redhawk/src/base/framework/python/ossie/utils/model/__init__.py index ed6e05d00..060ec04f2 100644 --- a/redhawk/src/base/framework/python/ossie/utils/model/__init__.py +++ b/redhawk/src/base/framework/python/ossie/utils/model/__init__.py @@ -20,6 +20,7 @@ import commands as _commands +import sys as _sys import os as _os import copy as _copy import logging @@ -39,11 +40,16 @@ from ossie.properties import getMemberType from ossie.cf import ExtendedCF as _ExtendedCF from ossie.utils.formatting import TablePrinter +from ossie.utils.log_helpers import stringToCode from ossie.utils import prop_helpers +from ossie.utils import rhtime import warnings as _warnings +import cStringIO, pydoc from connect import * -_DEBUG = False +_warnings.filterwarnings('once',category=DeprecationWarning) + +_DEBUG = False _trackLaunchedApps = False _idllib = idllib.IDLLibrary() @@ -105,6 +111,11 @@ def _convertType(propType, val): real = int(real) imag = int(imag) newValue = complex(real, imag) + elif propType == 'utctime': + if type(val) == str: + newValue = 
rhtime.convert(val) + else: + newValue = val else: newValue = None return newValue @@ -188,24 +199,43 @@ class PortSupplier(object): def __init__(self): self._providesPortDict = {} self._usesPortDict = {} + self._listener_allocations = {} + + def _showPorts(self, ports, destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() - def _showPorts(self, ports): if ports: table = TablePrinter('Port Name', 'Port Interface') for port in ports.itervalues(): table.append(port['Port Name'], port['Port Interface']) - table.write() + table.write(f=destfile) else: - print "None" + print >>destfile, "None" + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() + + def api(self, destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() - def api(self): - print "Provides (Input) Ports ==============" - self._showPorts(self._providesPortDict) - print + print >>destfile, "Provides (Input) Ports ==============" + self._showPorts(self._providesPortDict, destfile=destfile) + print >>destfile, "\n" - print "Uses (Output) Ports ==============" - self._showPorts(self._usesPortDict) - print + print >>destfile, "Uses (Output) Ports ==============" + self._showPorts(self._usesPortDict, destfile=destfile) + print >>destfile, "\n" + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() def _getUsesPort(self, name): if not name in self._usesPortDict: @@ -284,9 +314,27 @@ def connect(self, providesComponent, providesPortName=None, usesPortName=None, c to find a matching port automatically. 
If there are multiple possible matches, a uses-side or provides-side port name may be necessary to resolve the ambiguity """ + + properties = [] + if hasattr(self, '_properties'): + properties = [p for p in self._properties if 'property' in p.kinds or 'configure' in p.kinds or 'execparam' in p.kinds] + tuner_status = None + valid_tuners = [] + for prop in properties: + if prop.id == 'FRONTEND::tuner_status': + tuner_status = prop + break + if tuner_status: + if not connectionId: + valid_tuners = [] + for tuner_idx in range(len(tuner_status)): + if not tuner_status[tuner_idx].enabled: + continue + valid_tuners.append(tuner_idx) + if not connectionId: - connectionId = str(_uuidgen()) - + connectionId = 'DCE_'+str(_uuidgen()) + if isinstance(providesComponent, PortSupplier): log.trace('Provides side is PortSupplier') # Remote side supports multiple ports. @@ -373,6 +421,21 @@ def connect(self, providesComponent, providesPortName=None, usesPortName=None, c log.trace("Provides endpoint '%s' has interface '%s'", providesEndpoint.getName(), providesEndpoint.getInterface()) usesPortRef = usesEndpoint.getReference() providesPortRef = providesEndpoint.getReference() + if ':BULKIO/' in usesEndpoint.getInterface(): + if len(valid_tuners) == 1: + allocation_id = tuner_status[valid_tuners[0]].allocation_id_csv.split(',')[0] + while True: + connectionId = str(_uuidgen()) + if not self._listener_allocations.has_key(connectionId): + break + import frontend + listen_alloc = frontend.createTunerListenerAllocation(allocation_id, connectionId) + retalloc = self.allocateCapacity(listen_alloc) + if not retalloc: + raise RuntimeError, "Unable to create a listener for allocation "+allocation_id+" on device "+usesEndpoint.getName() + self._listener_allocations[connectionId] = listen_alloc + elif len(valid_tuners) > 1: + raise RuntimeError, "More than one valid tuner allocation exists on the frontend interfaces device, so the ambiguity cannot be resolved. 
Please provide the connection id for the desired allocation" usesPortRef.connectPort(providesPortRef, connectionId) ConnectionManager.instance().registerConnection(connectionId, usesEndpoint, providesEndpoint) @@ -388,6 +451,9 @@ def disconnect(self, providesComponent): usesPortRef.disconnectPort(connectionId) except: pass + if self._listener_allocations.has_key(connectionId): + self.deallocateCapacity(self._listener_allocations[connectionId]) + self._listener_allocations.pop(connectionId) if isinstance(providesComponent, PortSupplier): providesComponent._disconnected(connectionId) manager.unregisterConnection(connectionId, uses) @@ -473,7 +539,14 @@ def query(self, props): else: return None - def api(self, externalPropInfo=None): + def api(self, externalPropInfo=None, destfile=None): + ''' + If destfile is None, output is sent to stdout + ''' + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() properties = [p for p in self._properties if 'property' in p.kinds or 'configure' in p.kinds or 'execparam' in p.kinds] if not properties: return @@ -489,7 +562,7 @@ def api(self, externalPropInfo=None): extId, propId = externalPropInfo table.enable_header(False) else: - print "Properties ==============" + print >>destfile, "Properties ==============" for prop in properties: if externalPropInfo: # Searching for a particular external property @@ -541,7 +614,10 @@ def api(self, externalPropInfo=None): currentValue = _formatSimple(prop, currentValue,prop.id) table.append(name, '('+scaType+')', str(prop.defValue), currentValue) - table.write() + table.write(f=destfile) + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() class PropertyEmitter(PropertySet): @@ -553,8 +629,11 @@ def __init__(self): def registerPropertyListener( self, obj, prop_ids=[], interval=1.0): self.__log.trace("registerPropertyListener('%s')", str(prop_ids)) + _obj = obj + if hasattr(obj, '_this'): + _obj = obj._this() if self.ref: - 
return self.ref.registerPropertyListener(obj, prop_ids, interval ) + return self.ref.registerPropertyListener(_obj, prop_ids, interval ) return None def unregisterPropertyListener( self, reg_id ): @@ -580,8 +659,20 @@ def log_level(self, newLogLevel=None ): else: self.ref._set_log_level( newLogLevel ) - def setLogLevel(self, logid, newLogLevel ): - self.ref.setLogLevel( logid, newLogLevel ) + def setLogLevel(self, logid, cf_log_lvl ): + _cf_log_lvl = cf_log_lvl + if type(cf_log_lvl) == str: + _cf_log_lvl = stringToCode(cf_log_lvl) + self.ref.setLogLevel( logid, _cf_log_lvl ) + + def getLogLevel(self, logger_id): + return self.ref.getLogLevel(logger_id) + + def getNamedLoggers(self): + return self.ref.getNamedLoggers() + + def resetLog(self): + self.ref.resetLog() def getLogConfig(self): return self.ref.getLogConfig() @@ -605,6 +696,7 @@ def __init__(self, ref, profile, spd, scd, prf, instanceName, refid, impl): self._prf = prf self._impl = impl self._instanceName = instanceName + self.name = instanceName # Add mapping of services operations and attributes found = False @@ -649,8 +741,20 @@ def log_level(self, newLogLevel=None ): else: self.ref._set_log_level( newLogLevel ) - def setLogLevel(self, logid, newLogLevel ): - self.ref.setLogLevel( logid, newLogLevel ) + def setLogLevel(self, logid, cf_log_lvl ): + _cf_log_lvl = cf_log_lvl + if type(cf_log_lvl) == str: + _cf_log_lvl = stringToCode(cf_log_lvl) + self.ref.setLogLevel( logid, _cf_log_lvl ) + + def getLogLevel(self, logger_id): + return self.ref.getLogLevel(logger_id) + + def getNamedLoggers(self): + return self.ref.getNamedLoggers() + + def resetLog(self): + self.ref.resetLog() def getLogConfig(self): return self.ref.getLogConfig() @@ -703,9 +807,9 @@ def _get_log_level(self): else: return 0 - def _set_log_level(self): + def _set_log_level(self, value): if self.ref: - return self.ref._set_log_level() + self.ref._set_log_level(value) def _get_softwareProfile(self): if self.ref: @@ -773,9 +877,30 @@ def 
log_level(self, newLogLevel=None ): if self.ref: self.ref._set_log_level( newLogLevel ) - def setLogLevel(self, logid, newLogLevel ): + def setLogLevel(self, logid, cf_log_lvl ): + _cf_log_lvl = cf_log_lvl + if type(cf_log_lvl) == str: + _cf_log_lvl = stringToCode(cf_log_lvl) if self.ref: - self.ref.setLogLevel( logid, newLogLevel ) + self.ref.setLogLevel( logid, _cf_log_lvl ) + + def getLogLevel(self, logger_id): + if self.ref: + return self.ref.getLogLevel(logger_id) + else: + None + + def getNamedLoggers(self): + if self.ref: + return self.ref.getNamedLoggers() + else: + None + + def resetLog(self): + if self.ref: + self.ref.resetLog() + else: + None def getLogConfig(self): if self.ref: @@ -878,10 +1003,15 @@ def deallocateCapacity(self, props): if _DEBUG == True: print ("attempted to deallocate a non-existent allocation") - def api(self): - print 'Allocation Properties ======' + def api(self, destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + + print >>destfile, 'Allocation Properties ======' if not self._allocProps: - print 'None' + print >>destfile, 'None' return table = TablePrinter('Property Name', '(Data Type)', 'Action') @@ -894,8 +1024,10 @@ def api(self): structdef = prop for member in structdef.members.itervalues(): table.append(' '+member.clean_name, member.type) - table.write() - + table.write(f=destfile) + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() class LoadableDevice(Device): def load(self, fs, fileName, loadKind): @@ -1112,7 +1244,7 @@ def _getPropertySet(self, \ defValue = _convertType(propType, val) id_clean = _prop_helpers._cleanId(prop) # Add individual property - id_clean = _prop_helpers.addCleanName(id_clean, prop.get_id(), _displayNames, _duplicateNames) + id_clean = _prop_helpers.addCleanName(id_clean, prop.get_id(), _displayNames, _duplicateNames, namesp=structProp.get_id()) members.append((prop.get_id(), propType, defValue, id_clean)) 
structDefValue[prop.get_id()] = defValue if defValue != None: @@ -1126,7 +1258,7 @@ def _getPropertySet(self, \ defValue = None id_clean = _prop_helpers._cleanId(prop) # Add individual property - id_clean = _prop_helpers.addCleanName(id_clean, prop.get_id(), _displayNames, _duplicateNames) + id_clean = _prop_helpers.addCleanName(id_clean, prop.get_id(), _displayNames, _duplicateNames, namesp=structProp.get_id()) members.append((prop.get_id(), propType, defValue, id_clean)) structDefValue[prop.get_id()] = defValue if defValue == None: @@ -1173,7 +1305,7 @@ def _getPropertySet(self, \ id_clean = _prop_helpers._cleanId(prp) # Add struct member members.append((prp.get_id(), propType, defValue, id_clean)) - _prop_helpers.addCleanName(id_clean, prp.get_id(), _displayNames, _duplicateNames) + _prop_helpers.addCleanName(id_clean, prp.get_id(), _displayNames, _duplicateNames, namesp=prop.get_id()) for prp in prop.get_struct().get_simplesequence(): propType = self._getPropType(prp) vals = prp.get_values() @@ -1184,7 +1316,7 @@ def _getPropertySet(self, \ id_clean = _prop_helpers._cleanId(prp) # Adds struct member members.append((prp.get_id(), propType, defValue, id_clean)) - _prop_helpers.addCleanName(id_clean, prp.get_id(), _displayNames, _duplicateNames) + _prop_helpers.addCleanName(id_clean, prp.get_id(), _displayNames, _duplicateNames, namesp=prop.get_id()) structSeqDefValue = None structValues = prop.get_structvalue() @@ -1281,6 +1413,7 @@ def __init__(self, spd, scd, prf, instanceName, refid, impl, pid=0, devs=[]): super(ComponentBase, self).__init__(prf, refid) self._spd = spd self._scd = scd + self.instanceName = instanceName self._instanceName = instanceName self._impl = impl self._pid = pid @@ -1318,7 +1451,7 @@ def _buildAPI(self): if _DEBUG == True: try: - self.api() + self.api(destfile=_sys.stdout) except: pass diff --git a/redhawk/src/base/framework/python/ossie/utils/model/connect.py b/redhawk/src/base/framework/python/ossie/utils/model/connect.py index 
83d3608b0..828d06579 100644 --- a/redhawk/src/base/framework/python/ossie/utils/model/connect.py +++ b/redhawk/src/base/framework/python/ossie/utils/model/connect.py @@ -20,6 +20,7 @@ import logging import threading +import omniORB log = logging.getLogger(__name__) @@ -42,7 +43,11 @@ def getInterface(self): return self.port['Port Interface'] def hasComponent(self, component): - return self.supplier._refid == component._refid + try: + return self.supplier._refid == component._refid + except AttributeError: + # Other object is not a port supplier + return False def getRefid(self): return self.supplier._refid @@ -68,7 +73,11 @@ def getInterface(self): return 'IDL:CF/Resource:1.0' def hasComponent(self, component): - return self.component._refid == component._refid + try: + return self.component._refid == component._refid + except AttributeError: + # Other object is not a component + return False def getRefid(self): return self.component._refid @@ -196,11 +205,13 @@ def refreshConnections(self, components): def _breakConnection(self, identifier, uses, provides): log.debug("Breaking connection '%s'", identifier) + omniORB.setClientCallTimeout(1500) try: usesPort = uses.getReference() usesPort.disconnectPort(identifier) except: log.warn("Ignoring exception breaking connection '%s'", identifier) + omniORB.setClientCallTimeout(0) uses.disconnected(identifier) provides.disconnected(identifier) diff --git a/redhawk/src/base/framework/python/ossie/utils/prop_helpers.py b/redhawk/src/base/framework/python/ossie/utils/prop_helpers.py index 24a5a5e0b..dedc79c84 100644 --- a/redhawk/src/base/framework/python/ossie/utils/prop_helpers.py +++ b/redhawk/src/base/framework/python/ossie/utils/prop_helpers.py @@ -32,6 +32,7 @@ from omniORB import CORBA as _CORBA from omniORB import tcInternal as _tcInternal import copy as _copy +import cStringIO, pydoc import struct as _struct import string as _string import operator as _operator @@ -41,6 +42,8 @@ from ossie.parsers.prf import 
configurationKind as _configurationKind SCA_TYPES = globals()['_SCA_TYPES'] +_warnings.filterwarnings('once',category=DeprecationWarning) + # Map the type of the complex number (e.g., complexFloat) to the # type of the real and imaginary members (e.g., float). __COMPLEX_SIMPLE_TYPE_MAP = { @@ -241,18 +244,23 @@ def getPropNameDict(prf): -Prevents duplicate entries within a component -Allows for get/set on components with invalid chars in ID ''' -def addCleanName(cleanName, id, _displayNames, _duplicateNames): +def addCleanName(cleanName, id, _displayNames, _duplicateNames, namesp=None): + retval=cleanName if not _displayNames.has_key(cleanName): _displayNames[cleanName] = id - _duplicateNames[cleanName] = 0 - return cleanName - elif _displayNames[cleanName] == id: - return cleanName - else: - count = _duplicateNames[cleanName] + 1 - _displayNames[cleanName + str(count)] = id - _duplicateNames[cleanName] = count - return cleanName + str(count) + # maintain a count of clean name for each namespace context + _duplicateNames[cleanName] = { namesp : 0 } + return retval + elif _displayNames[cleanName] != id: + if namesp in _duplicateNames[cleanName]: + count = _duplicateNames[cleanName][namesp] + 1 + _displayNames[cleanName + str(count)] = id + _duplicateNames[cleanName][namesp] = count + retval=cleanName + str(count) + else: + _duplicateNames[cleanName][namesp] = 0 + retval = cleanName + return retval def _cleanId(prop): translation = 48*"_"+_string.digits+7*"_"+_string.ascii_uppercase+6*"_"+_string.ascii_lowercase+133*"_" @@ -366,11 +374,14 @@ def _getStructsSimpleProps(self,simple,prop): if i.clean_name == prop.id_: for k in prop.get_configurationkind(): kinds.append(k.get_kindtype()) - if i.members[_cleanId(simple)]._enums != None: - enums = i.members[_cleanId(simple)]._enums + mname = _cleanId(simple) + if mname in i._memberNames: + mname = i._memberNames[mname] + if i.members[mname]._enums != None: + enums = i.members[mname]._enums if self.mode != 
"writeonly": - value = str(i.members[_cleanId(simple)]) - defVal = str(i.members[_cleanId(simple)].defValue) + value = str(i.members[mname]) + defVal = str(i.members[mname].defValue) type = str(self.compRef._getPropType(simple)) return defVal, value, type, kinds, enums @@ -383,34 +394,42 @@ def _getStructsSimpleSeqProps(self, sprop, prop): if i.clean_name == prop.id_: for k in prop.get_configurationkind(): kinds.append(k.get_kindtype()) - if i.members[_cleanId(sprop)].__dict__.has_key("_enums"): - if i.members[_cleanId(sprop)]._enums != None: - enums = i.members[_cleanId(sprop)]._enums + cname = _cleanId(sprop) + if cname in i._memberNames: + cname = i._memberNames[cname] + if i.members[cname].__dict__.has_key("_enums"): + if i.members[cname]._enums != None: + enums = i.members[cname]._enums if self.mode != "writeonly": - values = i.members[_cleanId(sprop)] - defVal = i.members[_cleanId(sprop)].defValue + values = i.members[cname] + defVal = i.members[cname].defValue type = str(self.compRef._getPropType(sprop)) return defVal, values, type, kinds, enums - def api(self): + def api(self, destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + kinds = [] - print "\nProperty\n--------" - print "% -*s %s" % (17,"ID:",self.id) - print "% -*s %s" % (17,"Type:",self.type) + print >>destfile, "\nProperty\n--------" + print >>destfile, "% -*s %s" % (17,"ID:",self.id) + print >>destfile, "% -*s %s" % (17,"Type:",self.type) simpleOrSequence = False if self.type != "structSeq" and self.type != "struct": simpleOrSequence = True - print "% -*s %s" % (17,"Default Value:", self.defValue) + print >>destfile, "% -*s %s" % (17,"Default Value:", self.defValue) if self.mode != "writeonly": - print "% -*s %s" % (17,"Value: ", self.queryValue()) + print >>destfile, "% -*s %s" % (17,"Value: ", self.queryValue()) try: if self._enums != None: - print "% -*s %s" % (17,"Enumumerations:", self._enums) + print >>destfile, "% -*s 
%s" % (17,"Enumumerations:", self._enums) except: simpleOrSequence = True if self.type != "struct": - print "% -*s %s" % (17,"Action:", self.action) - print "% -*s %s" % (17,"Mode: ", self.mode) + print >>destfile, "% -*s %s" % (17,"Action:", self.action) + print >>destfile, "% -*s %s" % (17,"Mode: ", self.mode) if self.type == "struct": structTable = TablePrinter('Name','Data Type','Default Value', 'Current Value','Enumerations') @@ -425,17 +444,17 @@ def api(self): defVal,value, type, kinds,enums = self._getStructsSimpleProps(sprop,prop) structTable.append(sprop.get_id(),type,str(defVal),str(value),enums) if first: - print "% -*s %s" % (17,"Kinds: ", ', '.join(kinds)) + print >>destfile, "% -*s %s" % (17,"Kinds: ", ', '.join(kinds)) first = False for sprop in prop.get_simplesequence(): defVal,values,type,kinds,enums = self._getStructsSimpleSeqProps(sprop, prop) structTable.append(sprop.get_id(),type,defVal,values,enums) if first: - print "% -*s %s" % (17,"Kinds: ",', '.join(kinds)) + print >>destfile, "% -*s %s" % (17,"Kinds: ",', '.join(kinds)) first = False structTable.write() elif self.type == "sequence": - print "sequence: ",type(self) + print >>destfile, "sequence: ",type(self) elif self.type == "structSeq": structNum = -1 @@ -449,8 +468,8 @@ def api(self): structTable.append(prop.id_, prop.get_type()) for prop in prop.get_struct().get_simplesequence(): structTable.append(prop.id_, prop.get_type()) - print "% -*s %s" % (17,"Kinds: ", ', '.join(kinds)) - print "\nStruct\n======" + print >>destfile, "% -*s %s" % (17,"Kinds: ", ', '.join(kinds)) + print >>destfile, "\nStruct\n======" structTable.write() simpleTable = TablePrinter('Index','Name','Value') @@ -463,7 +482,7 @@ def api(self): for key in s.keys(): simpleTable.append(str(structNum),key,str(s[key])) if self.mode != "writeonly": - print "\nSimple Properties\n=================" + print >>destfile, "\nSimple Properties\n=================" simpleTable.write() elif simpleOrSequence: @@ -471,8 +490,11 @@ def 
api(self): if prop.id_ == self.id: for kind in prop.get_kind(): kinds.append(kind.get_kindtype()) - print "% -*s %s" % (17,"Kinds: ", ', '.join(kinds)) - + print >>destfile, "% -*s %s" % (17,"Kinds: ", ', '.join(kinds)) + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() def _isNested(self): return self._parent is not None @@ -719,7 +741,7 @@ def __init__(self, id, valueType, enum, compRef, kinds,defValue=None, parent=Non structRef, structSeqRef, structSeqIdx """ if valueType not in SCA_TYPES: - raise('"' + str(valueType) + '"' + ' is not a valid valueType, choose from\n ' + str(SCA_TYPES)) + raise(Exception('"' + str(valueType) + '"' + ' is not a valid valueType, choose from\n ' + str(SCA_TYPES))) # Initialize the parent Property.__init__(self, id, type=valueType, kinds=kinds,compRef=compRef, mode=mode, action=action, parent=parent, @@ -834,6 +856,8 @@ def __repr__(self, *args): value = self.queryValue() if value != None: ret=str(value) + else: + raise Exception, 'Could not perform query, "' + str(self.id) + '" is a writeonly property' return ret def __str__(self, *args): @@ -904,6 +928,8 @@ def __init__(self, id, valueType, kinds, compRef, defValue=None, parent=None, mo # to determine complexity, as in the case of a struct sequence, # the value of self.valueType may not be a string. 
self.complex = True + elif self.valueType == 'utctime': + self.typecode = getCFSeqType(self.valueType) def _getItemKey(self): return self.id @@ -1002,7 +1028,8 @@ def toAny(self, value): def __repr__(self): if self.mode != "writeonly": return repr(self.queryValue()) - return '' + else: + raise Exception, 'Could not perform query, "' + str(self.id) + '" is a writeonly property' def __str__(self): return self.__repr__() @@ -1204,6 +1231,8 @@ def __repr__(self): currValue = "" if self.mode != "writeonly": currValue = self.queryValue() + else: + raise Exception, 'Could not perform query, "' + str(self.id) + '" is a writeonly property' structView = "ID: " + self.id for key in currValue: try: diff --git a/redhawk/src/base/framework/python/ossie/utils/redhawk/base.py b/redhawk/src/base/framework/python/ossie/utils/redhawk/base.py index 3380476b4..41e509bd0 100644 --- a/redhawk/src/base/framework/python/ossie/utils/redhawk/base.py +++ b/redhawk/src/base/framework/python/ossie/utils/redhawk/base.py @@ -27,30 +27,103 @@ from ossie.cf import CF as _CF import ossie.utils as _utils from ossie.utils.sca import importIDL as _importIDL +from ossie.logger import ConvertLevelNameToDebugLevel import atexit +import signal as _signal +import time as _time +import traceback class _envContainer(object): - def __init__(self, domain, stdout): - self.domain = int(domain) + def __init__(self, process, stdout): + self.process = process self.stdout = stdout +def __waitTermination( process, timeout=5.0, pause=0.1): + while process and process.poll() is None and timeout > 0.0: + timeout -= pause + _time.sleep(pause) + return process.poll() != None + +def __terminate_process( process, signals=(_signal.SIGINT, _signal.SIGTERM, _signal.SIGKILL) ): + if process and process.poll() != None: + return + try: + for sig in signals: + _os.kill(process.pid, sig) + if __waitTermination(process): + break + process.wait() + except OSError, e: + pass + finally: + pass + def _cleanup_domain(): try: - 
_os.kill(globals()['currentdomain'].domain,2) + if globals().has_key('currentdomain'): + __terminate_process( globals()['currentdomain'].process) + x = globals().pop('currentdomain') + if x : del x except: + traceback.print_exc() pass + if globals().has_key('currentdevmgrs'): + for x in globals()['currentdevmgrs']: + try: + __terminate_process(x.process) + except: + traceback.print_exc() + pass + x = globals().pop('currentdevmgrs') + if x : del x -atexit.register(_cleanup_domain) +def _shutdown_session(): + if globals().has_key('attached_domains'): + doms = globals()['attached_domains'] + if len(doms) > 0: + doms[0].orb.shutdown(True) + globals()['attached_domains'] = [] + _cleanup_domain() + +atexit.register(_shutdown_session) + +def _getDCDFile(sdrroot, dcdFile): + """ + Try to find the DCD file, either as an absolute path or relative to SDRROOT + """ + # The DCD file may just be a directory name under $SDRROOT/dev/nodes + if not dcdFile.endswith('.dcd.xml'): + # Path did not include a final DCD file + node_path = _os.path.join(sdrroot, 'dev/nodes/') + dcdFile + node_path += '/DeviceManager.dcd.xml' + if _os.path.exists(node_path): + return node_path + + # If the file exists as-is, return it + if _os.path.exists(dcdFile): + return dcdFile + + # Try an SDR-relative path + sdr_path = _os.path.join(sdrroot, 'dev/') + dcdFile + if _os.path.exists(sdr_path): + return sdr_path + + # Could not fine any matching path + return None + -def kickDomain(domain_name=None, kick_device_managers=True, device_managers=[], detached=False, sdrroot=None, stdout=None, logfile=None): +def kickDomain(domain_name=None, kick_device_managers=True, device_managers=[], detached=False, sdrroot=None, stdout=None, logfile=None, debug_level=None, device_managers_debug_levels=[]): """Kick-start a REDHAWK domain. 
domain_name: the name that should be used kick_device_managers: one or more Device Managers should be automatically started device_managers: if kick_device_managers set to True, list of Device Managers to start. If the list is empty, then start all Device Managers in - $SDRROOT/dev/nodes + $SDRROOT/dev/nodes. List can be node names i.e. GPP_node or absolute path to DCD files detached: determine whether the life cycle of the started Domain and Device Managers should follow the lifecycle of the current Python session sdrroot: use this sdr root. If set to None, then use $SDRROOT stdout: filename where stdout should be redirected. None sends stdout to /dev/null + debug_level: debug level to pass on command line: FATAL, ERROR, WARN, INFO, DEBUG, TRACE + device_managers_debug_levels = list of debug levels to pass on command line with corresponding device_managers + """ if sdrroot == None: @@ -66,7 +139,11 @@ def kickDomain(domain_name=None, kick_device_managers=True, device_managers=[], if logfile: args.append('-logcfgfile') args.append(logfile) - + + if debug_level: + args.append('-debug') + args.append(str(ConvertLevelNameToDebugLevel(debug_level))) + if domain_name != None: args.append('--domainname') args.append(domain_name) @@ -102,9 +179,10 @@ def kickDomain(domain_name=None, kick_device_managers=True, device_managers=[], if globals().has_key('currentdomain'): globals()['currentdomain'] = None - globals()['currentdomain'] = _envContainer(sp.pid, stdout_fp) + globals()['currentdomain'] = _envContainer(sp, stdout_fp) if kick_device_managers: + dm_procs=[] if len(device_managers) == 0: base = sdrroot + '/dev/nodes' for (directory,sub,files) in _os.walk(base): @@ -117,10 +195,16 @@ def kickDomain(domain_name=None, kick_device_managers=True, device_managers=[], break if foundDCD: device_managers.append(directory[len(sdrroot)+4:]+'/'+filename) - for device_manager in device_managers: + + for idx, device_manager in enumerate(device_managers): + dcd_file = 
_getDCDFile(sdrroot, device_manager) + if not dcd_file: + print "Unable to locate DCD file for '%s'" % device_manager + continue + args = ['nodeBooter'] args.append('-d') - args.append(device_manager) + args.append(dcd_file) args.append('-sdrroot') args.append(sdrroot) args.append('--domainname') @@ -128,7 +212,20 @@ def kickDomain(domain_name=None, kick_device_managers=True, device_managers=[], if logfile: args.append('-logcfgfile') args.append(logfile) + if device_managers_debug_levels and len(device_managers_debug_levels) > 0 : + dlevel = None + if idx < len(device_managers_debug_levels): + dlevel = device_managers_debug_levels[idx] + if dlevel: + args.append('-debug') + args.append(str(ConvertLevelNameToDebugLevel(dlevel))) sp = _utils.Popen(args, executable=None, cwd=_os.getcwd(), close_fds=True, stdin=_devnull, stdout=stdout_fp, preexec_fn=_os.setpgrp) + dm_procs.append( _envContainer(sp, stdout_fp) ) + + if globals().has_key('currentdevmgrs'): + globals()['currentdevmgrs'] += dm_procs + else: + globals()['currentdevmgrs'] = dm_procs dom = attach(domain_name) @@ -198,4 +295,9 @@ def attach(domain=None, location=None, connectDomainEvents=True): dom_entry = _core.Domain(name=str(domain), location=location, connectDomainEvents=connectDomainEvents) + if not globals().has_key('attached_domains'): + globals()['attached_domains'] = [] + + globals()['attached_domains'].append(dom_entry) + return dom_entry diff --git a/redhawk/src/base/framework/python/ossie/utils/redhawk/channels.py b/redhawk/src/base/framework/python/ossie/utils/redhawk/channels.py index dd6ddfe7b..594724ee0 100644 --- a/redhawk/src/base/framework/python/ossie/utils/redhawk/channels.py +++ b/redhawk/src/base/framework/python/ossie/utils/redhawk/channels.py @@ -59,6 +59,20 @@ def disconnect(self): class ODMListener(ChannelListener): CHANNEL_NAME = 'ODM_Channel' + @notification + def eventChannelAdded(self, event): + """ + A event channel was added. 
+ """ + log.trace('eventChannelAdded %s', event) + + @notification + def eventChannelRemoved(self, event): + """ + A event channel was removed. + """ + log.trace('eventChannelRemoved %s', event) + @notification def deviceManagerAdded(self, event): """ @@ -136,6 +150,7 @@ def __init__(self): StandardEvent.DEVICE: ODMListener.deviceAdded, StandardEvent.APPLICATION_FACTORY: ODMListener.applicationFactoryAdded, StandardEvent.APPLICATION: ODMListener.applicationAdded, + StandardEvent.EVENT_CHANNEL: ODMListener.eventChannelAdded, StandardEvent.SERVICE: ODMListener.serviceAdded }, StandardEvent._tc_DomainManagementObjectRemovedEventType: { @@ -143,6 +158,7 @@ def __init__(self): StandardEvent.DEVICE: ODMListener.deviceRemoved, StandardEvent.APPLICATION_FACTORY: ODMListener.applicationFactoryRemoved, StandardEvent.APPLICATION: ODMListener.applicationRemoved, + StandardEvent.EVENT_CHANNEL: ODMListener.eventChannelRemoved, StandardEvent.SERVICE: ODMListener.serviceRemoved }, } diff --git a/redhawk/src/base/framework/python/ossie/utils/redhawk/component.py b/redhawk/src/base/framework/python/ossie/utils/redhawk/component.py index cf6e92c34..a2d4a2fce 100644 --- a/redhawk/src/base/framework/python/ossie/utils/redhawk/component.py +++ b/redhawk/src/base/framework/python/ossie/utils/redhawk/component.py @@ -18,11 +18,19 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# +import cStringIO, pydoc + from ossie.utils.model import ComponentBase, Resource, PropertySet, PortSupplier class DomainComponent(ComponentBase): def __init__(self, profile, spd, scd, prf, instanceName, refid, impl, pid=0, devs=[]): - super(DomainComponent,self).__init__(spd, scd, prf, instanceName, refid, impl, pid, devs) + # Remove waveform from the instance name for ComponentBase so that the + # short name is visible as 'instanceName' + baseName = instanceName.split('/')[-1] + super(DomainComponent,self).__init__(spd, scd, prf, baseName, refid, impl, pid, devs) + # Retain the "waveformId/instantiationId" in _instanceName for backward + # compatibility + self._instanceName = instanceName self._profile = profile self.ports = [] @@ -37,17 +45,26 @@ def __init__(self, profile, spd, scd, prf, instanceName, refid, impl, pid=0, dev ##################################### - def api(self, showComponentName=True, showInterfaces=True, showProperties=True, externalPropInfo=None): + def api(self, showComponentName=True, showInterfaces=True, showProperties=True, externalPropInfo=None, destfile=None): ''' Inspect interfaces and properties for the component ''' + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + className = self.__class__.__name__ if showComponentName == True: - print className+" [" + str(self.name) + "]:" + print >>destfile, className+" [" + str(self.name) + "]:" if showInterfaces == True: - PortSupplier.api(self) + PortSupplier.api(self, destfile=destfile) if showProperties == True and self._properties != None: - PropertySet.api(self, externalPropInfo) + PropertySet.api(self, externalPropInfo, destfile=destfile) + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() class Component(DomainComponent, Resource): diff --git a/redhawk/src/base/framework/python/ossie/utils/redhawk/core.py b/redhawk/src/base/framework/python/ossie/utils/redhawk/core.py index bc1d5ec9c..a95932fc7 100644 --- 
a/redhawk/src/base/framework/python/ossie/utils/redhawk/core.py +++ b/redhawk/src/base/framework/python/ossie/utils/redhawk/core.py @@ -26,11 +26,13 @@ from ossie.cf import CF__POA as _CF__POA from ossie.cf import ExtendedCF as _ExtendedCF from ossie.cf import StandardEvent +import CosEventChannelAdmin from omniORB import CORBA as _CORBA import CosNaming as _CosNaming import sys as _sys import time as _time import datetime as _datetime +import cStringIO, pydoc import weakref import threading import logging @@ -42,6 +44,7 @@ from ossie.utils.model import _idllib from ossie.utils.model import ConnectionManager as _ConnectionManager from ossie.utils.model import * +from ossie.utils.log_helpers import stringToCode from ossie.utils.notify import notification from ossie.utils import weakobj @@ -158,6 +161,23 @@ def _get_name(self): pass return retval + def _get_stopTimeout(self): + retval = None + if self.ref: + try: + retval = self.ref._get_stopTimeout() + except: + pass + return retval + + def _set_stopTimeout(self, timeout): + retval = None + if self.ref: + try: + self.ref._set_stopTimeout(timeout) + except: + pass + def _get_registeredComponents(self): retval = None if self.ref: @@ -218,7 +238,16 @@ def releaseObject(self): self._domain.removeApplication(self) except: raise - + + def metrics(self, components, attributes): + retval = None + if self.ref: + try: + retval = self.ref.metrics(components, attributes) + except: + raise + return retval + @property def aware(self): retval = False @@ -249,6 +278,16 @@ def componentImplementations(self): pass return retval + @property + def stopTimeout(self): + retval = [] + if self.ref: + try: + retval = self.ref._get_stopTimeout() + except: + pass + return retval + @property def registeredComponents(self): retval = [] @@ -327,6 +366,7 @@ def __getattribute__(self, name): return getattr(self._acRef, name) except AttributeError: pass + raise AttributeError('App object has no attribute ' + str(name)) def __setattr__(self, name, 
value): @@ -407,6 +447,7 @@ def _populateComponents(self): if compDev.componentId == refid: devs.append(compDev.assignedDeviceId) except: + # unable to load the component information (the component is non-responsive) continue spd, scd, prf = _readProfile(profile, self._domain.fileManager) new_comp = Component(profile, spd, scd, prf, compRef, instanceName, refid, implId, pid, devs) @@ -416,25 +457,30 @@ def _populateComponents(self): if refid.find(self.assemblyController) >= 0: self._acRef = new_comp - def api(self): + def api(self, destfile=None): # Display components, their properties, and external ports - print "Waveform [" + self.ns_name + "]" - print "---------------------------------------------------" + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() - print "External Ports ==============" - PortSupplier.api(self) + print >>destfile, "Waveform [" + self.ns_name + "]" + print >>destfile, "---------------------------------------------------" - print "Components ==============" + print >>destfile, "External Ports ==============" + PortSupplier.api(self, destfile=destfile) + + print >>destfile, "Components ==============" for count, comp_entry in enumerate(self.comps): name = comp_entry.name if comp_entry._get_identifier().find(self.assemblyController) != -1: name += " (Assembly Controller)" - print "%d. %s" % (count+1, name) - print "\n" + print >>destfile, "%d. 
%s" % (count+1, name) + print >>destfile, "\n" # Display AC props if self._acRef: - self._acRef.api(showComponentName=False, showInterfaces=False, showProperties=True) + self._acRef.api(showComponentName=False, showInterfaces=False, showProperties=True, destfile=destfile) # Loops through each external prop looking for a component to use to display the internal prop value for extId in self._externalProps.keys(): @@ -442,10 +488,14 @@ def api(self): for comp_entry in self.comps: if comp_entry._get_identifier().find(compRefId) != -1: # Pass along external prop info to component api() - comp_entry.api(showComponentName=False,showInterfaces=False,showProperties=True, externalPropInfo=(extId, propId)) + comp_entry.api(showComponentName=False,showInterfaces=False,showProperties=True, externalPropInfo=(extId, propId), destfile=destfile) break - print + print >>destfile, '\n' + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() def _populatePorts(self, fs=None): """Add all port descriptions to the component instance""" @@ -692,8 +742,10 @@ def serviceUnregistered(self, identifier): def __init__(self, name="", devMgr=None, dcd=None, domain=None, idmListener=None, odmListener=None): self.name = name + self._instanceName = name self.ref = devMgr self.id = self.ref._get_identifier() + self._id = self.id self._domain = domain self._dcd = dcd self.fs = self.ref._get_fileSys() @@ -802,6 +854,72 @@ def _get_domMgr(self): except: pass return retval + + def setLogLevel(self, logger_id, cf_log_lvl ): + _cf_log_lvl = cf_log_lvl + if type(cf_log_lvl) == str: + _cf_log_lvl = stringToCode(cf_log_lvl) + if self.ref : + try: + self.ref.setLogLevel(logger_id, _cf_log_lvl) + except: + raise + + def getLogLevel(self, logger_id): + if self.ref : + try: + return self.ref.getLogLevel(logger_id) + except: + raise + + def getLogConfig(self): + if self.ref : + try: + return self.ref.getLogConfig() + except: + raise + + def setLogConfig(self, new_config): + if self.ref : + try: 
+ self.ref.setLogConfig(new_config) + except: + raise + + def setLogConfigURL(self, new_config_url): + if self.ref : + try: + self.ref.setLogConfigURL(new_config_url) + except: + raise + + def _get_log_level(self): + if self.ref : + try: + return self.ref._get_log_level() + except: + raise + + def _set_log_level(self, value): + if self.ref : + try: + self.ref._set_log_level(value) + except: + raise + + def getNamedLoggers(self): + if self.ref : + try: + return self.ref.getNamedLoggers() + except: + raise + + def resetLog(self): + if self.ref : + try: + self.ref.resetLog() + except: + raise @property def domMgr(self): @@ -1186,9 +1304,8 @@ def deviceMgrEndPoint( self, devMgr_name, port_name ) : def objectRefEndPoint( self, obj_ref, port_name ) : restype = _CF.ConnectionManager.EndpointResolutionType(objectRef=obj_ref) return _CF.ConnectionManager.EndpointRequest(restype, port_name) - - - def connect( self, usesEndPoint, providesEndPoint, requesterId, connectionId ): + + def connect(self, usesEndPoint, providesEndPoint, requesterId='default', connectionId=''): retval=None if self.ref: try: @@ -1198,12 +1315,9 @@ def connect( self, usesEndPoint, providesEndPoint, requesterId, connectionId ): return retval def disconnect( self, connectionRecordId ): - print 'core disconnect' if self.ref: try: - print 'begin core disconnect' self.ref.disconnect( connectionRecordId ) - print 'done core disconnect' except: raise @@ -1219,11 +1333,88 @@ def listConnections( self, count=0 ): return retval +class EventChannel(CorbaObject): + def __init__(self, ref, name): + CorbaObject.__init__(self, ref) + self.name = name + self.ref = self.ref._narrow(CosEventChannelAdmin.EventChannel) + class EventChannelManager(CorbaObject): - def __init__(self, ref=None): + def __init__(self, ref=None, odmListener=None): CorbaObject.__init__(self, ref) + + self.__evtChannels = DomainObjectList(weakobj.boundmethod(self.__getEventChannels), + weakobj.boundmethod(self.__newEventChannel), + lambda x: 
x.name) + + self.__evtChannels.itemAdded.addListener(weakobj.boundmethod(self.eventChannelAdded)) + self.__evtChannels.itemRemoved.addListener(weakobj.boundmethod(self.eventChannelRemoved)) + + if odmListener: + weakobj.addListener(odmListener.eventChannelAdded, self.__eventChannelAddedEvent) + weakobj.addListener(odmListener.eventChannelRemoved, self.__eventChannelRemovedEvent) + self.__odmListener = odmListener + + def __inspectEventChannelManager(self, unique=None): + if type(unique) == EventChannel: + return [unique] + channels, _iter = self.ref.listChannels(0) + _retval = [] + status = True + while status: + status, _chans = _iter.next_n(100) + for _chan in _chans: + _chanref = self.ref.get(_chan.channel_name) + if not unique: + _retval.append(EventChannel(_chanref, _chan.channel_name)) + else: + if _chanref._is_equivalent(unique): + return [EventChannel(unique, _chan.channel_name)] + return _retval + + + def __getEventChannels(self): + # If the ODM channel is not connected, force an update to the list. + chans = self.__inspectEventChannelManager() + return chans + + def __newEventChannel(self, evtChannel): + retval = self.__inspectEventChannelManager(evtChannel) + if len(retval) == 1: + return retval[0] + return EventChannel(evtChannel, '') + + def __eventChannelAddedEvent(self, event): + try: + self.__evtChannels.add(event.sourceId, event.sourceIOR) + except Exception as e: + # The event channel is already gone or otherwise unavailable. + pass + + def __eventChannelRemovedEvent(self, event): + self.__evtChannels.remove(event.sourceId) + + @property + def eventChannels(self): + if not self.__odmListener: + self.__evtChannels.sync() + return self.__evtChannels.values() + @notification + def eventChannelAdded(self, evtChannel): + """ + The event channel 'evtChannel' was added to the system. + """ + pass + + @notification + def eventChannelRemoved(self, evtChannel): + """ + The event channel 'evtChannel' was removed from the system. 
+ """ + pass + def release( self, channelName ): if self.ref: try: @@ -1231,6 +1422,15 @@ def release( self, channelName ): except: raise + def get( self, channelName ): + retval=None + if self.ref: + try: + retval = self.ref.get(channelName) + except: + raise + return retval + def create( self, channelName ): retval=None if self.ref: @@ -1296,7 +1496,23 @@ def listRegistrants( self, channelName="", count=0 ): raise return retval - + def registerConsumer(self, consumer, req): + ''' + consumer is an event consumer (i.e.: ossie.events.Receiver) + req is an EventRegistration (i.e.: CF.EventRegistration) + ''' + if hasattr(consumer, '_this'): + return self.ref.registerConsumer(consumer._this(), req) + return self.ref.registerConsumer(consumer, req) + + def registerPublisher(self, req, disconnectReceiver): + ''' + req is an EventRegistration (i.e.: CF.EventRegistration) + disconnectReceiver is an optional event consumer (i.e.: ossie.events.Receiver) + ''' + #if hasattr(disconnectReceiver, '_this'): + # return self.ref.registerPublisher(req, disconnectReceiver._this()) + return self.ref.registerPublisher(req, disconnectReceiver) class Domain(_CF__POA.DomainManager, QueryableBase, PropertyEmitter): """The Domain is a descriptor for a Domain Manager. 
@@ -1411,14 +1627,19 @@ def __init__(self, name="DomainName1", location=None, connectDomainEvents=True): raise StandardError, "Did not find domain "+name self.ref = obj._narrow(_CF.DomainManager) - self.fileManager = self.ref._get_fileMgr() + try: + self.fileManager = self.ref._get_fileMgr() + except: + raise StandardError('Domain Manager '+self.name+' is not available') + self.id = self.ref._get_identifier() + self._id = self.id try: spd, scd, prf = _readProfile("/mgr/DomainManager.spd.xml", self.fileManager) super(Domain, self).__init__(prf, self.id) except Exception, e: pass - + self._buildAPI() self.__deviceManagers = DomainObjectList(weakobj.boundmethod(self._get_deviceManagers), @@ -1440,6 +1661,8 @@ def __init__(self, name="DomainName1", location=None, connectDomainEvents=True): if connectDomainEvents: self.__connectIDMChannel() self.__connectODMChannel() + + self.__eventChannelMgr = self.getEventChannelMgr() def _populateApps(self): self.__setattr__('_waveformsUpdated', True) @@ -1496,6 +1719,10 @@ def apps(self): self.__applications.sync() return self.__applications.values() + @property + def eventChannels(self): + return self.__eventChannelMgr.eventChannels + @property def devices(self): devs = [] @@ -1696,34 +1923,7 @@ def _get_domainManagerProfile(self): except: pass return retval - - def _get_allocationMgr(self): - retval = None - if self.ref: - try: - retval = self.ref._get_allocationMgr() - except: - pass - return retval - - def _get_connectionMgr(self): - retval = None - if self.ref: - try: - retval = self.ref._get_connectionMgr() - except: - pass - return retval - - def _get_eventChannelMgr(self): - retval = None - if self.ref: - try: - retval = self.ref._get_eventChannelMgr() - except: - pass - return retval - + def _get_name(self): retval = '' if self.ref: @@ -1797,7 +1997,7 @@ def query(self, props): except: raise return retval - + def registerDevice(self, device, deviceManager): if self.ref: try: @@ -1901,11 +2101,104 @@ def 
getConnectionMgr(self): def getEventChannelMgr(self): if self.ref and self.__eventChannelMgr == None : try: - self.__eventChannelMgr = EventChannelManager(self.ref._get_eventChannelMgr()) + self.__eventChannelMgr = EventChannelManager(self.ref._get_eventChannelMgr(), odmListener=self.__odmListener) + except: + raise + return self.__eventChannelMgr + + def _get_allocationMgr(self): + if self.ref and self.__allocationMgr == None : + try: + self.__allocationMgr = AllocationManager(self.ref._get_allocationMgr()) + except: + raise + return self.__allocationMgr + + def _get_connectionMgr(self): + if self.ref and self.__connectionMgr == None : + try: + self.__connectionMgr = ConnectionManager(self.ref._get_connectionMgr()) + except: + raise + return self.__connectionMgr + + def _get_eventChannelMgr(self): + if self.ref and self.__eventChannelMgr == None : + try: + self.__eventChannelMgr = EventChannelManager(self.ref._get_eventChannelMgr(), odmListener=self.__odmListener) except: raise return self.__eventChannelMgr + def _get_log_level(self): + ret=None + if self.ref : + try: + ret=self.ref._get_log_level() + except: + raise + return ret + + def _set_log_level(self, cf_log_lvl ): + if self.ref : + try: + self.ref._set_log_level(cf_log_lvl) + except: + raise + + def getLogConfig(self): + if self.ref : + try: + return self.ref.getLogConfig() + except: + raise + return None + + def setLogConfig(self, cfg): + if self.ref : + try: + self.ref.setLogConfig(cfg) + except: + raise + + def setLogConfigURL(self, cfg_url): + if self.ref : + try: + self.ref.setLogConfigURL(cfg_url) + except: + raise + + def setLogLevel(self, logger_id, cf_log_lvl ): + _cf_log_lvl = cf_log_lvl + if type(cf_log_lvl) == str: + _cf_log_lvl = stringToCode(cf_log_lvl) + if self.ref : + try: + self.ref.setLogLevel(logger_id, _cf_log_lvl) + except: + raise + + def getLogLevel(self, logger_id): + if self.ref : + try: + return self.ref.getLogLevel(logger_id) + except: + raise + + def getNamedLoggers(self): + if 
self.ref : + try: + return self.ref.getNamedLoggers() + except: + raise + + def resetLog(self): + if self.ref : + try: + self.ref.resetLog() + except: + raise + # End external Domain Manager API ######################################## diff --git a/redhawk/src/base/framework/python/ossie/utils/redhawk/device.py b/redhawk/src/base/framework/python/ossie/utils/redhawk/device.py index eacdbfdfc..5bbb769dc 100644 --- a/redhawk/src/base/framework/python/ossie/utils/redhawk/device.py +++ b/redhawk/src/base/framework/python/ossie/utils/redhawk/device.py @@ -19,6 +19,7 @@ # import warnings +import cStringIO, pydoc from ossie.cf import CF from ossie.utils.notify import notification @@ -164,10 +165,19 @@ def __operationalStateChangeEvent(self, deviceId, stateChangeFrom, stateChangeTo def __usageStateChangeEvent(self, deviceId, stateChangeFrom, stateChangeTo): self.__usageState.update(stateChangeTo) - def api(self): - super(DomainDevice,self).api() - print - model.Device.api(self) + def api(self, destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + + super(DomainDevice,self).api(destfile=destfile) + print >>destfile, '\n' + model.Device.api(self, destfile=destfile) + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() class Device(DomainDevice, model.Device): def __init__(self, profile, spd, scd, prf, deviceRef, instanceName, refid, impl=None, idmListener=None): diff --git a/redhawk/src/base/framework/python/ossie/utils/redhawk/model.py b/redhawk/src/base/framework/python/ossie/utils/redhawk/model.py index da715d96b..529b5ada9 100644 --- a/redhawk/src/base/framework/python/ossie/utils/redhawk/model.py +++ b/redhawk/src/base/framework/python/ossie/utils/redhawk/model.py @@ -252,7 +252,7 @@ def values(self): self.lock() try: if not self.isCached: - self.sync() + self.sync(), self.__data return self.__data.values() finally: self.unlock() diff --git 
a/redhawk/src/base/framework/python/ossie/utils/redhawk/sad_template.py b/redhawk/src/base/framework/python/ossie/utils/redhawk/sad_template.py index 072e0f8e2..efd286001 100644 --- a/redhawk/src/base/framework/python/ossie/utils/redhawk/sad_template.py +++ b/redhawk/src/base/framework/python/ossie/utils/redhawk/sad_template.py @@ -25,20 +25,20 @@ def __init__(self): self.componentfile='\n \n \n \n' - self.componentplacement='\n \n \n \n @__COMPONENTNAME__@\n \n \n \n \n \n' + self.componentplacement='\n \n \n \n @__COMPONENTNAME__@\n \n \n \n \n \n' - self.assemblycontroller='\n \n \n \n' + self.assemblycontroller='\n \n \n \n' self.externalport='\n \n@__USESPORT__@\n@__PROVIDESPORT__@\n \n' - self.usesport='\n \n @__PORTNAME__@\n \n \n' + self.usesport='\n \n @__PORTNAME__@\n \n \n' - self.providesport='\n \n @__PORTNAME__@\n \n \n' + self.providesport='\n \n @__PORTNAME__@\n \n \n' - self.componentsupportedinterface='\n \n @__PORTINTERFACE__@\n \n \n' + self.componentsupportedinterface='\n \n @__PORTINTERFACE__@\n \n \n' - self.externalusesport='\n \n @__PORTNAME__@\n \n \n' + self.externalusesport='\n \n @__PORTNAME__@\n \n \n' - self.externalprovidesport='\n \n @__PORTNAME__@\n \n \n' + self.externalprovidesport='\n \n @__PORTNAME__@\n \n \n' - self.connectinterface='\n \n@__USESPORT__@\n@__PROVIDESPORT__@\n \n' + self.connectinterface='\n \n@__USESPORT__@\n@__PROVIDESPORT__@\n \n' diff --git a/redhawk/src/base/framework/python/ossie/utils/rhconnection/__init__.py b/redhawk/src/base/framework/python/ossie/utils/rhconnection/__init__.py new file mode 100644 index 000000000..0f7a0eb83 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/rhconnection/__init__.py @@ -0,0 +1,25 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. 
+# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +# This is a stub file to provide the 'redhawk' namespace +# Python files generated for new IDLs will be added under this namespace +# e.g. 'redhawk.mynamespace' + +from helpers import * diff --git a/redhawk/src/base/framework/python/ossie/utils/rhconnection/helpers.py b/redhawk/src/base/framework/python/ossie/utils/rhconnection/helpers.py new file mode 100644 index 000000000..5a42d2d41 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/rhconnection/helpers.py @@ -0,0 +1,131 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. 
If not, see http://www.gnu.org/licenses/. +# + +from ossie.cf import CF +import ossie.utils.redhawk + +class CannotResolve(Exception): + pass + +class NotSandboxObject(Exception): + pass + +class CannotResolveRef(Exception): + pass + +def makeEndPoint(obj, port_name='', rsc_id=None, force=False): + ''' + obj: a sandbox or CORBA-accessible object + port_name: the port name (where applicable). Use an empty string for base supported interface + rsc_id: the unique domain id for the Domain object/resource. This string is optional (the function tries to figure out what it should be) + force: return an endpoint (objref) if nothing else matches + ''' + try: + return makeEndPointFromPy(obj, port_name, rsc_id) + except NotSandboxObject: + pass + except Exception as e: + raise + + try: + return makeEndPointFromRef(obj, port_name, rsc_id) + except CannotResolveRef: + pass + except Exception as e: + raise + + if not force: + raise CannotResolve('Object '+str(obj)+' could not be resolved to a sandbox object or CORBA-accessible Domain object') + + restype = CF.ConnectionManager.EndpointResolutionType(objectRef='') + return CF.ConnectionManager.EndpointRequest(restype, port_name) + +def makeEndPointFromPy(obj, port_name='', rsc_id=None): + + if isinstance(obj, ossie.utils.redhawk.device.DomainDevice): + _id = rsc_id + if not _id: + _id = obj._id + restype = CF.ConnectionManager.EndpointResolutionType(deviceId=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + elif isinstance(obj, ossie.utils.redhawk.core.App): + _id = rsc_id + if not _id: + _id = obj._id + restype = CF.ConnectionManager.EndpointResolutionType(applicationId=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + elif isinstance(obj, ossie.utils.redhawk.component.Component): + _id = rsc_id + if not _id: + _id = obj._id + restype = CF.ConnectionManager.EndpointResolutionType(componentId=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + elif isinstance(obj, 
ossie.utils.redhawk.core.EventChannel): + _id = rsc_id + if not _id: + _id = obj.name + restype = CF.ConnectionManager.EndpointResolutionType(channelName=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + elif isinstance(obj, ossie.utils.redhawk.core.Service): + _id = rsc_id + if not _id: + _id = obj.name + restype = CF.ConnectionManager.EndpointResolutionType(serviceName=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + elif isinstance(obj, ossie.utils.redhawk.core.DeviceManager): + _id = rsc_id + if not _id: + _id = obj._id + restype = CF.ConnectionManager.EndpointResolutionType(deviceMgrId=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + raise NotSandboxObject('Object '+str(obj)+' is not a sandbox object') + +def makeEndPointFromRef(obj, port_name='', rsc_id=None): + _id = rsc_id + try: + if hasattr(obj, '_this'): + repid = obj._this()._NP_RepositoryId + else: + repid = obj._NP_RepositoryId + except: + raise Exception('Object must have repository id') + + if not _id: # could just be object + try: + _id = obj._get_identifier() + except: + pass + + if repid == 'IDL:omni/omniEvents/EventChannel:1.0' and not _id: + raise CannotResolveRef('Object '+str(obj)+' is an Event Channel. 
An Event Channel name must be provided (rsc_id)') + + if 'Device' in repid and _id != None: # executable, loadable, base, or any of the aggregate devices + restype = CF.ConnectionManager.EndpointResolutionType(deviceId=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + elif repid == 'IDL:CF/Application:1.0' and _id != None: + restype = CF.ConnectionManager.EndpointResolutionType(applicationId=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + elif repid == 'IDL:CF/Resource:1.0' and _id != None: + restype = CF.ConnectionManager.EndpointResolutionType(componentId=_id) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + elif repid == 'IDL:CF/DeviceManager:1.0' and name != None: + restype = CF.ConnectionManager.EndpointResolutionType(channelName=name) + return CF.ConnectionManager.EndpointRequest(restype, port_name) + raise CannotResolveRef('Object '+str(obj)+' cannot be resolved to a Device, Application, Component, Event Channel, or Device Manager. A reference is insufficient to resolve the object further') diff --git a/redhawk/src/base/framework/python/ossie/utils/rhtime/__init__.py b/redhawk/src/base/framework/python/ossie/utils/rhtime/__init__.py new file mode 100644 index 000000000..0f7a0eb83 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/rhtime/__init__.py @@ -0,0 +1,25 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +# This is a stub file to provide the 'redhawk' namespace +# Python files generated for new IDLs will be added under this namespace +# e.g. 'redhawk.mynamespace' + +from helpers import * diff --git a/redhawk/src/base/framework/python/ossie/utils/rhtime/helpers.py b/redhawk/src/base/framework/python/ossie/utils/rhtime/helpers.py new file mode 100644 index 000000000..dec1c1618 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/rhtime/helpers.py @@ -0,0 +1,158 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK bulkioInterfaces. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +import time +import copy +import math + +from ossie.cf import CF +from omniORB import any as _any + +def now(): + """ + Generates a CF.UTCTime object using the current + CPU time that you can use in the pushPacket call + """ + ts = time.time() + return CF.UTCTime(1, int(ts), ts - int(ts)) + +def notSet(): + """ + Generates a CF.UTCTime object with zero time + and an invalid flag. 
This is used by the automatic EOS + """ + return CF.UTCTime(1, 0.0, 0.0) + +def queryTimestamp(): + return CF.DataType(id='QUERY_TIMESTAMP',value=_any.to_any(None)) + +def cpuTimeStamp(): + return now() + +def create( whole_secs=-1.0, fractional_secs=-1.0 ): + """ + Generates a CF.UTCTime object using the current + CPU time that you can use in the pushPacket call + """ + wsec = whole_secs; + fsec = fractional_secs; + if wsec < 0.0 and fsec < 0.0 : + ts=time.time() + wsec=int(ts) + fsec = ts-int(ts) + + return CF.UTCTime(1, wsec, fsec ) + +def compare(T1, T2): + """ + Will compare two CF.UTCTime objects and return True + if they are both equal, and false otherwise + """ + if not T1 or not T2: + return False + + if T1.tcstatus != T2.tcstatus: + return False + if T1.tfsec != T2.tfsec: + return False + if T1.twsec != T2.twsec: + return False + return True + +def addSampleOffset(T, numSamples=0, xdelta=0.0): + tstamp = copy.deepcopy(T) + tstamp.twsec += int(numSamples*xdelta) + tstamp.tfsec += numSamples*xdelta - int(numSamples*xdelta) + if tstamp.tfsec >= 1.0: + tstamp.twsec += 1 + tstamp.tfsec -= 1.0 + return tstamp + +def normalize(tstamp): + # Get fractional adjustment from whole seconds + fadj, tstamp.twsec = math.modf(tstamp.twsec) + + # Adjust fractional seconds and get whole seconds adjustment + tstamp.tfsec, wadj = math.modf(tstamp.tfsec + fadj) + + # If fractional seconds are negative, borrow a second from the whole + # seconds to make it positive, normalizing to [0,1) + if (tstamp.tfsec < 0.0): + tstamp.tfsec += 1.0; + wadj -= 1.0; + + tstamp.twsec += wadj; + +def difference(t1, t2): + return (t1.twsec - t2.twsec) + (t1.tfsec - t2.tfsec) + +def add(t1, offset): + return iadd(copy.copy(t1), offset) + +def iadd(t1, offset): + fractional, whole = math.modf(offset) + t1.twsec += whole + t1.tfsec += fractional + normalize(t1) + return t1 + +def sub(t1, other): + if isinstance(other, CF.UTCTime): + return difference(t1, other) + else: + return isub(copy.copy(t1), 
other) + +def isub(t1, offset): + return iadd(t1, -offset) + +def compare(t1, t2): + if not isinstance(t2, CF.UTCTime): + return -1 + if (t1.twsec == t2.twsec): + return cmp(t1.tfsec, t2.tfsec) + else: + return cmp(t1.twsec, t2.twsec) + +def toString(tstamp): + # Break out the whole seconds into a GMT time + gmt = time.gmtime(tstamp.twsec) + # Append the fractional seconds down to microsecond precision + fractional = int(round(tstamp.tfsec * 1e6)) + return '%04d:%02d:%02d::%02d:%02d:%02d.%06d' % (gmt.tm_year, gmt.tm_mon, gmt.tm_mday, gmt.tm_hour, + gmt.tm_min, gmt.tm_sec, fractional) + +def convert(timeString): + if timeString == 'now': + return now() + _sets = timeString.split(':') + if len(_sets) != 7: + return CF.UTCTime(0,0,0) + _year, _month, _day, _blank, _hours, _minutes, _seconds = timeString.split(':') + _full_seconds = float(_seconds) + _time = time.mktime((int(_year),int(_month),int(_day),int(_hours),int(_minutes),int(_full_seconds),0,0,0))-time.timezone + return CF.UTCTime(1, _time, _full_seconds - int(_full_seconds)) + # Break out the whole seconds into a GMT time + +# Insert the arithmetic functions as operators on the PrecisionUTCTime class +CF.UTCTime.__add__ = add +CF.UTCTime.__iadd__ = iadd +CF.UTCTime.__sub__ = sub +CF.UTCTime.__isub__ = isub +CF.UTCTime.__str__ = toString +CF.UTCTime.__cmp__ = compare diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/base.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/base.py index 8d762827d..7cf5cd4c4 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sandbox/base.py +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/base.py @@ -19,19 +19,14 @@ # import os -import logging -import warnings as _warnings -import time from ossie import parsers -from ossie.cf import CF -from ossie import properties as _properties -from ossie.utils import log4py +from ossie.utils.log4py import logging from ossie.utils import weakobj -from ossie.utils.model import PortSupplier, 
PropertySet, ComponentBase, CorbaObject from ossie.utils.model.connect import ConnectionManager from ossie.utils.uuid import uuid4 -from ossie.utils.sandbox.events import EventChannel + +from model import SandboxComponent, SandboxDevice, SandboxService, SandboxEventChannel log = logging.getLogger(__name__) @@ -189,7 +184,8 @@ def getEventChannels(self): def _removeEventChannel(self, name): del self._eventChannels[name] - def _get_started(self): + @property + def started(self): return self._started def start(self): @@ -219,8 +215,8 @@ def reset(self): component.reset() def launch(self, descriptor, instanceName=None, refid=None, impl=None, - debugger=None, window=None, execparams={}, configure={}, - initialize=True, timeout=None, objType=None, stdout=None): + debugger=None, window=None, properties={}, configure=True, + initialize=True, timeout=None, objType=None, shared=True, stdout=None): sdrRoot = self.getSdrRoot() # Parse the component XML profile. @@ -234,8 +230,14 @@ def launch(self, descriptor, instanceName=None, refid=None, impl=None, # Check that we can launch the component. comptype = scd.get_componenttype() - if comptype not in self.__comptypes__: - raise NotImplementedError, "No support for component type '%s'" % comptype + if comptype == 'resource': + clazz = SandboxComponent + elif comptype in ('device', 'loadabledevice', 'executabledevice'): + clazz = SandboxDevice + elif comptype == 'service': + clazz = SandboxService + else: + raise NotImplementedError("No support for component type '%s'" % comptype) # Generate/check instance name. if not instanceName: @@ -245,16 +247,26 @@ def launch(self, descriptor, instanceName=None, refid=None, impl=None, # Generate/check identifier. 
if not refid: - refid = str(uuid4()) + refid = 'DCE:'+str(uuid4()) elif not self._checkInstanceId(refid, comptype): raise ValueError, "User-specified identifier '%s' already in use" % (refid,) # If possible, determine the correct placement of properties - execparams, initProps, configure = self._sortOverrides(prf, execparams, configure) + execparams, initProps, configProps = self._sortOverrides(prf, properties) + if not configure: + configProps = None # Determine the class for the component type and create a new instance. - return self._launch(profile, spd, scd, prf, instanceName, refid, impl, execparams, - initProps, initialize, configure, debugger, window, timeout, stdout) + comp = clazz(self, profile, spd, scd, prf, instanceName, refid, impl) + launcher = self._createLauncher(comptype, execparams, initProps, initialize, configProps, debugger, window, timeout, shared, stdout) + if not launcher: + raise NotImplementedError("No support for component type '%s'" % comptype) + comp._launcher = launcher + + # Launch the component + comp._kick() + + return comp def shutdown(self): # Clean up any event channels created by this sandbox instance. @@ -268,10 +280,10 @@ def catalog(self, searchPath=None, objType="components"): files[profile['name']] = profile['profile'] return files - def _sortOverrides(self, prf, execparams, configure): + def _sortOverrides(self, prf, properties): if not prf: - # No PRF file, assume the properties are correct as-is - return execparams, {}, configure + # No PRF file, assume all properties are execparams. + return properties, {}, {} # Classify the PRF properties by which stage of initialization they get # set: 'commandline', 'initialize', 'configure' or None (not settable). 
@@ -284,34 +296,29 @@ def _sortOverrides(self, prf, execparams, configure): if not name in stages: stages[name] = stage - # Check properties that do not belong in execparams - arguments = {} - for key, value in execparams.iteritems(): - if key in stages and stages[key] != 'commandline': - raise ValueError("Non-command line property '%s' given in execparams" % key) - arguments[key] = value + # Add in system-defined execparams that users are allowed to override + stages['DEBUG_LEVEL'] = 'commandline' + stages['LOGGING_CONFIG_URI'] = 'commandline' - # Sort configure properties into the appropriate stage of initialization + # Sort properties into the appropriate stage of initialization + execparams = {} initProps = {} - if configure is not None: - configProps = {} - for key, value in configure.iteritems(): - if not key in stages: - log.warning("Unknown property '%s'" , key) - continue - stage = stages[key] - if stage == 'commandline': - arguments[key] = value - elif stage == 'initialize': - initProps[key] = value - elif stage == 'configure': - configProps[key] = value - else: - log.warning("Property '%s' cannot be set at launch", key) - else: - configProps = None + configProps = {} + for key, value in properties.iteritems(): + if not key in stages: + log.warning("Unknown property '%s'" , key) + continue + stage = stages[key] + if stage == 'commandline': + execparams[key] = value + elif stage == 'initialize': + initProps[key] = value + elif stage == 'configure': + configProps[key] = value + else: + log.warning("Property '%s' cannot be set at launch", key) - return arguments, initProps, configProps + return execparams, initProps, configProps def _getInitializationStage(self, prop, kinds, commandline=False): # Helper method to classify the initialization stage for a particular @@ -346,106 +353,21 @@ def _getInitializationStages(self, prf): for prop in prf.get_structsequence(): yield prop, self._getInitializationStage(prop, prop.get_configurationkind()) - -class 
SandboxComponent(ComponentBase): - def __init__(self, sandbox, profile, spd, scd, prf, instanceName, refid, impl): - super(SandboxComponent,self).__init__(spd, scd, prf, instanceName, refid, impl) - self._sandbox = sandbox - self._profile = profile - self._componentName = spd.get_name() - self._propRef = {} - self._configRef = {} - self._msgSupplierHelper = None - for prop in self._getPropertySet(kinds=('configure',), modes=('readwrite', 'writeonly'), includeNil=False): - if prop.defValue is None: - continue - self._configRef[str(prop.id)] = prop.defValue - for prop in self._getPropertySet(kinds=('property',), includeNil=False, commandline=False): - if prop.defValue is None: - continue - self._propRef[str(prop.id)] = prop.defValue - - self.__ports = None - - def _readProfile(self): - sdrRoot = self._sandbox.getSdrRoot() - self._spd, self._scd, self._prf = sdrRoot.readProfile(self._profile) - - def _kick(self): - self.ref = self._launch() - self._sandbox._registerComponent(self) - - @property - def _ports(self): - #DEPRECATED: replaced with ports - _warnings.warn("'_ports' is deprecated", DeprecationWarning) - return self.ports - - @property - def ports(self): - if self.__ports == None: - self.__ports = self._populatePorts() - return self.__ports - - def reset(self): - self.releaseObject() - self._readProfile() - self._kick() - self.initialize() - self._parseComponentXMLFiles() - self._buildAPI() - # Clear cached ports list - self.__ports = None - - def releaseObject(self): - # Break any connections involving this component. + def _breakConnections(self, target): + # Break any connections involving this object. 
manager = ConnectionManager.instance() for _identifier, (identifier, uses, provides) in manager.getConnections().items(): - if uses.hasComponent(self) or provides.hasComponent(self): + if uses.hasComponent(target) or provides.hasComponent(target): manager.breakConnection(identifier, uses) manager.unregisterConnection(identifier, uses) - self._sandbox._unregisterComponent(self) - super(SandboxComponent,self).releaseObject() - - def api(self): - ''' - Inspect interfaces and properties for the component - ''' - print "Component [" + str(self._componentName) + "]:" - PortSupplier.api(self) - PropertySet.api(self) - - def sendMessage(self, msg, msgId=None, msgPort=None, restrict=True ): - """ - send a message out a component's message event port - - msg : dictionary of information to send or an any object - msgId : select a specific message structure property from the component, if None will - choose first available message property structure for the component - msgPort : select a specified message event port to use, if None will try to autoselect - restrict : if True, will restrict msgId to only those message ids defined by the component - if False, will allow for ad-hoc message to be sent - """ - if self._msgSupplierHelper == None: - import ossie.utils - self._msgSupplierHelper = ossie.utils.sb.io_helpers.MsgSupplierHelper(self) - if self.ref and self.ref._get_started() == True and self._msgSupplierHelper: - return self._msgSupplierHelper.sendMessage( msg, msgId, msgPort, restrict ) - return False - -class SandboxEventChannel(EventChannel, CorbaObject): - def __init__(self, name, sandbox): - EventChannel.__init__(self, name) - CorbaObject.__init__(self) - self._sandbox = sandbox - self._instanceName = name - - def destroy(self): - # Break any connections involving this event channel. 
- manager = ConnectionManager.instance() - for _identifier, (identifier, uses, provides) in manager.getConnections().items(): - if provides.hasComponent(self): - manager.breakConnection(identifier, uses) - manager.unregisterConnection(identifier, uses) - self._sandbox._removeEventChannel(self._instanceName) - EventChannel.destroy(self) + + +class SandboxLauncher(object): + def launch(self, comp): + raise NotImplementedError('launch') + + def setup(self, comp): + raise NotImplementedError('setup') + + def terminate(self, comp): + raise NotImplementedError('terminate') diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/debugger.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/debugger.py index 6dd88b392..e4b55a6a1 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sandbox/debugger.py +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/debugger.py @@ -21,10 +21,25 @@ import commands import os import sys +import socket class Debugger(object): - def __init__(self, command): + def __init__(self, command, option_value_join, **opts): self.command = command + self.arguments = [] + for name, value in opts.iteritems(): + if value is True: + value = 'yes' + elif value is False: + value = 'no' + if value == '': + self.arguments.append(name) + else: + if option_value_join: + self.arguments.append(name + option_value_join + str(value)) + else: + self.arguments.append(name) + self.arguments.append(str(value)) def isInteractive(self): return True @@ -35,12 +50,26 @@ def modifiesCommand(self): def canAttach(self): return False + def envUpdate(self): + return {} + class GDB(Debugger): - def __init__(self, attach=True): + def __init__(self, attach=True, **opts): status, gdb = commands.getstatusoutput('which gdb') if status: raise RuntimeError, 'gdb cannot be found' - super(GDB,self).__init__(gdb) + pass_opts = {} + for name, value in opts.iteritems(): + if len(name) == 1: + name = '-'+name + pass_opts[name] = value + elif name[0] == '-' and 
name[1] != '-': + pass_opts[name] = value + elif name[:2] != '--': + name = '--'+name + name = name.replace('_','-') + pass_opts[name] = value + super(GDB,self).__init__(gdb, '=', **pass_opts) self._attach = attach def modifiesCommand(self): @@ -50,20 +79,23 @@ def canAttach(self): return self._attach def attach(self, process): - return self.command, [process.command(), str(process.pid())] + return self.command, ['-p', str(process.pid())] + self.arguments def wrap(self, command, arguments): - return self.command, ['--args', command] + arguments + return self.command, ['--args', command] + arguments + self.arguments + + def name(self): + return 'gdb' class PDB(Debugger): - def __init__(self): - super(PDB,self).__init__(PDB.findPDB()) + def __init__(self, **opts): + super(PDB,self).__init__(PDB.findPDB(), None, **opts) def modifiesCommand(self): return True def wrap(self, command, arguments): - return self.command, [command] + arguments + return self.command, [command] + arguments + self.arguments @staticmethod def findPDB(): @@ -73,25 +105,68 @@ def findPDB(): return filename raise RuntimeError, 'pdb cannot be found' + def name(self): + return 'pdb' + +class JDB(Debugger): + def __init__(self, attach=True, **opts): + status, jdb = commands.getstatusoutput('which jdb') + if status: + raise RuntimeError, 'jdb cannot be found' + pass_opts = {} + for name, value in opts.iteritems(): + if name[0] != '-': + name = '-'+name + name = name.replace('_','-') + pass_opts[name] = value + super(JDB,self).__init__(jdb, None, **opts) + self._lastport = 5680 + self._attach = attach + + def modifiesCommand(self): + return False + + def canAttach(self): + return self._attach + + def attach(self, process): + return self.command, ['-attach', str(self._lastport)] + self.arguments + + def wrap(self, command, arguments): + return command, arguments + self.arguments + + def envUpdate(self): + _open = False + s=socket.socket(socket.AF_INET, socket.SOCK_STREAM) + while not _open: + try: + 
s.bind((socket.gethostbyname(socket.gethostname()), self._lastport)) + _open = True + s.close() + except: + self._lastport += 1 + return {'JAVA_TOOL_OPTIONS':'-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address='+str(self._lastport)} + + def name(self): + return 'jdb' class Valgrind(Debugger): - def __init__(self, quiet=False, verbose=False, **opts): + def __init__(self, **opts): status, valgrind = commands.getstatusoutput('which valgrind') if status: raise RuntimeError, 'valgrind cannot be found' - super(Valgrind,self).__init__(valgrind) - self.arguments = [] - if quiet: - self.arguments.append('-q') - if verbose: - self.arguments.append('-v') + pass_opts = {} for name, value in opts.iteritems(): - optname = '--' + name.replace('_','-') - if value is True: - value = 'yes' - elif value is False: - value = 'no' - self.arguments.append(optname + '=' + str(value)) + if len(name) == 1: + name = '-'+name + pass_opts[name] = value + elif name[0] == '-' and name[1] != '-': + pass_opts[name] = value + elif name[:2] != '--': + name = '--'+name + name = name.replace('_','-') + pass_opts[name] = value + super(Valgrind,self).__init__(valgrind, '=', **pass_opts) def modifiesCommand(self): return True @@ -102,3 +177,5 @@ def isInteractive(self): def wrap(self, command, arguments): return self.command, self.arguments + [command] + arguments + def name(self): + return 'valgrind' diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/events.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/events.py index 9b5fbe376..89f1a8198 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sandbox/events.py +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/events.py @@ -35,28 +35,25 @@ class EventChannel(CosEventChannelAdmin__POA.EventChannel): class ProxyPushConsumer(CosEventChannelAdmin__POA.ProxyPushConsumer): - __slots__ = ('_channel', '_admin', '_supplier', '_connected') + __slots__ = ('_channel', '_admin', '_supplier') def __init__(self, 
channel, admin): self._channel = channel self._admin = admin self._supplier = None - self._connected = False def push(self, data): - if not self._connected: - raise CosEventComm.Disconnected self._channel.push(data) def connect_push_supplier(self, supplier): - if self._connected: + if self._supplier: raise CosEventChannelAdmin.AlreadyConnected self._channel.supplierConnected(supplier) - self._connected = True self._supplier = supplier def disconnect_push_consumer(self): - self._channel.supplierDisconnected(self._supplier) + if self._supplier: + self._channel.supplierDisconnected(self._supplier) self._admin.remove_consumer(self) def destroy(self): diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/helper.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/helper.py new file mode 100644 index 000000000..0ba7b025c --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/helper.py @@ -0,0 +1,218 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +from ossie.utils.log4py import logging +from ossie.utils.model import PortSupplier +from ossie.utils.model.connect import ConnectionManager +from ossie.utils.uuid import uuid4 + +from ossie.threadedcomponent import * + +def default_sandbox(): + # Use a deferred import to avoid a circular dependency with the 'sb' module + from ossie.utils.sb import domainless + return domainless._getSandbox() + +class SandboxMeta(type): + def __call__(self, *args, **kwargs): + # Pick off sandbox-specific arguments, which are not given to the + # helper class __init__ method + sandbox = kwargs.pop('sandbox', None) + if sandbox is None: + sandbox = default_sandbox() + auto_start = kwargs.pop('autoStart', True) + + # Create/initialize the helper + obj = super(SandboxMeta, self).__call__(*args, **kwargs) + + # Create a unique instance name, and register with the sandbox + name = sandbox._createInstanceName(obj.__class__.__name__) + obj._registerWithSandbox(sandbox, name) + + # Set a sensible default logger based on the module and instance name + obj.log = logging.getLogger(obj.__module__).getChild(name) + + # Perform any post-registration initialization + obj._initializeHelper() + + # Auto-start helpers + if auto_start and sandbox.started: + obj.start() + + return obj + +class SandboxHelper(PortSupplier): + __metaclass__ = SandboxMeta + + def __init__(self): + PortSupplier.__init__(self) + + self._refid = str(uuid4()) + self._port = None + self._started = False + + def _registerWithSandbox(self, sandbox, instanceName): + self._sandbox = sandbox + self._instanceName = instanceName + self._sandbox._registerComponent(self) + + def _addUsesPort(self, name, repoID, portClass, custom={}): + port_dict = { + 'Port Name': name, + 'Port Interface': repoID, + 'Port Class': portClass + } + port_dict.update(custom) + self._usesPortDict[name] = port_dict + + def _addProvidesPort(self, name, repoID, portClass, custom={}): + port_dict = { + 'Port Name': name, + 'Port Interface': repoID, + 'Port 
Class': portClass + } + port_dict.update(custom) + self._providesPortDict[name] = port_dict + + def _createPort(self, portDict, name=None): + clazz = portDict['Port Class'] + if not name: + name = portDict['Port Name'] + port = clazz(name) + + # Automatically start the port if the helper has already been started + if self.started: + self._startPort(port) + + # Allow subclasses to perform additional post-creation logic + self._portCreated(port, portDict) + + return port + + @property + def started(self): + return self._started + + def start(self): + if self._started: + return + self._startHelper() + self._started = True + + def stop(self): + if not self._started: + return + self._stopHelper() + self._started = False + + def _startPort(self, port): + if hasattr(port, 'startPort'): + port.startPort() + + def _stopPort(self, port): + if hasattr(port, 'stopPort'): + port.stopPort() + + def _startPorts(self): + if self._port: + self._startPort(self._port) + + def _stopPorts(self): + if self._port: + self._stopPort(self._port) + + def releaseObject(self): + # Break any connections involving this helper + manager = ConnectionManager.instance() + for identifier, uses, provides in manager.getConnections().itervalues(): + if uses.hasComponent(self) or provides.hasComponent(self): + usesRef = uses.getReference() + usesRef.disconnectPort(identifier) + manager.unregisterConnection(identifier, uses) + self._sandbox._unregisterComponent(self) + + def reset(self): + pass + + def getPort(self, portName): + if self._port: + if portName != self._port.name: + raise RuntimeError(self.__class__.__name__ + ' only supports 1 port type at a time') + else: + port_dict = self._usesPortDict.get(portName, None) + if port_dict is None: + port_dict = self._providesPortDict.get(portName, None) + if port_dict is None: + raise RuntimeError("Unknown port '%s'" % portName) + self._port = self._createPort(port_dict) + + return self._port._this() + + # Extension points for subclasses + def 
_initializeHelper(self): + # Equivalent to component constructor() method; override to perform any + # initialization that requires that the helper has registered with the + # sandbox (e.g., any setup that requires knowing the instance name) + pass + + def _portCreated(self, port, portDict): + # Extension point for subclasses to perform any post port-creation + # tasks (e.g., setting a mode based on the port type) + pass + + def _startHelper(self): + # Subclasses should override this method rather than start() to provide + # additional start behavior + self._startPorts() + + def _stopHelper(self): + # Subclasses should override this method rather than stop() to provide + # additional stop behavior + self._stopPorts() + + +class ThreadedSandboxHelper(SandboxHelper, ThreadedComponent): + def __init__(self): + SandboxHelper.__init__(self) + ThreadedComponent.__init__(self) + + def _startHelper(self): + super(ThreadedSandboxHelper,self)._startHelper() + self.startThread() + + def _stopHelper(self): + super(ThreadedSandboxHelper,self)._stopHelper() + self.stopThread() + + def process(self): + try: + return self._threadFunc() + except Exception: + import traceback + traceback.print_exc() + return FINISH + + def _threadFunc(self): + return FINISH + +class ThreadStatus(object): + NOOP = NOOP + NORMAL = NORMAL + FINISH = FINISH diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/ide.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/ide.py index eab61e3c7..3d673bf9f 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sandbox/ide.py +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/ide.py @@ -32,9 +32,10 @@ from ossie.cf import ExtendedCF from ossie.cf import CF -from ossie.utils.model import Resource, Device, CorbaObject +from ossie.utils.model import CorbaObject -from base import SdrRoot, Sandbox, SandboxComponent +from base import SdrRoot, Sandbox, SandboxLauncher +from model import SandboxComponent, SandboxDevice log = 
logging.getLogger(__name__) @@ -80,72 +81,36 @@ def getLocation(self): return 'REDHAWK IDE virtual SDR' -class IDEMixin(object): +class IDELauncher(SandboxLauncher): def __init__(self, execparams, initProps, configProps): self._execparams = execparams self._initProps = initProps self._configProps = configProps - def _launch(self): + def launch(self, comp): # Pack the execparams into an array of string-valued properties properties = [CF.DataType(k, to_any(str(v))) for k, v in self._execparams.iteritems()] # Pack the remaining props by having the component do the conversion - properties.extend(self._itemToDataType(k,v) for k,v in self._initProps.iteritems()) - properties.extend(self._itemToDataType(k,v) for k,v in self._configProps.iteritems()) + properties.extend(comp._itemToDataType(k,v) for k,v in self._initProps.iteritems()) + properties.extend(comp._itemToDataType(k,v) for k,v in self._configProps.iteritems()) # Tell the IDE to launch a specific implementation, if given - if self._impl is not None: - properties.append(CF.DataType('__implementationID', to_any(self._impl))) + if comp._impl is not None: + properties.append(CF.DataType('__implementationID', to_any(comp._impl))) - ref = self._sandbox._createResource(self._profile, self._instanceName, properties) + ref = comp._sandbox._createResource(comp._profile, comp._instanceName, properties) # The IDE sandbox API only allows us to specify the instance name, not # the identifier, so update by querying the component itself - self._refid = ref._get_identifier() + comp._refid = ref._get_identifier() return ref - def _getExecparams(self): - return {} - - def _terminate(self): + def setup(self, comp): pass - -class IDESandboxComponent(SandboxComponent, IDEMixin): - def __init__(self, sandbox, profile, spd, scd, prf, instanceName, refid, impl, execparams, - initProps, configProps): - SandboxComponent.__init__(self, sandbox, profile, spd, scd, prf, instanceName, refid, impl) - IDEMixin.__init__(self, execparams, 
initProps, configProps) - self._parseComponentXMLFiles() - self._buildAPI() - - def _getExecparams(self): - return dict((str(ep.id), ep.defValue) for ep in self._getPropertySet(kinds=('execparam',), includeNil=False)) - - -class IDEComponent(IDESandboxComponent, Resource): - def __init__(self, *args, **kwargs): - Resource.__init__(self) - IDESandboxComponent.__init__(self, *args, **kwargs) - - def __repr__(self): - return "" % (self._instanceName, id(self)) - - -class IDEDevice(IDESandboxComponent, Device): - def __init__(self, *args, **kwargs): - Device.__init__(self) - IDESandboxComponent.__init__(self, *args, **kwargs) - Device._buildAPI(self) - - def __repr__(self): - return "" % (self._instanceName, id(self)) - - def api(self): - IDESandboxComponent.api(self) - print - Device.api(self) + def terminate(self, comp): + pass class IDEService(CorbaObject): @@ -158,11 +123,6 @@ def __repr__(self): class IDESandbox(Sandbox): - __comptypes__ = { - 'resource': IDEComponent, - 'device': IDEDevice, - } - def __init__(self, ideRef): super(IDESandbox, self).__init__() self.__ide = ideRef @@ -189,13 +149,11 @@ def _checkInstanceId(self, refid, componentType='resource'): # "valid" return True - def _launch(self, profile, spd, scd, prf, instanceName, refid, impl, execparams, - initProps, initialize, configProps, debugger, window, timeout, stdout=None): - # Determine the class for the component type and create a new instance. 
- clazz = self.__comptypes__[scd.get_componenttype()] - comp = clazz(self, profile, spd, scd, prf, instanceName, refid, impl, execparams, initProps, configProps) - comp._kick() - return comp + def _createLauncher(self, comptype, execparams, initProps, initialize, configProps, debugger, + window, timeout, shared, stdout=None): + if comptype in ('resource', 'device', 'loadabledevice', 'executabledevice'): + return IDELauncher(execparams, initProps, configProps) + return None def _createResource(self, profile, name, qualifiers=[]): log.debug("Creating resource '%s' with profile '%s'", name, profile) @@ -230,12 +188,12 @@ def _scanChalkboard(self): try: resource = desc.resource if resource._is_a('IDL:CF/Device:1.0'): - clazz = IDEDevice + clazz = SandboxDevice refid = resource._get_identifier() instanceName = resource._get_label() impl = self.__ide._get_deviceManager().getComponentImplementationId(refid) else: - clazz = IDEComponent + clazz = SandboxComponent refid = resource._get_identifier() if ':DCE:' in refid: # Components launched from the Python console have the @@ -257,10 +215,9 @@ def _scanChalkboard(self): # ask the resource itself. 
profile = resource._get_softwareProfile() - # Create the component/device sandbox wrapper, disabling the - # automatic launch since it is already running + # Create the component/device sandbox wrapper spd, scd, prf = self.getSdrRoot().readProfile(profile) - comp = clazz(self, profile, spd, scd, prf, instanceName, refid, impl, {}, {}, {}) + comp = clazz(self, profile, spd, scd, prf, instanceName, refid, impl) comp.ref = resource self.__components[instanceName] = comp except Exception, e: @@ -382,3 +339,6 @@ def getService(self, name): def getServices(self): self._scanServices() return self.__services.values() + + def getType(self): + return 'IDE' diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/launcher.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/launcher.py index 851cab26e..482b0a20b 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sandbox/launcher.py +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/launcher.py @@ -22,28 +22,17 @@ import logging import signal import time -import commands import threading import tempfile import subprocess - -from omniORB import CORBA, URI +import platform +import zipfile from ossie.utils import log4py from ossie import parsers from ossie.utils.popen import Popen -from devmgr import DeviceManagerStub -from naming import NamingContextStub -from debugger import GDB, PDB, Valgrind -from terminal import XTerm - -__all__ = ('ResourceLauncher', 'DeviceLauncher', 'ServiceLauncher') - -# Prepare the ORB -orb = CORBA.ORB_init() -poa = orb.resolve_initial_references("RootPOA") -poa._get_the_POAManager().activate() +__all__ = ('LocalProcess', 'VirtualDevice') log = logging.getLogger(__name__) @@ -62,28 +51,33 @@ def __init__(self, command, arguments, environment=None, stdout=None): cwd=os.getcwd(), env=environment, stdout=stdout, stderr=subprocess.STDOUT, preexec_fn=os.setpgrp) - self.__tracker = threading.Thread(target=self.monitorChild) - self.__tracker.daemon = True - 
self.__tracker.start() + self.__tracker = None + self.__callback = None + self.__children = [] - def monitorChild(self): - pid = self.__process.pid + def setTerminationCallback(self, callback): + if not self.__tracker: + # Nothing is currently waiting for notification, start monitor. + name = 'process-%d-tracker' % self.pid() + self.__tracker = threading.Thread(name=name, target=self._monitorProcess) + self.__tracker.daemon = True + self.__tracker.start() + self.__callback = callback + + def _monitorProcess(self): try: - self.__process.communicate()[0] - if self.__terminateRequested or self.__process.returncode == 0: - return - for idx in range(len(self.__arguments)): - if self.__arguments[idx] == 'NAME_BINDING': - if len(self.__arguments)>=idx+1: - print 'Component '+self.__arguments[idx+1]+' (pid='+str(pid)+') has died' - else: - print 'Component with process id '+str(pid)+'has died' + status = self.__process.wait() except: - if self.__terminateRequested or self.__process.returncode == 0: - return - print 'Component with process id '+str(pid)+'has died' + # If wait fails, don't bother with notification. 
+ return + if self.__callback: + self.__callback(self.pid(), status) def terminate(self): + for child in self.__children: + child.terminate() + self.__children = [] + for sig, timeout in self.STOP_SIGNALS: try: log.debug('Killing process group %s with signal %s', self.__process.pid, sig) @@ -115,112 +109,90 @@ def pid(self): def isAlive(self): return self.__process and self.__process.poll() is None + def addChild(self, process): + self.__children.append(process) -class DebuggerProcess(object): - def __init__(self, debugger, child): - self.__debugger = debugger - self.__child = child - def terminate(self): - self.__debugger.terminate() - self.__child.terminate() +class VirtualDevice(object): + def __init__(self): + self._processor = platform.machine() + if self._processor == 'i686': + # Map from Linux standard machine name to REDHAWK + self._processor = 'x86' + self._osName = platform.system() - def requestTermination(self): - self.__child.requestTermination() + log.debug("VirtualDevice processor '%s' OS '%s'", self._processor, self._osName) - def command(self): - return self.__child.command() + def _matchProcessor(self, implementation): + if not implementation.get_processor(): + # Implementation specifies no processor dependency + return True - def pid(self): - return self.__child.pid() + for proc in implementation.get_processor(): + if proc.get_name() == self._processor: + return True + return False - def isAlive(self): - return self.__child.isAlive() - - -class LocalLauncher(object): - def __init__(self, profile, identifier, name, sandbox): - self._sandbox = sandbox - self._profile = profile - self._xmlpath = os.path.dirname(self._profile) - self._identifier = identifier - self._name = name - - def _selectImplementation(self, spd): - for implementation in spd.get_implementation(): - entry_point = self._getEntryPoint(implementation) - if os.path.exists(entry_point): - return implementation - raise RuntimeError, "Softpkg '%s' has no usable entry point" % 
spd.get_name() - - def _getImplementation(self, spd, identifier): - for implementation in spd.get_implementation(): - if implementation.get_id() == identifier: - return implementation - raise KeyError, "Softpkg '%s' has no implementation '%s'" % (spd.get_name(), identifier) - - def _getEntryPoint(self, implementation): - entry_point = implementation.get_code().get_entrypoint() - if not entry_point.startswith('/'): - entry_point = os.path.join(self._xmlpath, entry_point) - return entry_point - - def _getDomainPath(self): - return {"DOM_PATH" : "/dom/sandbox/"} - - def execute(self, spd, impl, execparams, debugger, window, timeout=None, stdout=None): - # Find a suitable implementation. - if impl: - implementation = self._getImplementation(spd, impl) - else: - implementation = self._selectImplementation(spd) - log.trace("Using implementation '%s'", implementation.get_id()) + def _matchOS(self, implementation): + if not implementation.get_os(): + # Implementation specifies no OS dependency + return True + + for operating_system in implementation.get_os(): + if operating_system.get_name() == self._osName: + return True + return False + + def _checkImplementation(self, sdrroot, profile, impl): + # Match device properties + log.trace("Checking processor and OS for implementation '%s'", impl.get_id()) + if not self._matchProcessor(impl) or not self._matchOS(impl): + return False + + # Check that localfile points to a real location + localfile = impl.get_code().get_localfile().get_name() + filename = sdrroot.relativePath(profile, localfile) + log.trace("Checking localfile '%s' ('%s')", localfile, filename) + if not os.path.exists(filename): + return False + + # If the implementation has an entry point, make sure it exists too + if impl.get_code().get_entrypoint(): + entry_point = impl.get_code().get_entrypoint() + filename = sdrroot.relativePath(profile, entry_point) + log.trace("Checking entrypoint '%s' ('%s')", entry_point, filename) + if not os.path.exists(filename): + 
return False + + return True - # Make sure the entry point can be run. - entry_point = self._getEntryPoint(implementation) - if not os.access(entry_point, os.X_OK|os.R_OK): - raise RuntimeError, "Entry point '%s' is not executable" % entry_point - log.trace("Using entry point '%s'", entry_point) + def matchImplementation(self, sdrroot, profile, spd): + for impl in spd.get_implementation(): + if self._checkImplementation(sdrroot, profile, impl): + return impl + raise RuntimeError, "Softpkg '%s' has no usable implementation" % spd.get_name() + + def execute(self, entryPoint, deps, execparams, debugger, window, stdout=None): + # Make sure the entry point exists and can be run. + if not os.path.exists(entryPoint): + raise RuntimeError, "Entry point '%s' does not exist" % entryPoint + elif not os.access(entryPoint, os.X_OK|os.R_OK): + raise RuntimeError, "Entry point '%s' is not executable" % entryPoint + log.trace("Using entry point '%s'", entryPoint) # Process softpkg dependencies and modify the child environment. environment = dict(os.environ.items()) - for dependency in implementation.get_dependency(): - for varname, pathname in self._resolveDependency(implementation, dependency): - self._extendEnvironment(environment, varname, pathname) + for dependency in deps: + self._processDependency(environment, dependency) for varname in ('LD_LIBRARY_PATH', 'PYTHONPATH', 'CLASSPATH'): - log.trace('%s=%s', varname, environment.get(varname, '')) - - # Get required execparams based on the component type - execparams.update(self._getRequiredExecparams()) - - ''' - execparams.update(self._getDomainPath()) - if execparams.has_key('LOGGING_CONFIG_URI'): - if execparams['LOGGING_CONFIG_URI'].find("sca:") == 0: - execparams['LOGGING_CONFIG_URI'] += "?fs=" + orb.object_to_string(self.__namingContext._this()),DeviceManagerStub - pass - ''' + log.trace('%s=%s', varname, environment.get(varname, '').split(':')) # Convert execparams into arguments. 
arguments = [] for name, value in execparams.iteritems(): arguments += [name, str(value)] - if isinstance(debugger,basestring): - try: - if debugger == 'pdb': - debugger = PDB() - elif debugger == 'gdb': - debugger = GDB() - elif debugger == 'valgrind': - debugger = Valgrind() - else: - raise RuntimeError, 'not supported' - except Exception, e: - log.warning('Cannot run debugger %s (%s)', debugger, e) - debugger = None - if window: window_mode = 'monitor' else: @@ -228,20 +200,14 @@ def execute(self, spd, impl, execparams, debugger, window, timeout=None, stdout= if debugger and debugger.modifiesCommand(): # Run the command in the debugger. - command, arguments = debugger.wrap(entry_point, arguments) - default_timeout = 60.0 + command, arguments = debugger.wrap(entryPoint, arguments) if debugger.isInteractive() and not debugger.canAttach(): - if not window: - window = XTerm() window_mode = 'direct' else: # Run the command directly. - command = entry_point - default_timeout = 10.0 - - # Provided timeout takes precedence - if timeout is None: - timeout = default_timeout + command = entryPoint + if debugger: + environment.update(debugger.envUpdate()) if window_mode == 'monitor': # Open up a window for component output. 
@@ -249,7 +215,7 @@ def execute(self, spd, impl, execparams, debugger, window, timeout=None, stdout= tempdir = tempfile.mkdtemp() fifoname = os.path.join(tempdir, 'fifo') os.mkfifo(fifoname) - window_command, window_args = window.command('/usr/bin/tail', ['-n', '+0', '-f', fifoname], self._name) + window_command, window_args = window.command('/usr/bin/tail', ['-n', '+0', '-f', fifoname]) window_proc = LocalProcess(window_command, window_args) stdout = open(fifoname, 'w') os.unlink(fifoname) @@ -260,159 +226,39 @@ def execute(self, spd, impl, execparams, debugger, window, timeout=None, stdout= command, arguments = window.command(command, arguments) process = LocalProcess(command, arguments, environment, stdout) - # Wait for the component to register with the virtual naming service or - # DeviceManager. - sleepIncrement = 0.1 - while self.getReference() is None: - if not process.isAlive(): - raise RuntimeError, "%s '%s' terminated before registering with virtual environment" % (self._getType(), self._name) - time.sleep(sleepIncrement) - timeout -= sleepIncrement - if timeout < 0: - process.terminate() - raise RuntimeError, "%s '%s' did not register with virtual environment" % (self._getType(), self._name) - - # Store the CORBA reference. - ref = self.getReference() - - # Attach a debugger to the process. 
- if debugger and debugger.canAttach(): - if not window: - window = XTerm() - debug_command, debug_args = debugger.attach(process) - debug_command, debug_args = window.command(debug_command, debug_args) - debug_process = LocalProcess(debug_command, debug_args) - process = DebuggerProcess(debug_process, process) - - return process, ref - - # this function checks that the base dependencies match an impl exactly - def _equalDeps(self, base, impl): - if len(base[0]) != len(impl[0]): - return False - if len(base[1]) != len(impl[1]): - return False - for val in base[0]: - if not val in impl[0]: - return False - for val in base[1]: - if not val in impl[1]: - return False - return True - - # this function checks if the base has a dependency not supported by impl for non-zero impls - def _subsetDeps(self, base, impl): - foundMatch = True - if len(impl[0]) != 0: - foundMatch = False - for val in base[0]: - if val in impl[0]: - foundMatch = True - if not foundMatch: - return False - if len(impl[1]) != 0: - foundMatch = False - for val in base[1]: - if val in impl[1]: - foundMatch = True - if not foundMatch: - return False - return True + return process - def _assembleOsProc(self, depimpl): - impl_os = [] - impl_proc = [] - for operating_system in depimpl.get_os(): - impl_os.append(operating_system.get_name()) - for proc in depimpl.get_processor(): - impl_proc.append(proc.get_name()) - return impl_os, impl_proc - - - def _findExactMatch(self, dep_spd, dep_base): - impl = None - for depimpl in dep_spd.get_implementation(): - impl_os, impl_proc = self._assembleOsProc(depimpl) - if self._equalDeps(dep_base,(impl_os,impl_proc)): - impl = depimpl - break - return impl - - def _findGenericMatch(self, dep_spd, dep_base): - impl = None - for depimpl in dep_spd.get_implementation(): - impl_os, impl_proc = self._assembleOsProc(depimpl) - if self._subsetDeps(dep_base,(impl_os,impl_proc)): - impl = depimpl - break - return impl - - def _resolveDependency(self, implementation, dependency): 
- softpkg = dependency.get_softpkgref() - if not softpkg: - return [] - filename = softpkg.get_localfile().get_name() - log.trace("Resolving softpkg dependency '%s'", filename) - local_filename = self._sandbox.getSdrRoot()._sdrPath('dom' + filename) - dep_spd = parsers.spd.parse(local_filename) - dep_impl = softpkg.get_implref() - if dep_impl: - impl = self._getImplementation(dep_spd, dep_impl.get_refid()) - else: # no implementation requested. Search for a matching implementation - try: - dep_base_os = [] - dep_base_proc = [] - for operating_system in implementation.get_os(): - dep_base_os.append(operating_system.get_name()) - for proc in implementation.get_processor(): - dep_base_proc.append(proc.get_name()) - impl = self._findExactMatch(dep_spd, (dep_base_os, dep_base_proc)) - if impl == None: - impl = self._findGenericMatch(dep_spd, (dep_base_os, dep_base_proc)) - except: - raise RuntimeError, "Softpkg '%s' has no implementation" % dep_spd.get_name() - envvars = [] - if impl != None: - log.trace("Using implementation '%s'", impl.get_id()) - dep_localfile = impl.get_code().get_localfile().name - - # Resolve nested dependencies. 
- for dep in impl.dependency: - envvars.extend(self._resolveDependency(implementation, dep)) - - localfile = os.path.join(os.path.dirname(local_filename), dep_localfile) - envvars.insert(0, self._getDependencyConfiguration(localfile)) - if not self._isSharedLibrary(localfile) and not self._isPythonLibrary(localfile) and not self._isJarfile(localfile): - envvars.insert(0, ('OCTAVE_PATH', localfile)) - return envvars - - def _getDependencyConfiguration(self, localfile): - if self._isSharedLibrary(localfile): - return ('LD_LIBRARY_PATH', os.path.dirname(localfile)) - elif self._isPythonLibrary(localfile): - return ('PYTHONPATH', os.path.dirname(localfile)) - elif self._isJarfile(localfile): - return ('CLASSPATH', localfile) + def _processDependency(self, environment, filename): + if self._isSharedLibrary(filename): + self._extendEnvironment(environment, "LD_LIBRARY_PATH", os.path.dirname(filename)) + elif self._isPythonLibrary(filename): + self._extendEnvironment(environment, "PYTHONPATH", os.path.dirname(filename)) + elif self._isJarfile(filename): + self._extendEnvironment(environment, "CLASSPATH", filename) else: - # Assume it's a set of shared libraries. 
- return ('LD_LIBRARY_PATH', localfile) + self._extendEnvironment(environment, "LD_LIBRARY_PATH", filename) + self._extendEnvironment(environment, "OCTAVE_PATH", filename) def _isSharedLibrary(self, filename): - status, output = commands.getstatusoutput('nm ' + filename) - return status == 0 + try: + with open(filename, 'rb') as f: + return f.read(4) == '\x7fELF' + except: + return False def _isJarfile(self, filename): - return filename.endswith('.jar') + return filename.endswith('.jar') and zipfile.is_zipfile(filename) def _isPythonLibrary(self, filename): - if os.path.splitext(filename)[1] in ('.py', '.pyc', '.pyo'): + PYTHON_EXTS = ('.py', '.pyc', '.pyo') + if os.path.splitext(filename)[1] in PYTHON_EXTS: # File is a Python module return True elif os.path.isdir(filename): # Check for Python package - initpath = os.path.join(filename, '__init__.py') - for initfile in (initpath, initpath+'c', initpath+'o'): - if os.path.exists(initfile): + initpath = os.path.join(filename, '__init__') + for ext in PYTHON_EXTS: + if os.path.exists(initpath + ext): return True return False @@ -426,60 +272,3 @@ def _extendEnvironment(self, env, keyname, value): return oldvalue.insert(0,value) env[keyname] = ':'.join(oldvalue) - -class ResourceLauncher(LocalLauncher): - def __init__(self, profile, identifier, name, sdrroot): - super(ResourceLauncher,self).__init__(profile, identifier, name, sdrroot) - self.__namingContext = NamingContextStub() - log.trace('Activating virtual NamingContext') - self.__namingContextId = poa.activate_object(self.__namingContext) - - def __del__(self): - log.trace('Deactivating virtual NamingContext') - poa.deactivate_object(self.__namingContextId) - - def getReference(self): - return self.__namingContext.getObject(self._name) - - def _getRequiredExecparams(self): - return {'COMPONENT_IDENTIFIER': self._identifier, - 'NAMING_CONTEXT_IOR': orb.object_to_string(self.__namingContext._this()), - 'PROFILE_NAME': self._profile, - 'NAME_BINDING': self._name} - - 
def _getType(self): - return 'resource' - -class ServiceLauncher(LocalLauncher): - def getReference(self): - return DeviceManagerStub.instance().getService(self._name) - - def _getRequiredExecparams(self): - devmgr_stub = DeviceManagerStub.instance() - devmgr_ior = orb.object_to_string(devmgr_stub._this()) - - return {'DEVICE_MGR_IOR': devmgr_ior, - 'SERVICE_NAME': self._name} - - def _getType(self): - return 'service' - -class DeviceLauncher(LocalLauncher): - def getReference(self): - return DeviceManagerStub.instance().getDevice(self._identifier) - - def _getRequiredExecparams(self): - devmgr_stub = DeviceManagerStub.instance() - devmgr_ior = orb.object_to_string(devmgr_stub._this()) - # Create (or reuse) IDM channel. - idm_channel = self._sandbox.createEventChannel('IDM_Channel') - idm_ior = orb.object_to_string(idm_channel.ref) - - return {'DEVICE_ID': self._identifier, - 'DEVICE_LABEL': self._name, - 'DEVICE_MGR_IOR': devmgr_ior, - 'IDM_CHANNEL_IOR': idm_ior, - 'PROFILE_NAME': self._profile} - - def _getType(self): - return 'device' diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/local.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/local.py index bd59d16dd..fc878c757 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sandbox/local.py +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/local.py @@ -23,15 +23,30 @@ import fnmatch import time import copy +import pydoc import warnings +from omniORB import CORBA +from omniORB.any import to_any + from ossie import parsers -from ossie.utils.model import Service, Resource, Device +from ossie.cf import CF from ossie.utils.model.connect import ConnectionManager +from ossie.utils.uuid import uuid4 -from base import SdrRoot, Sandbox, SandboxComponent +from base import SdrRoot, Sandbox, SandboxLauncher, SandboxComponent +from devmgr import DeviceManagerStub +from naming import ApplicationRegistrarStub import launcher -import pydoc +from debugger import GDB, JDB, PDB, Valgrind, 
Debugger +import terminal + +warnings.filterwarnings('once',category=DeprecationWarning) + +# Prepare the ORB +orb = CORBA.ORB_init() +poa = orb.resolve_initial_references("RootPOA") +poa._get_the_POAManager().activate() log = logging.getLogger(__name__) @@ -42,10 +57,21 @@ def __init__(self, sdrroot): def _sdrPath(self, filename): # Give precedence to filenames that are valid as-is if os.path.isfile(filename): - return filename + # Convert to an absolute path, to avoid any problems with relative + # paths when passed to other contexts + return os.path.abspath(filename) # Assume the filename points to somewhere in SDRROOT return os.path.join(self.__sdrroot, filename) + def domPath(self, filename): + return os.path.join(self.__sdrroot, 'dom' + filename) + + def relativePath(self, base, path): + if path.startswith('/'): + return self.domPath(path) + else: + return os.path.join(os.path.dirname(base), path) + def _fileExists(self, filename): return os.path.isfile(filename) @@ -94,155 +120,349 @@ def findProfile(self, descriptor, objType=None): return super(LocalSdrRoot,self).findProfile(descriptor, objType=objType) -class LocalMixin(object): - def __init__(self, execparams, debugger, window, timeout, stdout=None): - self._process = None +class LocalLauncher(SandboxLauncher): + def __init__(self, execparams, initProps, initialize, configProps, debugger, window, timeout, shared, stdout=None): self._execparams = execparams self._debugger = debugger self._window = window + self._initProps = initProps + self._initialize = initialize + self._configProps = configProps self._timeout = timeout + self._shared = shared self._stdout = stdout - def _launch(self): - launchFactory = self.__launcher__(self._profile, self._refid, self._instanceName, self._sandbox) - execparams = self._getExecparams() + def _getImplementation(self, spd, identifier): + for implementation in spd.get_implementation(): + if implementation.get_id() == identifier: + return implementation + raise KeyError, 
"Softpkg '%s' has no implementation '%s'" % (spd.get_name(), identifier) + + def _resolveDependencies(self, sdrRoot, device, implementation): + dep_files = [] + for dependency in implementation.get_dependency(): + softpkg = dependency.get_softpkgref() + if not softpkg: + continue + filename = softpkg.get_localfile().get_name() + log.trace("Resolving softpkg dependency '%s'", filename) + local_filename = sdrRoot.domPath(filename) + dep_spd = parsers.spd.parse(local_filename) + dep_impl = softpkg.get_implref() + if dep_impl: + impl = self._getImplementation(dep_spd, dep_impl.get_refid()) + else: + # No implementation requested, find one that matches the device + impl = device.matchImplementation(sdrRoot, local_filename, dep_spd) + + log.trace("Using implementation '%s'", impl.get_id()) + dep_localfile = impl.get_code().get_localfile().get_name() + dep_files.append(sdrRoot.relativePath(local_filename, dep_localfile)) + + # Resolve nested dependencies. + dep_files.extend(self._resolveDependencies(sdrRoot, device, impl)) + + return dep_files + + def _cleanHeap(self, pid): + filename = '/dev/shm/heap-'+str(pid) + if (os.path.isfile(filename)): + os.remove(filename) + + def launch(self, comp): + # Build up the full set of command line arguments + execparams = comp._getExecparams() execparams.update(self._execparams) - proc, ref = launchFactory.execute(self._spd, self._impl, execparams, self._debugger, self._window, self._timeout, self._stdout) - self._process = proc - self._pid = self._process.pid() + execparams.update(self._getRequiredExecparams(comp)) + + # Set up the debugger if requested + debugger = self._debugger + try: + if isinstance(debugger, basestring): + if debugger == 'pdb': + debugger = PDB() + elif debugger == 'jdb': + debugger = JDB() + elif debugger == 'gdb': + debugger = GDB() + elif debugger == 'valgrind': + debugger = Valgrind() + elif isinstance(debugger, Debugger): + # check for PDB, JDB, Valgrind, or GDB + pass + elif debugger is None: + pass + 
else: + raise RuntimeError, 'not supported' + except Exception, e: + log.warning('Cannot run debugger %s (%s)', debugger, e) + debugger = None + + # If using an interactive debugger that directly runs the command, put + # it in a window so it doesn't compete for the terminal. + window = self._window + if debugger and debugger.modifiesCommand(): + if debugger.isInteractive() and not debugger.canAttach(): + if not window: + window = 'xterm' + + # Allow symbolic names for windows + if isinstance(window, basestring): + try: + if window == 'xterm': + window = terminal.XTerm(comp._instanceName) + elif window == 'gnome-terminal': + window = terminal.GnomeTerm(comp._instanceName) + else: + raise RuntimeError, 'not supported' + except Exception, e: + log.warning('Cannot run terminal %s (%s)', window, e) + debugger = None + + # Find a suitable implementation + device = launcher.VirtualDevice() + sdrroot = comp._sandbox.getSdrRoot() + if comp._impl: + impl = self._getImplementation(comp._spd, comp._impl) + else: + impl = device.matchImplementation(sdrroot, comp._profile, comp._spd) + log.trace("Using implementation '%s'", impl.get_id()) + + # Resolve all dependency localfiles + deps = self._resolveDependencies(sdrroot, device, impl) + + # Execute the entry point, either on the virtual device or the Sandbox + # component host + entry_point = sdrroot.relativePath(comp._profile, impl.get_code().get_entrypoint()) + if impl.get_code().get_type() == 'SharedLibrary': + if self._shared: + container = comp._sandbox._getComponentHost(_debugger = debugger) + else: + container = comp._sandbox._launchComponentHost(comp._instanceName, _debugger = debugger) + container.executeLinked(entry_point, [], execparams, deps) + process = container._process + else: + process = device.execute(entry_point, deps, execparams, debugger, window, self._stdout) + + # Set up a callback to notify when the component exits abnormally. 
+ name = comp._instanceName + def terminate_callback(pid, status): + self._cleanHeap(pid) + if status > 0: + print 'Component %s (pid=%d) exited with status %d' % (name, pid, status) + elif status < 0: + print 'Component %s (pid=%d) terminated with signal %d' % (name, pid, -status) + process.setTerminationCallback(terminate_callback) + + # Wait for the component to register with the virtual naming service or + # DeviceManager. + if self._timeout is None: + # Default timeout depends on whether the debugger might increase + # the startup time + if debugger and debugger.modifiesCommand(): + timeout = 60.0 + else: + timeout = 10.0 + else: + timeout = self._timeout + sleepIncrement = 0.1 + while self.getReference(comp) is None: + if not process.isAlive(): + raise RuntimeError, "%s '%s' terminated before registering with virtual environment" % (self._getType(), comp._instanceName) + time.sleep(sleepIncrement) + timeout -= sleepIncrement + if timeout < 0: + process.terminate() + raise RuntimeError, "%s '%s' did not register with virtual environment" % (self._getType(), comp._instanceName) + + # Attach a debugger to the process. + if debugger and debugger.canAttach(): + if not window: + window = terminal.XTerm('%s (%s)' % (debugger.name(), comp._instanceName)) + debug_command, debug_args = debugger.attach(process) + debug_command, debug_args = window.command(debug_command, debug_args) + debug_process = launcher.LocalProcess(debug_command, debug_args) + process.addChild(debug_process) + + # Store the process on the component proxy. + if impl.get_code().get_type() == 'SharedLibrary' and self._shared: + comp._process = None + comp._pid = None + else: + comp._process = process + comp._pid = process.pid() + + # Return the now-resolved CORBA reference. 
+ ref = self.getReference(comp) + try: + # Occasionally, when a lot of components are launched from the + # sandbox, omniORB may have a cached connection where the other end + # has terminated (this is particularly a problem with Java, because + # the Sun ORB never closes connections on shutdown). If the new + # component just happens to have the same TCP/IP address and port, + # the first time we try to reach the component, it will get a + # CORBA.COMM_FAILURE exception even though the reference is valid. + # In this case, a call to _non_existent() should cause omniORB to + # clean up the stale socket, and subsequent calls behave normally. + ref._non_existent() + except: + pass return ref - - def _requestTermination(self): - self._process.requestTermination() - def _getExecparams(self): - return {} + def setup(self, comp): + # Initialize the component unless asked not to. + if self._initialize: + # Set initial property values for 'property' kind properties + initvals = comp._getInitializeProperties() + initvals.update(self._initProps) + try: + comp.initializeProperties(initvals) + except: + log.exception('Failure in component property initialization') + + # Actually initialize the component + comp.initialize() + + # Configure component with default values unless requested not to (e.g., + # when launched from a SAD file). 
+ if self._configProps is not None: + # Set initial configuration properties (pre-2.0 components) + initvals = comp._getInitialConfigureProperties() + initvals.update(self._configProps) + try: + comp.configure(initvals) + except: + log.exception('Failure in component configuration') + + def terminate(self, comp): + if comp._process: + # Give the process a little time (50ms) to exit after releaseObject() + # returns before sending termination signals + timeout = 50e-3 + end = time.time() + timeout + while comp._process.isAlive() and time.time() < end: + time.sleep(1e-3) - def _terminate(self): - if self._process: # Kill child process (may be multiple processes in the case of a debugger) - self._process.terminate() - self._process = None + comp._process.terminate() + comp._process = None - def _processAlive(self): - if self._process: - return self._process.isAlive() - else: - return False -class LocalSandboxComponent(SandboxComponent, LocalMixin): - def __init__(self, sdrroot, profile, spd, scd, prf, instanceName, refid, impl, - execparams, debugger, window, timeout, stdout=None): - SandboxComponent.__init__(self, sdrroot, profile, spd, scd, prf, instanceName, refid, impl) - LocalMixin.__init__(self, execparams, debugger, window, timeout, stdout) +class LocalComponentLauncher(LocalLauncher): + def launch(self, *args, **kwargs): + self.__registrar = ApplicationRegistrarStub() + log.trace('Activating virtual ApplicationRegistrar') + namingContextId = poa.activate_object(self.__registrar) + try: + return LocalLauncher.launch(self, *args, **kwargs) + finally: + log.trace('Deactivating virtual ApplicationRegistrar') + poa.deactivate_object(namingContextId) + del self.__registrar - self._kick() + def getReference(self, component): + return self.__registrar.getObject(component._instanceName) - self._parseComponentXMLFiles() - self._buildAPI() + def _getRequiredExecparams(self, component): + return {'COMPONENT_IDENTIFIER': component._refid, + 'NAMING_CONTEXT_IOR': 
orb.object_to_string(self.__registrar._this()), + 'PROFILE_NAME': component._profile, + 'NAME_BINDING': component._instanceName} - def _getExecparams(self): - execparams = dict((str(ep.id), ep.defValue) for ep in self._getPropertySet(kinds=('execparam',), includeNil=False)) - commandline_property = dict((str(ep.id), ep.defValue) for ep in self._getPropertySet(kinds=('property',), includeNil=False,commandline=True)) - execparams.update(commandline_property) - return execparams + def _getType(self): + return 'resource' - def releaseObject(self): - try: - self._requestTermination() - super(LocalSandboxComponent,self).releaseObject() - except: - # Tolerate exceptions (e.g., the object has already been released) - # and continue on to ensure that the process still gets terminated. - pass - # Give the process a little time (50ms) to exit after releaseObject() - # returns before sending termination signals - timeout = 50e-3 - end = time.time() + timeout - while self._processAlive() and time.time() < end: - time.sleep(1e-3) +class LocalDeviceLauncher(LocalLauncher): + def getReference(self, device): + return DeviceManagerStub.instance().getDevice(device._refid) - self._terminate() + def _getRequiredExecparams(self, device): + devmgr_stub = DeviceManagerStub.instance() + devmgr_ior = orb.object_to_string(devmgr_stub._this()) + # Create (or reuse) IDM channel. 
+ idm_channel = device._sandbox.createEventChannel('IDM_Channel') + idm_ior = orb.object_to_string(idm_channel.ref) + return {'DEVICE_ID': device._refid, + 'DEVICE_LABEL': device._instanceName, + 'DEVICE_MGR_IOR': devmgr_ior, + 'IDM_CHANNEL_IOR': idm_ior, + 'PROFILE_NAME': device._profile} -class LocalComponent(LocalSandboxComponent, Resource): - __launcher__ = launcher.ResourceLauncher + def _getType(self): + return 'device' - def __init__(self, *args, **kwargs): - Resource.__init__(self) - LocalSandboxComponent.__init__(self, *args, **kwargs) - def __repr__(self): - return "" % (self._instanceName, id(self)) +class LocalServiceLauncher(LocalLauncher): + def setup(self, service): + # Services don't get initialized or configured + return -class LocalDevice(LocalSandboxComponent, Device): - __launcher__ = launcher.DeviceLauncher + def getReference(self, service): + return DeviceManagerStub.instance().getService(service._instanceName) - def __init__(self, *args, **kwargs): - Device.__init__(self) - LocalSandboxComponent.__init__(self, *args, **kwargs) + def _getRequiredExecparams(self, service): + devmgr_stub = DeviceManagerStub.instance() + devmgr_ior = orb.object_to_string(devmgr_stub._this()) - Device._buildAPI(self) + return {'DEVICE_MGR_IOR': devmgr_ior, + 'SERVICE_NAME': service._instanceName} - def __repr__(self): - return "" % (self._instanceName, id(self)) + def _getType(self): + return 'service' - def api(self): - LocalSandboxComponent.api(self) - print - Device.api(self) +class ComponentHost(SandboxComponent): + def __init__(self, *args, **kwargs): + SandboxComponent.__init__(self, *args, **kwargs) -class LocalService(Service, LocalMixin): - __launcher__ = launcher.ServiceLauncher - - def __init__(self, sdrroot, profile, spd, scd, prf, instanceName, refid, impl, - execparams, debugger, window, timeout, stdout=None): - self._sandbox = sdrroot - Service.__init__(self, None, profile, spd, scd, prf, instanceName, refid, impl) - LocalMixin.__init__(self, 
execparams, debugger, window, timeout, stdout) - self.ref = self._launch() - self.populateMemberFunctions() - - self._sandbox._addService(self) - - def _getExecparams(self): - if not self._prf: - return {} - execparams = {} - for prop in self._prf.get_simple(): - # Skip non-execparam properties - kinds = set(k.get_kindtype() for k in prop.get_kind()) - if ('execparam' not in kinds) and ('property' not in kinds): - continue - if 'property' in kinds: - if prop.get_commandline() == 'false': - continue - # Only include properties with values - value = prop.get_value() - if value is not None: - execparams[prop.get_id()] = value - return execparams + def _register(self): + pass - def __repr__(self): - return "" % (self._instanceName, id(self)) + def _unregister(self): + pass + + def executeLinked(self, entryPoint, options, parameters, deps): + log.debug('Executing shared library %s %s', entryPoint, ' '.join('%s=%s' % (k,v) for k,v in parameters.iteritems())) + params = [CF.DataType(k, to_any(str(v))) for k, v in parameters.iteritems()] + self.ref.executeLinked(entryPoint, options, params, deps) class LocalSandbox(Sandbox): - __comptypes__ = { - 'resource': LocalComponent, - 'device': LocalDevice, - 'loadabledevice': LocalDevice, - 'executabledevice': LocalDevice, - 'service': LocalService - } - - def __init__(self, sdrroot): + def __init__(self, sdrroot=None): super(LocalSandbox, self).__init__() self.__components = {} self.__services = {} + if not sdrroot: + sdrroot = os.environ['SDRROOT'] self._sdrroot = LocalSdrRoot(sdrroot) + self.__container = None + + def _getComponentHost(self, _debugger=None): + if self.__container is None: + self.__container = self._launchComponentHost(_debugger=_debugger) + return self.__container + + def _launchComponentHost(self, instanceName=None, _debugger=None): + # Directly create the sandbox object instead of going through launch() + profile = self._sdrroot.domPath('/mgr/rh/ComponentHost/ComponentHost.spd.xml') + spd, scd, prf = 
self._sdrroot.readProfile(profile) + if instanceName is None: + instanceName = self._createInstanceName('ComponentHost', 'resource') + refid = str(uuid4()) + comp = ComponentHost(self, profile, spd, scd, prf, instanceName, refid, None) + + # Likewise, since the specific component type is known, create the + # launcher directly. The deployment root is overridden to point to the + # root of the local filesystem; all component paths provided to the + # component host will be absolute. + execparams = {'RH::DEPLOYMENT_ROOT':'/'} + if not isinstance(_debugger, Valgrind): + _debugger = None + comp._launcher = LocalComponentLauncher(execparams, {}, True, {}, _debugger, None, None, False) + comp._kick() + return comp def _getComponentContainer(self, componentType): if componentType == 'service': @@ -273,57 +493,17 @@ def _checkInstanceId(self, refid, componentType): return False return True - def _launch(self, profile, spd, scd, prf, instanceName, refid, impl, execparams, - initProps, initialize, configProps, debugger, window, timeout, stdout=None): - # Determine the class for the component type and create a new instance. - comptype = scd.get_componenttype() - clazz = self.__comptypes__[comptype] - comp = clazz(self, profile, spd, scd, prf, instanceName, refid, impl, execparams, debugger, window, timeout, stdout) - - try: - # Occasionally, when a lot of components are launched from the - # sandbox, omniORB may have a cached connection where the other end - # has terminated (this is particularly a problem with Java, because - # the Sun ORB never closes connections on shutdown). If the new - # component just happens to have the same TCP/IP address and port, - # the first time we try to reach the component, it will get a - # CORBA.COMM_FAILURE exception even though the reference is valid. - # In this case, a call to _non_existent() should cause omniORB to - # clean up the stale socket, and subsequent calls behave normally. 
- comp.ref._non_existent() - except: - pass - - # Services don't get initialized or configured - if comptype == 'service': - return comp - - # Initialize the component unless asked not to. - if initialize: - # Set initial property values for 'property' kind properties - initvals = copy.deepcopy(comp._propRef) - initvals.update(initProps) - try: - comp.initializeProperties(initvals) - except: - log.exception('Failure in component property initialization') - - # Actually initialize the component - comp.initialize() - - # Configure component with default values unless requested not to (e.g., - # when launched from a SAD file). - if configProps is not None: - # Make a copy of the default properties, and update with any passed-in - # properties that were not already passed to initializeProperties() - initvals = copy.deepcopy(comp._configRef) - initvals.update(configProps) - try: - comp.configure(initvals) - except: - log.exception('Failure in component configuration') - - return comp + def _createLauncher(self, comptype, execparams, initProps, initialize, configProps, debugger, + window, timeout, shared, stdout): + if comptype == 'resource': + clazz = LocalComponentLauncher + elif comptype in ('device', 'loadabledevice', 'executabledevice'): + clazz = LocalDeviceLauncher + elif comptype == 'service': + clazz = LocalServiceLauncher + else: + return None + return clazz(execparams, initProps, initialize, configProps, debugger, window, timeout, shared, stdout) def getComponents(self): return self.__components.values() @@ -374,20 +554,34 @@ def setSdrRoot(self, path): def shutdown(self): ConnectionManager.instance().cleanup() self.stop() + + # Clean up all components for name, component in self.__components.items(): log.debug("Releasing component '%s'", name) try: component.releaseObject() except: log.debug("Component '%s' raised an exception while exiting", name) + self.__components = {} + + # Terminate all services for name, service in self.__services.items(): 
log.debug("Terminating service '%s'", name) try: service._terminate() except: log.debug("Service '%s' raised an exception while terminating", name) - self.__components = {} self.__services = {} + + # Clean up the component host + if self.__container: + log.debug('Releasing component host') + try: + self.__container.releaseObject() + except: + log.debug('Component host raised an exception while terminating') + self.__container = None + super(LocalSandbox,self).shutdown() def browse(self, searchPath=None, objType=None,withDescription=False): @@ -480,3 +674,6 @@ def browse(self, searchPath=None, objType=None,withDescription=False): output_text += '%-30s%-30s%-30s%-30s\n' % (v1,v2,v3,v4) output_text += "\n" pydoc.pager(output_text) + + def getType(self): + return "local" diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/model.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/model.py new file mode 100644 index 000000000..a389fdc24 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/model.py @@ -0,0 +1,247 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import warnings +import cStringIO, pydoc + +from ossie.utils.model import CorbaObject +from ossie.utils.model import PortSupplier, PropertySet, ComponentBase +from ossie.utils.model import Resource, Device, Service + +from ossie.utils.sandbox.events import EventChannel + +warnings.filterwarnings('once',category=DeprecationWarning) + +class SandboxMixin(object): + def __init__(self, sandbox): + self._sandbox = sandbox + + def _kick(self): + self.ref = self._launcher.launch(self) + self._launcher.setup(self) + self._register() + + def _terminate(self): + if self._launcher: + self._launcher.terminate(self) + + def _register(self): + raise NotImplemented('_register') + + def _getExecparams(self): + raise NotImplemented('_getExecparams') + + +class SandboxResource(ComponentBase, SandboxMixin): + def __init__(self, sandbox, profile, spd, scd, prf, instanceName, refid, impl): + ComponentBase.__init__(self, spd, scd, prf, instanceName, refid, impl) + SandboxMixin.__init__(self, sandbox) + self._profile = profile + self._componentName = spd.get_name() + + self.__ports = None + self._msgSupplierHelper = None + self._parseComponentXMLFiles() + self._buildAPI() + + def _getInitializeProperties(self): + properties = {} + for prop in self._getPropertySet(kinds=('property',), includeNil=False, commandline=False): + if prop.defValue is None: + continue + properties[prop.id] = prop.defValue + return properties + + def _getInitialConfigureProperties(self): + properties = {} + for prop in self._getPropertySet(kinds=('configure',), modes=('readwrite', 'writeonly'), includeNil=False): + if prop.defValue is None: + continue + properties[prop.id] = prop.defValue + return properties + + def _getExecparams(self): + execparams = dict((str(ep.id), ep.defValue) for ep in self._getPropertySet(kinds=('execparam',), includeNil=False)) + for prop in self._getPropertySet(kinds=('property',), includeNil=False, commandline=True): + execparams[prop.id] = prop.defValue + return execparams + + 
def _readProfile(self): + sdrRoot = self._sandbox.getSdrRoot() + self._spd, self._scd, self._prf = sdrRoot.readProfile(self._profile) + + def _register(self): + self._sandbox._registerComponent(self) + + def _unregister(self): + self._sandbox._unregisterComponent(self) + + @property + def _ports(self): + #DEPRECATED: replaced with ports + warnings.warn("'_ports' is deprecated", DeprecationWarning) + return self.ports + + @property + def ports(self): + if self.__ports == None: + self.__ports = self._populatePorts() + return self.__ports + + def reset(self): + self.releaseObject() + self._readProfile() + self._kick() + self.initialize() + self._parseComponentXMLFiles() + self._buildAPI() + # Clear cached ports list + self.__ports = None + + def releaseObject(self): + # Break any connections involving this component. + self._sandbox._breakConnections(self) + + # Unregister from the sandbox + self._unregister() + + try: + # Call superclass release, which calls the CORBA method. + super(SandboxResource,self).releaseObject() + except: + # Tolerate exceptions (e.g., the object has already been released) + # and continue on to ensure that the process still gets terminated. + pass + + # Allow the launcher to peform any follow-up cleanup. 
+ SandboxMixin._terminate(self) + + def api(self, destfile=None): + ''' + Inspect interfaces and properties for the component + ''' + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + + print >>destfile, "Component [" + str(self._componentName) + "]:" + PortSupplier.api(self, destfile=destfile) + PropertySet.api(self, destfile=destfile) + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() + + def sendMessage(self, msg, msgId=None, msgPort=None, restrict=True ): + """ + send a message out a component's message event port + + msg : dictionary of information to send or an any object + msgId : select a specific message structure property from the component, if None will + choose first available message property structure for the component + msgPort : select a specified message event port to use, if None will try to autoselect + restrict : if True, will restrict msgId to only those message ids defined by the component + if False, will allow for ad-hoc message to be sent + """ + if self._msgSupplierHelper == None: + import ossie.utils + self._msgSupplierHelper = ossie.utils.sb.io_helpers.MsgSupplierHelper(self) + if self.ref and self.ref._get_started() == True and self._msgSupplierHelper: + return self._msgSupplierHelper.sendMessage( msg, msgId, msgPort, restrict ) + return False + +class SandboxComponent(SandboxResource, Resource): + def __init__(self, *args, **kwargs): + Resource.__init__(self) + SandboxResource.__init__(self, *args, **kwargs) + + def __repr__(self): + return "<%s component '%s' at 0x%x>" % (self._sandbox.getType(), self._instanceName, id(self)) + + +class SandboxDevice(SandboxResource, Device): + def __init__(self, *args, **kwargs): + Device.__init__(self) + SandboxResource.__init__(self, *args, **kwargs) + + Device._buildAPI(self) + + def __repr__(self): + return "<%s device '%s' at 0x%x>" % (self._sandbox.getType(), self._instanceName, id(self)) + + def api(self, 
destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + + SandboxResource.api(self, destfile=destfile) + print >>destfile, '\n' + Device.api(self, destfile=destfile) + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() + + +class SandboxService(Service, SandboxMixin): + def __init__(self, sandbox, profile, spd, scd, prf, instanceName, refid, impl): + Service.__init__(self, None, profile, spd, scd, prf, instanceName, refid, impl) + SandboxMixin.__init__(self, sandbox) + + def _register(self): + self.populateMemberFunctions() + self._sandbox._addService(self) + + def _getExecparams(self): + if not self._prf: + return {} + execparams = {} + for prop in self._prf.get_simple(): + # Skip non-execparam properties + kinds = set(k.get_kindtype() for k in prop.get_kind()) + if ('execparam' not in kinds) and ('property' not in kinds): + continue + if 'property' in kinds: + if prop.get_commandline() != 'true': + continue + # Only include properties with values + value = prop.get_value() + if value is not None: + execparams[prop.get_id()] = value + return execparams + + def __repr__(self): + return "<%s service '%s' at 0x%x>" % (self._instanceName, self._sandbox.getType(), id(self)) + + +class SandboxEventChannel(EventChannel, CorbaObject): + def __init__(self, name, sandbox): + EventChannel.__init__(self, name) + CorbaObject.__init__(self) + self._sandbox = sandbox + self._instanceName = name + + def destroy(self): + # Break any connections involving this event channel. 
+ self._sandbox._breakConnections(self) + self._sandbox._removeEventChannel(self._instanceName) + EventChannel.destroy(self) diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/naming.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/naming.py index 87795140f..a321850ca 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sandbox/naming.py +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/naming.py @@ -18,13 +18,13 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # -import logging import threading +from omniORB import CORBA, URI import CosNaming, CosNaming__POA -from omniORB import CORBA, URI -from ossie.utils import log4py +from ossie.cf import CF, CF__POA +from ossie.utils.log4py import logging log = logging.getLogger(__name__) @@ -41,35 +41,35 @@ class NamingContextStub(CosNaming__POA.NamingContextExt): """ def __init__(self): - self.__context = {} - self.__lock = threading.RLock() + self._context = {} + self._lock = threading.RLock() def bind(self, name, object): uri = URI.nameToString(name) - self.__lock.acquire() + self._lock.acquire() try: - if uri in self.__context: + if uri in self._context: raise CosNaming.NamingContext.AlreadyBound() log.debug('Binding "%s" into virtual NamingContext', uri) - self.__context[uri] = object + self._context[uri] = object finally: - self.__lock.release() + self._lock.release() def rebind(self, name, object): uri = URI.nameToString(name) log.debug('Rebinding "%s" into virtual NamingContext', uri) - self.__lock.acquire() + self._lock.acquire() try: - self.__context[uri] = object + self._context[uri] = object finally: - self.__lock.release() + self._lock.release() def getObject(self, name): - self.__lock.acquire() + self._lock.acquire() try: - return self.__context.get(name, None) + return self._context.get(name, None) finally: - self.__lock.release() + self._lock.release() def to_name(self, name): """ @@ -127,3 +127,22 @@ def to_name(self, name): raise 
ValueError("NamingContextStub:to_name() '%s' is an invalid name" % name) return [] + + +class ApplicationRegistrarStub(CF__POA.ApplicationRegistrar, NamingContextStub): + """ + Class to extend virtual NamingContext to support ApplicationRegistrar + operations. + """ + def _get_app(self): + return None + + def _get_domMgr(self): + return None + + def registerComponent(self, name, obj): + with self._lock: + if name in self._context: + raise CF.DuplicateName() + log.debug('Registering component "%s" into virtual ApplicationRegistrar', name) + self._context[name] = obj diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/plugin.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/plugin.py new file mode 100644 index 000000000..60de910b9 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/plugin.py @@ -0,0 +1,46 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +from ossie.utils.log4py import logging + +_log = logging.getLogger(__name__) +_plugins = None + +def _load_plugins(): + global _plugins + if _plugins is not None: + return + + _plugins = [] + import pkg_resources + for entry_point in pkg_resources.iter_entry_points('redhawk.sandbox.helpers'): + _log.trace("Loading plugin '%s'", entry_point.name) + try: + plugin = entry_point.load() + except: + # Ignore errors in plugin load + _log.exception("Failed to load plugin '%s'", entry_point.name) + continue + _plugins.append((entry_point.name, plugin)) + + +def plugins(): + _load_plugins() + return _plugins[:] diff --git a/redhawk/src/base/framework/python/ossie/utils/sandbox/terminal.py b/redhawk/src/base/framework/python/ossie/utils/sandbox/terminal.py index 344438282..a938f5064 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sandbox/terminal.py +++ b/redhawk/src/base/framework/python/ossie/utils/sandbox/terminal.py @@ -21,23 +21,26 @@ import commands class Terminal(object): - def __init__(self, command): + def __init__(self, command, title): status, self.__command = commands.getstatusoutput('which '+command) if status: raise RuntimeError, command + ' cannot be found' + self._title = title def _termOpts(self): return [] def command(self, command, arguments, title=None): options = self._termOpts() + if not title: + title = self._title if title: options += self._titleArgs(title) return self.__command, options + self._execArgs(command, arguments) class XTerm(Terminal): - def __init__(self): - super(XTerm,self).__init__('xterm') + def __init__(self, title=None): + super(XTerm,self).__init__('xterm', title) def _titleArgs(self, title): return ['-T', title] @@ -46,8 +49,8 @@ def _execArgs(self, command, arguments): return ['-e', command] + arguments class GnomeTerm(Terminal): - def __init__(self): - super(GnomeTerm,self).__init__('gnome-terminal') + def __init__(self, title=None): + super(GnomeTerm,self).__init__('gnome-terminal', title) def _titleArgs(self, 
title): return ['-t', title] diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/__init__.py b/redhawk/src/base/framework/python/ossie/utils/sb/__init__.py index 8aa7f2238..21627109b 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sb/__init__.py +++ b/redhawk/src/base/framework/python/ossie/utils/sb/__init__.py @@ -56,13 +56,15 @@ - Ambiguities can be resolved with usesPortName and/or providesPortName. Helpers are provided to manage data - - DataSource(), DataSink(): - Push vectors from Python to components and back. - - FileSource(), FileSink(): + - StreamSource, StreamSink: + Push data from Python to components and back. + - FileSource, FileSink: Push data from a file into components and back. - - MessageSource(), MessageSink(): + - MessageSource, MessageSink: Push messages from Python to components and back. - - SoundSink(): + - PropertyChangeListener: + Container to receive asynchronous property change events + - SoundSink: Playback audio data from BULKIO streams - compareSRI(): Compares the content of two SRI values @@ -116,12 +118,20 @@ """ from domainless import * from io_helpers import * +from prop_change_helpers import * from block_process import * + try: from bulkio.bulkioInterfaces import BULKIO except: # BULKIO is not installed pass -from plots import * -from audio import * +import helpers +from helpers import * + +# Add plug-in extensions +from ossie.utils.sandbox.plugin import plugins +for name, plugin in plugins(): + globals()[name] = plugin + del name, plugin diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/block_process.py b/redhawk/src/base/framework/python/ossie/utils/sb/block_process.py index 1efc83886..d37ef3d3a 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sb/block_process.py +++ b/redhawk/src/base/framework/python/ossie/utils/sb/block_process.py @@ -34,10 +34,7 @@ __all__ = ('proc',) class ProcessingTimeout(Exception): - def __init__(self): - pass - def __str__(self): - return "Processing timed 
out before completion" + pass def findInDataCoverterPortName(dataFormat): portName = '' diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/domainless.py b/redhawk/src/base/framework/python/ossie/utils/sb/domainless.py index 54fccf850..b452a837c 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sb/domainless.py +++ b/redhawk/src/base/framework/python/ossie/utils/sb/domainless.py @@ -104,12 +104,13 @@ import sys import logging import string as _string -import cStringIO +import cStringIO, pydoc import warnings import traceback from omniORB import CORBA, any from ossie import parsers +from ossie import properties as core_properties from ossie.utils import prop_helpers from ossie.utils.model import _DEBUG from ossie.utils.model import * @@ -119,13 +120,15 @@ from ossie.utils.sandbox import LocalSandbox, IDESandbox import ossie.utils.sandbox +warnings.filterwarnings('once',category=DeprecationWarning) + # Limit exported symbols __all__ = ('show', 'loadSADFile', 'IDELocation', 'connectedIDE', 'getIDE_REF', 'start', 'getSDRROOT', 'setSDRROOT', 'Component', 'generateSADXML', 'getDEBUG', 'setDEBUG', 'getComponent', 'IDE_REF', 'setIDE_REF', 'stop', 'catalog', 'redirectSTDOUT', 'orb', 'reset', 'launch', 'api', 'createEventChannel', 'getEventChannel', 'getService', 'browse', - 'release', '_get_started') + 'release', 'started', '_get_started') # Set up logging log = logging.getLogger(__name__) @@ -398,19 +401,56 @@ def generateSADXML(waveform_name): with_ac = with_partitioning.replace('@__ASSEMBLYCONTROLLER__@', assemblycontroller) # Loop over connections connectinterface = '' - for connection in _currentState['Component Connections'].values(): - usesport = Sad_template.usesport.replace('@__PORTNAME__@',connection['Uses Port Name']) - usesport = usesport.replace('@__COMPONENTINSTANCE__@',connection['Uses Component']._refid) - if connection['Provides Port Name'] == "CF:Resource": + #for connection in _currentState['Component Connections'].values(): + 
_connection_map = ConnectionManager.instance().getConnections() + for _tmp_connection in _connection_map: + connection_id = _connection_map[_tmp_connection][0] + uses_side = _connection_map[_tmp_connection][1] + uses_name = uses_side.getName() + if len(uses_name.split('/')) != 2: + continue + uses_inst_name = uses_name.split('/')[0] + uses_inst_id = None + for component in sandbox.getComponents(): + if component._instanceName == uses_inst_name: + if component._refid[:3] == 'DCE': + comprefid = component._refid.split(':')[0]+':'+component._refid.split(':')[1] + else: + comprefid = component._refid.split(':')[0] + uses_inst_id = comprefid + break + if not uses_inst_id: + continue + usesport = Sad_template.usesport.replace('@__PORTNAME__@',uses_side.getPortName()) + usesport = usesport.replace('@__COMPONENTINSTANCE__@',uses_inst_id) + provides_side = _connection_map[_tmp_connection][2] + supported_interface = False + provides_name = provides_side.getName() + if len(provides_name.split('/')) == 1: + supported_interface = True + else: + provides_name = provides_side.getName().split('/')[0] + provides_inst_id = None + for component in sandbox.getComponents(): + if component._instanceName == provides_name: + if component._refid[:3] == 'DCE': + comprefid = component._refid.split(':')[0]+':'+component._refid.split(':')[1] + else: + comprefid = component._refid.split(':')[0] + provides_inst_id = comprefid + break + if not provides_inst_id: + continue + if supported_interface: # component support interface - providesport = Sad_template.componentsupportedinterface.replace('@__PORTINTERFACE__@',connection['Provides Port Interface']) - providesport = providesport.replace('@__COMPONENTINSTANCE__@',connection['Provides Component']._refid) + providesport = Sad_template.componentsupportedinterface.replace('@__PORTINTERFACE__@','IDL:CF/Resource:1.0') + providesport = providesport.replace('@__COMPONENTINSTANCE__@',provides_inst_id) else: - providesport = 
Sad_template.providesport.replace('@__PORTNAME__@',connection['Provides Port Name']) - providesport = providesport.replace('@__COMPONENTINSTANCE__@',connection['Provides Component']._refid) + providesport = Sad_template.providesport.replace('@__PORTNAME__@',provides_side.getPortName()) + providesport = providesport.replace('@__COMPONENTINSTANCE__@',provides_inst_id) connectinterface += Sad_template.connectinterface.replace('@__USESPORT__@',usesport) connectinterface = connectinterface.replace('@__PROVIDESPORT__@',providesport) - connectinterface = connectinterface.replace('@__CONNECTID__@',str(uuid4())) + connectinterface = connectinterface.replace('@__CONNECTID__@',connection_id) with_connections = with_ac.replace('@__CONNECTINTERFACE__@',connectinterface) # External ports are ignored with_connections = with_connections.replace('@__EXTERNALPORTS__@',"") @@ -425,6 +465,85 @@ def __init__(self, id, value=None, type=None): self.value = value self.type = type +def convertStringToComplex(value, basetype): + negative_imag = False + _split = value.split('+') + if value[1:].find('-') != -1: + _split = value.split('-') + if len(_split) == 3: # negative real, negative imaginary + if _split[0] == '': + _split.pop(0) + _split[0] = '-'+_split[0] + negative_imag = True + real_idx = -1 + imag_idx = -1 + for idx in range(len(_split)): + if 'j' in _split[idx]: + if imag_idx != -1: + raise Exception("the proposed overload (id="+id+") is not a complex number ("+value+")") + imag_idx = idx + else: + if real_idx != -1: + raise Exception("the proposed overload (id="+id+") is not a complex number ("+value+")") + real_idx = idx + _real = None + _imag = None + if real_idx != -1: + _real = basetype(_split[real_idx]) + if imag_idx != -1: + _imag_str = _split[imag_idx].replace('j', '') + _imag = basetype(_imag_str) + if negative_imag: + _imag = _imag * -1 + + if not _real and not _imag: + return None + if _real and not _imag: + return complex(basetype(_real), 0) + if not _real and _imag: + 
return complex(0, basetype(_imag)) + return complex(basetype(_real), basetype(_imag)) + +def convertToValue(valuetype, value): + if valuetype == 'string' or valuetype == 'char': + return value + elif valuetype == 'boolean': + if type(value) == list: + return [bool(s) for s in value] + else: + return bool(value) + elif valuetype == 'complexBoolean': + if type(value) == list: + return [convertStringToComplex(s, bool) for s in value] + else: + return convertStringToComplex(value, bool) + elif valuetype == 'ulong' or valuetype == 'short' or valuetype == 'octet' or \ + valuetype == 'ushort' or valuetype == 'long' or valuetype == 'longlong' or \ + valuetype == 'ulonglong': + if type(value) == list: + return [int(s) for s in value] + else: + return int(value) + elif valuetype == 'complexULong' or valuetype == 'complexShort' or valuetype == 'complexOctet' or \ + valuetype == 'complexUShort' or valuetype == 'complexLong' or valuetype == 'complexLongLong' or \ + valuetype == 'complexULongLong': + if type(value) == list: + return [convertStringToComplex(s, int) for s in value] + else: + return convertStringToComplex(value, int) + elif valuetype == 'float' or valuetype == 'double': + if type(value) == list: + return [float(s) for s in value] + else: + return float(value) + elif valuetype == 'complexFloat' or valuetype == 'complexDouble': + if type(value) == list: + return [convertStringToComplex(s, float) for s in value] + else: + return convertStringToComplex(value, float) + else: + raise Exception('bad value type') + def overloadProperty(component, simples=None, simpleseq=None, struct=None, structseq=None): if len(component._properties) > 0: allProps = dict([(str(prop.id),prop) for prop in component._properties]) @@ -434,34 +553,17 @@ def overloadProperty(component, simples=None, simpleseq=None, struct=None, struc for overload in simples: if overload.id == entry.id: allProps.pop(overload.id) - if entry.valueType == 'string' or entry.valueType == 'char': - setattr(component, 
entry.clean_name, overload.value) - elif entry.valueType == 'boolean': - setattr(component, entry.clean_name, bool(overload.value)) - elif entry.valueType == 'ulong' or entry.valueType == 'short' or entry.valueType == 'octet' or \ - entry.valueType == 'ushort' or entry.valueType == 'long' or entry.valueType == 'longlong' or \ - entry.valueType == 'ulonglong': - setattr(component, entry.clean_name, int(overload.value)) - elif entry.valueType == 'float' or entry.valueType == 'double': - setattr(component, entry.clean_name, float(overload.value)) - else: - print "the proposed overload (id="+entry.id+") is not of a supported type ("+entry.valueType+")" + try: + setattr(component, entry.clean_name, convertToValue(entry.valueType, overload.value)) + except Exception, e: + print e, "Problem overloading id="+entry.id for overload in simpleseq: if overload.id == entry.id: allProps.pop(overload.id) - if entry.valueType == 'string' or entry.valueType == 'char': - setattr(component, entry.clean_name, overload.value) - elif entry.valueType == 'boolean': - setattr(component, entry.clean_name, [bool(s) for s in overload.value]) - elif entry.valueType == 'ulong' or entry.valueType == 'short' or entry.valueType == 'octet' or \ - entry.valueType == 'ushort' or entry.valueType == 'long' or entry.valueType == 'longlong' or \ - entry.valueType == 'ulonglong': - stuff=[int(s) for s in overload.value] - setattr(component, entry.clean_name, stuff) - elif entry.valueType == 'float' or entry.valueType == 'double': - setattr(component, entry.clean_name, [float(s) for s in overload.value]) - else: - print "the proposed overload (id="+entry.id+") is not of a supported type ("+entry.valueType+")" + try: + setattr(component, entry.clean_name, convertToValue(entry.valueType, overload.value)) + except Exception, e: + print e, "Problem overloading id="+entry.id for overload in struct: if overload.id == entry.id: allProps.pop(overload.id) @@ -504,18 +606,10 @@ def overloadProperty(component, 
simples=None, simpleseq=None, struct=None, struc # cleanup struct key if it has illegal characters... st_clean = st_clean.translate(translation) - if simple[1] == 'string' or simple[1] == 'char': - structValue[st_clean] = overload.value[_ov_key] - elif simple[1] == 'boolean': - structValue[st_clean] = bool(overload.value[_ov_key]) - elif simple[1] == 'ulong' or simple[1] == 'short' or simple[1] == 'octet' or \ - simple[1] == 'ushort' or simple[1] == 'long' or simple[1] == 'longlong' or \ - simple[1] == 'ulonglong': - structValue[st_clean] = int(overload.value[_ov_key]) - elif simple[1] == 'float' or simple[1] == 'double': - structValue[st_clean] = float(overload.value[_ov_key]) - else: - print "the proposed overload (id="+entry.id+") is not of a supported type ("+entry.valueType+")" + try: + structValue[st_clean] = convertToValue(simple[1], overload.value[_ov_key]) + except Exception, e: + print e, "Problem overloading id="+entry.id if _DEBUG: print "setattr ", component, " clean name ", entry.clean_name, " struct ", structValue @@ -562,19 +656,10 @@ def overloadProperty(component, simples=None, simpleseq=None, struct=None, struc # cleanup struct key if it has illegal characters... 
st_clean = st_clean.translate(translation) - if simple[1] == 'string' or simple[1] == 'char': - structValue[st_clean] = overloadedValue[_ov_key] - elif simple[1] == 'boolean': - structValue[st_clean] = bool(overloadedValue[_ov_key]) - elif simple[1] == 'ulong' or simple[1] == 'short' or simple[1] == 'octet' or \ - simple[1] == 'ushort' or simple[1] == 'long' or simple[1] == 'longlong' or \ - simple[1] == 'ulonglong': - structValue[st_clean] = int(overloadedValue[_ov_key]) - elif simple[1] == 'float' or simple[1] == 'double': - structValue[st_clean] = float(overloadedValue[_ov_key]) - else: - print "the proposed overload (id="+entry.id+") is not of a supported type ("+entry.valueType+")" - + try: + structValue[st_clean] = convertToValue(simple[1], overloadedValue[_ov_key]) + except Exception, e: + print e, "Problem overloading id="+entry.id structSeqValue.append(structValue) setattr(component, entry.clean_name, structSeqValue) @@ -585,6 +670,19 @@ def overloadProperty(component, simples=None, simpleseq=None, struct=None, struc if allProps[prop].mode != "readonly" and 'configure' in allProps[prop].kinds: setattr(component, allProps[prop].clean_name, allProps[prop].defValue) +def _loadStructMembers(parent): + simples = parent.get_simpleref() + value = {} + for simple in simples: + value[str(simple.refid)] = str(simple.value) + simpleseqs = parent.get_simplesequenceref() + for simpleseq in simpleseqs: + _seq = [] + for seq_value in simpleseq.values.get_value(): + _seq.append(str(seq_value)) + value[str(simpleseq.refid)] = _seq + return value + def loadSADFile(filename, props={}): ''' Load the graph/configuration described in the SAD file onto the sandbox @@ -618,6 +716,7 @@ def loadSADFile(filename, props={}): sad = parsers.sad.parseString(sadFileString) log.debug("waveform ID '%s'", sad.get_id()) log.debug("waveform name '%s'", sad.get_name()) + waveform_modifier = ':'+sad.get_name() validRequestedComponents = {} # Loop over each entry to determine SPD filenames and 
which components are kickable for component in sad.componentfiles.get_componentfile(): @@ -635,7 +734,7 @@ def loadSADFile(filename, props={}): # Need to determine which component is the assembly controller assemblyControllerRefid = None if sad.assemblycontroller: - assemblyControllerRefid = sad.assemblycontroller.get_componentinstantiationref().get_refid() + assemblyControllerRefid = sad.assemblycontroller.get_componentinstantiationref().get_refid() + waveform_modifier log.debug("ASSEMBLY CONTROLLER component instantiation ref '%s'", assemblyControllerRefid) if not assemblyControllerRefid: log.warn('SAD file did not specify an assembly controller') @@ -645,7 +744,7 @@ def loadSADFile(filename, props={}): # externprops=[] if sad.get_externalproperties(): - externprops=[ { 'comprefid' : x.comprefid, 'propid' : x.propid, 'externalpropid' : x.externalpropid } for x in sad.get_externalproperties().get_property() ] + externprops=[ { 'comprefid' : x.comprefid + waveform_modifier, 'propid' : x.propid, 'externalpropid' : x.externalpropid } for x in sad.get_externalproperties().get_property() ] log.debug( "External Props: %s", externprops ) # Loop over each entry to determine actual instance name for component @@ -655,7 +754,6 @@ def loadSADFile(filename, props={}): for hostCollocation in sad.get_partitioning().get_hostcollocation(): componentPlacements.extend(hostCollocation.get_componentplacement()) - log.debug("Creating start order for: %s", filename ) startorder={} for c in componentPlacements: @@ -685,7 +783,7 @@ def loadSADFile(filename, props={}): refid = component.componentfileref.refid if validRequestedComponents.has_key(refid): instanceName = component.get_componentinstantiation()[0].get_usagename() - instanceID = component.get_componentinstantiation()[0].id_ + instanceID = component.get_componentinstantiation()[0].id_ + waveform_modifier log.debug("launching component '%s'", instanceName) 
properties=component.get_componentinstantiation()[0].get_componentproperties() #simples @@ -743,7 +841,7 @@ def loadSADFile(filename, props={}): simple_exec_vals[container.id] = container.value try: # NB: Explicitly request no configure call is made on the component - newComponent = launch(componentName, instanceName,instanceID,configure=None,execparams=simple_exec_vals, objType="components") + newComponent = launch(componentName, instanceName, instanceID, configure=False, properties=simple_exec_vals, objType="components") launchedComponents.append(newComponent) except Exception as e: msg = "Failed to launch component '%s', REASON: %s" % (instanceName, str(e)) @@ -755,7 +853,7 @@ def loadSADFile(filename, props={}): for connection in sad.connections.get_connectinterface(): if connection != None: connectionID = None - if connection.get_id() != "": + if connection.get_id(): connectionID = connection.get_id() log.debug("CONNECTION INTERFACE: connection ID '%s'", connection.get_id()) usesPortComponent = None @@ -774,7 +872,11 @@ def loadSADFile(filename, props={}): log.debug("CONNECTION INTERFACE: uses port component ref '%s'", usesPortComponentRefid) # Loop through launched components to find one containing the uses port to be connected for component in launchedComponents: - if component._refid == usesPortComponentRefid: + if component._refid[:3] == 'DCE': + comprefid = component._refid.split(':')[0]+':'+component._refid.split(':')[1] + else: + comprefid = component._refid.split(':')[0] + if comprefid == usesPortComponentRefid: usesPortComponent = component break @@ -791,7 +893,11 @@ def loadSADFile(filename, props={}): log.debug("CONNECTION INTERFACE: provides port component ref '%s'", providesPortComponentRefid) # Loop through launched components to find one containing the provides port to be connected for component in launchedComponents: - if component._refid == providesPortComponentRefid: + if component._refid[:3] == 'DCE': + comprefid = 
component._refid.split(':')[0]+':'+component._refid.split(':')[1] + else: + comprefid = component._refid.split(':')[0] + if comprefid == providesPortComponentRefid: providesPortComponent = component break elif connection.get_componentsupportedinterface() != None: @@ -805,7 +911,11 @@ def loadSADFile(filename, props={}): print "loadSADFile(): CONNECTION INTERFACE: componentsupportedinterface port component ref " + str(connection.get_componentsupportedinterface().get_componentinstantiationref().get_refid()) # Loop through launched components to find one containing the provides port to be connected for component in launchedComponents: - if component._refid == providesPortComponentRefid: + if component._refid[:3] == 'DCE': + comprefid = component._refid.split(':')[0]+':'+component._refid.split(':')[1] + else: + comprefid = component._refid.split(':')[0] + if comprefid == providesPortComponentRefid: providesPortComponent = component break elif connection.get_findby(): @@ -829,12 +939,12 @@ def loadSADFile(filename, props={}): assemblyController = False sandboxComponent = None if validRequestedComponents.has_key(refid): - instanceID = component.get_componentinstantiation()[0].id_ + instanceID = component.get_componentinstantiation()[0].id_ + waveform_modifier componentProps = None if len(launchedComponents) > 0: for comp in launchedComponents: if instanceID == comp._refid: - componentProps = comp._configRef + componentProps = comp._getInitialConfigureProperties() if instanceID == assemblyControllerRefid: assemblyController = True sandboxComponent = comp @@ -887,10 +997,8 @@ def loadSADFile(filename, props={}): for struct in structs: if not (struct.refid in configurable[sandboxComponent._instanceName]): continue - simples = struct.get_simpleref() value = {} - for simple in simples: - value[str(simple.refid)] = str(simple.value) + value.update(_loadStructMembers(struct)) if struct.refid in props and assemblyController: value = props[struct.refid] props.pop(struct.refid) @@ 
-909,10 +1017,8 @@ def loadSADFile(filename, props={}): continue values_vals = [] for struct_template in structseq.get_structvalue(): - simples = struct_template.get_simpleref() value = {} - for simple in simples: - value[str(simple.refid)] = str(simple.value) + value.update(_loadStructMembers(struct_template)) values_vals.append(value) if structseq.refid in props and assemblyController: values_vals = props[structseq.refid] @@ -1080,7 +1186,12 @@ def __new__(self, except RuntimeError, e: # Turn RuntimeErrors into AssertionErrors to match legacy expectation. raise AssertionError, "Unable to launch component: '%s'" % e -def api(descriptor, objType=None): +def api(descriptor, objType=None, destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + sdrRoot = _getSandbox().getSdrRoot() profile = sdrRoot.findProfile(descriptor, objType=objType) spd, scd, prf = sdrRoot.readProfile(profile) @@ -1095,21 +1206,21 @@ def api(descriptor, objType=None): else: description = spd.description if description: - print '\nDescription ======================\n' - print description - print '\nPorts ======================' - print '\nUses (output)' + print >>destfile, '\nDescription ======================\n' + print >>destfile, description + print >>destfile, '\nPorts ======================' + print >>destfile, '\nUses (output)' table = TablePrinter('Port Name', 'Port Interface') for uses in scd.get_componentfeatures().get_ports().get_uses(): table.append(uses.get_usesname(), uses.get_repid()) - table.write() - print '\nProvides (input)' + table.write(f=destfile) + print >>destfile, '\nProvides (input)' table = TablePrinter('Port Name', 'Port Interface') for provides in scd.get_componentfeatures().get_ports().get_provides(): table.append(provides.get_providesname(), provides.get_repid()) - table.write() + table.write(f=destfile) - print '\nProperties ======================\n' + print >>destfile, '\nProperties 
======================\n' table = TablePrinter('id', 'type') if prf != None: for simple in prf.simple: @@ -1134,12 +1245,18 @@ def api(descriptor, objType=None): table.append(' '+prop.get_id(),prop.get_type()) for prop in struct.get_struct().get_simplesequence(): table.append(' '+prop.get_id(),prop.get_type()) - table.write() - + table.write(f=destfile) + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() def _get_started(): - return _getSandbox()._get_started() + warnings.warn('_get_started() is deprecated, use started()', DeprecationWarning) + return started() + +def started(): + return _getSandbox().started def start(): _getSandbox().start() @@ -1155,8 +1272,9 @@ def release(): _getSandbox().shutdown() def launch(descriptor, instanceName=None, refid=None, impl=None, - debugger=None, window=None, execparams={}, configure={}, - initialize=True, timeout=None, objType=None, stdout=None): + debugger=None, window=None, execparams={}, configure=True, + initialize=True, timeout=None, objType=None, properties={}, + shared=True, stdout=None): """ Execute a softpkg, returning a proxy object. This is a factory function that may return a component, device or service depending on the SPD. @@ -1177,26 +1295,56 @@ def launch(descriptor, instanceName=None, refid=None, impl=None, impl - Implementation ID to execute. If not given, the first implementation whose entry point exists will be used. debugger - Debugger to attach to the executable (default: None). + Options: gdb (C++), jdb (Java), pdb (Python) window - Terminal to receive command input/output. If not given, output will be directed to stdout, and component will not receive input. - execparams - Execparams to override on component execution. - configure - If a dictionary, a set of key/value pairs to override the - initial configuration values of the component. - If None, defer configuration of the component to the - caller (generally used by loadSADFile). 
+ properties - Dictionary of key/value pairs to override the initial + property values of the component. + configure - If true, call configure() with the default values for + properties of kind 'configure' after launching the + component. + If false, defer configuration to the caller (generally + used by loadSADFile). + DEPRECATED: If a dictionary, a set of key/value pairs + to override the initial configuration values of the + component. All property kinds should be included in the + 'properties' argument. initialize - If true, call initialize() after launching the component. - If false, defer initialization to ther caller. + If false, defer initialization to the caller. timeout - Time, in seconds, to wait for launch to complete. If not given, the default is 10 seconds, except when running with a debugger, in which case the default is 60 seconds. objType - The type that you would like to launch. Options are component, device, or service. If not given, all types will be searched for with the descriptor given. + shared - Launch this component into a shared address space, if + possible. stdout - File object to send stdout/stderr to. - """ - return _getSandbox().launch(descriptor, instanceName, refid, impl, debugger, - window, execparams, configure, initialize, timeout, objType, stdout) + + Deprecated arguments: + execparams - Execparams to override on component execution. All property + kinds should included in the 'properties' argument. 
+ """ + # Check for deprecation conditions + if isinstance(configure, dict) or execparams: + if properties: + raise ValueError("'properties' argument cannot be mixed with 'configure' overrides or 'execparams'") + # Combine the overrides from configure and execparams into a single + # properties dictionary, with the latter having precedence + properties = {} + if isinstance(configure, dict) and len(configure) != 0: + warnings.warn("'configure' argument is deprecated for property overrides; use 'properties'.", DeprecationWarning) + properties.update(configure) + configure = True + if execparams and len(execparams) != 0: + warnings.warn("'execparams' argument is deprecated; use 'properties'.", DeprecationWarning) + properties.update(execparams) + + return _getSandbox().launch(descriptor=descriptor, instanceName=instanceName, refid=refid, + impl=impl, debugger=debugger, window=window, properties=properties, + initialize=initialize, configure=configure, timeout=timeout, + objType=objType, shared=shared, stdout=stdout) def createEventChannel(name, exclusive=False): """ diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/helpers.py b/redhawk/src/base/framework/python/ossie/utils/sb/helpers.py new file mode 100644 index 000000000..27069fde2 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sb/helpers.py @@ -0,0 +1,34 @@ + +from pydoc import pager as _pager + +__all__ = [ 'PagerWithHeader', + 'Pager' + ] + +def PagerWithHeader( src_generator, num_lines=25, header=None, repeat_header=None): + """ + Apply simple pager controller to the ouptput of a specified generator. 
+ + src_generator - a generator object that will yield lines to display + num_lines - number of lines to display before requesting manual input + header - a header row to display, this can be multi-line object + repeat_header - number of lines to repeat between header blocks + """ + for index,line in enumerate(src_generator): + if header and repeat_header: + if (index % repeat_header) == 0 : + if type(header) == list: + for x in header: + print x + else: + print header + if index % num_lines == 0 and index: + input=raw_input("Hit any key to continue press q to quit ") + if input.lower() == 'q': + break + else: + print line + + +def Pager( doc ): + _pager(doc) diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/io_helpers.py b/redhawk/src/base/framework/python/ossie/utils/sb/io_helpers.py index d8d343a31..d49fb5faa 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sb/io_helpers.py +++ b/redhawk/src/base/framework/python/ossie/utils/sb/io_helpers.py @@ -29,8 +29,8 @@ import domainless as _domainless import threading as _threading import ossie.utils.bulkio.bulkio_helpers as _bulkio_helpers -import ossie.utils.bluefile.bluefile_helpers as _bluefile_helpers -import ossie.utils.bulkio.bulkio_data_helpers as _bulkio_data_helpers +from ossie.utils.bluefile import bluefile_helpers +from ossie.utils.bulkio import bulkio_data_helpers import ossie.utils.bluefile.bluefile as _bluefile from ossie import properties as _properties from ossie import events as _events @@ -38,6 +38,9 @@ import shlex as _shlex import time as _time import signal as _signal +import warnings +import cStringIO, pydoc +import sys as _sys import os as _os import subprocess as _subprocess import Queue as _Queue @@ -46,9 +49,11 @@ import socket as _socket from omniORB import any as _any from omniORB import CORBA as _CORBA +from omniORB import tcInternal +import copy as _copy import omniORB as _omniORB import CosEventComm__POA -import warnings as _warnings +import traceback from 
ossie.utils.model import PortSupplier, OutputBase from ossie.utils.model.connect import ConnectionManager @@ -62,7 +67,7 @@ __all__ = ('DataSink', 'DataSource', 'FileSink', 'FileSource', 'MessageSink', 'MessageSource','MsgSupplierHelper', 'Plot', 'SRIKeyword', 'compareSRI', 'helperBase', 'probeBULKIO','createSDDSStreamDefinition', 'DataSourceSDDS', - 'DataSinkSDDS') + 'DataSinkSDDS', 'createTimeStamp', 'createSRI', 'compareKeywordLists') def compareSRI(a, b): ''' @@ -93,12 +98,32 @@ def compareSRI(a, b): if len(a.keywords) != len(b.keywords): return False for keyA, keyB in zip(a.keywords, b.keywords): + if keyA.id != keyB.id: + return False + if keyA.value._t != keyB.value._t: + return False + if keyA.value._v != keyB.value._v: + return False + return True + +def compareKeywordLists( a, b ): + for keyA, keyB in zip(a, b): + if keyA.id != keyB.id: + return False if keyA.value._t != keyB.value._t: return False if keyA.value._v != keyB.value._v: return False return True +def _getAnyValue(key): + if key._format[0]=='[' and key._format[-1]==']': + expectedType = _properties.getTypeCode(key._format[1:-1]) + expectedTypeCode = tcInternal.createTypeCode((tcInternal.tv_sequence, expectedType._d, 0)) + return _CORBA.Any(expectedTypeCode, key._value) + else: + return _properties.to_tc_value(key._value,str(key._format)) + def _checkComplex(data): for item in data: if isinstance(item, complex): @@ -130,7 +155,14 @@ def reset(self): pass class MessageSink(helperBase, PortSupplier): - def __init__(self, messageId = None, messageFormat = None, messageCallback = None): + ''' + Received structured messages + if storeMessages is True, then messages can be retrieved through the getMessages function + The internal message queue is emptied when messages are retrieved, so if storeMessages is True, + make sure to regularly retrieve the available messages to empty out the internal list + + ''' + def __init__(self, messageId = None, messageFormat = None, messageCallback = None, 
storeMessages = False): helperBase.__init__(self) PortSupplier.__init__(self) self._flowOn = False @@ -138,6 +170,7 @@ def __init__(self, messageId = None, messageFormat = None, messageCallback = Non self._messageId = messageId self._messageFormat = messageFormat self._messageCallback = messageCallback + self._storeMessages = storeMessages self._providesPortDict = {} self._providesPortDict['msgIn'] = { 'Port Interface': 'IDL:ExtendedEvent/MessageEvent:1.0', @@ -152,12 +185,15 @@ def __del__(self): def messageCallback(self, msgId, msgData): print msgId, msgData + def getMessages(self): + return self._messagePort.getMessages() + def getPort(self, portName): try: if self._messageCallback == None: self._messageCallback = self.messageCallback if self._messagePort == None: - self._messagePort = _events.MessageConsumerPort(thread_sleep=0.1) + self._messagePort = _events.MessageConsumerPort(thread_sleep=0.1, storeMessages = self._storeMessages) self._messagePort.registerMessage(self._messageId, self._messageFormat, self._messageCallback) return self._messagePort._this() @@ -165,9 +201,18 @@ def getPort(self, portName): log.error("MessageSink:getPort(): failed " + str(e)) return None - def api(self): - print "Component MessageSink :" - PortSupplier.api(self) + def api(self, destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + + print >>destfile, "Component MessageSink :" + PortSupplier.api(self, destfile=destfile) + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() def start(self): if self._messagePort : self._messagePort.startPort() @@ -291,9 +336,18 @@ def getUsesPort(self): log.error("MessageSource:getUsesPort(): failed " + str(e)) return None - def api(self): - print "Component MessageSource :" - PortSupplier.api(self) + def api(self, destfile=None): + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + + print >>destfile, 
"Component MessageSource :" + PortSupplier.api(self, destfile=destfile) + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() def start(self): self._flowOn = True @@ -566,13 +620,22 @@ def getPortByName(self, portName): # as the portName should be set via self.supportedPorts. raise Exception, "Port name " + portName + " not found." - def api(self): + def api(self, destfile=None): """ Prints application programming interface (API) information and returns. """ - print "Component " + self.__class__.__name__ + " :" - PortSupplier.api(self) + localdef_dest = False + if destfile == None: + localdef_dest = True + destfile = cStringIO.StringIO() + + print >>destfile, "Component " + self.__class__.__name__ + " :" + PortSupplier.api(self, destfile=destfile) + + if localdef_dest: + pydoc.pager(destfile.getvalue()) + destfile.close() class _SourceBase(_DataPortBase): @@ -661,7 +724,7 @@ def _buildAPI(self): if _domainless._DEBUG == True: print self.className + ":_buildAPI()" - self.api() + self.api(destfile=_sys.stdout) def getPort(self, name): if name in self._connections: @@ -674,7 +737,6 @@ def getPort(self, name): class _SinkBase(_DataPortBase): - def __init__(self, formats=None): """ Forward parameters to parent constructor. @@ -707,7 +769,7 @@ def _buildAPI(self): if _domainless._DEBUG == True: print self.className + ":_buildAPI()" - self.api() + self.api(destfile=_sys.stdout) def getPortType(self, portName): """ @@ -741,7 +803,6 @@ def eos(self): self._sink exists. 
""" - if self._sink == None: return False return self._sink.gotEOS @@ -770,7 +831,8 @@ def __init__(self, startTime = 0.0, streamID = None, blocking = True, - subsize = 0): + subsize = 0, + throttle = False): self._filename = filename self._midasFile = midasFile @@ -783,6 +845,7 @@ def __init__(self, self._streamID = streamID self._blocking = blocking self._sri = None + self._throttle = throttle self._byteswap = False self._defaultDataFormat = '16t' @@ -899,13 +962,13 @@ def setupFileReader(self): # If input file is a Midas Blue file if self._midasFile == True: # define source helper component - self._src = _bluefile_helpers.BlueFileReader(eval(portType)) + self._src = bluefile_helpers.BlueFileReader(eval(portType), throttle=self._throttle) # else, input file is binary file else: - self._src = _bulkio_data_helpers.FileSource(eval(portType),self._byteswap, portTypes) + self._src = bulkio_data_helpers.FileSource(eval(portType),self._byteswap, portTypes, throttle=self._throttle) keywords = [] for key in self._SRIKeywords: - keywords.append(_CF.DataType(key._name, _properties.to_tc_value(key._value,str(key._format)))) + keywords.append(_CF.DataType(key._name, _getAnyValue(key))) if self._streamID == None: self._streamID = self._filename.split('/')[-1] @@ -959,9 +1022,15 @@ def stop(self): self._src.EOS = True class FileSink(_SinkBase): - def __init__(self,filename=None, midasFile=False): + """ + To use a different sink (for custom data processing) for regular files, assign the new class to sinkClass + To use a different sink for blue files, assign the new class to sinkBlueClass + """ + def __init__(self,filename=None, midasFile=False, sinkClass=bulkio_data_helpers.FileSink, sinkBlueClass=bluefile_helpers.BlueFileWriter): _SinkBase.__init__(self) + self.sinkClass = sinkClass + self.sinkBlueClass = sinkBlueClass if _domainless._DEBUG == True: print className + ":__init__() filename " + str(filename) print className + ":__init__() midasFile " + str(midasFile) @@ -976,10 
+1045,10 @@ def getPort(self, portName): # If output file is a Midas Blue file if self._midasFile == True: # define source helper component - self._sink = _bluefile_helpers.BlueFileWriter(self._filename,eval(self._sinkPortType)) + self._sink = self.sinkBlueClass(self._filename,eval(self._sinkPortType)) # else, output file is binary file else: - self._sink = _bulkio_data_helpers.FileSink(self._filename, eval(self._sinkPortType)) + self._sink = self.sinkClass(self._filename, eval(self._sinkPortType)) if self._sink != None: self._sinkPortObject = self._sink.getPort() @@ -1029,14 +1098,14 @@ class DataSinkSDDS(_SinkBase): It is the responsibility of the user to consume the SDDS data - DataSinkSDDS manages attachment Ids under the port (self._sink) dictionary attachments + DataSinkSDDS manages attachment Ids under the port (sinkClass) dictionary attachments register an attach callback by passing a function to registerAttachCallback register an detach callback by passing a function to registerDetachCallback """ - def __init__(self): + def __init__(self, sinkClass=bulkio_data_helpers.SDDSSink): _SinkBase.__init__(self, formats=['sdds']) - self._sink = _bulkio_data_helpers.SDDSSink(self) + self._sink = sinkClass(self) self.attach_cb = self.__attach_cb self.detach_cb = self.__detach_cb @@ -1077,7 +1146,16 @@ def __init__(self): Helper to handle the generation of SDDS metadata forwarding """ _SourceBase.__init__(self, bytesPerPush = 0, dataFormat='sdds', formats=['sdds']) - self._src = _bulkio_data_helpers.SDDSSource() + self._src = bulkio_data_helpers.SDDSSource() + self._blocking = True + self._streamdefs = {} + + def start(self): + pass + + def stop(self): + pass + def attach(self, streamData=None, name=None): """ streamData: type BULKIO.SDDSStreamDefinition @@ -1098,6 +1176,8 @@ def attach(self, streamData=None, name=None): if not isinstance(name, str): raise Exception("name must be of ") retval = self._src.attach(streamData, name) + if retval: + self._streamdefs[name] = 
streamData
         return retval
 
     def detach(self, attachId=''):
@@ -1108,11 +1188,97 @@
         if not isinstance(attachId, str):
             raise Exception("attachId must be of ")
         self._src.detach(attachId)
+        try:
+            self._streamdefs.pop(attachId,None)
+        except:
+            pass
 
     def _createArraySrcInst(self, srcPortType):
         return self._src
 
+    def getStreamDef( self, name=None, hostip=None, pkts=1000, block=True, returnSddsAnalyzer=True):
+        # grab data if stream definition is available
+        sdef =None
+        aid=name
+        if not aid:
+            if len(self._streamdefs) == 0:
+                raise Exception("No attachment have been made, use grabData or call attach")
+
+            aid = self._streamdefs.keys()[0]
+            print "Defaults to first entry, attach id = ", aid
+            sdef = self._streamdefs[aid]
+        else:
+            sdef = self._streamdefs[aid]
+
+        if not sdef:
+            raise Exception("No SDDS stream definition for attach id:" + aid )
+
+        if not hostip:
+            hostip = _socket.gethostbyname(_socket.gethostname())
+
+        return self.getData( sdef.multicastAddress, hostip, sdef.port, pkts, block=block, returnSddsAnalyzer=returnSddsAnalyzer)
+
+
+    def getData( self, mgroup, hostip, port=29495, pkts=1000, pktlen=1080, block=True, returnSddsAnalyzer=True):
+        totalRead=0.0
+        startTime = _time.time()
+        sock = None
+        ismulticast=False
+        blen=10240
+        bytesRead=0
+        requestedBytes=pkts*pktlen
+        data=[]
+        rawdata=''
+        try:
+            try:
+                ip_class=int(mgroup.split('.')[0])
+                if ip_class >= 224 and ip_class <= 239:
+                    ismulticast=True
+            except:
+                pass
+
+            #print " Capturing ", mgroup, " host ", hostip, " port ", port
+            sock = _socket.socket(_socket.AF_INET, _socket.SOCK_DGRAM, _socket.IPPROTO_UDP)
+            sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1)
+            sock.bind(("",port))
+            if ismulticast:
+                mreq=_socket.inet_aton(mgroup)+_socket.inet_aton(hostip)
+                sock.setsockopt(_socket.IPPROTO_IP, _socket.IP_ADD_MEMBERSHIP, mreq)
+                print "Capturing Socket Interface: (MULTICAST) Host Interface: " + hostip + " Multicast: " + mgroup + " Port: 
"+ str(port)
+            else:
+                print "Capturing Socket Interface: (UDP) Host Interface: " + hostip + " Source Address: " + mgroup + " Port: "+ str(port)
+            ncnt=0
+            while totalRead < requestedBytes:
+                rcvddata = sock.recv(blen,_socket.MSG_WAITALL)
+                rawdata=rawdata+rcvddata
+                data=data+list(rcvddata)
+                totalRead = totalRead + len(rcvddata)
+                ncnt += 1
+                print " read ", ncnt, " pkt ", len(rcvddata)
+        except KeyboardInterrupt,e :
+            traceback.print_exc()
+            print "Exception during packet capture: " + str(e)
+        except Exception, e :
+            traceback.print_exc()
+            print "Exception during packet capture: " + str(e)
+        finally:
+            endTime=_time.time()
+            deltaTime=endTime -startTime
+            if sock: sock.close()
+        print "Elapsed Time: ", deltaTime, " Total Data (kB): ", totalRead/1000.0, " Rate (kBps): ", (totalRead/1000.0)/deltaTime
+        if returnSddsAnalyzer:
+            from ossie.utils.sdds import SDDSAnalyzer
+            return SDDSAnalyzer( rawdata, pkts, pktlen, totalRead )
+        else:
+            return data, rawdata, (pktlen,pkts,totalRead)
+
+
 class DataSource(_SourceBase):
+    '''
+    Source of Bulk IO data. 
Supported data format strings: + char, short, long, float, double, longlong, octet, ushort, ulong, ulonglong + throttle: when True, data will match sampleRate (provided in the push function) + ''' def __init__(self, data = None, dataFormat = None, @@ -1120,7 +1286,10 @@ def __init__(self, bytesPerPush = 512000, startTime = 0.0, blocking = True, - subsize = 0): + subsize = 0, + sri = None, + throttle = False): + warnings.warn("DataSource is deprecated, use StreamSource", DeprecationWarning) fmts=['char','short','long','float','double','longlong','octet','ushort', 'ulong', 'ulonglong', 'file','xml' ] self.threadExited = None @@ -1134,8 +1303,9 @@ def __init__(self, self._sampleRate = None self._onPushSampleRate = None self._complexData = None + self._streamID = None self._SRIKeywords = [] - self._sri = None + self._sri = sri self._startTime = startTime self._timePush = None self._blocking = blocking @@ -1143,6 +1313,7 @@ def __init__(self, self._runThread = None self._dataQueue = _Queue.Queue() self._currentSampleTime = self._startTime + self._throttle = throttle # Track unsent packets so that callers can monitor for when all packets # have really been sent; checking for an empty queue only tells whether @@ -1155,9 +1326,9 @@ def __init__(self, def _createArraySrcInst(self, srcPortType): if srcPortType != "_BULKIO__POA.dataXML": - return _bulkio_data_helpers.ArraySource(eval(srcPortType)) + return bulkio_data_helpers.ArraySource(eval(srcPortType)) else: - return _bulkio_data_helpers.XmlArraySource(eval(srcPortType)) + return bulkio_data_helpers.XmlArraySource(eval(srcPortType)) def start(self): @@ -1171,14 +1342,50 @@ def start(self): self._runThread.setDaemon(True) self._runThread.start() + + def sri(self): + if self._sri != None: + return _copy.copy(self._sri) + else: + # make a default sri from the current state + keywords = [] + try: + for key in self._SRIKeywords: + keywords.append(_CF.DataType(key._name, _getAnyValue(key))) + except: + pass + candidateSri = 
_BULKIO.StreamSRI(1, 0.0, 1, 0, self._subsize, 0.0, 0, 0, 0, + "defaultStreamID", self._blocking, keywords) + if self._sampleRate > 0.0: + candidateSri.xdelta = 1.0/float(self._sampleRate) + + if self._complexData and self._complexData == True: + candidateSri.mode = 1 + + if self._startTime >= 0.0: + candidateSri.xstart = self._startTime + return candidateSri + + def sendEOS(self, streamID=None): + if streamID: + self.push([],EOS=True, streamID=streamID ) + else: + if self._sri: + self.push([],EOS=True, streamID=self._sri.streamID ) + else: + self.push([],EOS=True ) + + def push(self, data, EOS = False, - streamID = "defaultStreamID", + streamID = None, sampleRate = None, complexData = False, SRIKeywords = [], - loop = None): + loop = None, + sri = None, + ts = None ): """ Push an arbitrary data vector @@ -1198,13 +1405,32 @@ def push(self, data = _bulkio_helpers.pythonComplexListToBulkioComplex(data, itemType) complexData = True - if sampleRate == None and self._onPushSampleRate != None: - sampleRate = self._onPushSampleRate - elif sampleRate == None and self._onPushSampleRate == None: - sampleRate = 1.0 - self._onPushSampleRate = sampleRate + # if no stream id is provided then try and use prior stream id + if streamID == None: + if self._sri == None and sri == None: + if self._streamID != None: + streamID = self._streamID + elif sri != None: + streamID = sri.streamID + elif self._sri != None: + streamID = self._sri.streamID + + # if no stream id is provided then use default + if streamID == None: + streamID = "defaultStreamID" + + if sampleRate == None: + # if no sample rate provide and no sri provide then use prior sample rate if available + if sri != None: + if sri.xdelta > 0.0: + self._onPushSampleRate = 1.0/sri.xdelta + if self._onPushSampleRate != None: + sampleRate = self._onPushSampleRate else: - self._onPushSampleRate = sampleRate + # if we are given a new sample rate then save this off + if self._onPushSampleRate == None or ( self._onPushSampleRate != 
sampleRate ): + # save off, data consumer thread can be slower so we can use sri + self._onPushSampleRate = sampleRate self._dataQueue.put((data, EOS, @@ -1212,7 +1438,9 @@ def push(self, sampleRate, complexData, SRIKeywords, - loop)) + loop, + sri, + ts)) self._packetQueued() def _packetQueued(self): @@ -1263,45 +1491,101 @@ def pushThread(self): complexData = dataset[4] SRIKeywords = dataset[5] loop = dataset[6] + sri = dataset[7] + ts = dataset[8] # If loop is set in method call, override class attribute if loop != None: self._loop = loop try: - self._sampleRate = sampleRate - self._complexData = complexData - self._SRIKeywords = SRIKeywords - self._streamID = streamID - candidateSri = None - # If any SRI info is set, call pushSRI - if streamID != None or \ - sampleRate != None or \ - complexData != None or \ - len(SRIKeywords) > 0: - keywords = [] - for key in self._SRIKeywords: - keywords.append(_CF.DataType(key._name, _properties.to_tc_value(key._value,str(key._format)))) - candidateSri = _BULKIO.StreamSRI(1, 0.0, 1, 0, self._subsize, 0.0, 0, 0, 0, - streamID, self._blocking, keywords) - - if sampleRate > 0.0: - candidateSri.xdelta = 1.0/float(sampleRate) + if sri == None and self._sri == None : + if sampleRate == None : sampleRate=1.0 + self._sampleRate = sampleRate + self._complexData = complexData + self._SRIKeywords = SRIKeywords + self._streamID = streamID + candidateSri = None + # If any SRI info is set, call pushSRI + if streamID != None or \ + sampleRate != None or \ + complexData != None or \ + len(SRIKeywords) > 0: + keywords = [] + for key in self._SRIKeywords: + keywords.append(_CF.DataType(key._name, _getAnyValue(key))) + candidateSri = _BULKIO.StreamSRI(1, 0.0, 1, 0, self._subsize, 0.0, 0, 0, 0, + streamID, self._blocking, keywords) + + if sampleRate > 0.0: + candidateSri.xdelta = 1.0/float(sampleRate) + self._onPushSampleRate = sampleRate + + if complexData == True: + candidateSri.mode = 1 + else: + candidateSri.mode = 0 + + if self._startTime 
>= 0.0: + candidateSri.xstart = self._startTime + + else: + candidateSri = _BULKIO.StreamSRI(1, 0.0, 1, 0, self._subsize, 0.0, 0, 0, 0, + "defaultStreamID", self._blocking, []) + else: + candidateSri = _copy.copy(self._sri) + if sri != None: + # user supplied sri + candidateSri = sri + + if streamID and streamID != candidateSri.streamID: + candidateSri.streamID = streamID + self._streamID = streamID + + if sampleRate == None: + sampleRate = 1.0/candidateSri.xdelta + self._sampleRate = sampleRate + self._onPushSampleRate = sampleRate + else: + if sampleRate > 0.0: + candidateSri.xdelta = 1.0/float(sampleRate) + self._sampleRate = sampleRate + self._onPushSampleRate = sampleRate if complexData == True: candidateSri.mode = 1 - else: - candidateSri.mode = 0 + self._complexData = 1 + + self._complexData = candidateSri.mode if self._startTime >= 0.0: candidateSri.xstart = self._startTime - else: - candidateSri = _BULKIO.StreamSRI(1, 0.0, 1, 0, self._subsize, 0.0, 0, 0, 0, - "defaultStreamID", self._blocking, []) + + # handle keywords + if len(SRIKeywords) > 0 : + self._SRIKeywords = SRIKeywords + # need to keep order for compareSRI + ckeys = [ x.id for x in candidateSri.keywords ] + keywords = candidateSri.keywords[:] + for key in self._SRIKeywords: + # if current sri contains they keyword then overwrite else append + kw = _CF.DataType(key._name, _getAnyValue(key)) + if key._name in ckeys: + # replace that keyword + for x in range(len(keywords)): + if keywords[x].id == kw.id : + keywords[x] = kw + else: + keywords.append(kw) + candidateSri.keywords = keywords if self._sri==None or not compareSRI(candidateSri, self._sri): - self._sri = candidateSri + self._sri = _copy.copy(candidateSri) self._pushSRIAllConnectedPorts(sri = self._sri) + # if we are give a timestamp then use it as is + if ts != None: + self._currentSampleTime = ts + # Call pushPacket # If necessary, break data into chunks of pktSize for each # pushPacket @@ -1384,6 +1668,11 @@ def _pushPackets(self, 
packetEOS, streamID, srcPortType) + # covert time stamp if necessary + if isinstance(currentSampleTime,_BULKIO.PrecisionUTCTime): + self._currentSampleTime = currentSampleTime.twsec + currentSampleTime.tfsec + currentSampleTime = self._currentSampleTime + self._currentSampleTime += sampleTimeForPush currentSampleTime += sampleTimeForPush else: @@ -1414,22 +1703,32 @@ def _pushPacket(self, data = _bulkio_helpers.formatData(data, BULKIOtype=eval(srcPortType)) - T = _BULKIO.PrecisionUTCTime(_BULKIO.TCM_CPU, - _BULKIO.TCS_VALID, - 0.0, - int(currentSampleTime), - currentSampleTime - int(currentSampleTime)) + if isinstance(currentSampleTime,_BULKIO.PrecisionUTCTime): + T= currentSampleTime + else: + T = _BULKIO.PrecisionUTCTime(_BULKIO.TCM_CPU, + _BULKIO.TCS_VALID, + 0.0, + int(currentSampleTime), + currentSampleTime - int(currentSampleTime)) + if self._throttle: + if self._sampleRate != None: + _time.sleep(len(data)/(self._sampleRate*2.0)) + if srcPortType != "_BULKIO__POA.dataXML": - _bulkio_data_helpers.ArraySource.pushPacket(arraySrcInst, + bulkio_data_helpers.ArraySource.pushPacket(arraySrcInst, data = data, T = T, EOS = EOS, streamID = streamID) else: - _bulkio_data_helpers.XmlArraySource.pushPacket(arraySrcInst, + bulkio_data_helpers.XmlArraySource.pushPacket(arraySrcInst, data = data, EOS = EOS, streamID = streamID) + if self._throttle: + if self._sampleRate != None: + _time.sleep(len(data)/(self._sampleRate*2.0)) def _pushSRIAllConnectedPorts(self, sri): for connection in self._connections.values(): @@ -1439,9 +1738,10 @@ def _pushSRIAllConnectedPorts(self, sri): def _pushSRI(self, arraySrcInst, srcPortType, sri): if srcPortType != "_BULKIO__POA.dataXML": - _bulkio_data_helpers.ArraySource.pushSRI(arraySrcInst, sri) + #print "_pushSRI ", sri + bulkio_data_helpers.ArraySource.pushSRI(arraySrcInst, sri) else: - _bulkio_data_helpers.XmlArraySource.pushSRI(arraySrcInst, sri) + bulkio_data_helpers.XmlArraySource.pushSRI(arraySrcInst, sri) def 
waitAllPacketsSent(self, timeout=None): """ @@ -1470,9 +1770,16 @@ def stop(self): raise AssertionError, self.className + ":stop() failed to exit thread" class DataSink(_SinkBase): - def __init__(self): + """ + To use a different sink (for custom data processing), assign the new class to sinkClass + To use a different sink for XML data, assign the new class to sinkXmlClass + """ + def __init__(self, sinkClass=bulkio_data_helpers.ArraySink, sinkXmlClass=bulkio_data_helpers.XmlArraySink): + warnings.warn("DataSink is deprecated, use StreamSink instead", DeprecationWarning) fmts=['char','short','long','float','double','longlong','octet','ushort', 'ulong', 'ulonglong', 'file','xml' ] _SinkBase.__init__(self, formats=fmts) + self.sinkClass = sinkClass + self.sinkXmlClass = sinkXmlClass def getPort(self, portName): if _domainless._DEBUG == True: @@ -1482,9 +1789,9 @@ def getPort(self, portName): # Set up output array sink if str(portName) == "xmlIn": - self._sink = _bulkio_data_helpers.XmlArraySink(eval(self._sinkPortType)) + self._sink = self.sinkXmlClass(eval(self._sinkPortType)) else: - self._sink = _bulkio_data_helpers.ArraySink(eval(self._sinkPortType)) + self._sink = self.sinkClass(eval(self._sinkPortType)) if self._sink != None: self._sinkPortObject = self._sink.getPort() @@ -1516,7 +1823,7 @@ def getData(self, length=None, eos_block=False, tstamps=False): eos_block: setting to True creates a blocking call until eos is received tstamps: setting to True makes the return value a tuple, where the first element is the data set and the second element is a series of tuples - containing the element index number of and timestamp + containing the element index number and the timestamp for that index ''' isChar = self._sink.port_type == _BULKIO__POA.dataChar @@ -1525,6 +1832,10 @@ def getData(self, length=None, eos_block=False, tstamps=False): if eos_block: self._sink.waitEOS() (retval, timestamps) = self._sink.retrieveData(length=length) + if not retval: + if tstamps: + 
return ([],[]) + return [] if isChar: # Converts char values into their numeric equivalents def from_char(data): @@ -1538,7 +1849,7 @@ def from_char(data): else: retval = from_char(retval) if tstamps: - return (retval,timestamps) + return (retval, timestamps) else: return retval @@ -1598,8 +1909,12 @@ def setup(self,portIOR, dataType=None, componentName=None, usesPortName=None): pass class probeBULKIO(_SinkBase): - def __init__(self): + """ + To use a different sink (for custom data processing), assign the new class to sinkClass + """ + def __init__(self, sinkClass=bulkio_data_helpers.ProbeSink): _SinkBase.__init__(self) + self._sinkClass = sinkClass def getPort(self, portName): if _domainless._DEBUG == True: @@ -1608,7 +1923,7 @@ def getPort(self, portName): self._sinkPortType = self.getPortType(portName) # Set up output array sink - self._sink = _bulkio_data_helpers.ProbeSink(eval(self._sinkPortType)) + self._sink = self._sinkClass(eval(self._sinkPortType)) if self._sink != None: self._sinkPortObject = self._sink.getPort() @@ -1706,16 +2021,40 @@ class SRIKeyword(object): This is used in the Input series as the element in the SRIKeywords list name and value correspond to the id/value pair format is a string that describes the data type casting that needs to happen - - short, ushort - - float, double - - long, ulong - - longlong, ulonglong - - char - - octet + - short, ushort, complexShort, complexUShort + - float, double, complexFloat, complexDouble + - long, ulong, complexLong, complexULong + - longlong, ulonglong, complexLongLong, complexULongLong + - char, complexChar + - octet, complexOctet - string - - boolean + - boolean, complexBoolean + For sequences, encase the data type in brackets (e.g.: [short]) ''' def __init__(self, name, value, format): - self._name = name - self._value = value - self._format = format + # validate format is legal type to convert to + if format[0] == '[' and format[-1] == ']': + if format[1:-1] in _properties.getTypeMap().keys(): + 
self._name = name + self._value = value + self._format = format + else: + raise RuntimeError("Unsupported format type: " + format) + elif format in _properties.getTypeMap().keys(): + self._name = name + self._value = value + self._format = format + else: + raise RuntimeError("Unsupported format type: " + format) + +def createTimeStamp(): + return _bulkio_helpers.createCPUTimestamp() + +def createSRI(streamID='defaultStreamID', sampleRate=1.0, mode=0, blocking=True ): + xd=1.0 + if sampleRate and sampleRate > 0.0: + xd = 1.0/float(sampleRate) + + return _BULKIO.StreamSRI(hversion=1, xstart=0.0, xdelta=xd, + xunits=1, subsize=0, ystart=0.0, ydelta=0.0, + yunits=0, mode=mode, streamID=streamID, blocking=blocking, keywords=[]) diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/plots.py b/redhawk/src/base/framework/python/ossie/utils/sb/plots.py index 7534ab275..457d39ae2 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sb/plots.py +++ b/redhawk/src/base/framework/python/ossie/utils/sb/plots.py @@ -18,13 +18,15 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # -import logging import threading import time import struct +import warnings from omniORB.any import from_any +from ossie.utils.log4py import logging + def _deferred_imports(): # Importing PyQt4 and matplotlib may take a long time--more than a second # worst case--but neither one is needed at import time (or possibly ever). 
@@ -39,7 +41,8 @@ def _deferred_imports(): from matplotlib.backends.backend_agg import FigureCanvasAgg import numpy - from bulkio.bulkioInterfaces import BULKIO__POA + import bulkio + from bulkio.bulkioInterfaces import BULKIO # Rebind the function to do nothing in future calls def _deferred_imports(): @@ -53,106 +56,40 @@ def _deferred_imports(): else: raise RuntimeError("Missing required package for sandbox plots: '%s'" % e) -from ossie.utils.bulkio import bulkio_data_helpers, bulkio_helpers from ossie.utils.model import PortSupplier from ossie.utils.model.connect import PortEndpoint from ossie.utils.sb import domainless -from io_helpers import helperBase +from ossie.utils.sandbox.helper import ThreadedSandboxHelper, ThreadStatus __all__ = ('LinePlot', 'LinePSD', 'RasterPlot', 'RasterPSD', 'XYPlot') log = logging.getLogger(__name__) -class PlotSink(bulkio_data_helpers.ArraySink): - """ - Helper sink subclass that discards data when it is not started, so that - plots do not increase memory use unbounded if they are not running. - - Supports automatic conversion of complex data, and allows the user to - force complex behavior regardless of SRI mode. 
- """ - def __init__(self, porttype): - super(PlotSink,self).__init__(porttype) - self._started = False - self._forceComplex = False - self._sriChanged = False - - def start(self): - self._started = True - super(PlotSink,self).start() - - def stop(self): - super(PlotSink,self).stop() - self._started = False - self.data = [] - - def pushPacket(self, data, ts, EOS, stream_id): - if not self._started: - return - super(PlotSink,self).pushPacket(data, ts, EOS, stream_id) - - def pushSRI(self, H): - # Turn the keywords into a dictionary for easy lookup - H.keywords = dict((dt.id, from_any(dt.value)) for dt in H.keywords) - super(PlotSink,self).pushSRI(H) - self._sriChanged = True - - def sriChanged(self): - changed = self._sriChanged - self._sriChanged = False - return changed - - def retrieveData(self, length): - data, times = super(PlotSink,self).retrieveData(length) - if self.sri.mode or self._forceComplex: - data2, times2 = super(PlotSink,self).retrieveData(length) - data = bulkio_helpers.bulkioComplexToPythonComplexList(data + data2) - times.extend(times2) - return data, times - -class CharSink(PlotSink): - def __init__(self): - super(CharSink,self).__init__(BULKIO__POA.dataChar) - - def pushPacket(self, data, ts, EOS, stream_id): - data = struct.unpack('%db' % len(data), data) - super(CharSink,self).pushPacket(data, ts, EOS, stream_id) - - -class OctetSink(PlotSink): - def __init__(self): - super(OctetSink,self).__init__(BULKIO__POA.dataOctet) - - def pushPacket(self, data, ts, EOS, stream_id): - data = struct.unpack('%dB' % len(data), data) - super(OctetSink,self).pushPacket(data, ts, EOS, stream_id) - - -class PlotBase(helperBase, PortSupplier): +class PlotBase(ThreadedSandboxHelper): """ Abstract base class for all matplotlib-based plots. Manages the provides port dictionary, the matplotlib figure, and the plot update thread. 
""" def __init__(self): _deferred_imports() - helperBase.__init__(self) - PortSupplier.__init__(self) + ThreadedSandboxHelper.__init__(self) + + # Use 1/10th of a second for sleeping when there are no updates + self.setThreadDelay(0.1) # Create provides port dictionary. - self._providesPortDict = {} - for interface in ('Char', 'Short', 'Long', 'Float', 'Ulong', - 'Double', 'LongLong', 'Octet', 'UlongLong', - 'Ushort'): - name = '%sIn' % interface.lower() - self._providesPortDict[name] = { - 'Port Interface': 'IDL:BULKIO/data%s:1.0' % interface, - 'Port Name': name - } - - self.breakBlock = False - self._thread = None - self._started = domainless._getSandbox()._get_started() + self._addProvidesPort('charIn', 'IDL:BULKIO/dataChar:1.0', bulkio.InCharPort) + self._addProvidesPort('octetIn', 'IDL:BULKIO/dataOctet:1.0', bulkio.InOctetPort) + self._addProvidesPort('shortIn', 'IDL:BULKIO/dataShort:1.0', bulkio.InShortPort) + self._addProvidesPort('ushortIn', 'IDL:BULKIO/dataUshort:1.0', bulkio.InUShortPort) + self._addProvidesPort('longIn', 'IDL:BULKIO/dataLong:1.0', bulkio.InLongPort) + self._addProvidesPort('ulongIn', 'IDL:BULKIO/dataUlong:1.0', bulkio.InULongPort) + self._addProvidesPort('longlongIn', 'IDL:BULKIO/dataLongLong:1.0', bulkio.InLongLongPort) + self._addProvidesPort('ulonglongIn', 'IDL:BULKIO/dataUlongLong:1.0', bulkio.InULongLongPort) + self._addProvidesPort('floatIn', 'IDL:BULKIO/dataFloat:1.0', bulkio.InFloatPort) + self._addProvidesPort('doubleIn', 'IDL:BULKIO/dataDouble:1.0', bulkio.InDoublePort) + self._addProvidesPort('bitIn', 'IDL:BULKIO/dataBit:1.0', bulkio.InBitPort) # Create a new figure and axes. self._figure = pyplot.figure() @@ -162,6 +99,7 @@ def __init__(self): # Let subclasses set up the axes. 
self._configureAxes() + self._updateAxes() # Matplotlib 0.99 does not give an easy way to listen for window close # events; as a result, we have to get the underlying backend-specific @@ -183,19 +121,13 @@ def closeEvent(event): lock = threading.Lock() oldDraw = self._canvas.draw def draw(): - lock.acquire() - try: + with lock: oldDraw() - finally: - lock.release() self._canvas.draw = draw oldPaint = self._canvas.paintEvent def paintEvent(e): - lock.acquire() - try: + with lock: oldPaint(e) - finally: - lock.release() self._canvas.paintEvent = paintEvent self._renderLock = lock @@ -205,22 +137,6 @@ def _getWindow(self): return manager.window return None - def _createSink(self, interface): - # Create the type-specific sink servant. - interface = interface.split(':')[1] - namespace, interface = interface.split('/') - if interface == 'dataChar': - sink = CharSink() - elif interface == 'dataOctet': - sink = OctetSink() - else: - skeleton = getattr(BULKIO__POA, interface) - sink = PlotSink(skeleton) - if self._thread: - # Plot is started; start sink. - sink.start() - return sink - def _redraw(self): """ Redraw the plot and ask the widget to display the update. @@ -239,41 +155,17 @@ def _redraw(self): finally: self._renderLock.release() - def _run(self): + def _threadFunc(self): """ Main method for plot update thread; do not call directly. """ - while not self.breakBlock: - # Continually update the plot, unless there is nothing to do (i.e., - # not connected to a data source). - if self._update(): - self._redraw() - else: - time.sleep(0.1) - - def start(self): - """ - Start the plot, if it is not already started. The plot will continually - read input data and refresh the display until it is stopped. - """ - self.breakBlock = False - self._started = True - if self._thread: - return - self._thread = threading.Thread(target=self._run) - self._thread.setDaemon(True) - self._thread.start() - - def stop(self): - """ - Stop the plot. 
The plot will discontinue reading input data and updating - the display. - """ - self.breakBlock = True - self._started = False - if self._thread: - self._thread.join() - self._thread = None + # Update the plot, unless there is nothing to do (i.e., not connected + # to a data source). + if self._update(): + self._redraw() + return ThreadStatus.NORMAL + else: + return ThreadStatus.NOOP def close(self): """ @@ -293,6 +185,45 @@ def releaseObject(self): super(PlotBase,self).releaseObject() self.close() + def _getUnitLabel(self, units): + return { + BULKIO.UNITS_NONE: '', + BULKIO.UNITS_TIME: 'Time (sec)', + BULKIO.UNITS_DELAY: 'Delay (sec)', + BULKIO.UNITS_FREQUENCY: 'Frequency (Hz)', + BULKIO.UNITS_TIMECODE: 'Time code format', + BULKIO.UNITS_DISTANCE: 'Distance (m)', + BULKIO.UNITS_VELOCITY: 'Velocity (m/sec)', + BULKIO.UNITS_ACCELERATION: 'Acceleration (m/sec^2)', + BULKIO.UNITS_JERK: 'Jerk (m/sec^3)', + BULKIO.UNITS_DOPPLER: 'Doppler (Hz)', + BULKIO.UNITS_DOPPLERRATE: 'Doppler rate (Hz/sec)', + BULKIO.UNITS_ENERGY: 'Energy (J)', + BULKIO.UNITS_POWER: 'Power (W)', + BULKIO.UNITS_MASS: 'Mass (g)' + }.get(units, '') + + def _getXLabel(self): + return self._getUnitLabel(self._getXUnits()) + + def _getXUnits(self): + return BULKIO.UNITS_NONE + + def _getYLabel(self): + return self._getUnitLabel(self._getYUnits()) + + def _getYUnits(self): + return BULKIO.UNITS_NONE + + def _configureAxes(self): + pass + + def _updateAxes(self): + xlabel = self._getXLabel() + self._plot.xaxis.set_label_text(xlabel) + + ylabel = self._getYLabel() + self._plot.yaxis.set_label_text(ylabel) class PlotEndpoint(PortEndpoint): def __init__(self, plot, port, connectionId): @@ -335,7 +266,7 @@ def _disconnected(self, connectionId): try: for name, trace in self._lines.iteritems(): if trace['id'] == connectionId: - trace['sink'].stop() + trace['port'].stopPort() line = trace['line'] line.remove() del self._lines[name] @@ -344,65 +275,59 @@ def _disconnected(self, connectionId): 
self._linesLock.release() def getPort(self, name): - self._linesLock.acquire() - try: - sink = self._lines[name]['sink'] - return sink.getPort() - finally: - self._linesLock.release() + with self._linesLock: + return self._lines[name]['port']._this() def _lineOptions(self): return {} def _addTrace(self, port, name): - self._linesLock.acquire() - try: + with self._linesLock: traceName = '%s-%s' % (port['Port Name'], name) if traceName in self._lines: raise KeyError, "Trace '%s' already exists" % traceName - sink = self._createSink(port['Port Interface']) + port = self._createPort(port, traceName) options = self._lineOptions() line, = self._plot.plot([], [], label=name, scalex=False, scaley=False, **options) - trace = { 'sink': sink, + trace = { 'port': port, 'xdelta': None, 'line': line, 'id': name } self._lines[traceName] = trace - finally: - self._linesLock.release() - if self._started: - self.start() def _updateTrace(self, trace): - sink = trace['sink'] + port = trace['port'] line = trace['line'] # Read next frame. - data, timestamps = sink.retrieveData(length=self._frameSize) - if not data: - return - x_data, y_data = self._formatData(data, sink.sri) + stream = port.getCurrentStream(bulkio.const.NON_BLOCKING) + if not stream: + return False + block = stream.read(self._frameSize) + if not block: + return False + x_data, y_data = self._formatData(block, stream) line.set_data(x_data, y_data) # Check for new xdelta and update canvas if necessary. - if sink.sriChanged(): - trace['xdelta'] = sink.sri.xdelta - xmin, xmax = self._getXRange(sink.sri) + if block.sriChanged: + trace['xdelta'] = block.xdelta + xmin, xmax = self._getXRange(stream) self._plot.set_xlim(xmin, xmax) + return True def _update(self): # Get a copy of the current set of lines to update, then release the # lock to do the reads. This allows the read to be interrupted (e.g., # if a source is disconnected) without deadlock. 
- self._linesLock.acquire() - try: + with self._linesLock: traces = self._lines.values() - finally: - self._linesLock.release() - for trace in traces: - self._updateTrace(trace) + + redraw = [self._updateTrace(trace) for trace in traces] + if not any(redraw): + return False if self._ymin is None or self._ymax is None: self._plot.relim() @@ -419,29 +344,27 @@ def _update(self): return True - def start(self): + def _startHelper(self): log.debug("Starting line plot '%s'", self._instanceName) - super(LineBase,self).start() - # Start all associated sinks. + super(LineBase,self)._startHelper() + # Start all associated ports. self._linesLock.acquire() try: for trace in self._lines.itervalues(): - trace['sink'].start() + trace['port'].startPort() finally: self._linesLock.release() - start.__doc__ = PlotBase.start.__doc__ - def stop(self): + def _stopHelper(self): log.debug("Stopping line plot '%s'", self._instanceName) - # Stop all associated sinks. + # Stop all associated port. self._linesLock.acquire() try: for trace in self._lines.itervalues(): - trace['sink'].stop() + trace['port'].stopPort() finally: self._linesLock.release() - super(LineBase,self).stop() - stop.__doc__ = PlotBase.stop.__doc__ + super(LineBase,self)._stopHelper() # Plot settings def _setYView(self, ymin, ymax): @@ -452,42 +375,40 @@ def _setXView(self, xmin, xmax): self._plot.set_xlim(xmin, xmax) self._redraw() - def get_ymin(self): + def _check_yrange(self, ymin, ymax): + if ymin is None or ymax is None: + return + if ymax < ymin: + raise ValueError, 'Y-axis bounds cannot overlap (%d > %d)' % (ymin, ymax) + + @property + def ymin(self): """ The lower bound of the Y-axis. If set to None, the lower bound will be determined automatically per-frame based on the data. 
""" return self._ymin - def set_ymin(self, ymin): + @ymin.setter + def ymin(self, ymin): self._check_yrange(ymin, self._ymax) self._ymin = ymin self._setYView(self._ymin, self._ymax) - ymin = property(get_ymin, set_ymin) - del get_ymin, set_ymin - - def get_ymax(self): + @property + def ymax(self): """ The upper bound of the Y-axis. If set to None, the upper bound will be determined automatically per-frame based on the data. """ return self._ymax - def _check_yrange(self, ymin, ymax): - if ymin is None or ymax is None: - return - if ymax < ymin: - raise ValueError, 'Y-axis bounds cannot overlap (%d > %d)' % (ymin, ymax) - - def set_ymax(self, ymax): + @ymax.setter + def ymax(self, ymax): self._check_yrange(self._ymin, ymax) self._ymax = ymax self._setYView(self._ymin, self._ymax) - ymax = property(get_ymax, set_ymax) - del get_ymax, set_ymax - class LinePlot(LineBase): """ @@ -506,12 +427,16 @@ def __init__(self, frameSize=1024, ymin=None, ymax=None): """ super(LinePlot,self).__init__(frameSize, ymin, ymax) - def _configureAxes(self): - self._plot.xaxis.set_label_text('Time (s)') + def _getXUnits(self): + return BULKIO.UNITS_TIME - def _formatData(self, data, sri): - xdelta = sri.xdelta - times = numpy.arange(len(data)) * xdelta + def _formatData(self, block, stream): + # Bit blocks don't have a .complex attribute; default to False + if getattr(block, 'complex', False): + data = block.buffer[::2] + else: + data = block.buffer + times = numpy.arange(len(data)) * block.xdelta return times, data def _getXRange(self, sri): @@ -526,38 +451,40 @@ class PSDBase(object): def __init__(self, nfft): self._nfft = nfft - def _getFreqOffset(self, sri): - offset = sri.keywords.get('CHAN_RF', None) - if offset is None: - offset = sri.keywords.get('COL_RF', 0.0) - return offset + def _getFreqOffset(self, stream): + if stream.hasKeyword('CHAN_RF'): + return stream.getKeyword('CHAN_RF') + elif stream.hasKeyword('COL_RF'): + return stream.getKeyword('COL_RF') + else: + return 0.0 - 
def _psd(self, data, sri): - offset = self._getFreqOffset(sri) - y_data, x_data = mlab.psd(data, NFFT=self._nfft, Fs=self._getSampleRate(sri)) + def _psd(self, data, stream): + y_data, x_data = mlab.psd(data, NFFT=self._nfft, Fs=self._getSampleRate(stream)) + offset = self._getFreqOffset(stream) if offset: x_data += offset return y_data, x_data - def _getSampleRate(self, sri): - if sri.xdelta > 0.0: + def _getSampleRate(self, stream): + if stream.xdelta > 0.0: # Round sample rate to an integral value to account for the fact # that there is typically some rounding error in the xdelta value. - return round(1.0 / sri.xdelta) + return round(1.0 / stream.xdelta) else: # Bad SRI xdelta, use normalized value. return 1.0 - def _getFreqRange(self, sri): - nyquist = 0.5 * self._getSampleRate(sri) + def _getFreqRange(self, stream): + nyquist = 0.5 * self._getSampleRate(stream) upper = nyquist - if sri.mode: + if stream.complex: # Negative and positive frequencies. lower = -nyquist else: # Non-negative frequencies only. lower = 0 - offset = self._getFreqOffset(sri) + offset = self._getFreqOffset(stream) upper += offset lower += offset return lower, upper @@ -590,15 +517,23 @@ def __init__(self, nfft=1024, frameSize=None, ymin=None, ymax=None): PSDBase.__init__(self, nfft) def _configureAxes(self): - self._plot.xaxis.set_label_text('Frequency (Hz)') self._plot.set_yscale('log') - def _formatData(self, data, sri): + def _getXUnits(self): + return BULKIO.UNITS_FREQUENCY + + def _formatData(self, block, stream): + # Bit blocks don't have a .complex attribute; default to False + if getattr(block, 'complex', False): + data = block.cxbuffer + else: + data = block.buffer + # Calculate PSD of input data. 
- data, freqs = self._psd(data, sri) + data, freqs = self._psd(data, stream) # Return x data (frequencies) and y data (magnitudes) - return freqs, data.reshape(len(data)) + return freqs, data.reshape(-1) def _getXRange(self, sri): return self._getFreqRange(sri) @@ -610,129 +545,199 @@ class RasterBase(PlotBase): Y-axis, while magnitude (Z-axis) is displayed using a color map. The meaning of the X-axis varies depending on the data being displayed. """ - def __init__(self, frameSize=1024, imageWidth=1024, imageHeight=1024, zmin=-1, zmax=1, defaultValue=0.0): + def __init__(self, zmin, zmax, lines=None, readSize=None): PlotBase.__init__(self) - - self._frameSize = frameSize - self._sink = None - - # Raster state - self._imageData = numpy.ones((imageHeight, imageWidth)) * defaultValue - self._image = self._plot.imshow(self._imageData, extent=(0, 1, 1, 0)) self._zmin = zmin self._zmax = zmax + self._readSize = readSize + self._frameSize = None + self._frameOffset = 0 + self._bitMode = False + if lines is None: + self._lines = 512 + else: + self._lines = self._check_lines(lines) + + # Raster state: start with a 1x1 image with the default value + self._imageData = self._createImageData(1, 1) + self._buffer = self._imageData.reshape((1,)) + self._bufferOffset = 0 + self._image = self._plot.imshow(self._imageData, extent=(0, 1, 1, 0)) norm = self._getNorm(self._zmin, self._zmax) self._image.set_norm(norm) - self._row = 0 - self._xdelta = None + self._plot.set_aspect('auto') - # Maintain aspect ratio of image - self._aspect = float(imageHeight)/imageWidth - self._plot.set_aspect(self._aspect) + # Use a horizontal yellow line to indicate the redraw position + self._line = self._plot.axhline(y=0, color='y') - # Add a colorbar - self._colorbar = self._figure.colorbar(self._image) + self._stateLock = threading.Lock() - def _formatData(self, data, sri): + def _createImageData(self, width, height): + data = numpy.empty((height, width)) + data[:] = self._zmin return data - def 
getPort(self, name): - if self._sink: - if name != self._sinkName: - raise RuntimeError, "Raster plot only supports one port at a time (using '%s')" % self._sinkName + def _getImageSize(self): + return (self._frameSize, self._lines) + + def _portCreated(self, port, portDict): + super(RasterBase,self)._portCreated(port, portDict) + self._image.set_interpolation('nearest') + if port.name == 'bitIn': + self._bitMode = True + self._setBitMode() else: - port = self._providesPortDict[name] - self._sink = self._createSink(port['Port Interface']) - self._sinkName = name - return self._sink.getPort() + # Add a colorbar + self._colorbar = self._figure.colorbar(self._image) + + return port + + def _formatData(self, block, stream): + return block.buffer + + def _setBitMode(self): + pass + + def _getReadSize(self): + return self._readSize def _update(self): - if not self._sink: + if not self._port: return False # Read and format data. - data, timestamps = self._sink.retrieveData(length=self._frameSize) - if not data: - return - sri = self._sink.sri - data = self._formatData(data, sri) + stream = self._port.getCurrentStream(bulkio.const.NON_BLOCKING) + if not stream: + return False + block = stream.read(self._getReadSize()) + if not block: + return False + + with self._stateLock: + self._frameSize = self._getFrameSize(stream) + frame_offset = self._frameOffset + image_width, image_height = self._getImageSize() + + update_image = False + if self._imageData.shape != (image_height,image_width): + # TODO: Save references to old buffer? + self._imageData = self._createImageData(image_width, image_height) + self._buffer = self._imageData.reshape(-1) + self._bufferOffset = 0 + update_image = True # If xdelta changes, update the X and Y ranges. 
- if self._sink.sriChanged(): + redraw = True + if block.sriChanged or update_image: # Update the X and Y ranges - x_min, x_max = self._getXRange(sri) - y_min, y_max = self._getYRange(sri) + x_min, x_max = self._getXRange(stream, self._frameSize) + y_min, y_max = self._getYRange(stream, self._frameSize) self._image.set_extent((x_min, x_max, y_max, y_min)) - # Preserve the aspect ratio based on the image size. - x_range = x_max - x_min + # Trigger a redraw to update the axes + redraw = True + else: + x_min, x_max, y_min, y_max = self._image.get_extent() + + # Update the framebuffer + data = self._formatData(block, stream) + start = (self._bufferOffset + frame_offset) % len(self._buffer) + end = start + len(data) + self._writeBuffer(self._buffer, start, end, data) + self._bufferOffset = (self._bufferOffset + len(data)) % len(self._buffer) + + last_row = start // self._frameSize + new_row = self._bufferOffset // self._frameSize + if new_row != last_row: + # Move the draw position indicator y_range = y_max - y_min - self._plot.set_aspect(x_range/y_range*self._aspect) - self._xdelta = sri.xdelta + ypos = abs(y_min + ((new_row+1) * y_range)) / self._imageData.shape[0] + self._line.set_ydata([ypos, ypos]) - # Resample data from frame size to image size. - height, width = self._imageData.shape - indices_out = numpy.linspace(0, len(data)-1, width) - indices_in = numpy.arange(len(data)) - data = numpy.interp(indices_out, indices_in, data) + # Update the image; by only doing this when at least one row is + # completed, we can reduce the amount of time spent in redraws + update_image = True - # Store the new row and update the image data. 
- self._imageData[self._row] = data - self._image.set_array(self._imageData) + # Only redraw the image from the framebuffer if something has changed + if update_image: + self._image.set_data(self._imageData) + redraw = True - # Advance row pointer - self._row = (self._row + 1) % height + return redraw - return True + def _writeBuffer(self, dest, start, end, data): + if end <= len(dest): + dest[start:end] = data + else: + count = len(dest) - start + dest[start:] = data[:count] + remain = end - len(dest) + dest[:remain] = data[count:] - def start(self): - log.debug("Starting raster plot '%s'", self._instanceName) - if self._sink: - self._sink.start() - super(RasterBase,self).start() - start.__doc__ = PlotBase.start.__doc__ + # Plot settings + def _check_lines(self, lines): + lines = int(lines) + if lines <= 0: + raise ValueError('lines must be a positive integer') + return lines + + @property + def lines(self): + """ + Number of frames worth of history to display. Must be greater than 0. + """ + return self._lines - def stop(self): - log.debug("Stopping raster plot '%s'", self._instanceName) - if self._sink: - self._sink.stop() - super(RasterBase,self).stop() - stop.__doc__ = PlotBase.stop.__doc__ + @lines.setter + def lines(self, lines): + with self._stateLock: + self._lines = self._check_lines(lines) - # Plot settings def _check_zrange(self, zmin, zmax): if zmax < zmin: raise ValueError, 'Z-axis bounds cannot overlap (%d > %d)' % (zmin, zmax) - def get_zmin(self): + def _update_zrange(self, zmin, zmax): + self._zmin = zmin + self._zmax = zmax + norm = self._getNorm(self._zmin, self._zmax) + self._image.set_norm(norm) + + @property + def zmin(self): """ The lower bound of the Z-axis. 
""" return self._zmin - def set_zmin(self, zmin): + @zmin.setter + def zmin(self, zmin): self._check_zrange(zmin, self._zmax) - self._zmin = zmin - norm = self._getNorm(self._zmin, self._zmax) - self._image.set_norm(norm) - - zmin = property(get_zmin, set_zmin) - del get_zmin, set_zmin + self._update_zrange(zmin, self._zmax) - def get_zmax(self): + @property + def zmax(self): """ The upper bound of the Z-axis. """ return self._zmax - def set_zmax(self, zmax): + @zmax.setter + def zmax(self, zmax): self._check_zrange(self._zmin, zmax) - self._zmax = zmax - norm = self._getNorm(self._zmin, self._zmax) - self._image.set_norm(norm) + self._update_zrange(self._zmin, zmax) + + @property + def readSize(self): + return self._readSize - zmax = property(get_zmax, set_zmax) - del get_zmax, set_zmax + @readSize.setter + def readSize(self, size): + if size is not None: + size = int(size) + if size <= 0: + raise ValueError('read size must be a positive integer') + self._readSize = size class RasterPlot(RasterBase): @@ -741,50 +746,117 @@ class RasterPlot(RasterBase): while the X-axis represents intra-frame time. The Z-axis, mapped to a color range, represents the magnitude of each sample. """ - def __init__(self, frameSize=1024, imageWidth=1024, imageHeight=1024, zmin=-1.0, zmax=1.0): + def __init__(self, frameSize=None, imageWidth=None, imageHeight=None, zmin=-1.0, zmax=1.0, lines=None, readSize=None): """ Create a new raster plot. Arguments: frameSize - Number of elements to draw per line - imageWidth - Width of the backing image in pixels - imageHeight - Height of the backing image in pixels zmin, zmax - Z-axis (magnitude) constraints. Data is clamped to the range [zmin, zmax]. - - If the frame size is not equal to the image width, the input line will - be linearly resampled to the image width. + lines - Number of frames worth of history to display (default 512). 
+ readSize - Number of elements to read from the data stream at a + time (default is to use packet size) + + Deprecated arguments: + imageWidth - Width of the backing image in pixels (width is always + effective frame size) + imageHeight - Height of the backing image in pixels (use lines) """ - super(RasterPlot,self).__init__(frameSize, imageWidth, imageHeight, zmin, zmax, defaultValue=zmin) + if imageHeight is not None: + if lines is not None: + raise ValueError("'lines' and 'imageHeight' cannot be combined") + warnings.warn('imageHeight is deprecated, use lines', DeprecationWarning) + lines = imageHeight + super(RasterPlot,self).__init__(zmin, zmax, lines=lines, readSize=readSize) + self._frameSizeOverride = frameSize def _getNorm(self, zmin, zmax): - return matplotlib.colors.Normalize(zmin, zmax) + if self._bitMode: + return matplotlib.colors.NoNorm() + else: + return matplotlib.colors.Normalize(zmin, zmax) - def _configureAxes(self): - self._plot.xaxis.set_label_text('Time offset (s)') - self._plot.yaxis.set_label_text('Time (s)') + def _setBitMode(self): + self._update_zrange(0, 1) - def _getXRange(self, sri): + def _getXLabel(self): + return 'Time offset (s)' + + def _getYUnits(self): + return BULKIO.UNITS_TIME + + def _getXRange(self, sri, frameSize): # X range is time per line. - return 0, sri.xdelta * self._frameSize + return 0, sri.xdelta * frameSize - def _getYRange(self, sri): + def _getYRange(self, sri, frameSize): # First, get the X range. - x_min, x_max = self._getXRange(sri) + x_min, x_max = self._getXRange(sri, frameSize) x_range = x_max - x_min # Y range is the total time across all lines. - height, width = self._imageData.shape - return 0, height*x_range + return 0, frameSize*x_range - def _formatData(self, data, sri): + def _formatData(self, block, stream): # Image data cannot be complex; just use the real component. 
- if sri.mode: - return [x.real for x in data] + if self._bitMode: + return block.buffer.unpack() + elif block.complex: + return block.buffer[::2] + else: + return block.buffer + + def _getFrameSize(self, stream): + if self._frameSizeOverride is not None: + # Explicit frame size override + return self._frameSizeOverride + elif stream.subsize > 0: + # Stream is 2-dimensional, use frame size + return stream.subsize else: - return data + # Auto frame size, default to 1K + return 1024 + def _check_zrange(self, zmin, zmax): + if zmin != 0: + raise ValueError('Z-axis minimum is always 0 with bit data') + elif zmax != 1: + raise ValueError('Z-axis minimum is always 1 with bit data') + super(RasterPlot,self)._check_zrange(zmin, zmax) + + @property + def frameSize(self): + """ + The number of elements in a single frame (in other words, a single line + of the raster). If set to None, the frame size is automatically + determined from the data stream's subsize, defaulting to 1024 if + subsize is 0. + """ + return self._frameSizeOverride + + @frameSize.setter + def frameSize(self, frameSize): + if frameSize is not None: + frameSize = int(frameSize) + if frameSize <= 0: + raise ValueError('frame size must be a positive value') + with self._stateLock: + self._frameSizeOverride = frameSize + + @property + def frameOffset(self): + """ + Offset, in number of real samples, to adjust the frame start. + """ + return self._frameOffset + + @frameOffset.setter + def frameOffset(self, offset): + frameOffset = int(offset) + with self._stateLock: + self._frameOffset = offset class RasterPSD(RasterBase, PSDBase): """ @@ -798,7 +870,7 @@ class RasterPSD(RasterBase, PSDBase): The Z-axis (magnitude) is displayed using a logarithmic scale. 
""" - def __init__(self, nfft=1024, frameSize=None, imageWidth=1024, imageHeight=1024, zmin=1.0e-16, zmax=1.0): + def __init__(self, nfft=1024, frameSize=None, imageWidth=None, imageHeight=None, zmin=1.0e-16, zmax=1.0, lines=None): """ Create a new raster PSD plot. @@ -809,38 +881,77 @@ def __init__(self, nfft=1024, frameSize=None, imageWidth=1024, imageHeight=1024, defaults to the FFT size. Must be less than or equal to FFT size. imageWidth - Width of the backing image in pixels - imageHeight - Height of the backing image in pixels zmin, zmax - Z-axis (magnitude) constraints. Data is clamped to the range [zmin, zmax]. + lines - Number of frames worth of history to display (default 512). + - If the size of the PSD output (nfft/2+1) is not equal to the image - width, the PSD output will be linearly resampled to the image width. + Deprecated arguments: + imageWidth - Width of the backing image in pixels (width is always + determined from nfft) + imageHeight - Height of the backing image in pixels (use lines) """ - if not frameSize: - frameSize = nfft - RasterBase.__init__(self, frameSize, imageWidth, imageHeight, zmin, zmax, defaultValue=zmin) + if imageWidth is not None: + warnings.warn('imageWidth is deprecated', DeprecationWarning) + if imageHeight is not None: + if lines is not None: + raise ValueError("'lines' and 'imageHeight' cannot be combined") + warnings.warn('imageHeight is deprecated, use lines', DeprecationWarning) + lines = imageHeight + RasterBase.__init__(self, zmin, zmax, lines=lines, readSize=frameSize) PSDBase.__init__(self, nfft) def _getNorm(self, zmin, zmax): return matplotlib.colors.LogNorm(zmin, zmax) - def _configureAxes(self): - self._plot.xaxis.set_label_text('Frequency (Hz)') - self._plot.yaxis.set_label_text('Time (s)') + def _getXUnits(self): + return BULKIO.UNITS_FREQUENCY - def _getXRange(self, sri): + def _getYUnits(self): + return BULKIO.UNITS_TIME + + def _getXRange(self, sri, frameSize): return self._getFreqRange(sri) - def 
_getYRange(self, sri): + def _getYRange(self, stream, frameSize): # Y range is the total time across all lines. - dtime = sri.xdelta * self._nfft - height, width = self._imageData.shape - return 0, height*dtime + dtime = stream.xdelta * self._nfft + return 0, frameSize *dtime + + def _getFrameSize(self, stream): + if stream.complex: + return self._nfft + else: + return (self._nfft//2) + 1 + + def _getReadSize(self): + if self._readSize is not None: + return self._readSize + return self._nfft + + def _formatData(self, block, stream): + if self._bitMode: + data = block.buffer.unpack() + elif block.complex: + data = block.cxbuffer + else: + data = block.buffer - def _formatData(self, data, sri): # Calculate PSD of input data. - data, freqs = self._psd(data, sri) - return data.reshape(len(data)) + data, freqs = self._psd(data, stream) + return data.reshape(-1) + + @property + def nfft(self): + return self._nfft + + @nfft.setter + def nfft(self, nfft): + nfft = int(nfft) + if nfft <= 0: + raise ValueError('FFT size must be a positive value') + with self._stateLock: + self._nfft = nfft class XYPlot(LineBase): @@ -869,13 +980,18 @@ def __init__(self, frameSize=1024, xmin=-1.0, xmax=1.0, ymin=-1.0, ymax=1.0): self._plot.axhline(0, color='black') self._plot.axvline(0, color='black') - def _configureAxes(self): - self._plot.xaxis.set_label_text('Real') - self._plot.yaxis.set_label_text('Imaginary') + def _getXLabel(self): + return 'Real' + + def _getYLabel(self): + return 'Imaginary' def _formatData(self, data, sri): # Split real and imaginary components into X and Y coodinates. 
- return [x.real for x in data], [y.imag for y in data] + if sri.complex: + return data.buffer[::2], data.buffer[1::2] + else: + return data.buffer, [0.0]*len(data.buffer) def _getXRange(self, sri): return self._xmin, self._xmax @@ -889,36 +1005,29 @@ def _check_xrange(self, xmin, xmax): if xmax < xmin: raise ValueError, 'X-axis bounds cannot overlap (%f > %f)' % (xmin, xmax) - def _createSink(self, *args, **kwargs): - sink = super(XYPlot, self)._createSink(*args, **kwargs) - sink._forceComplex = True - return sink - # Plot properties - def get_xmin(self): + @property + def xmin(self): """ The lower bound of the X-axis. """ return self._xmin - def set_xmin(self, xmin): + @xmin.setter + def xmin(self, xmin): self._check_yrange(xmin, self._xmax) self._xmin = xmin self._setXView(self._xmin, self._xmax) - xmin = property(get_xmin, set_xmin) - del get_xmin, set_xmin - - def get_xmax(self): + @property + def xmax(self): """ The upper bound of the X-axis. """ return self._xmax - def set_xmax(self, xmax): + @xmax.setter + def xmax(self, xmax): self._check_xrange(self._xmin, xmax) self._xmax = xmax self._setXView(self._xmin, self._xmax) - - xmax = property(get_xmax, set_xmax) - del get_xmax, set_xmax diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/prop_change_helpers.py b/redhawk/src/base/framework/python/ossie/utils/sb/prop_change_helpers.py new file mode 100644 index 000000000..8fe8ac69b --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sb/prop_change_helpers.py @@ -0,0 +1,109 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +from ossie.cf import CF__POA +from ossie import properties + +__all__ = ('PropertyChangeListener',) + +class PropertyChangeListener(CF__POA.PropertyChangeListener): + ''' + Container to process property change events + + Example usage: + def my_property_change_callback(event_id, registration_id, resource_id, properties, timestamp): + print event_id, registration_id, resource_id, properties, timestamp + + myl = sb.PropertyChangeListener(defaultCallback=my_property_change_callback) + regid=comp.registerPropertyListener( myl, ['prop1'], float(0.5)) + + Note: + - If no callbacks are provided, the contents of the event are directed to stdout + - Multiple properties associated with the same callback are triggered together when receive in the same event + ''' + def __init__(self, defaultCallback = None, changeCallbacks = {}): + ''' + defaultCallback is triggered when no callback matches + changeCallbacks is a dictionary of callbacks: + key: id (string) to trigger the callback + value: callback function + ''' + self._changeCallbacks = changeCallbacks + self._defaultCallback = defaultCallback + + def __del__(self): + pass + + def updateCallback(self, _prop_id, _callback=None): + ''' + _prop_id is the property id (string) + _callback is a function of the form: + fn(event_id, registration_id, resource_id, properties, timestamp) + ''' + if type(_prop_id) != str: + raise Exception('Invalid property id. 
It must be a strings') + if _callback == None: + if not self._changeCallbacks.has_key(_prop_id): + raise Exception('Invalid key:', _prop_id) + self._changeCallbacks.pop(_prop_id) + return + self._changeCallbacks.update({_prop_id:_callback}) + + def getCallback(self, _prop_id=''): + ''' + Set _prop_id for the unique key. Use an empty list to return all callbacks + ''' + if _prop_id == []: + return self._changeCallbacks + if self._changeCallbacks.has_key(_prop_id): + return self._changeCallbacks[_prop_id] + raise Exception('Invalid property id. No callback registered under that id') + + def propertyChange(self, _propChEv) : + if type(self._changeCallbacks) != dict: + print 'Invalid change callbacks (must be dictionary with property id as a key and a callback function as a value). Printing received event', _propChEv + + _tmp_props = _propChEv.properties + _triggers = {} + + for _prop_key in self._changeCallbacks: + for _prop_idx in range(len(_tmp_props)): + if _tmp_props[_prop_idx].id == _prop_key: + if not _triggers.has_key(self._changeCallbacks[_prop_key]): + _triggers[self._changeCallbacks[_prop_key]] = {} + _triggers[self._changeCallbacks[_prop_key]].update(properties.prop_to_dict(_tmp_props[_prop_idx])) + _tmp_props.pop(_prop_idx) + break + + if len(_triggers) != 0: + for _trigger in _triggers: + _trigger(_propChEv.evt_id, _propChEv.reg_id, _propChEv.resource_id, _triggers[_trigger], _propChEv.timestamp) + + if len(_tmp_props) != 0: + if self._defaultCallback: + self._defaultCallback(_propChEv.evt_id, _propChEv.reg_id, _propChEv.resource_id, properties.props_to_dict(_tmp_props), _propChEv.timestamp) + return + print 'Property Change Event:' + print ' event id:',_propChEv.evt_id + print ' registration id:',_propChEv.reg_id + print ' resource id:', _propChEv.resource_id + print ' properties:', properties.props_to_dict(_tmp_props) + print ' timestamp:', _propChEv.timestamp + diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/rh-inspect 
b/redhawk/src/base/framework/python/ossie/utils/sb/rh-inspect index afbe458cf..33f90dae2 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sb/rh-inspect +++ b/redhawk/src/base/framework/python/ossie/utils/sb/rh-inspect @@ -39,7 +39,7 @@ if '__main__': # Instantiate component and build API, but don't actually launch component process comp = Component(componentDescriptor=arg,autoKick=False) print "rh-inspect " + str(arg) + "*******************************" - comp.api() + comp.api(destfile=sys.stdout) print "\n" except Exception, e: diff --git a/redhawk/src/base/framework/python/ossie/utils/sb/rh-launch b/redhawk/src/base/framework/python/ossie/utils/sb/rh-launch index 89d327662..3ec9c0b86 100644 --- a/redhawk/src/base/framework/python/ossie/utils/sb/rh-launch +++ b/redhawk/src/base/framework/python/ossie/utils/sb/rh-launch @@ -69,7 +69,7 @@ if '__main__': components[componentNumber]["InputFile"] = {} components[componentNumber]["InputFile"]["Filename"] = inputFilename components[componentNumber]["InputFile"]["Object"] = InputFile(str(inputFilename), "short") - components[componentNumber]["InputFile"]["Object"].api() + components[componentNumber]["InputFile"]["Object"].api(destfile=sys.stdout) componentNumber = componentNumber + 1 needInputFileDataType = False elif arg.find("!") != -1: diff --git a/redhawk/src/base/framework/python/ossie/utils/sdds/__init__.py b/redhawk/src/base/framework/python/ossie/utils/sdds/__init__.py new file mode 100644 index 000000000..7dd6a08ab --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sdds/__init__.py @@ -0,0 +1,23 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. 
+# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +from sdds_time import * +from sdds_pkt import * +from sdds_analyzer import * diff --git a/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_analyzer.py b/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_analyzer.py new file mode 100644 index 000000000..ec663f8a4 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_analyzer.py @@ -0,0 +1,278 @@ + +from binascii import hexlify as _hexify +from StringIO import StringIO +import ossie.utils.sb.helpers as _helpers +import sdds_pkt as _sdds_pkt +import traceback + +__all__ = [ 'SDDSAnalyzer' ] + +class SDDSAnalyzer(object): + """ + The SDDSAnalyzer class can process a block of raw bytes as SDDS packets and perform the + following actions: + + trackChanges: + + Displays the following contents of the first packet's SDDS header fields. 
+ seq - sequence number of packet + fmt - data mode + cplx - complex flags + bps - bits per sample + freq - collected signal frequency + clk - sample rate + time valid - value of ttv field + time slip - amount of time slip between valid time stamps + + Then tracks changes in these fields and reports the following: + - No change in data detected, sequence number passed, + or expected time stamp passed (i.e., last valid time stamp + nsamples*1.0/sample rate) + *** Change in data field, out of sequence packet, time stamp invalid, + or expected time stamp did not pass + + Valid time stamp value provided in packet header + + Parameters: + pkt_start = first packet to dump + pkt_end = last packet to dump (None == end of list) + repeat_header = displays column header every Nth packet displayed + use_pager = display data using pager to limit number of packets that are displayed + + dumpRawPackets: + + Dump the contents of the capture with the results managed by a pager. + + Parameters: + pkt_start = first packet to dump + pkt_end = last packet to dump (None == end of list) + row_width = 80 the number of bytes to display per row + bytes_per_group = the number of bytes to group when hexify-ing + pkt_len = the number of bytes in a packet, None defaults to 1080 or length when getData method was called + use_pager = display data using pager to limit number of packets that are displayed + + dumpPackets: + + Dump packet fields and their data values, contents managed by a pager + + Parameters: + pkt_start = first packet to dump + pkt_end = last packet to dump (None == end of list) + payload_start = starting payload sample to display + payload_end = ending payload sample to display + raw_payload = dump payload data as raw bytes + header_only = only display header information for each packet + use_pager = display data using pager to limit number of packets that are displayed + + getPacketsIterator: + + Returns a Python iterator that will traverse the set of packets managed by the 
SDDSAnalyzer object + + Parameters: + pkt_start = first packet to dump + pkt_end = last packet to dump (None == end of list) + + getPackets - returns a list of sdds_packet objects + + Returns a list of sdds_packet objects + + Parameters: + pkt_start = first packet to dump + pkt_end = last packet to dump (None == end of list) + + __iter__ - The iterable returns a tuple (pkt number,sdds_packet) + __len__ - returns the number of packets + + """ + _VALID_VAL_='+' + _TRACK_OK_='-' + _TRACK_ERROR_='***' + + def __init__(self, raw_data, npkts, pkt_len=1080, total_bytes=None): + self.npkts_ = npkts + self.raw_data_ = raw_data + self.pkt_len_ = pkt_len + dsize=len(raw_data) + expected_size = pkt_len * npkts + + if expected_size > dsize : + raise RuntimeError("Invalid parameters, pkt_len*npkts is greater than raw_data size ") + + # adjust total bytes if necessary + if total_bytes: + if total_bytes > dsize: + total_bytes=dsize + else: + total_bytes = dsize + + self.total_bytes_=total_bytes + + + def dumpRawPackets(self, pkt_start=0, pkt_end=None, row_width=80, bytes_per_group=2, pkt_len=None, use_pager=True ): + if pkt_end == None: + pkt_end = self.npkts_ + if pkt_len: + pkt_end = self.npkts_ + else: + pkt_end = pkt_end + 1 + + if pkt_len == None: pkt_len = self.pkt_len_ + genf=self._gen_hex_dump( self.raw_data_, pkt_start, pkt_len, row_width, bytes_per_group ) + res = StringIO() + for i, line in enumerate(genf,pkt_start): + if i < pkt_end: + print >>res, 'pkt:'+str(i) + ' ' + line + else: + break + + if use_pager: + _helpers.Pager( res.getvalue() ) + else: + print res.getvalue() + + def dumpPackets(self, pkt_start=0, pkt_end=None, payload_start=0, payload_end=40, raw_payload=False, header_only=False, use_pager=True ): + genf=self._gen_packet( self.raw_data_, pkt_start ) + if pkt_end == None: + pkt_end = self.npkts_ + else: + pkt_end = pkt_end + 1 + res = StringIO() + for i, pkt in enumerate(genf,pkt_start): + if i < pkt_end: + print >>res, 'Packet: ', str(i) + print >>res, 
pkt.header_and_payload(payload_start, payload_end, header_only=header_only, raw=raw_payload ) + else: + break + + if use_pager: + _helpers.Pager( res.getvalue() ) + else: + print res.getvalue() + + def _cmp_pkt( self, res, last_pkt, next_pkt, last_tstamp, last_nsamps ): + if last_pkt: + last_pkt.inc() + if last_pkt.get_fsn() != next_pkt.get_fsn() : + res['fsn']=self._TRACK_ERROR_ + + if last_pkt and last_pkt.get_complex() != next_pkt.get_complex() : + res['cplx']=self._TRACK_ERROR_ + + if last_pkt and last_pkt.get_dmode() != next_pkt.get_dmode() : + res['dmode']=self._TRACK_ERROR_ + + if last_pkt and last_pkt.get_bps() != next_pkt.get_bps() : + res['bps']=self._TRACK_ERROR_ + + if last_pkt and last_pkt.get_freq() != next_pkt.get_freq() : + res['freq']=self._TRACK_ERROR_ + + if last_pkt and last_pkt.get_rate() != next_pkt.get_rate() : + res['rate']=self._TRACK_ERROR_ + + if last_pkt: + if last_pkt.get_ttv(): res['ttv']=self._VALID_VAL_ + if last_pkt.get_ttv() != next_pkt.get_ttv(): + res['ttv']=self._TRACK_ERROR_ + + if last_tstamp: + # check that we have a good timestamp to check against + t2 = next_pkt.get_SDDSTime() + if next_pkt.get_ttv(): + if res['freq'] == False: + rate=pkt.get_freq(); + offset = 1.0/rate*last_nsamps + t_ck = sdds_time.add( last_tstamp, offset ) + if sdds_time.compare( t2, t_chk ) == False: + res['timeslip']=self._TRACK_ERROR_ + else: + res['timeslip']=self._TRACK_ERROR_ + + def _first_pkt( self, res, next_pkt ): + res['fsn'] = str(next_pkt.get_fsn()) + res['dmode']=str(next_pkt.get_dmode()) + res['cplx']=str(next_pkt.get_complex()) + res['bps']=str(next_pkt.get_bps()) + res['freq']=str(next_pkt.get_freq()) + res['rate']=str(next_pkt.get_rate()) + res['ttv']=str(next_pkt.get_ttv()) + t2 = next_pkt.get_SDDSTime() + res['timeslip']=str(t2) + + def trackChanges(self, pkt_start=0, pkt_end=None, repeat_header=20, use_pager=True ): + genf=self._gen_packet( self.raw_data_, pkt_start ) + if pkt_end == None: pkt_end = self.npkts_ + res = StringIO() + 
keys = [ 'pkt', 'fsn', 'dmode', 'cplx', 'bps', 'freq', 'rate', 'ttv', 'timeslip' ] + hdrs = [ 'PKT', 'SEQ', 'FMT', 'CPLX', 'BPS', ' FREQ ', ' CLK ', 'TIME VALID', 'TIME SLIP' ] + hdr_fmt='{pkt:^5s} {fsn:^5s} {dmode:^5s} {cplx:^4s} {bps:^5s} {freq:^12s} {rate:^12s} {ttv:^10s} {timeslip:^9s}' + line_fmt='{pkt:^5d} {fsn:^5s} {dmode:^5s} {cplx:^4s} {bps:^5s} {freq:^12s} {rate:^12s} {ttv:^10s} {timeslip:^9s}' + last_pkt = None + last_tstamp=None + last_nsamps=0 + for i, pkt in enumerate(genf,pkt_start): + if ( i % repeat_header ) == 0: + print >>res, hdr_fmt.format( **dict(zip(keys,hdrs))) + + cmp_res = dict.fromkeys( keys, self._TRACK_OK_ ) + if i == pkt_start : + self._first_pkt( cmp_res, pkt ) + else: + self._cmp_pkt( cmp_res, last_pkt, pkt, last_tstamp, last_nsamps ) + cmp_res['pkt']=i + + dline=line_fmt.format( **cmp_res ) + print >>res, dline + if pkt.get_ttv() : + last_tstamp=pkt.get_SDDSTime() + last_nsamp=0 + + # keep running count of samples + last_nsamps += pkt.get_samples_for_bps() + last_pkt=pkt + + if use_pager: + _helpers.Pager( res.getvalue() ) + else: + print res.getvalue() + + def getPacketIterator(self, pkt_start=0, pkt_end=None ): + genf=self._gen_packet( self.raw_data_, pkt_start ) + if pkt_end == None: + pkt_end = self.npkts_ + else: + pkt_end +=1 + for i, pkt in enumerate(genf,pkt_start): + if i < pkt_end: + yield i,pkt + else: + StopIteration + + def __iter__(self): + return self.getPacketIterator() + + def __len__(self): + return self.npkts_ + + def getPackets(self, pkt_start=0, pkt_end=None ): + res=[] + for i, pkt in self.getPacketIterator( pkt_start, pkt_end ): + res.append(pkt) + return res + + def _gen_hex_dump( self, data, pkt_start, pkt_len, max_row_width=80, bytes_per_group=2 ): + # break on pkt length + bstart = pkt_start*self.pkt_len_ + pkt_iter=xrange( bstart, len(data), pkt_len) + for x in pkt_iter: + raw_pkt = data[x:x+max_row_width] + d_iter = xrange(0, len(raw_pkt), bytes_per_group) + yield ' '.join( [ 
_hexify(raw_pkt[i:i+bytes_per_group]) for i in d_iter ] ) + + def _gen_packet( self, data, pkt_start ): + # break on pkt length + bstart = pkt_start*self.pkt_len_ + pkt_iter=xrange( bstart, len(data), self.pkt_len_ ) + for x in pkt_iter: + raw_pkt = data[x:x+self.pkt_len_] + pkt=_sdds_pkt.sdds_packet(raw_pkt) + yield pkt + diff --git a/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_pkt.py b/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_pkt.py new file mode 100644 index 000000000..567f6e19d --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_pkt.py @@ -0,0 +1,1336 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK rh.SourceSDDS. +# +# REDHAWK rh.SourceSDDS is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK rh.SourceSDDS is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +import ctypes +import time +import datetime +""" +Class definitions that represent SDDS packet structure. The classes make use of python +ctypes to pack and unpack bit fields into class's members. The fields of arranged in +a set format to correctly unpack from data packets that are in BigEndian format. For +extact field definitions and values consult the SDDS packet specification. 
+ +To unpack a raw data buffer from a socket to an SDDS packet you +would perform the following: + + pkt = sdds_packet(data_buffer) + +The pkt object provides extraction of sdds header as well as +payload overlays for accessing the each of the following formats: + + raw: 1024 8 bit samples + sn: 2048 samples representing the two 4 bit samples packed into a byte + sb: 1024 8 bit samples + cb: 512 samples of interleaved I & Q data + si: 512 samples of 16 bit data + ci: 256 samples of 16 bit data I & Q data + sf: 256 samples of (32 bit) scalar float data + +In addition to the overlays, the pkt.get_data() method returns a copy of the data samples +in the correct format. + + +Format Identifier + Complex field + def get_complex(self) + def set_complex(self, isComplex=False ) + + SpectralSense field + def get_spectralsense(self, ison=False ): + def set_spectralsense(self, ison=False ): + + Very Wideband field + def get_vw(self): + def set_vw(self, isVeryWide=False ): + + Bits per sample + def get_bps(self): + def set_bps(self, bps ): + + Data Mode + def get_dmode(self): + def set_dmode(self,dm, cplx=False, calc_bps=True, bps=None): + + def get_bps_for_mode(self, dmode ): + def get_samples_for_bps(self, bps=None ): + + Frame Sequence number + def get_fsn(self): + def set_fsn(self, v ): + + Time Tag fields + def get_msptr( self ): + def set_msptr( self, val ): + + def get_msdelta( self ): + def set_msdelta( self, val ): + + def get_msv(self): + def set_msv(self, valid=True): + + def get_ttv(self): + def set_ttv(self, valid=True): + + def get_sscv(self): + def set_sscv(self, valid=True): + + def set_time(self, ps250, pf250 ): + def get_SDDSTime(self): + def set_SDDSTime(self, sdds_time, ): + + Synchronous Sample Clock + def get_freq(self): + def set_freq(self, freq): + + def get_rate(self): + def set_rate(self, freq): + + def get_dfdt(self): + def set_dfdt(self, freq): + + Payload Processing + def get_format(self): + def set_format(self, fmt): + def get_data(self, 
start=None, end=None ): + +""" + +__all__ = [ + 'format_identifier', + 'frame_sequence', + 'msptr_data', + 'ttag_info_struct', + 'ttag_info_union', + 'ttag_values', + 'ttag_info', + 'ssc_info_struct', + 'ssd_data', + 'aad_data', + 'sdds_header', + 'sdds_sb_payload', + 'sdds_cb_payload', + 'sdds_si_payload', + 'sdds_ci_payload', + 'sdds_sn_sample', + 'sdds_sn_payload', + 'sdds_sf_payload', + 'sdds_payload', + 'sdds_packet', +] + +def BitsToNumber(sbits, reverse=False ): + tbits=sbits[:] + if reverse: + tbits.reverse() + f = [x << n for n, x in enumerate(tbits)] + return reduce(lambda x, y: x + y, f) + +class format_identifier(ctypes.Structure): + _pack_ = 1 + _fields_ = [ ('dm',ctypes.c_uint8,3), + ('ss',ctypes.c_uint8,1), + ('of',ctypes.c_uint8,1), + ('pp',ctypes.c_uint8,1), + ('sos',ctypes.c_uint8,1), + ('sf',ctypes.c_uint8,1), + ('bps',ctypes.c_uint8,5), + ('vw',ctypes.c_uint8,1), + ('snp',ctypes.c_uint8,1), + ('cx',ctypes.c_uint8,1) ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(format_identifier,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + self.sf = 1 + self.sos = 1 + self.pp = 0 + self.of = 0 + self.ss = 0 + self.bps = 8 + self.vw = 0 + self.snp = 0 + self.cx = 0 + self.dm=1 + + def __str__(self): + return ' '.join( [ x[0]+':'+str(getattr(self,x[0])) for x in self._fields_ ]) + + + def set_dmode(self, dm, reverse=False ): + if type(dm) == list: + self.dm = BitsToNumber(dm) + else: + self.dm = dm + + def set_bps(self, bps, reverse=False ): + _bps=bps + if type(bps) == list: + _bps = BitsToNumber(bps, reverse) + + if _bps == 32 : _bps = 31 + self.bps= _bps + + def get_bps(self): + _bps=self.bps + if _bps == 31 : _bps = 32 + return _bps + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + 
class frame_sequence(ctypes.BigEndianStructure):
    """16-bit SDDS frame sequence counter (big-endian on the wire)."""
    MAX_SEQ_NUMBER=65536
    _pack_ = 1
    _fields_ = [ ('seq',ctypes.c_ushort,16) ]

    def __new__(cls, *args, **kwargs):
        # Support construction from a raw buffer: frame_sequence(buf) or buf=...
        if len(args) > 0 or 'buf' in kwargs:
            if len(args) > 0:
                return cls.from_buffer_copy(args[0])
            return cls.from_buffer_copy(kwargs['buf'])
        return super(frame_sequence, cls).__new__(cls, *args, **kwargs)

    def __init__(self, data=None, **kwargs):
        # BUG FIX: when the instance was unpacked from a buffer in __new__,
        # __init__ used to reset the parsed counter to zero (and the buf=
        # keyword path raised TypeError); keep the parsed value instead.
        if data is not None or 'buf' in kwargs:
            return
        self.seq = 0

    def inc(self):
        """Advance the counter, wrapping at 65536."""
        self.seq = (self.seq + 1) % frame_sequence.MAX_SEQ_NUMBER

    def __str__(self):
        return ''.join( [ str(getattr(self,x[0])) for x in self._fields_ ])

    def get(self):
        return self.seq

    def set(self, v ):
        self.seq = v

    def asBuffer(self):
        return buffer(self)[:]

    def asString(self):
        return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self))

class msptr_data (ctypes.BigEndianStructure):
    """Mid-sync pointer and delta words of the SDDS time tag field."""
    _pack_ = 1
    _fields_ = [ ('msptr',ctypes.c_ushort,16),
                 ('msdelta',ctypes.c_ushort,16) ]

    def __new__(cls, *args, **kwargs):
        if len(args) > 0 or 'buf' in kwargs:
            if len(args) > 0:
                return cls.from_buffer_copy(args[0])
            return cls.from_buffer_copy(kwargs['buf'])
        return super(msptr_data, cls).__new__(cls, *args, **kwargs)

    def __init__(self, data=None, **kwargs):
        # Keep parsed values when constructed from a buffer (see frame_sequence).
        if data is not None or 'buf' in kwargs:
            return
        self.msptr = 0
        self.msdelta = 0

    def __str__(self):
        return '/'.join( [ x[0] for x in self._fields_ ]) + ': '+ '/'.join( [ str(getattr(self,x[0])) for x in self._fields_ ])

    def set_msptr( self, val ):
        # msptr is masked to 11 bits — assumed per the SDDS spec; TODO confirm.
        self.msptr = val & 0x07FF

    def get_msptr( self ):
        return self.msptr

    def set_msdelta( self, val ):
        self.msdelta = val

    def get_msdelta( self ):
        return self.msdelta

    def asBuffer(self):
        return buffer(self)[:]

    def asString(self):
        return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self))

class ttag_info_struct(ctypes.Structure):
    """Validity/parity flag bits of the SDDS time tag info word."""
    _pack_ = 1
    _fields_ = [ ('pad1',ctypes.c_uint8,1),
                 ('pad2',ctypes.c_uint8,1),
                 ('pad3',ctypes.c_uint8,1),
                 ('peo',ctypes.c_uint8,1),
                 ('pi',ctypes.c_uint8,1),
                 ('sscv',ctypes.c_uint8,1),
                 ('ttv',ctypes.c_uint8,1),
                 ('msv',ctypes.c_uint8,1),
                 ('pad4',ctypes.c_uint8,8),
                 ('pad5',ctypes.c_uint16,16) ]

    def __new__(cls, *args, **kwargs):
        if len(args) > 0 or 'buf' in kwargs:
            if len(args) > 0:
                return cls.from_buffer_copy(args[0])
            return cls.from_buffer_copy(kwargs['buf'])
        return super(ttag_info_struct, cls).__new__(cls, *args, **kwargs)

    def __init__(self, data=None, **kwargs):
        # Keep parsed flags when constructed from a buffer (see frame_sequence).
        if data is not None or 'buf' in kwargs:
            return
        self.ttv = 0
        self.sscv = 0
        self.pi = 0
        self.msv = 0
        self.peo = 0
        self.pad1 = 0
        self.pad2 = 0
        self.pad3 = 0

    def __str__(self):
        return ' '.join( [ x[0]+':'+str(getattr(self,x[0])) for x in self._fields_ if x[0] in ['msv','ttv','sscv','pi','peo']])

    def get_sscv(self):
        return self.sscv

    def set_sscv(self, valid=True):
        self.sscv = 1 if valid else 0

    def get_ttv(self):
        return self.ttv

    def set_ttv(self, valid=True):
        self.ttv = 1 if valid else 0

    def get_msv(self):
        return self.msv

    def set_msv(self, valid=True):
        self.msv = 1 if valid else 0

    def get_pi(self):
        return self.pi

    def set_pi(self, onoff=True):
        self.pi = 1 if onoff else 0

    def get_peo(self):
        return self.peo

    def set_peo(self, odd=False):
        self.peo = 1 if odd else 0

    def asBuffer(self):
        return buffer(self)[:]

    def asString(self):
        return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self))

class ttag_info_union(ctypes.Union):
    """Overlay of the msptr words and the flag bits in the time tag field."""
    _pack_ = 1
    _fields_ = [ ('msptr', msptr_data),
                 ('info', ttag_info_struct )]

    def __new__(cls, *args, **kwargs):
        if len(args) > 0 or 'buf' in kwargs:
            if len(args) > 0:
                return cls.from_buffer_copy(args[0])
            return cls.from_buffer_copy(kwargs['buf'])
        return super(ttag_info_union, cls).__new__(cls, *args, **kwargs)

    def __init__(self, data=None, **kwargs):
        pass

    def __str__(self):
        return 'info: '+str(self.info)+ ' ' + str(self.msptr)

    def asBuffer(self):
        return buffer(self)[:]

    def asString(self):
        return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self))

class ttag_values(ctypes.BigEndianStructure):
    """SDDS time tag: 64-bit tick count plus 32-bit fractional extension."""
    _pack_ = 1
    _fields_ = [ ('ttag',ctypes.c_uint64),
                 ('ttage',ctypes.c_uint32)
                 ]

    def __new__(cls, *args, **kwargs):
        if len(args) > 0 or 'buf' in kwargs:
            if len(args) > 0:
                return cls.from_buffer_copy(args[0])
            return cls.from_buffer_copy(kwargs['buf'])
        return super(ttag_values, cls).__new__(cls, *args, **kwargs)

    def __init__(self, data=None, **kwargs):
        # BUG FIX: when constructed from a buffer, __init__ used to zero the
        # parsed time stamp; keep the parsed values instead.
        if data is not None or 'buf' in kwargs:
            return
        self.ttag = 0
        self.ttage = 0

    def __str__(self):
        return str(self.get_SDDSTime())

    def set(self, ps250, pf250):
        self.ttag = ps250
        self.ttage = pf250

    def set_SDDSTime(self, sdds_time):
        self.ttag = sdds_time.picoTicks()
        self.ttage = sdds_time.picoTicksFractional()

    def get_SDDSTime(self):
        from ossie.utils.sdds import Time
        t = Time()
        t.set( self.ttag, self.ttage )
        return t

    def asBuffer(self):
        return buffer(self)[:]

    def asString(self):
        return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self))

class ttag_info(ctypes.Structure):
    """Complete SDDS time tag field: info/msptr union plus the time stamp."""
    _pack_ = 1
    _fields_ = [ ('info', ttag_info_union ),
                 ('tstamp', ttag_values ) ]

    def __new__(cls, *args, **kwargs):
        if len(args) > 0 or 'buf' in kwargs:
            if len(args) > 0:
                return cls.from_buffer_copy(args[0])
            return cls.from_buffer_copy(kwargs['buf'])
        return super(ttag_info, cls).__new__(cls, *args, **kwargs)

    def __init__(self, data=None, **kwargs):
        pass

    def __str__(self):
        return str(self.info)+' tstamp: '+str(self.tstamp)

    def get_msptr( self ):
        return self.info.msptr.get_msptr()

    def set_msptr( self, val ):
        self.info.msptr.set_msptr(val)

    def get_msdelta( self ):
        return self.info.msptr.get_msdelta()

    def set_msdelta( self, val ):
        self.info.msptr.set_msdelta(val)

    def clear_msptr(self):
        self.info.msptr.msptr = 0
        self.info.msptr.msdelta = 0

    def set_msv(self, valid=True):
        self.info.info.set_msv(valid)

    def get_msv(self):
        return self.info.info.get_msv()

    def get_ttv(self):
        return self.info.info.get_ttv()

    def set_ttv(self, valid=True):
        self.info.info.set_ttv(valid)

    def get_sscv(self):
        return self.info.info.get_sscv()

    def set_sscv(self, valid=True):
        self.info.info.set_sscv(valid)

    def asBuffer(self):
        return buffer(self)[:]

    def asString(self):
        return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self))

class ssc_info_struct(ctypes.BigEndianStructure):
    """Synchronous sample clock field: frequency and its rate of change."""
    _pack_ = 1
    _fields_ = [ ('dfdt',ctypes.c_int32),
                 ('freq',ctypes.c_uint64)
                 ]

    def __new__(cls, *args, **kwargs):
        if len(args) > 0 or 'buf' in kwargs:
            if len(args) > 0:
                return cls.from_buffer_copy(args[0])
            return cls.from_buffer_copy(kwargs['buf'])
        return super(ssc_info_struct, cls).__new__(cls, *args, **kwargs)

    def __init__(self, data=None, **kwargs):
        # Keep parsed values when constructed from a buffer (see frame_sequence).
        if data is not None or 'buf' in kwargs:
            return
        self.dfdt = 0
        self.freq = 0

    def __str__(self):
        return 'freq: '+ str(self.get_freq())+' dfdt: '+ str(self.get_dfdt())

    def get_freq(self):
        # Stored frequency units have a resolution of 125 MHz / 2^63.
        return self.freq * 1.3552527156068805e-11

    def set_freq(self, freq):
        # Inverse scaling: 2^63 / 125 MHz.
        self.freq = long(freq * 73786976294.838211)

    def get_dfdt(self):
        # dfdt is stored scaled by 2^30.
        return self.dfdt * 9.3132257461547852e-10

    def set_dfdt(self, val ):
        self.dfdt = int(val * 1073741824.0)

    def asBuffer(self):
        return buffer(self)[:]

    def asString(self):
        return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self))

class ssd_data(ctypes.BigEndianStructure):
    """Synchronous Serial Data field of the SDDS header: two 16-bit words."""
    DATA_LEN=2
    _pack_ = 1
    _fields_ = [ ('data', ctypes.c_uint16* DATA_LEN ) ]

    def __new__(cls, *args, **kwargs):
        if len(args) > 0 or 'buf' in kwargs:
            if len(args) > 0:
                return cls.from_buffer_copy(args[0])
            return cls.from_buffer_copy(kwargs['buf'])
        return super(ssd_data, cls).__new__(cls, *args, **kwargs)

    def __init__(self, data=None, **kwargs):
        pass

    def __str__(self):
        return ','.join( [ str(hex(ord(x))) for x in self.asBuffer() ] )

    def asBuffer(self):
        return buffer(self)[:]

    def asString(self):
        return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self))
__init__(self,data=None): + pass + + def __str__(self): + return ','.join( [ str(hex(ord(x))) for x in self.asBuffer() ] ) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + +class aad_data(ctypes.BigEndianStructure): + DATA_LEN=20 + _pack_ = 1 + _fields_ = [ ('data', ctypes.c_uint8*DATA_LEN) ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(aad_data,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def __str__(self): + return ','.join( [ str(hex(ord(x))) for x in self.asBuffer() ] ) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + + +class sdds_header(ctypes.Structure): + PARITY_SEQ_NUMBER=32 + MAX_SEQ_NUMBER=65536 + _pack_ = 1 + _fields_ = [ ('formatid', format_identifier), + ('fsn', frame_sequence), + ('ttag', ttag_info ), + ('ssc', ssc_info_struct ), + ('ssd', ssd_data ), + ('aad', aad_data ), + ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_header,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None, skip_parity=True): + self._skip_parity=skip_parity + pass + + def __str__(self): + return 'format_id: '+str(self.formatid)+'\n'+ \ + ' fsn: '+str(self.fsn) +'\n' + \ + ' ttag: '+str(self.ttag)+'\n' + \ + ' ssc: '+str(self.ssc)+'\n' + \ + ' ssd: '+str(self.ssd)+'\n' + \ + ' aad: '+str(self.aad) + + + def inc(self, skip_parity=None): + self.fsn.inc() + if skip_parity or self._skip_parity: + if self.fsn.seq % sdds_header.PARITY_SEQ_NUMBER == 31: + self.fsn.inc() + + if self.fsn == 0: + self.formatid.sos=0 + + ## + ## Format 
Identifier + ## + def get_standardformat(self): + return self.formatid.sf + + def set_standardformat(self, sf=False ): + self.formatid.sf=0 + if sf: + self.formatid.sf=1 + + def get_startofsequence(self): + return self.formatid.sos + + def set_startofsequence(self, sos=False ): + self.formatid.sos=0 + if sos: + self.formatid.sos=1 + + def get_paritypacket(self): + return self.formatid.pp + + def set_paritypacket(self, pp=False ): + self.formatid.pp=0 + if pp: + self.formatid.pp=1 + + def get_originalformat(self): + return self.formatid.of + + def set_originalformat(self, of=False ): + self.formatid.of=0 + if of: + self.formatid.of=1 + + def get_spectralsense(self): + return self.formatid.ss + + def set_spectralsense(self, ss=False ): + self.formatid.ss=0 + if ss: + self.formatid.ss=1 + + def get_complex(self): + return self.formatid.cx + + def set_complex(self, isComplex=False ): + self.formatid.cx=0 + if isComplex: + self.formatid.cx=1 + + def set_dmode(self,dm): + self.formatid.set_dmode(dm) + + def get_dmode(self): + return self.formatid.dm + + def get_vw(self): + return self.formatid.vw + + def set_vw(self, isVeryWide=False ): + if isVeryWide: + self.formatid.vw = 1 + else: + self.formatid.vw = 0 + + def get_bps(self): + return self.formatid.get_bps() + + def set_bps(self, bps ): + self.formatid.set_bps(bps) + + ## + ## frame sequence + ## + def get_fsn(self): + return self.fsn.get() + + def set_fsn(self, v ): + self.fsn.set(v) + + def inc_fsn(self): + self.fsn.inc() + + ## + ## ttag - time tag + ## + def get_msptr( self ): + return self.ttag.get_msptr() + + def set_msptr( self, val ): + self.ttag.set_msptr(val) + + def get_msdelta( self ): + return self.ttag.get_msdelta() + + def set_msdelta( self, val ): + self.ttag.set_msdelta(val) + + def clear_msptr(self): + self.ttag.clean_msptr() + + def set_msv(self, valid=True): + self.ttag.set_msv(valid) + + def get_msv(self): + return self.ttag.get_msv() + + def get_ttv(self): + return self.ttag.get_ttv() + + def 
set_ttv(self, valid=True): + self.ttag.set_ttv(valid) + + def get_sscv(self): + return self.ttag.get_sscv() + + def set_sscv(self, valid=True): + self.ttag.set_sscv(valid) + + def set_time(self, ps250, pf250 ): + self.ttag.tstamp.ttag = ps250 + self.ttag.tstamp.ttage = pf250 + + def set_SDDSTime(self, sdds_time, ): + self.ttag.tstamp.ttag = sdds_time.picoTicks() + self.ttag.tstamp.ttage = sdds_time.picoTicksFractional() + + def get_SDDSTime(self): + from ossie.utils.sdds import Time + t=Time() + t.set( self.ttag.tstamp.ttag, + self.ttag.tstamp.ttage ) + return t + + + ## + ## ssc - synchronous sample clock + ## + def get_freq(self): + return self.ssc.get_freq() + + def set_freq(self, freq): + self.ssc.set_freq(freq) + + def get_rate(self): + rate = self.get_freq() + if self.get_vw() == 1: + rate *= 16.0 + + if self.get_complex() == 1: + rate *= 0.5 + return rate + + def set_rate(self, rate): + vw=0 + val=rate + if rate>= 125e6: + vw = 1 + val = val * 0.0625 + self.set_vw(vw) + self.set_freq(val) + + def get_dfdt(self): + return self.ssc.get_dfdt() + + def set_dfdt(self, freq): + self.ssc.set_dfdt(freq) + + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + + +#################################################################################### +# +# SDDS Payload Containers +# +#################################################################################### + +class sdds_sb_payload(ctypes.BigEndianStructure): + NUM_SAMPLES=1024 + PAYLOAD_SIZE=1024 + _pack_ = 1 + _fields_ = [ ('data', ctypes.c_uint8 *PAYLOAD_SIZE ) ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_sb_payload,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def get_data(self, start=None, end=None): + return 
self.data[start:end] + + def set_data(self, samples ): + if type(samples) == list: + for i,x in enumerate(samples): + if i < sdds_sb_payload.NUM_SAMPLES: + self.data[i] = x + else: + fit = min(len(samples), ctypes.sizeof(self)) + ctypes.memmove(ctypes.addressof(self), samples, fit) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + + +class sdds_cb_payload(ctypes.BigEndianStructure): + NUM_SAMPLES=512 + PAYLOAD_SIZE=1024 + _pack_ = 1 + _fields_ = [ ('data', ctypes.c_uint8 *PAYLOAD_SIZE ) ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_cb_payload,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def get_data(self, start=None, end=None): + return self.data[start:end] + + def set_data(self, samples ): + if type(samples) == list: + for i,x in enumerate(samples): + if i < sdds_cb_payload.NUM_SAMPLES: + self.data[i] = x + else: + fit = min(len(samples), ctypes.sizeof(self)) + ctypes.memmove(ctypes.addressof(self), samples, fit) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + + +class sdds_si_payload(ctypes.BigEndianStructure): + NUM_SAMPLES=512 + PAYLOAD_SIZE=512 + _pack_ = 1 + _fields_ = [ ('data', ctypes.c_uint16 * PAYLOAD_SIZE) ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_si_payload,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def get_data(self, start=None, end=None): + return self.data[start:end] + + def set_data(self, samples ): + if type(samples) == list: + for i,x in enumerate(samples): + if 
i < sdds_si_payload.NUM_SAMPLES: + self.data[i] = x + else: + fit = min(len(samples), ctypes.sizeof(self)) + ctypes.memmove(ctypes.addressof(self), samples, fit) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + +class sdds_ci_payload(ctypes.BigEndianStructure): + NUM_SAMPLES=256 + PAYLOAD_SIZE=512 + _pack_ = 1 + _fields_ = [ ('data', ctypes.c_uint16 * PAYLOAD_SIZE) ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_ci_payload,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def get_data(self, start=None, end=None): + return self.data[start:end] + + def set_data(self, samples ): + if type(samples) == list: + for i,x in enumerate(samples): + if i < sdds_ci_payload.NUM_SAMPLES: + self.data[i] = x + else: + fit = min(len(samples), ctypes.sizeof(self)) + ctypes.memmove(ctypes.addressof(self), samples, fit) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + +class sdds_sn_sample(ctypes.BigEndianStructure): + _pack_ = 1 + _fields_ = [ ('sn2', ctypes.c_uint8,4 ), + ('sn1', ctypes.c_uint8,4 ) + ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_sn_sample,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def get_data(self): + return [ self.sn1, self.sn2 ] + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + +class sdds_sn_payload(ctypes.Structure): + NUM_SAMPLES=2048 + PAYLOAD_SIZE=1024 + _pack_ = 1 + _fields_ = [ ('data', 
sdds_sn_sample*PAYLOAD_SIZE) ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_sn_payload,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def get_data(self, start=None, end=None): + _ret=[] + for x in self.data[start:end]: + _ret += x.get_data() + return _ret + + def set_data(self, samples ): + if type(samples) == list: + for i,x in enumerate(samples): + if i < sdds_sn_payload.NUM_SAMPLES: + self.data[i] = x + else: + fit = min(len(samples), ctypes.sizeof(self)) + ctypes.memmove(ctypes.addressof(self), samples, fit) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + +class sdds_sf_payload(ctypes.BigEndianStructure): + NUM_SAMPLES=256 + PAYLOAD_SIZE=256 + _pack_ = 1 + _fields_ = [ ('data', ctypes.c_float * PAYLOAD_SIZE ) ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_sf_payload,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def get_data(self, start=None, end=None): + return self.data[start:end] + + def set_data(self, samples ): + if type(samples) == list: + for i,x in enumerate(samples): + if i < sdds_sf_payload.NUM_SAMPLES: + self.data[i] = x + else: + fit = min(len(samples), ctypes.sizeof(self)) + ctypes.memmove(ctypes.addressof(self), samples, fit) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + +class sdds_payload(ctypes.Union): + PAYLOAD_SIZE=1024 + _pack_ = 1 + _fields_ = [ ('raw', ctypes.c_uint8 *PAYLOAD_SIZE ), + ('sn', sdds_sn_payload ), + ('sb', sdds_sb_payload ), + ('cb', sdds_sb_payload 
), + ('si', sdds_si_payload ), + ('ci', sdds_si_payload ), + ('sf', sdds_sf_payload ) + ] + + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return super(sdds_payload,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None): + pass + + def __str__(self): + return ','.join( [ str(x) for x in self.raw[:40] ] ) + + def trim_payload(self, start=0, end=40): + return ','.join( [ str(x) for x in self.raw[start:end] ] ) + + def get_data(self, start=None, end=None): + return self.raw[start:end] + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) + + +class sdds_packet(ctypes.Structure): + FORMATS = { + 'SN' : { 'dmode': 0, 'bps': 4, 'cplx': 0, 'samples': 2048, 'get_data': sdds_sn_payload.get_data }, + 'SB' : { 'dmode': 1, 'bps': 8, 'cplx': 0, 'samples': 1024, 'get_data': sdds_sb_payload.get_data }, + 'CB' : { 'dmode': 1, 'bps': 8, 'cplx': 1, 'samples': 512, 'get_data': sdds_cb_payload.get_data}, + 'SI' : { 'dmode': 2, 'bps': 16, 'cplx': 0, 'samples': 512, 'get_data': sdds_si_payload.get_data}, + 'CI' : { 'dmode': 2, 'bps': 16, 'cplx': 1, 'samples': 256, 'get_data': sdds_ci_payload.get_data}, + 'SF' : { 'dmode': 7, 'bps': 32, 'cplx': 0, 'samples': 256, 'get_data': sdds_sf_payload.get_data}, + 'AD4' : { 'dmode': 5, 'bps': 8, 'cplx': 0, 'samples': 1024, 'get_data': sdds_sb_payload.get_data}, + 'AD12' : { 'dmode': 6, 'bps': 16, 'cplx': 0, 'samples': 512, 'get_data': sdds_si_payload.get_data}, + } + PKT_LEN=1080 + HEADER_LEN=56 + PAYLOAD_LEN=1024 + _pack_ = 1 + _fields_ = [ ('header', sdds_header ), + ('payload', sdds_payload ) + ] + def __new__(cls,*args, **kwargs ): + if len(args) > 0 or 'buf' in kwargs: + if len(args) > 0: + return cls.from_buffer_copy(args[0]) + return cls.from_buffer_copy(kwargs['buf']) + else: + return 
super(sdds_packet,cls).__new__(cls,*args,**kwargs) + + def __init__(self,data=None, skip_parity=True): + self._skip_parity=skip_parity + pass + + def __str__(self): + return ''.join(str(self.header)) + '\n' +\ + ' payload: '+ ''.join(str(self.payload)) + + def header_and_payload(self, start=0, end=40, header_only=False, raw=False ): + pkt_line=''.join(str(self.header)) + if header_only == False: + if raw: + pkt_line = pkt_line + '\n' +\ + ' payload: '+ ''.join(str(self.payload.trim_payload(start,end))) + else: + pkt_line = pkt_line + '\n' +\ + ' payload: '+ ''.join(str(self.get_data(start,end))) + return pkt_line + + def inc(self): + self.header.inc(self._skip_parity) + + ## + ## Format Identifier + ## + def get_standardformat(self): + return self.header.get_standardformat() + + def set_standardformat(self, sf=False ): + self.header.set_standardformat(sf) + + def get_startofsequence(self): + return self.header.get_startofsequence() + + def set_startofsequence(self, sos=False ): + self.header.set_startofsequence(sos) + + def get_paritypacket(self): + return self.header.get_paritypacket() + + def set_paritypacket(self, pp=False ): + self.header.set_paritypacket(pp) + + def get_originalformat(self): + return self.header.get_originalformat() + + def set_originalformat(self, of=False ): + self.header.set_originalformat(of) + + def get_complex(self): + return self.header.get_complex() + + def set_complex(self, isComplex=False ): + self.header.set_complex(isComplex) + + def get_spectralsense(self ): + return self.header.get_spectralsense() + + def set_spectralsense(self, ison=False ): + self.header.set_spectralsense(ison) + + def get_vw(self): + return self.header.get_vw() + + def set_vw(self, isVeryWide=False ): + self.header.set_vw( isVeryWide ) + + def get_bps(self): + return self.header.get_bps() + + def set_bps(self, bps ): + self.header.set_bps(bps) + + def get_dmode(self): + return self.header.get_dmode() + + def set_dmode(self,dm, cplx=False, calc_bps=True, 
bps=None): + if self.ok_dmode(dm): + self.header.set_dmode(dm) + self.header.set_complex(cplx) + if calc_bps or bps==None: + self.header.set_bps( self.get_bps_for_mode(dm) ) + else: + self.header.set_bps(bps) + + def ok_dmode(self, dmode ): + return dmode == 0 or dmode == 1 or dmode == 2 or dmode == 5 or dmode == 6 or dmode == 7; + + def get_bps_for_mode(self, dmode ): + bps=8 + if dmode == 0: + bps=4 + if dmode == 1 or dmode == 5: + bps=8 + if dmode == 2 or dmode == 6: + bps=16 + if dmode == 7: bps=32 + return bps + + def get_samples_for_bps(self, bps=None ): + if bps == None: + bps=self.get_bps() + + for x in self.FORMATS.values(): + if x.has_key('bps' ) and x['bps'] == bps: + return x['samples'] + + return None + + ## + ## frame sequence + ## + def get_fsn(self): + return self.header.get_fsn() + + def set_fsn(self, v ): + self.header.set_fsn(v) + + def inc_fsn(self): + self.header.inc_fsn() + + ## + ## ttag - time tag + ## + def get_msptr( self ): + return self.header.get_msptr() + + def set_msptr( self, val ): + self.header.set_msptr(val) + + def clear_msptr( self): + self.header.clear_msptr() + + def get_msdelta( self ): + return self.header.get_msdelta() + + def set_msdelta( self, val ): + self.header.set_msdelta(val) + + def get_msv(self): + return self.header.get_msv() + + def set_msv(self, valid=True): + self.header.set_msv(valid) + + def get_ttv(self): + return self.header.get_ttv() + + def set_ttv(self, valid=True): + self.header.set_ttv(valid) + + def get_sscv(self): + return self.header.get_sscv() + + def set_sscv(self, valid=True): + self.header.set_sscv(valid) + + def set_time(self, ps250, pf250 ): + self.header.set_time(ps250, pf250) + + def get_SDDSTime(self): + return self.header.get_SDDSTime() + + def set_SDDSTime(self, sdds_time, ): + self.header.set_SDDSTime(sdds_time) + + ## + ## ssc - synchronous sample clock + ## + def get_freq(self): + return self.header.get_freq() + + def set_freq(self, freq): + self.header.set_freq(freq) + + def 
get_rate(self): + return self.header.get_rate() + + def set_rate(self, freq): + self.header.set_rate(freq) + + def get_dfdt(self): + return self.header.get_dfdt() + + def set_dfdt(self, freq): + self.header.set_dfdt(freq) + + def get_format(self): + dm=self.header.get_dmode() + fmt='SB' + for k, v in sdds_packet.FORMATS.items(): + if v['dmode'] == dm : + fmt=k + return fmt + + def set_format(self, fmt): + ret=1 + if fmt in sdds_packet.FORMATS.keys(): + _fmt = sdds_packet.FORMATS[fmt] + ret=0 + cplx = _fmt['cplx'] + dm = _fmt['dmode'] + self.set_dmode( dm, cplx, bps=_fmt['bps']) + + + def get_data(self, start=None, end=None ): + bps=self.header.get_bps() + for k, v in sdds_packet.FORMATS.items(): + if v['bps'] == bps : + attr = getattr(self.payload, k.lower()) + return v['get_data'](attr) + return self.payload.sb.get_data(start,end) + + def asBuffer(self): + return buffer(self)[:] + + def asString(self): + return ctypes.string_at(ctypes.addressof(self),ctypes.sizeof(self)) diff --git a/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_time.py b/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_time.py new file mode 100644 index 000000000..89ad77fc7 --- /dev/null +++ b/redhawk/src/base/framework/python/ossie/utils/sdds/sdds_time.py @@ -0,0 +1,158 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK rh.SourceSDDS. +# +# REDHAWK rh.SourceSDDS is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK rh.SourceSDDS is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +import time +import datetime +import calendar +import struct +import math +import copy as _copy + +def difference(t1, t2): + tmp = _copy.copy(t1) + if tmp.pf250_ >= t2.pf250_ : + tmp.pf250_ -= t2.pf250_ + tmp.ps250_ -= t2.ps250_ + else: + tmp.pf250_ = Time.Two32 + ( tmp.pf250_ - t2.pf250_ ) + tmp.ps250_ -= t2.ps250_ + 1; + return tmp + +def sum( t1, t2 ): + tmp=_copy.copy(t1) + tfrac =long(tmp.pf250_) + t2.pf250_ + tmp.ps250_ += t2.ps250_ + int(tfrac>>32) + tmp.pf250_ = int(tfrac) + return tmp + +def add(t1, offset): + if isinstance(offset, Time): + return sum(t1, offset) + else: + return iadd(t1, offset) + +def iadd(t1, offset): + if not isinstance(t1, Time): + return t1 + tmp=_copy.copy(t1) + # make into tics + pfrac,pwhole = math.modf(offset*Time.TicFreq) + tfrac = long(tmp.pf250_) + int(pfrac * Time.Two32) + tmp.ps250_ += long(pwhole) + (tfrac>>32) + tmp.pf250_ = int(tfrac) + return tmp + +def sub(t1, other): + if isinstance(other, Time): + return difference(t1, other) + else: + return isub(t1, other) + +def isub(t1, offset): + return iadd(t1, -offset) + +def compare(t1, t2): + if not isinstance(t1, Time) or not isinstance(t2, Time): + return -1 + if t1.ps250_ == t2.ps250_: + return cmp(t1.pf250_,t2.pf250_) + else: + return cmp(t1.ps250_,t2.ps250_) + +class Time: + REDHAWK_FORMAT="%Y:%m:%d::%H:%M:%S" + Tic = 250e-12 + TicFreq = 4000000000.0 + TicFreqLong = 4000000000L + Two32 = 4294967296.0 + def __init__(self ): + + self.ps250_ = 0L + self.pf250_ = 0 + self.startofyear = self.startOfYear() + self.setFromTime() + + def setFromTime(self, time_sec=time.time() ): + """ + Create a sdds time object from the input parameter. If the time_sec is from the epoch + then we need to convert to the current year as per spec. + """ + if time_sec: + if time_sec >= self.startofyear: + # UTC.. 
need to convert to SDDS EPOCH + time_sec = time_sec - self.startofyear + + pfrac, pwhole = math.modf(time_sec*Time.TicFreq) + self.ps250_ = long(pwhole) + self.pf250_ = int( pfrac*Time.Two32) + #print "td: %12Lu %12u %16.2Lf " % ( self.ps250_, self.pf250_, pfrac ) + + def setFromPartial( self, integral, fractional ): + pfrac, pwhole= math.modf(fractional*Time.TicFreq) + self.ps250_ = long(integral*Time.TicFreqLong) + long(pwhole) + self.pf250_ = int( pfrac * Time.Two32) + #print "td: %12Lu %12u %16.2Lf " % ( self.ps250_, self.pf250_, pfrac ) + + def set( self, psec, pfsec ): + self.ps250_ = psec + self.pf250_ = pfsec + #print "td: %12Lu %12u " % ( self.ps250_, self.pf250_ ) + + def secondsThisYear( self ): + return self.ps250_*Time.Tic + self.pf250_ * (Time.Tic/Time.Two32) + + def seconds( self ): + return self.startofyear + self.secondsThisYear() + + def picoTicks( self ): + return self.ps250_ + + def picoTicksFractional( self ): + return self.pf250_ + + def gmtime( self): + return time.gmtime(self.startofyear+self.secondsThisYear()) + + @staticmethod + def toString(t1, fmt=None): + gmt = t1.gmtime() + frac = int(t1.pf250_ * (Time.Tic/Time.Two32)) + if not fmt: + fmt = Time.REDHAWK_FORMAT + xx=time.strftime(fmt,gmt) + return '%s.%06d' % (xx,frac) + else: + return time.strftime(fmt,gmt) + + def __str__(self): + return toString(self) + + + @staticmethod + def startOfYear(): + soy=datetime.datetime(datetime.date.today().year,1,1,0,0,0) + return calendar.timegm(soy.timetuple()) + +Time.__add__ = add +Time.__iadd__ = iadd +Time.__sub__ = sub +Time.__isub__ = isub +Time.__isub__ = isub +Time.__cmp__ = compare +Time.__str__ = Time.toString diff --git a/redhawk/src/base/framework/python/ossie/utils/testing/rhunittest.py b/redhawk/src/base/framework/python/ossie/utils/testing/rhunittest.py index e2c658279..52586fa08 100644 --- a/redhawk/src/base/framework/python/ossie/utils/testing/rhunittest.py +++ b/redhawk/src/base/framework/python/ossie/utils/testing/rhunittest.py @@ 
-34,6 +34,21 @@ __all__ = ('RHTestCase', 'RHTestLoader', 'RHTestProgram') +def _getSpdFile(classdict, bases): + """ + Looks up the 'SPD_FILE' attribute at class definition time, first checking + the new class' dictionary, then the base classes. This allows subclassing a + an RHTestCase class. + """ + spd_file = classdict.get('SPD_FILE', None) + if spd_file: + return spd_file + for base in bases: + spd_file = getattr(base, 'SPD_FILE', None) + if spd_file: + return spd_file + return None + class RHTestCaseMeta(type): """ @@ -59,7 +74,7 @@ class object is created (i.e., when the class is parsed), it is assumed """ def __init__(self, name, bases, classdict): type.__init__(self, name, bases, classdict) - spd_file = classdict.get('SPD_FILE', None) + spd_file = _getSpdFile(classdict, bases) if spd_file: # Get the module that contains the test case class. This allows us # to find the SPD file relative to its location. diff --git a/redhawk/src/base/framework/python/ossie/utils/type_helpers.py b/redhawk/src/base/framework/python/ossie/utils/type_helpers.py index 9c4f2afe1..93539ae63 100644 --- a/redhawk/src/base/framework/python/ossie/utils/type_helpers.py +++ b/redhawk/src/base/framework/python/ossie/utils/type_helpers.py @@ -17,6 +17,7 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. 
# +from ossie.utils import rhtime class OutOfRangeException(Exception): pass @@ -100,6 +101,10 @@ def checkValidValue(value, dataType): if dataType == 'char' and len(value) != 1: raise TypeError, 'expected a character, but string of length %d found' % len(value) return value + elif dataType == 'utctime': + if type(value) == str: + return rhtime.convert(value) + return value elif isinstance(value, basestring): raise TypeError, "Cannot convert string to type '%s'" % dataType elif dataType in ('double', 'float'): diff --git a/redhawk/src/base/framework/python/ossie/utils/weakmethod.py b/redhawk/src/base/framework/python/ossie/utils/weakmethod.py index 423a93258..7269b501f 100644 --- a/redhawk/src/base/framework/python/ossie/utils/weakmethod.py +++ b/redhawk/src/base/framework/python/ossie/utils/weakmethod.py @@ -19,6 +19,7 @@ # import warnings +warnings.filterwarnings('once',category=DeprecationWarning) warnings.warn('%s has been replaced by ossie.utils.weakobj module' % __name__, DeprecationWarning) from weakobj import WeakBoundMethod diff --git a/redhawk/src/base/framework/python/ossie/version.py b/redhawk/src/base/framework/python/ossie/version.py index 7a9164e77..860450d02 100644 --- a/redhawk/src/base/framework/python/ossie/version.py +++ b/redhawk/src/base/framework/python/ossie/version.py @@ -18,4 +18,4 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# -__version__='2.0.9' +__version__='2.2.1' diff --git a/redhawk/src/base/framework/python/ossie/xmidas.py b/redhawk/src/base/framework/python/ossie/xmidas.py index 9f1390a49..778a8a878 100644 --- a/redhawk/src/base/framework/python/ossie/xmidas.py +++ b/redhawk/src/base/framework/python/ossie/xmidas.py @@ -23,6 +23,7 @@ # X-MIDAS interoperabiliy layer import warnings +warnings.filterwarnings('once',category=DeprecationWarning) warnings.warn("ossie.xmidas is deprecated, use xmsca.resource instead", DeprecationWarning) from XMinter import * diff --git a/redhawk/src/base/framework/python/redhawk/bitbuffer.py b/redhawk/src/base/framework/python/redhawk/bitbuffer.py new file mode 100644 index 000000000..c1f62fcec --- /dev/null +++ b/redhawk/src/base/framework/python/redhawk/bitbuffer.py @@ -0,0 +1,559 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import itertools +import numbers + +__all__ = ('bitbuffer', 'takeskip') + +def _bits_to_bytes(bits): + return int((bits + 7)/8) + +def _char_to_bit(ch): + bit = '01'.find(ch) + if bit < 0: + raise ValueError("invalid character '%s'" % ch) + return bit + +def _bitmask(bits): + return (1<> shift + return value & 0xFF + +def _read_split_bits(src, sbyte, offset, bits): + value = src[sbyte] << 8 + last = offset + bits - 1 + value |= src[sbyte + (last//8)] + shift = 15 - last + return (value >> shift) & ((1 << bits) - 1) + +def _copy_bits(dest, dstart, src, sstart, count): + dbyte, dbit = _split_index(dstart) + sbyte, sbit = _split_index(sstart) + + # If the left hand side is not byte-aligned, copy a sub-byte number of bits + # so that remaining iterations are aligned + if dbit > 0: + nbits = min(8 - dbit, count) + value = _read_split_bits(src, sbyte, sbit, nbits) + _write_bits(dest, dbyte, dbit, value, nbits) + + # Advance to the next byte for the left hand side, and adjust + # the offset for the right hand side (which may advance to the + # next byte as well) + dbyte += 1 + sbit += nbits + sbyte += sbit / 8 + sbit = sbit & 7 + count -= nbits + + # Left offset is now guaranteed to be 0; if the right offset is also 0, + # then both sides are exactly byte-aligned + bytes = count // 8 + if sbit == 0: + dest[dbyte:dbyte+bytes] = src[sbyte:sbyte+bytes] + else: + # The two bit arrays are not exactly aligned; iterate through each + # byte from the left-hand side + for pos in xrange(bytes): + dest[dbyte+pos] = _read_split_byte(src, sbyte+pos, sbit) + dbyte += bytes + sbyte += bytes + + # If less than a full byte remains, process it + remain = count & 7 + if remain > 0: + value = _read_split_bits(src, sbyte, sbit, remain) + _write_bits(dest, dbyte, 0, value, remain) + +def _unpack(src, start, count): + byte, bit = _split_index(start) + + last_byte = byte + (bit + count + 7) // 8 + for pos in xrange(byte, last_byte): + nbits = min(8 - bit, count) + + # Use the first 
(inclusive) and last (exclusive) bits to determine the + # shift range + first = 7 - bit + last = first - nbits + value = src[pos] + for shift in range(first, last, -1): + yield (value >> shift) & 1 + + # Subsequent bytes should be aligned + bit = 0 + count -= nbits + +# Lookup table with each byte exploded into a list of 8 bit values +_exploded = [[(x>>shift)&1 for shift in range(7,-1,-1)] for x in range(256)] + +def _unpack(src, start, count): + byte, bit = _split_index(start) + + if bit > 0: + nbits = min(8 - bit, count) + items = _exploded[src[byte]] + for item in items[bit:bit+nbits]: + yield item + count -= nbits + byte += 1 + + bytes = count // 8 + for value in src[byte:byte+bytes]: + for item in _exploded[value]: + yield item + byte += bytes + + remain = count & 7 + if remain > 0: + items = _exploded[src[byte]] + for item in items[:remain]: + yield item + +class counted_iterator(object): + """ + Helper iterator wrapper that keeps track of the number of iterations that + were performed. + """ + def __init__(self, iterable): + self.count = 0 + self.iter = iter(iterable) + + def __iter__(self): + for item in self.iter: + self.count += 1 + yield item + +class biterator(object): + """ + Helper binary iterator that yields one bit of an integer value per + iteration, starting with the most significant bit. + """ + def __init__(self, value, bits): + self.value = value + self.bits = bits + + def __iter__(self): + for shift in xrange(self.bits-1, -1, -1): + yield (self.value >> shift) & 1 + + +def _iterable_to_bytes(iterable, func): + value = 0 + shift = 7 + for item in iterable: + bit = 1 if func(item) else 0 + value = value | (bit << shift) + shift -= 1 + if shift < 0: + yield value + value = 0 + shift = 7 + + # If the shift value is not 7, there are some bits stored in value that + # must be returned + if shift != 7: + yield value + + +def takeskip(iterable, take, skip): + """ + Generator function to perform a take/skip operation on any iterable. 
+ Returns 'take' elements from the iterable, then discards 'skip' elements, + until the iterable is exhausted. + """ + it = iter(iterable) + while True: + for _ in xrange(take): + yield it.next() + for _ in xrange(skip): + it.next() + +class bitbuffer(object): + """ + A sequence container for bit data. + + bitbuffer provides read/write bit-level access to a backing array of raw + byte data, in addition to higher-level bit string operations. It supports + common Python sequence operations, such as len(), element access, slicing + and iteration. + + Creating a bitbuffer from a string: + + buf = bitbuffer('10101101010011') + + Creating a bitbuffer from an integer, such as a hexadecimal literal: + + buf = bitbuffer(0x1AC07B23, 29) + + Creating a bitbuffer from an iterable: + + buf = bitbuffer(x & 1 for x in xrange(64)) + + Indexing: + Individual bits are accessed by index. The value of a bit is treated as an + int with a value of 0 or 1 (as opposed to a bool). When setting a bit, the + value is first converted to an int, and any non-zero value is considered to + be 1. + + Slicing: + Standard Python slice syntax is supported. For example, to select 64 bits + starting at offset 16: + + buf2 = buf[16:80] + + For efficiency, object slices share the backing byte array instead of + creating a new copy. Modifications to the slice affect the original buffer, + and vice-versa. To make a unique copy that does not share data, use the + `copy` module: + + import copy + buf2 = copy.copy(buf[16:80]) + + Integer Conversion: + A bitbuffer may be converted to an integer with the int() method, e.g.: + + val = int(buf) + + Use slicing to select a range of bits: + + val = int(buf[8:16]) + + Integer conversion is performed in big-endian order, starting at the most + significant bit. The returned value is right-justified (i.e., the least- + significant N bits contain the value). If the value exceeds the range of + int, it is automatically converted to a long. 
+ """ + def __init__(self, data=None, bits=None, start=None): + """ + Create a bitbuffer. + + If `data` is not specified, enough space to hold `bits` bits is + allocated. The bits are not initialized. If `bits` is not given, a 0- + length bitbuffer is created. + + If `data` is given, the initial value depends on the type of `data`: + * str: Parse `data` as a string of 0's and 1's. + * bitbuffer: Creates an alias to `data`. + * bytearray: Use `data` as backing byte array. + * integer: Store `bits` bits from `data` starting at MSB. + * iterable: Convert each item in `data` to a bit value. + + When `bits` is not given, the resulting number of bits is determined + from `data` if possible. If `data` is an integral type (int, long), + `bits` must be specified. + + The optional `start` argument may be used to discard bits from the + beginning of `data`. + + Args: + data: Object to convert to bitbuffer value. + bits: Number of bits. + start: Index of first bit (default 0). + + Raises: + TypeError: If `data` is an int or long but `bits` is not given. + ValueError: If `data` is a string and contains characters other + than '0' and '1'. + ValueError: If `data` is a sequence or iterable and contains an + item that cannot be converted to int. 
+ """ + if data is None: + # No data given, create a backing byte array + if bits is None: + bits = 0 + data = bytearray(_bits_to_bytes(bits)) + elif isinstance(data, bitbuffer): + # Copy constructor, reference the same byte array, and if the size + # and start were not given, get them from the other bitbuffer + if bits is None: + bits = len(data) + if start is None: + start = data.__start + data = data.__data + elif isinstance(data, bytearray): + # Already a byte array, just determine the size if not given + if bits is None: + bits = len(data) * 8 + else: + if isinstance(data, numbers.Integral): + # Integer: copy bits starting at MSB, using iterator + if bits is None: + raise TypeError('integer given with no bit count') + data = biterator(data, bits) + func = int + elif isinstance(data, basestring): + # String: parse as binary string + func = _char_to_bit + else: + # Anything else (list, generator expression, etc.), convert via + # integer + func = int + + # Use a counted iterator to fill the byte array; this is helpful + # when given a generator expression, where you can't know the + # number of bits in advance and the backing bytearray is quantized + # to bytes. 
+ it = counted_iterator(data) + data = bytearray(_iterable_to_bytes(it, func)) + if bits is None: + bits = it.count + + # If no start offset was given, start at 0 + if start is None: + start = 0 + + self.__data = data + self.__bits = bits + self.__start = start + + def __getitem__(self, pos): + if isinstance(pos, slice): + # Get via a slice object; indices() takes care of start/stop range, + # there is no need for an explicit check + start, stop, step = pos.indices(len(self)) + if step == 1: + # Return a view using the same underlying array + bits = stop - start + start_bit = self.__start + start + return bitbuffer(self, bits, start_bit) + else: + # Create a new bitbuffer by striding through this one + return bitbuffer(self[pos] for pos in xrange(start, stop, step)) + else: + # Get an individual bit + pos = self._check_index(pos) + byte, bit = self._split_index(pos) + return (self.__data[byte] >> (7-bit)) & 1 + + def __setitem__(self, pos, value): + if isinstance(pos, slice): + # Set via a slice object; indices() takes care of start/stop range, + # there is no need for an explicit check + start, stop, step = pos.indices(len(self)) + + # With normal stride and a bitbuffer as the source, use byte-wise + # copy method (~1000x faster in some cases) + if step == 1 and isinstance(value, bitbuffer): + self._assign(start, stop, value) + return + + indices = xrange(start, stop, step) + bits = len(indices) + try: + value_len = len(value) + except TypeError: + value = itertools.repeat(value) + value_len = bits + + if value_len != bits: + raise ValueError('attempt to assign sequence of size %d to extended slice of size %d' % (value_len, bits)) + + for index, val in zip(indices, value): + self[index] = val + else: + # Set an individual bit + pos = self._check_index(pos) + byte, bit = self._split_index(pos) + mask = 1 << (7 - bit) + value = mask if int(value) else 0 + self.__data[byte] = (self.__data[byte] & ~mask) | value + + def __iter__(self): + return _unpack(self.__data, 
self.__start, self.__bits) + + def __len__(self): + return self.__bits + + def __repr__(self): + return "bitbuffer('%s')" % self + + def __str__(self): + return ''.join(str(x) for x in self) + + def __eq__(self, other): + if len(self) != len(other): + return False + if isinstance(other, basestring): + func = _char_to_bit + else: + func = bool + for lhs, rhs in zip(self, other): + if lhs != func(rhs): + return False + return True + + def __int__(self): + value = 0 + for bit in self: + value = (value << 1) | bit + return value + + def __hex__(self): + return hex(int(self)) + + def __copy__(self): + # Make a copy of the data array so that modifications to the copy do + # not affect this instance. This is consistent with bytearray itself + # and numpy arrays. + return bitbuffer(bytearray(self.__data), self.__bits, self.__start) + + def __add__(self, other): + # Convert incoming value to a bitbuffer, because at the very least we + # need to know its size + if not isinstance(other, bitbuffer): + other = bitbuffer(other) + bits = len(self) + len(other) + result = bitbuffer(bits=bits) + result[:len(self)] = self + result[len(self):] = other + return result + + def bytes(self): + """ + Returns a raw byte string containing all bits from this bitbuffer, + left-aligned. + """ + byte, bit = self._split_index(0) + if bit == 0: + # Bit data is byte aligned, convert the backing array to bytes + return bytes(self.__data[byte:]) + else: + # Unaligned, create a new bitbuffer with a copy of the data, which + # will be aligned + temp = bitbuffer(bits=len(self)) + temp[:] = self + return temp.bytes() + + def unpack(self): + """ + Unpacks the bits into a list of integers, one per bit. + """ + return list(iter(self)) + + def popcount(self): + """ + Returns the population count (number of 1's). + """ + return sum(self) + + def distance(self, other): + """ + Determines the Hamming distance from a sequence or iterable. + + Args: + other: Another bitbuffer or iterable to compare with. 
+ + Returns: + int: Number of bits that are different. + """ + return sum(x^bool(y) for x,y in zip(self, other)) + + def find(self, pattern, start=0, end=None, maxDistance=0): + """ + Finds a pattern in this bitbuffer within a maximum Hamming distance. + + Starting from `start` and ending at `end` (or the end of the bitbuffer + if `end` is not given), searches forward for a position at which the + Hamming distance between this bitbuffer and `pattern` is less than or + equal to `maxDistance`. + + Args: + pattern: Bit pattern to search for. + start: Starting bit index. + end: Ending bit index. + maxDistance: Maximum allowable Hamming distance. + + Returns: + int: Bit index of first occurrence of pattern, or -1 if pattern was + not found. + """ + # Explicitly convert the pattern to a bitbuffer to take care of string + # parsing, generator functions, etc. + pattern = bitbuffer(pattern) + length = len(pattern) + + # Get bounded indices for start and end (ignoring step) + start, end, step = self._indices(start, end) + + # Clamp end to the last position at which there is a full pattern + # length to do the comparison + end = min(end, len(self) - length) + + for pos in xrange(start, end): + if pattern.distance(self[pos:pos+length]) <= maxDistance: + return pos + return -1 + + def takeskip(self, take, skip, start=0, end=None): + """ + Performs a take/skip operation to create a new bitbuffer. + + Alternately copies `take` bits and skips `skip` bits from the range + [start, end) into a new bitbuffer. + + Args: + take: Number of bits to copy per iteration. + skip: Number of bits to skip per iteration. + start: Index of first bit (default 0). + end: Index of last bit, exclusive (default end). + + Returns: + New bitbuffer with requested bits. 
+ """ + start, end, step = self._indices(start, end) + return bitbuffer(takeskip(self[start:end], take, skip)) + + def _assign(self, start, end, other): + bits = end - start + if bits != len(other): + raise ValueError('attempt to assign sequence of size %d to extended slice of size %d' % (len(other), bits)) + _copy_bits(self.__data, self.__start + start, other.__data, other.__start, bits) + + def _indices(self, start, end, step=None): + # Use slice to return properly bounded indices + return slice(start, end, step).indices(len(self)) + + def _check_index(self, pos): + if pos < 0: + pos = self.__bits + pos + if pos >= self.__bits or pos < 0: + raise IndexError('bit index out of range') + return pos + + def _split_index(self, pos): + # Given a bit index, returns the index of the byte that contains that + # bit index, and its index relative to that byte. + return _split_index(pos + self.__start) diff --git a/redhawk/src/base/framework/python/redhawk/numa.py b/redhawk/src/base/framework/python/redhawk/numa.py new file mode 100644 index 000000000..e6ffd184f --- /dev/null +++ b/redhawk/src/base/framework/python/redhawk/numa.py @@ -0,0 +1,72 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +def parseRange(line): + first, last = line.split('-') + return range(int(first), int(last)+1) + +def parseValues(line, delim=','): + values = [] + for section in line.split(delim): + if '-' in section: + values.extend(parseRange(section)) + else: + values.append(int(section)) + return values + +class NumaNode(object): + def __init__(self, node): + self._available = True + self.node = node + self.cpus = self._getCpuList() + + def _getCpuList(self): + try: + filename = '/sys/devices/system/node/node%d/cpulist' % self.node + with open(filename) as f: + line = f.readline().strip() + return parseValues(line, ',') + except IOError, e: + self._available = False + return [] + +class NumaTopology(object): + def __init__(self): + self._available = True + self.nodes = [NumaNode(node) for node in self._getNodes()] + self.cpus = sum((node.cpus for node in self.nodes), []) + + def available(self): + return self._available + + def _getNodes(self): + try: + with open('/sys/devices/system/node/online') as f: + line = f.readline().strip() + return parseValues(line, ',') + except IOError, e: + self._available = False + return [] + + def getNodeForCpu(self, cpu): + for node in self.nodes: + if cpu in node.cpus: + return node + return None diff --git a/redhawk/src/base/framework/python/regenerate_parser.sh b/redhawk/src/base/framework/python/regenerate_parser.sh index bebc55f48..715877bad 100755 --- a/redhawk/src/base/framework/python/regenerate_parser.sh +++ b/redhawk/src/base/framework/python/regenerate_parser.sh @@ -25,7 +25,7 @@ if test "x$GENERATE_DS" == "x"; then GENERATE_DS="generateDS.py" fi -GENERATE_DS_FLAGS="-f --no-process-includes --silence -m" +GENERATE_DS_FLAGS="-f --silence -m" echo "Generating DCD parser" ${GENERATE_DS} ${GENERATE_DS_FLAGS} -o ossie/parsers/dcd.py ../../../xml/xsd/dcd.xsd echo "Generating DMD parser" diff --git a/redhawk/src/base/framework/python/setup.py b/redhawk/src/base/framework/python/setup.py index fd5ea86df..918ee58a4 100644 --- 
a/redhawk/src/base/framework/python/setup.py +++ b/redhawk/src/base/framework/python/setup.py @@ -19,8 +19,7 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # - -from distutils.core import setup +from setuptools import setup import sys try: @@ -42,58 +41,65 @@ print co.getvalue() sys.exit(-1) -ossiepy = ['ossie', - 'ossie/apps', - 'ossie/apps/qtbrowse', - 'ossie/apps/rhlauncher', - 'ossie/apps/rhlauncher/ui', - 'ossie/cf', - 'ossie/cf/CF', - 'ossie/cf/CF__POA', - 'ossie/cf/PortTypes', - 'ossie/cf/PortTypes__POA', - 'ossie/cf/StandardEvent', - 'ossie/cf/StandardEvent__POA', - 'ossie/cf/ExtendedEvent', - 'ossie/cf/ExtendedEvent__POA', - 'ossie/cf/ExtendedCF', - 'ossie/cf/ExtendedCF__POA', - 'ossie/cf/ExtendedCF/WKP', - 'ossie/cf/ExtendedCF__POA/WKP', - 'ossie/events', - 'ossie/logger', - 'ossie/parsers', - 'ossie/utils', - 'ossie/utils/bluefile', - 'ossie/utils/bulkio', - 'ossie/utils/log4py', - 'ossie/utils/model', - 'ossie/utils/redhawk', - 'ossie/utils/sandbox', - 'ossie/utils/sb', - 'ossie/utils/sca', - 'ossie/utils/testing', - 'ossie/utils/tools', - 'redhawk'] - -exec(open('ossie/version.py').read()) +import ossie.version -setup( - name='ossiepy', - version=__version__, - description='OSSIE Python', - packages=ossiepy, - package_data={'ossie/apps/rhlauncher':['ui/*.ui', - 'ui/icons/*']}, - scripts=['ossie/utils/tools/prf2py.py', - 'ossie/apps/qtbrowse/qtbrowse', - 'ossie/apps/rhlauncher/rhlauncher', - 'ossie/apps/scaclt', - 'ossie/apps/py2prf', - 'ossie/apps/eventviewer', - 'ossie/apps/rh_net_diag', - 'ossie/apps/cleanns', - 'ossie/apps/cleanes', - 'ossie/apps/cleanomni', - 'ossie/apps/sdrlint'], - ) +setup(name='ossiepy', + version=ossie.version.__version__, + description='OSSIE Python', + packages=['ossie', + 'ossie/apps', + 'ossie/apps/qtbrowse', + 'ossie/apps/rhlauncher', + 'ossie/apps/rhlauncher/ui', + 'ossie/cf', + 'ossie/cf/CF', + 'ossie/cf/CF__POA', + 'ossie/cf/PortTypes', + 'ossie/cf/PortTypes__POA', + 'ossie/cf/StandardEvent', + 
'ossie/cf/StandardEvent__POA', + 'ossie/cf/ExtendedEvent', + 'ossie/cf/ExtendedEvent__POA', + 'ossie/cf/ExtendedCF', + 'ossie/cf/ExtendedCF__POA', + 'ossie/cf/ExtendedCF/WKP', + 'ossie/cf/ExtendedCF__POA/WKP', + 'ossie/events', + 'ossie/logger', + 'ossie/parsers', + 'ossie/utils', + 'ossie/utils/bluefile', + 'ossie/utils/bulkio', + 'ossie/utils/log4py', + 'ossie/utils/model', + 'ossie/utils/redhawk', + 'ossie/utils/sandbox', + 'ossie/utils/sb', + 'ossie/utils/sca', + 'ossie/utils/testing', + 'ossie/utils/tools', + 'ossie/utils/sdds', + 'ossie/utils/rhtime', + 'ossie/utils/rhconnection', + 'ossie/utils/allocations', + 'redhawk'], + package_data={'ossie/apps/rhlauncher':['ui/*.ui', + 'ui/icons/*']}, + scripts=['ossie/utils/tools/prf2py.py', + 'ossie/apps/qtbrowse/qtbrowse', + 'ossie/apps/rhlauncher/rhlauncher', + 'ossie/apps/scaclt', + 'ossie/apps/py2prf', + 'ossie/apps/eventviewer', + 'ossie/apps/rh_net_diag', + 'ossie/apps/cleanns', + 'ossie/apps/cleanes', + 'ossie/apps/cleanomni', + 'ossie/apps/sdrlint'], + entry_points={'redhawk.sandbox.helpers':['SoundSink=ossie.utils.sb.audio:SoundSink', + 'LinePlot=ossie.utils.sb.plots:LinePlot', + 'LinePSD=ossie.utils.sb.plots:LinePSD', + 'RasterPlot=ossie.utils.sb.plots:RasterPlot', + 'RasterPSD=ossie.utils.sb.plots:RasterPSD', + 'XYPlot=ossie.utils.sb.plots:XYPlot']} + ) diff --git a/redhawk/src/base/framework/shm/Allocator.cpp b/redhawk/src/base/framework/shm/Allocator.cpp new file mode 100644 index 000000000..f467de0d2 --- /dev/null +++ b/redhawk/src/base/framework/shm/Allocator.cpp @@ -0,0 +1,133 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ *
+ * REDHAWK core is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published by the
+ * Free Software Foundation, either version 3 of the License, or (at your
+ * option) any later version.
+ *
+ * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see http://www.gnu.org/licenses/.
+ */
+
+#include
+
+#include
+#include
+
+#include
+
+#include
+
+#include "Block.h"
+
+namespace redhawk {
+    namespace shm {
+        namespace {
+            static boost::once_flag heapInit = BOOST_ONCE_INIT;
+            static boost::scoped_ptr<Heap> instance(0);
+
+            static void initializeHeap()
+            {
+                char* shm_env = getenv("RH_SHMALLOC");
+                if (shm_env && strcmp(shm_env, "disable") == 0) {
+                    std::cerr << "SHM disabled" << std::endl;
+                } else {
+                    const std::string name = redhawk::shm::getProcessHeapName(getpid());
+                    Heap* heap = 0;
+                    try {
+                        heap = new Heap(name);
+                    } catch (const std::exception&) {
+                        std::cerr << "Unable to create process heap, SHM disabled" << std::endl;
+                    }
+                    instance.reset(heap);
+                }
+            }
+        }
+
+        std::string getProcessHeapName(pid_t pid)
+        {
+            std::ostringstream oss;
+            oss << "heap-" << pid;
+            return oss.str();
+        }
+
+        Heap* getProcessHeap()
+        {
+            boost::call_once(heapInit, &initializeHeap);
+            return instance.get();
+        }
+
+        bool isEnabled()
+        {
+            Heap* heap = getProcessHeap();
+            return (heap != 0);
+        }
+
+        void* allocate(size_t bytes)
+        {
+            Heap* heap = getProcessHeap();
+            if (!heap) {
+                return 0;
+            }
+            return heap->allocate(bytes);
+        }
+
+        void deallocate(void* ptr)
+        {
+            Heap* heap = getProcessHeap();
+            if (!heap) {
+                throw std::logic_error("redhawk::shm::deallocate called without process heap");
+            }
+
heap->deallocate(ptr); + } + + void* allocateHybrid(size_t bytes) + { + redhawk::shm::Heap* heap = redhawk::shm::getProcessHeap(); + if (!heap) { + return redhawk::BufferManager::Allocate(bytes); + } + + void* ptr = heap->allocate(bytes); + if (ptr) { + return ptr; + } + + ptr = redhawk::BufferManager::Allocate(sizeof(Block) + bytes); + if (ptr) { + Block* block = new (ptr) Block(0, 0); + return block->data(); + } + + return 0; + } + + void deallocateHybrid(void* ptr) + { + redhawk::shm::Heap* heap = redhawk::shm::getProcessHeap(); + if (!heap) { + redhawk::BufferManager::Deallocate(ptr); + return; + } + + Block* block = Block::from_pointer(ptr); + assert(block->valid()); + if (!block->getSuperblock()) { + // Invalidate the block and pass it on to BufferManager + block->~Block(); + redhawk::BufferManager::Deallocate(block); + } else { + heap->deallocate(ptr); + } + } + } +} diff --git a/redhawk/src/base/framework/shm/Block.h b/redhawk/src/base/framework/shm/Block.h new file mode 100644 index 000000000..fa1662e1c --- /dev/null +++ b/redhawk/src/base/framework/shm/Block.h @@ -0,0 +1,229 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef REDHAWK_SHM_BLOCK_H +#define REDHAWK_SHM_BLOCK_H + +#include +#include + +#include "atomic_counter.h" +#include "offset_ptr.h" + +namespace redhawk { + + namespace shm { + + class Superblock; + + struct Block { + // NB: Least significant bit of magic number is reserved for the + // "previous block in use" flag + typedef uint32_t magic_type; + static const magic_type BLOCK_MAGIC = 0xbaddc0de; + + // Quantum for block allocation; offset and size are in terms of + // this block size (i.e., not bytes) + static const size_t BLOCK_SIZE = 16; + + typedef uint32_t blocksize_type; + + Block(blocksize_type offset, blocksize_type blocks) : + _magic(Block::BLOCK_MAGIC), + _refcount(-1), + _offset(offset), + _size(blocks) + { + } + + ~Block() + { + // Invert the bit pattern for the magic number to make it more + // obvious that a block has been destroyed when debugging + _magic = ~BLOCK_MAGIC; + _size = -1; + } + + blocksize_type offset() const + { + return _offset; + } + + size_t byteOffset() const + { + return _offset * BLOCK_SIZE; + } + + blocksize_type size() const + { + return _size; + } + + size_t byteSize() const + { + return _size * BLOCK_SIZE; + } + + Block* split(blocksize_type blocks) + { + // Only free blocks can be split + assert(isFree()); + + // Adjust the size of the this block, then use the next() method to get + // a pointer to where the new block starts; note that it is not a valid + // block yet, until the in-place constructor is called + size_t remainder = _size - blocks; + assert(remainder < _size); + _size = blocks; + void* ptr = next(); + + // Initialize the block, which must also be free, so mark the tail in + // case it gets left-coalesced + Block* block = new (ptr) Block(_offset + blocks, remainder); + block->markTail(); + return block; + } + + void join(Block* block) + { + assert(block == next()); + _size += block->_size; + + // Invalidate the discarded block + block->~Block(); + } + + void incref() + { + _refcount.increment(); + } + 
+ size_t decref() + { + return _refcount.decrement(); + } + + void* data() + { + return (this + 1); + } + + Superblock* getSuperblock() + { + if (_offset == 0) { + return 0; + } + return offset_ptr(this, -(ptrdiff_t)byteOffset()); + } + + bool isPreviousFree() const + { + return _magic & FLAG_PREV; + } + + void setPreviousFree() + { + _magic |= FLAG_PREV; + } + + void setPreviousUsed() + { + _magic &= ~FLAG_PREV; + } + + void markTail() + { + blocksize_type* tail = next()->_getPreviousSize(); + *tail = _size; + } + + Block* prev() + { + if (isPreviousFree()) { + ptrdiff_t diff = *_getPreviousSize() * BLOCK_SIZE; + return offset_ptr(this, -diff); + } else { + // Previous block is in use + return 0; + } + } + + Block* next() + { + return offset_ptr(this, byteSize()); + } + + const Block* next() const + { + return offset_ptr(this, byteSize()); + } + + static Block* from_pointer(void* ptr) + { + return reinterpret_cast(ptr) - 1; + } + + static size_t bytes_to_blocks(size_t bytes) + { + return (bytes + sizeof(Block) + Block::BLOCK_SIZE - 1) / Block::BLOCK_SIZE; + } + + bool valid() const + { + return (_magic & ~FLAG_PREV) == BLOCK_MAGIC; + } + + bool isFree() const + { + return (_refcount < 0); + } + + void markFree() + { + _refcount = -1; + } + + void markUsed() + { + _refcount = 1; + } + + int getRefcount() const + { + return _refcount; + } + + private: + static const magic_type FLAG_PREV = 1; + + blocksize_type* _getPreviousSize() + { + return reinterpret_cast(this) - 1; + } + + magic_type _magic; + atomic_counter _refcount; + blocksize_type _offset; + blocksize_type _size; + }; + } +} + +#endif // REDHAWK_SHM_BLOCK_H diff --git a/redhawk/src/base/framework/shm/Heap.cpp b/redhawk/src/base/framework/shm/Heap.cpp new file mode 100644 index 000000000..b50ea048e --- /dev/null +++ b/redhawk/src/base/framework/shm/Heap.cpp @@ -0,0 +1,205 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. 
+ * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include +#include "Superblock.h" +#include "Block.h" +#include "ThreadState.h" + +#include +#include +#include +#include +#include + +#include + +#include + +using namespace redhawk::shm; + +#define PAGE_ROUND_DOWN(x,p) ((x/p)*p) +#define PAGE_ROUND_UP(x,p) (((x+p-1)/p)*p) + +bool MemoryRef::operator! 
() const
+{
+    return heap.empty();
+}
+
+class Heap::PrivateHeap {
+public:
+    PrivateHeap(int id, Heap* heap) :
+        _id(id),
+        _heap(heap)
+    {
+    }
+
+    void* allocate(size_t bytes)
+    {
+        // NB: Thread-specific state may not be needed with per-CPU private
+        //     heaps; it is maintained here to avoid modifying the superblock
+        //     API (for now)
+        ThreadState* state = _heap->_getThreadState();
+        boost::mutex::scoped_lock lock(_mutex);
+        for (SuperblockList::iterator superblock = _superblocks.begin(); superblock != _superblocks.end(); ++superblock) {
+            void* ptr = (*superblock)->allocate(state, bytes);
+            if (ptr) {
+                // Move the successful superblock to the front of the list,
+                // under the assumption that it is more likely to satisfy a
+                // future request
+                std::iter_swap(superblock, _superblocks.begin());
+                return ptr;
+            }
+        }
+
+        Superblock* superblock = _heap->_createSuperblock(bytes);
+        if (superblock) {
+            _superblocks.insert(_superblocks.begin(), superblock);
+            return superblock->allocate(state, bytes);
+        }
+
+        return 0;
+    }
+
+private:
+    int _id;
+    Heap* _heap;
+
+    boost::mutex _mutex;
+    typedef std::vector<Superblock*> SuperblockList;
+    SuperblockList _superblocks;
+};
+
+Heap::Heap(const std::string& name) :
+    _file(name),
+    _canGrow(true)
+{
+    _file.create();
+    int nprocs = sysconf(_SC_NPROCESSORS_CONF);
+    for (int id = 0; id < nprocs; ++id) {
+        _allocs.push_back(new PrivateHeap(id, this));
+    }
+}
+
+Heap::~Heap()
+{
+    // Remove the file when the owner exits; other processes connected to the
+    // same superblock file will still be able to access everything, but no new
+    // connections are possible
+    try {
+        _file.file().unlink();
+    } catch (const std::exception&) {
+        // It may have been removed from another context, nothing else to do
+    }
+
+#ifdef HEAP_DEBUG
+    std::cout << _superblocks.size() << " superblocks" << std::endl;
+    std::cout << _file.size() << " total bytes" << std::endl;
+#endif
+}
+
+void* Heap::allocate(size_t bytes)
+{
+    PrivateHeap* heap = _getPrivateHeap();
+
    return heap->allocate(bytes);
+}
+
+void Heap::deallocate(void* ptr)
+{
+    Superblock::deallocate(ptr);
+}
+
+MemoryRef Heap::getRef(const void* ptr)
+{
+    Block* block = Block::from_pointer(const_cast<void*>(ptr));
+    MemoryRef ref;
+    const Superblock* superblock = block->getSuperblock();
+    if (superblock) {
+        ref.heap = superblock->heap();
+        ref.superblock = superblock->offset();
+    } else {
+        ref.heap = "";
+        ref.superblock = 0;
+    }
+    ref.offset = block->offset();
+    return ref;
+}
+
+const std::string& Heap::name() const
+{
+    return _file.name();
+}
+
+Heap::PrivateHeap* Heap::_getPrivateHeap()
+{
+    size_t cpuid = sched_getcpu();
+    assert(cpuid < _allocs.size());
+    return _allocs[cpuid];
+}
+
+ThreadState* Heap::_getThreadState()
+{
+    ThreadState* state = _threadState.get();
+    if (!state) {
+        state = new ThreadState();
+        _threadState.reset(state);
+    }
+    return state;
+}
+
+Superblock* Heap::_createSuperblock(size_t minSize)
+{
+    boost::mutex::scoped_lock lock(_mutex);
+    if (!_canGrow) {
+        return 0;
+    }
+
+    size_t superblock_size = DEFAULT_SUPERBLOCK_SIZE;
+    const char* superblock_size_env = getenv("SUPERBLOCK_SIZE");
+    if (superblock_size_env) {
+        char* end;
+        superblock_size = strtoll(superblock_size_env, &end, 10);
+        if ((superblock_size == 0) || (*end != '\0')) {
+            std::cerr << "Invalid superblock size, using default" << std::endl;
+            superblock_size = DEFAULT_SUPERBLOCK_SIZE;
+        } else {
+            // Shared memory should be allocated along page boundaries
+            superblock_size = PAGE_ROUND_UP(superblock_size, MappedFile::PAGE_SIZE);
+            std::cout << "Using superblock size " << superblock_size << std::endl;
+        }
+    }
+
+    // Ensure that the superblock is large enough for the request, accounting
+    // for the overhead of the block metadata (roughly)
+    // TODO: Should extra large requests be handled differently? In glibc,
+    // above a certain size it starts using mmap/munmap. As a quick "fix"
+    // use a minimum of 2 blocks plus overhead.
+ minSize = (minSize + 64) * 2; + if (minSize > superblock_size) { + superblock_size = PAGE_ROUND_UP(minSize, MappedFile::PAGE_SIZE); + } + + try { + return _file.createSuperblock(superblock_size); + } catch (const std::bad_alloc&) { + _canGrow = false; + return 0; + } +} diff --git a/redhawk/src/base/framework/shm/HeapClient.cpp b/redhawk/src/base/framework/shm/HeapClient.cpp new file mode 100644 index 000000000..cfdadb9ac --- /dev/null +++ b/redhawk/src/base/framework/shm/HeapClient.cpp @@ -0,0 +1,74 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include +#include + +#include "Superblock.h" + +using namespace redhawk::shm; + +HeapClient::HeapClient() +{ +} + +HeapClient::~HeapClient() +{ + detach(); +} + +void* HeapClient::fetch(const MemoryRef& ref) +{ + SuperblockFile* file = _getSuperblockFile(ref.heap); + Superblock* superblock = file->getSuperblock(ref.superblock); + return superblock->attach(ref.offset); +} + +void HeapClient::deallocate(void* ptr) +{ + Superblock::deallocate(ptr); +} + +void HeapClient::detach() +{ + for (FileMap::iterator file = _files.begin(); file != _files.end(); ++file) { + file->second->close(); + delete file->second; + } + _files.clear(); +} + +SuperblockFile* HeapClient::_getSuperblockFile(const std::string& name) +{ + FileMap::iterator existing = _files.find(name); + if (existing != _files.end()) { + return existing->second; + } + + SuperblockFile* file = new SuperblockFile(name); + try { + file->open(); + } catch (const std::exception& exc) { + delete file; + throw std::invalid_argument("cannot open superblock file"); + } + _files[name] = file; + return file; +} diff --git a/redhawk/src/base/framework/shm/MappedFile.cpp b/redhawk/src/base/framework/shm/MappedFile.cpp new file mode 100644 index 000000000..92f0715d0 --- /dev/null +++ b/redhawk/src/base/framework/shm/MappedFile.cpp @@ -0,0 +1,150 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include +#include + +#include +#include +#include +#include +#include + +using namespace redhawk::shm; + +static std::string error_string() +{ + return strerror(errno); +} + +const size_t MappedFile::PAGE_SIZE = sysconf(_SC_PAGESIZE); + +MappedFile::MappedFile(const std::string& name) : + _name(name), + _fd(-1) +{ +} + +void MappedFile::create() +{ + if (_fd >= 0) { + throw std::runtime_error("shm file is already open"); + } + + _fd = shm_open(_name.c_str(), O_RDWR|O_CREAT|O_TRUNC, 0666); + if (_fd < 0) { + throw std::runtime_error("shm_open: " + error_string()); + } +} + +void MappedFile::open() +{ + if (_fd >= 0) { + throw std::runtime_error("shm file is already open"); + } + + _fd = shm_open(_name.c_str(), O_RDWR, 0); + if (_fd < 0) { + throw std::runtime_error("shm_open: " + error_string()); + } +} + +MappedFile::~MappedFile() +{ + close(); +} + +const std::string& MappedFile::name() const +{ + return _name; +} + +size_t MappedFile::size() const +{ + struct stat statbuf; + if (fstat(_fd, &statbuf)) { + throw std::runtime_error("fstat: " + error_string()); + } + return statbuf.st_size; +} + +void MappedFile::resize(size_t bytes) +{ + size_t current_size = size(); + if (bytes <= current_size) { + return; + } + int status = posix_fallocate(_fd, current_size, bytes - current_size); + if (status == 0) { + return; + } else if (status == ENOSPC) { + throw std::bad_alloc(); + } else { + throw std::runtime_error("fallocate failed"); + } +} + +void* MappedFile::map(size_t bytes, mode_e mode, off_t offset) +{ + int prot = PROT_READ; + if (mode == READWRITE) { + prot |= PROT_WRITE; + } + + void* addr = mmap(0, bytes, prot, MAP_SHARED, _fd, offset); + if (addr == MAP_FAILED) { + throw std::runtime_error("mmap: " + error_string()); + } + 
return addr; +} + +void* MappedFile::remap(void* oldAddr, size_t oldSize, size_t newSize) +{ + int flags = MREMAP_MAYMOVE; + void* addr = mremap(oldAddr, oldSize, newSize, flags); + if (addr == MAP_FAILED) { + throw std::runtime_error("mremap: " + error_string()); + } + return addr; +} + +void MappedFile::unmap(void* ptr, size_t bytes) +{ + if (munmap(ptr, bytes)) { + throw std::runtime_error("munmap: " + error_string()); + } +} + +void MappedFile::close() +{ + if (_fd >= 0) { + ::close(_fd); + _fd = -1; + } +} + +void MappedFile::unlink() +{ + if (shm_unlink(_name.c_str())) { + throw std::runtime_error("unlink: " + error_string()); + } +} diff --git a/redhawk/src/base/framework/shm/Superblock.cpp b/redhawk/src/base/framework/shm/Superblock.cpp new file mode 100644 index 000000000..96febdce0 --- /dev/null +++ b/redhawk/src/base/framework/shm/Superblock.cpp @@ -0,0 +1,488 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "Superblock.h" +#include "Block.h" +#include "ThreadState.h" +#include "offset_ptr.h" + +#include +#include + +#include +#include +#include +#include +#include +#include + +using namespace redhawk; +using redhawk::shm::Superblock; +using redhawk::shm::Block; +using redhawk::shm::ThreadState; + +#define ALLOC_DEBUG 0 +#if ALLOC_DEBUG > 0 +#define LOG_ALLOC(x) std::cout << "+ " << ((x) - sizeof(Block)) << std::endl; +#define LOG_DEALLOC(x) std::cout << "- " << ((x) - sizeof(Block)) << std::endl; +#else +#define LOG_ALLOC(x) +#define LOG_DEALLOC(x) +#endif + +struct Superblock::FreeBlock : public Block { + FreeBlock(size_t offset, size_t bytes) : + Block(offset, bytes), + prev_free(0), + next_free(0), + prev_size(0), + next_size(0) + { + markTail(); + } + + uint32_t prev_free; + uint32_t next_free; + uint32_t prev_size; + uint32_t next_size; +}; + +Superblock::Superblock(const std::string& heap, size_t offset, size_t size) : + _offset(offset), + _size(size), + _dataStart(MappedFile::PAGE_SIZE), + _used(0), + _first(0), + _last(0) +{ + assert(heap.size() < 256); + strcpy(_heapname, heap.c_str()); + + uint32_t block_start = _dataStart / Block::BLOCK_SIZE; + uint32_t block_count = size / Block::BLOCK_SIZE; + FreeBlock* block = new (_data()) FreeBlock(block_start, block_count); + _queueFree(block); +} + +Superblock::~Superblock() +{ +} + +const char* Superblock::heap() const +{ + return _heapname; +} + +size_t Superblock::offset() const +{ + return _offset; +} + +size_t Superblock::size() const +{ + return _size; +} + +size_t Superblock::used() const +{ + return _used; +} + +void* Superblock::attach(size_t offset) +{ + Block* block = offset_ptr(this, offset * Block::BLOCK_SIZE); + if (!block->valid()) { + throw std::bad_alloc(); + } + block->incref(); + return block->data(); +} + +void Superblock::deallocate(void* ptr) +{ + // The C standard says it's safe to call free() with a null pointer, so for + // consistency, handle that case here + if (!ptr) { + 
return; + } + + Block* block = Block::from_pointer(ptr); + assert(block->valid()); + if (block->decref() == 0) { + Superblock* superblock = block->getSuperblock(); + superblock->_deallocate(block); + } +} + +void Superblock::dump(std::ostream& stream) const +{ + scoped_lock lock(_lock); + _dump(stream); +} + +void Superblock::_dump(std::ostream& stream) const +{ + stream << "Free list:" << std::endl; + int index = 0; + for (const FreeBlock* bin = _offsetToBlock(_first); bin; bin = _offsetToBlock(bin->next_size)) { + if (!bin->valid()) { + stream << "Bad bin@" << bin << " [" << index << "]" << std::endl; + break; + } + std::cout << "Bin " << bin->byteSize() << std::endl; + for (const FreeBlock* free_block = bin; free_block; free_block = _offsetToBlock(free_block->next_free)) { + stream << "block@" << free_block << ":" << std::endl; + if (!free_block->valid()) { + stream << " INVALID" << std::endl; + break; + } + stream << " refcount: " << free_block->getRefcount() << std::endl; + stream << " offset: " << free_block->offset() << std::endl; + stream << " prev: " << free_block->prev_free << std::endl; + stream << " next: " << free_block->next_free << std::endl; + ++index; + } + } + + stream << std::endl + << "All blocks:" << std::endl; + size_t blocks = 0; + for (const Block* block = _begin(); block != _end(); block = block->next()) { + stream << "block@" << block << ":" << std::endl; + if (!block->valid()) { + stream << " INVALID" << std::endl; + break; + } else { + stream << " prevfree: " << block->isPreviousFree() << std::endl; + stream << " refcount: " << block->getRefcount() << std::endl; + stream << " offset: " << block->offset() << std::endl; + stream << " size: " << block->byteSize() << std::endl; + blocks++; + } + } + stream << blocks << " block(s)" << std::endl; +} + +char* Superblock::_data() +{ + char* ptr = reinterpret_cast(this); + return ptr + _dataStart; +} + +const char* Superblock::_data() const +{ + return const_cast(this)->_data(); +} + +const 
Block* Superblock::_begin() const +{ + return reinterpret_cast(_data()); +} + +const Block* Superblock::_end() const +{ + return reinterpret_cast(_data() + _size); +} + +Superblock::FreeBlock* Superblock::_offsetToBlock(uint32_t offset) +{ + if (offset) { + return offset_ptr(this, offset * Block::BLOCK_SIZE); + } else { + return 0; + } +} + +const Superblock::FreeBlock* Superblock::_offsetToBlock(uint32_t offset) const +{ + if (offset) { + return offset_ptr(this, offset * Block::BLOCK_SIZE); + } else { + return 0; + } +} + +void Superblock::_deallocate(Block* block) +{ + scoped_lock lock(_lock); + assert(block->byteSize() >= sizeof(FreeBlock)); + LOG_DEALLOC(block->byteSize()); + + _used -= block->byteSize(); + +#if ALLOC_DEBUG > 1 + std::cout << "Returning block@" << block << std::endl; + std::cout << " offset: " << block->offset() << std::endl; + std::cout << " size: " << block->byteSize() << std::endl; +#endif + + // Try to recombine adjacent memory + _coalesceNext(block); + if (block->isPreviousFree()) { + // Previous block is free, combine and re-queue the "new" block + block = _coalescePrevious(block); + } else { + // Mark this block as free to allow it to be right-coalesced in the + // future + block->markFree(); + } + + // Write the tail so that the next block can determine the size of this one + // (after all coalescing is complete) + block->markTail(); + + _queueFree(block); + +#if ALLOC_DEBUG > 1 + _dump(std::cout); +#endif +} + +void Superblock::_queueFree(Block* block) +{ + assert(block->isFree()); + + FreeBlock* free_block = reinterpret_cast(block); + if (!_first) { + // No free blocks exist + free_block->prev_free = 0; + free_block->next_free = 0; + free_block->prev_size = 0; + free_block->next_size = 0; + _first = _last = free_block->offset(); + } else { + for (FreeBlock* bin = _offsetToBlock(_first); bin; bin = _offsetToBlock(bin->next_size)) { + assert(bin->valid()); + assert(bin->isFree()); + if (bin->size() == free_block->size()) { + // Insert 
into existing bin + free_block->prev_free = bin->offset(); + free_block->next_free = bin->next_free; + if (free_block->next_free) { + FreeBlock* next_free = _offsetToBlock(free_block->next_free); + next_free->prev_free = free_block->offset(); + } + free_block->prev_size = 0; + free_block->next_size = 0; + bin->next_free = free_block->offset(); + return; + } else if (bin->size() > free_block->size()) { + // The current bin is larger, insert block as a new bin before + // the current one + free_block->prev_free = 0; + free_block->next_free = 0; + free_block->next_size = bin->offset(); + FreeBlock* prev_size = _offsetToBlock(bin->prev_size); + free_block->prev_size = bin->prev_size; + if (prev_size) { + prev_size->next_size = free_block->offset(); + } + bin->prev_size = free_block->offset(); + if (bin->offset() == _first) { + _first = free_block->offset(); + } + return; + } + } + // No matching bin, and the block is larger than all existing bins + FreeBlock* prev_size = _offsetToBlock(_last); + _last = free_block->offset(); + prev_size->next_size = _last; + free_block->prev_free = 0; + free_block->next_free = 0; + free_block->prev_size = prev_size->offset(); + free_block->next_size = 0; + } +} + +void* Superblock::allocate(ThreadState* thread, size_t bytes) +{ + scoped_lock lock(_lock, false); + if (lock.trylock()) { + thread->contention = std::max(thread->contention - 1, 0); + } else { + thread->contention++; + lock.lock(); + } + + // Add overhead for block metadata, making sure that the total byte size + // is enough for a free block, then round up to the nearest block size to + // preserve alignment on all architectures; otherwise, atomic operations + // may cause a fatal bus error. 
+ bytes = std::max(bytes + sizeof(Block), sizeof(FreeBlock)); + size_t blocks = Block::bytes_to_blocks(bytes); + + LOG_ALLOC(bytes); + + FreeBlock* block = _findAvailable(blocks); + if (!block) { + // No free blocks, give up + return 0; + } + assert(block->valid()); + assert(block->isFree()); + assert(block->size() >= blocks); + +#if ALLOC_DEBUG > 1 + std::cout << "Allocating from block@" << block << std::endl; + std::cout << " offset: " << block->offset() << std::endl; + std::cout << " size: " << block->byteSize() << std::endl; + std::cout << " prev: " << block->prev_free << std::endl; + std::cout << " next: " << block->next_free << std::endl; +#endif + + // Remove the block from the free list; if there's a remainder, it will be + // of a different size, so it gets re-inserted later + _removeFreeBlock(block); + + // If it's not taking the entire block, assign the remainder to a new block + // (taking into account that there has to be enough left over to store the + // extra pointers) + size_t remainder = block->size() - blocks; + if ((remainder * Block::BLOCK_SIZE) > sizeof(FreeBlock)) { + FreeBlock* next_block = reinterpret_cast(block->split(blocks)); + _queueFree(next_block); + +#if ALLOC_DEBUG > 1 + std::cout << "Used block@" << block << std::endl; + std::cout << " offset: " << block->offset() << std::endl; + std::cout << " size: " << block->byteSize() << std::endl; + + std::cout << "Remain block@" << next_block << std::endl; + std::cout << " offset: " << next_block->offset() << std::endl; + std::cout << " size: " << next_block->byteSize() << std::endl; +#endif + assert(next_block->valid()); + } + + // Mark the block as "allocated" + block->markUsed(); + Block* next = block->next(); + if (next != _end()) { + assert(next->valid()); + next->setPreviousUsed(); + } + +#if ALLOC_DEBUG > 1 + _dump(std::cout); +#endif + _used += block->byteSize(); + + return block->data(); +} + +void Superblock::_removeFreeBlock(FreeBlock* block) +{ + FreeBlock* prev_block = 
_offsetToBlock(block->prev_free); + FreeBlock* next_block = _offsetToBlock(block->next_free); + if (prev_block) { + // Block is not the first in its bin + prev_block->next_free = block->next_free; + if (next_block) { + next_block->prev_free = block->prev_free; + } + } else { + FreeBlock* prev_size = _offsetToBlock(block->prev_size); + FreeBlock* next_size = _offsetToBlock(block->next_size); + if (next_block) { + // There's another block in this bin, make it the head of the bin + next_block->prev_free = 0; + next_block->prev_size = block->prev_size; + next_block->next_size = block->next_size; + if (prev_size) { + prev_size->next_size = block->next_free; + } else { + _first = block->next_free; + } + if (next_size) { + next_size->prev_size = block->next_free; + } else { + _last = block->next_free; + } + } else { + // Removing this bin entirely + if (prev_size) { + prev_size->next_size = block->next_size; + } else { + _first = block->next_size; + } + if (next_size) { + next_size->prev_size = block->prev_size; + } else { + _last = block->prev_size; + } + } + } +} + +Superblock::FreeBlock* Superblock::_findAvailable(size_t blocks) +{ + for (FreeBlock* bin = _offsetToBlock(_first); bin; bin = _offsetToBlock(bin->next_size)) { + assert(bin->valid()); + assert(bin->isFree()); + if (bin->size() >= blocks) { + if (bin->next_free) { + // Take the second entry in the bin to avoid having to alter + // the bin list + FreeBlock* next_free = _offsetToBlock(bin->next_free); + assert(bin->size() == next_free->size()); + return next_free; + } else { + // This is the only block in the bin + return bin; + } + } + } + return 0; +} + +void Superblock::_coalesceNext(Block* block) +{ + Block* next = block->next(); + if (next == _end()) { + return; + } + + assert(next->valid()); + if (next->isFree()) { + // Next block is free, append it to this block + _removeFreeBlock(reinterpret_cast(next)); + block->join(next); + } else { + // Next block is in use, just inform it that this block is free 
so that + // it can coalesce this block when it gets freed + next->setPreviousFree(); + } +} + +Block* Superblock::_coalescePrevious(Block* block) +{ + FreeBlock* prev = reinterpret_cast(block->prev()); + assert(prev->valid()); + assert(prev->isFree()); + + // Pull the block off of the free list; its size is going to change, so + // its position will need to be adjusted + _removeFreeBlock(prev); + prev->join(block); + + // Return the "new" block + return prev; +} diff --git a/redhawk/src/base/framework/shm/Superblock.h b/redhawk/src/base/framework/shm/Superblock.h new file mode 100644 index 000000000..b3f013b9d --- /dev/null +++ b/redhawk/src/base/framework/shm/Superblock.h @@ -0,0 +1,95 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef REDHAWK_SHM_SUPERBLOCK_H +#define REDHAWK_SHM_SUPERBLOCK_H + +#include +#include + +#include "shared_mutex.h" + +namespace redhawk { + + namespace shm { + + class ThreadState; + class Block; + + class Superblock { + public: + Superblock(const std::string& heap, size_t offset, size_t size); + ~Superblock(); + + const char* heap() const; + + size_t offset() const; + + size_t size() const; + + size_t used() const; + + void* allocate(ThreadState* thread, size_t bytes); + + void* attach(size_t offset); + + static void deallocate(void* ptr); + + void dump(std::ostream& stream) const; + + protected: + struct FreeBlock; + + void _deallocate(Block* block); + + void _dump(std::ostream& stream) const; + + char* _data(); + const char* _data() const; + const Block* _begin() const; + const Block* _end() const; + void _queueFree(Block* block); + + FreeBlock* _findAvailable(size_t bytes); + void _removeFreeBlock(FreeBlock* block); + + void _coalesceNext(Block* block); + Block* _coalescePrevious(Block* block); + + FreeBlock* _offsetToBlock(uint32_t offset); + const FreeBlock* _offsetToBlock(uint32_t offset) const; + + char _heapname[256]; + const uint64_t _offset; + const uint32_t _size; + const uint32_t _dataStart; + mutable redhawk::shared_mutex _lock; + + volatile size_t _used; + + // Free list pointers + uint32_t _first; + uint32_t _last; + }; + + } +} + +#endif // REDHAWK_SHM_SUPERBLOCK_H diff --git a/redhawk/src/base/framework/shm/SuperblockFile.cpp b/redhawk/src/base/framework/shm/SuperblockFile.cpp new file mode 100644 index 000000000..b9e65de2c --- /dev/null +++ b/redhawk/src/base/framework/shm/SuperblockFile.cpp @@ -0,0 +1,294 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include +#include + +#include +#include +#include + +#include + +#include "Superblock.h" +#include "atomic_counter.h" + +using namespace redhawk::shm; + +struct SuperblockFile::Header { + // Magic number to identify superblock files. This should never change. + typedef uint32_t magic_type; + static const magic_type SUPERBLOCK_MAGIC = 0xACEB70CC; + + // ABI version of superblock file. If the layout of the header or the + // Superblock class changes, this version must be incremented.
+ typedef uint32_t version_type; + static const version_type SUPERBLOCK_VERSION = 1; + + Header() : + magic(SUPERBLOCK_MAGIC), + version(SUPERBLOCK_VERSION), + refcount(1), + creator(getpid()) + { + } + + const magic_type magic; + const version_type version; + atomic_counter refcount; + const pid_t creator; +}; + +SuperblockFile::SuperblockFile(const std::string& name) : + _file(name), + _attached(false), + _header(0) +{ +} + +SuperblockFile::~SuperblockFile() +{ + close(); +} + +bool SuperblockFile::IsSuperblockFile(const std::string& name) +{ + std::string path = redhawk::shm::getSystemPath(); + path += "/" + name; + std::ifstream file(path.c_str()); + if (!file) { + return false; + } + Header::magic_type magic = 0; + if (!file.read(reinterpret_cast(&magic), sizeof(magic))) { + return false; + } + + return magic == Header::SUPERBLOCK_MAGIC; +} + +const std::string& SuperblockFile::name() const +{ + return _file.name(); +} + +MappedFile& SuperblockFile::file() +{ + return _file; +} + +pid_t SuperblockFile::creator() const +{ + if (!_header) { + return 0; + } + return _header->creator; +} + +int SuperblockFile::refcount() const +{ + if (!_header) { + return -1; + } + return _header->refcount; +} + +bool SuperblockFile::isOrphaned() const +{ + if (!_header) { + throw std::logic_error("not attached"); + } + + // Check that the creator PID is still alive + return (kill(_header->creator, 0) != 0); +} + +SuperblockFile::Statistics SuperblockFile::getStatistics() +{ + Statistics stats; + stats.size = 0; + stats.used = 0; + stats.superblocks = 0; + stats.unused = 0; + + // First superblock starts at next page after the header + size_t offset = MappedFile::PAGE_SIZE; + const size_t end = _file.size(); + while (offset < end) { + // Map just the header of the superblock; no calls here need to acquire + // its lock, so this prevents accidental modifications + void* base = _file.map(MappedFile::PAGE_SIZE, MappedFile::READONLY, offset); + const Superblock* superblock = 
reinterpret_cast(base); + + // Extra safety check; since we're walking through the superblocks, the + // offsets should always be correct, but just in case... + bool valid = (superblock->offset() == offset); + if (valid) { + stats.size += superblock->size(); + size_t used = superblock->used(); + stats.used += used; + if (!used) { + stats.unused++; + } + stats.superblocks++; + // Account for the superblock overhead + offset += MappedFile::PAGE_SIZE + superblock->size(); + } + // Don't forget to unmap--it doesn't happen automatically! + _file.unmap(base, MappedFile::PAGE_SIZE); + + if (!valid) { + break; + } + } + + return stats; +} + +void SuperblockFile::create() +{ + if (_header) { + throw std::logic_error("file is already open"); + } + + _file.create(); + + // Use a page to create the header + try { + _file.resize(MappedFile::PAGE_SIZE); + } catch (const std::exception&) { + // Something is terribly wrong, probably out of memory; remove the file + // and relay the exception + _file.unlink(); + throw; + } + void* base = _file.map(MappedFile::PAGE_SIZE, MappedFile::READWRITE); + _header = new (base) Header; + _attached = true; +} + +void SuperblockFile::open(bool attach) +{ + if (_header) { + throw std::logic_error("file is already open"); + } + + _file.open(); + + // Check for a heap that was created on a full tmpfs--the file exists but + // has no allocated memory + if (_file.size() < MappedFile::PAGE_SIZE) { + throw std::runtime_error("invalid superblock file (no header)"); + } + + // Map the file and overlay the header structure over it, checking the + // magic number to make sure it's really a superblock file + void* base = _file.map(MappedFile::PAGE_SIZE, MappedFile::READWRITE); + Header* header = reinterpret_cast(base); + if (header->magic != Header::SUPERBLOCK_MAGIC) { + throw std::runtime_error("invalid superblock file (magic number does not match)"); + } else if (header->version != Header::SUPERBLOCK_VERSION) { + throw std::runtime_error("incompatible 
superblock file (version mismatch)"); + } + + // Store a reference the header and attach, so that we clean up on close + _header = header; + if (attach) { + _header->refcount.increment(); + _attached = true; + } +} + +void SuperblockFile::close() +{ + if (!_header) { + return; + } + + _detach(); + + // Unmap the header to avoid keeping the file alive + _file.unmap(_header, MappedFile::PAGE_SIZE); + + _file.close(); + + _header = 0; +} + +Superblock* SuperblockFile::getSuperblock(size_t offset) +{ + // Check if the superblock is already mapped + SuperblockMap::iterator existing = _superblocks.find(offset); + if (existing != _superblocks.end()) { + return existing->second; + } + + return _mapSuperblock(offset); +} + +Superblock* SuperblockFile::createSuperblock(size_t bytes) +{ + // Allocate 1 page for the header, plus the superblock memory + size_t current_offset = _file.size(); + size_t total_size = MappedFile::PAGE_SIZE + bytes; + _file.resize(current_offset + total_size); + + void* base = _file.map(total_size, MappedFile::READWRITE, current_offset); + Superblock* superblock = new (base) Superblock(_file.name(), current_offset, bytes); + _superblocks[superblock->offset()] = superblock; + return superblock; +} + +void SuperblockFile::_detach() +{ + if (!_attached) { + return; + } + + if (_header->refcount.decrement() == 0) { + try { + _file.unlink(); + } catch (const std::exception&) { + // Ignore exception--someone may have already forcibly removed the + // file, but that's not a problem here. 
+ } + } +} + +Superblock* SuperblockFile::_mapSuperblock(size_t offset) +{ + // Map just the superblock's header to get the complete size + void* base = _file.map(MappedFile::PAGE_SIZE, MappedFile::READWRITE, offset); + Superblock* superblock = reinterpret_cast(base); + if (superblock->offset() != offset) { + throw std::invalid_argument("offset is not a valid superblock"); + } + size_t superblock_size = superblock->size(); + + // Remap to get the full superblock size + base = _file.remap(base, MappedFile::PAGE_SIZE, MappedFile::PAGE_SIZE + superblock_size); + superblock = reinterpret_cast(base); + + // Store mapping + _superblocks[superblock->offset()] = superblock; + return superblock; +} diff --git a/redhawk/src/base/framework/shm/System.cpp b/redhawk/src/base/framework/shm/System.cpp new file mode 100644 index 000000000..ea0e897f4 --- /dev/null +++ b/redhawk/src/base/framework/shm/System.cpp @@ -0,0 +1,54 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include + +#include + +#define SHMDIR "/dev/shm" + +namespace redhawk { + namespace shm { + + const char* getSystemPath() + { + return SHMDIR; + } + + size_t getSystemTotalMemory() + { + struct statvfs status; + if (statvfs(SHMDIR, &status)) { + return 0; + } + + return (status.f_blocks * status.f_frsize); + } + + size_t getSystemFreeMemory() + { + struct statvfs status; + if (statvfs(SHMDIR, &status)) { + return 0; + } + return (status.f_bfree * status.f_frsize); + } + } +} diff --git a/redhawk/src/base/framework/shm/ThreadState.h b/redhawk/src/base/framework/shm/ThreadState.h new file mode 100644 index 000000000..6336b82ea --- /dev/null +++ b/redhawk/src/base/framework/shm/ThreadState.h @@ -0,0 +1,43 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef REDHAWK_THREADSTATE_H +#define REDHAWK_THREADSTATE_H + +namespace redhawk { + + namespace shm { + class Superblock; + + class ThreadState { + public: + ThreadState() : + last(0), + contention(0) + { + } + + shm::Superblock* last; + int contention; + }; + } +} + +#endif // REDHAWK_THREADSTATE_H diff --git a/redhawk/src/base/framework/shm/atomic_counter.h b/redhawk/src/base/framework/shm/atomic_counter.h new file mode 100644 index 000000000..bf2e0d8c3 --- /dev/null +++ b/redhawk/src/base/framework/shm/atomic_counter.h @@ -0,0 +1,82 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef ATOMIC_COUNTER_H +#define ATOMIC_COUNTER_H + +template +class atomic_counter { +public: + typedef T counter_type; + + atomic_counter() : + _value(0) + { + } + + explicit atomic_counter(counter_type value) : + _value(value) + { + } + + operator counter_type() const + { +#ifdef __ATOMIC_RELAXED + counter_type result; + __atomic_load(&_value, &result, __ATOMIC_RELAXED); + return result; +#else + return _value; +#endif + } + + atomic_counter& operator=(counter_type value) + { +#ifdef __ATOMIC_RELAXED + __atomic_store(&_value, &value, __ATOMIC_RELAXED); +#else + _value = value; +#endif + return *this; + } + + counter_type increment() + { +#ifdef __ATOMIC_RELAXED + return __atomic_add_fetch(&_value, 1, __ATOMIC_RELAXED); +#else + return __sync_add_and_fetch(&_value, 1); +#endif + } + + counter_type decrement() + { +#ifdef __ATOMIC_RELAXED + return __atomic_sub_fetch(&_value, 1, __ATOMIC_RELAXED); +#else + return __sync_sub_and_fetch(&_value, 1); +#endif + } + +private: + volatile counter_type _value; +}; + +#endif // ATOMIC_COUNTER_H diff --git a/redhawk/src/base/framework/shm/offset_ptr.h b/redhawk/src/base/framework/shm/offset_ptr.h new file mode 100644 index 000000000..4adc4cb3c --- /dev/null +++ b/redhawk/src/base/framework/shm/offset_ptr.h @@ -0,0 +1,42 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef OFFSET_PTR_HH +#define OFFSET_PTR_HH + +#include + +template +inline T* offset_ptr(void* base, ptrdiff_t offset) +{ + char* ptr = reinterpret_cast(base); + ptr += offset; + return reinterpret_cast(ptr); +} + +template +inline const T* offset_ptr(const void* base, ptrdiff_t offset) +{ + const char* ptr = reinterpret_cast(base); + ptr += offset; + return reinterpret_cast(ptr); +} + +#endif // OFFSET_PTR_HH diff --git a/redhawk/src/base/framework/shm/shared_mutex.h b/redhawk/src/base/framework/shm/shared_mutex.h new file mode 100644 index 000000000..1d12e321f --- /dev/null +++ b/redhawk/src/base/framework/shm/shared_mutex.h @@ -0,0 +1,117 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef REDHAWK_SHARED_MUTEX_H +#define REDHAWK_SHARED_MUTEX_H + +#include + +namespace redhawk { + + class shared_mutex { + public: + shared_mutex() + { + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED); + pthread_mutex_init(&_mutex, &attr); + pthread_mutexattr_destroy(&attr); + } + + ~shared_mutex() + { + pthread_mutex_destroy(&_mutex); + } + + void lock() + { + pthread_mutex_lock(&_mutex); + } + + bool trylock() + { + if (pthread_mutex_trylock(&_mutex)) { + return false; + } + return true; + } + + void unlock() + { + pthread_mutex_unlock(&_mutex); + } + + private: + pthread_mutex_t _mutex; + }; + + class scoped_lock { + public: + scoped_lock(shared_mutex& mutex, bool acquire=true) : + _mutex(&mutex), + _locked(false) + { + if (acquire) { + lock(); + } + } + + ~scoped_lock() + { + unlock(); + } + + bool trylock() + { + if (!_locked) { + _locked = _mutex->trylock(); + } + return _locked; + } + + void lock() + { + if (!_locked) { + _mutex->lock(); + _locked = true; + } + } + + void unlock() + { + if (_locked) { + _mutex->unlock(); + _locked = false; + } + } + + shared_mutex* mutex() + { + return _mutex; + } + + private: + shared_mutex* _mutex; + bool _locked; + }; +} + +#endif // SHARED_MUTEX_H diff --git a/redhawk/src/base/include/ossie/BufferManager.h b/redhawk/src/base/include/ossie/BufferManager.h new file mode 100644 index 000000000..07ed5f528 --- /dev/null +++ b/redhawk/src/base/include/ossie/BufferManager.h @@ -0,0 +1,349 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_BUFFERMANAGER_H +#define REDHAWK_BUFFERMANAGER_H + +#include + +#include +#include + +namespace redhawk { + /** + * @brief Singleton class providing buffer allocation management. + * + * The %BufferManager improves the performance of repetetive allocations by + * caching allocated memory blocks on a per-thread basis. When a memory + * block is deallocated, it is returned to the cache of the thread that + * made the original allocation. Future allocations of the same (or nearly + * the same) size on the originating thread can return cached memory blocks + * rather than allocating a new memory block from the operating system. In + * comparison to using the operating system's facilities, the memory is not + * zeroed before it is returned to the caller; skipping this step provides + * the most significant optimization in the allocation process. + * + * %BufferManager's API gives additional control over caching policy to + * limit the size of per-thread caches. + */ + class BufferManager { + public: + + /** + * @brief STL-compliant allocator using BufferManager. + * + * %Allocator goes through the BufferManager to improve performance + * with repetitive allocations. In testing, allocations under 1K bytes + * did not show any benefit from %BufferManager; as a result, they + * defer to the basic std::allocator implementation. 
+ */ + template + class Allocator : public std::allocator + { + public: + typedef std::allocator base; + typedef typename base::pointer pointer; + typedef typename base::value_type value_type; + typedef typename base::size_type size_type; + + static const size_type MIN_ELEMENTS = 1024 / sizeof(value_type); + + template + struct rebind { + typedef Allocator other; + }; + + Allocator() throw() : + base() + { + } + + Allocator(const Allocator& other) throw() : + base(other) + { + } + + template + Allocator(const Allocator& other) throw() : + base(other) + { + } + + pointer allocate(size_type count) + { + if (count >= MIN_ELEMENTS) { + return static_cast(BufferManager::Allocate(count * sizeof(value_type))); + } else { + return base::allocate(count); + } + } + + void deallocate(pointer ptr, size_type count) + { + if (count >= MIN_ELEMENTS) { + BufferManager::Deallocate(ptr); + } else { + base::deallocate(ptr, count); + } + } + }; + + /** + * @brief Gets the %BufferManager singleton. + * @return Reference to the singleton instace. + */ + static BufferManager& Instance(); + + /** + * Static convenience function to allocate memory. + * @see allocate(size_t) + */ + static inline void* Allocate(size_t bytes) + { + return Instance().allocate(bytes); + } + + /** + * Static convenience function to deallocate memory. + * @see deallocate(void*) + */ + static inline void Deallocate(void* ptr) + { + return Instance().deallocate(ptr); + } + + /** + * @brief Allocate memory. + * @param bytes Required number of bytes. + * @return A void* to a memory block of at least @a bytes. + * + * If the requested allocation can be satisfied by the current thread's + * cache, a previously used memory block is returned. Otherwise, a new + * memory block is allocated from the operating system. + */ + void* allocate(size_t bytes); + + /** + * @brief Deallocate memory. + * @param ptr The memory block to deallocate. 
+ * + * The memory block is returned to the cache of the thread that originally + * allocated it. + */ + void deallocate(void* ptr); + + /** + * @brief Checks whether the %BufferManager is enabled. + * @return true if the %BufferManager is enabled. + * + * If the %BufferManager is disabled, deallocated memory blocks are + * immediately returned to the operating system. + */ + bool isEnabled() const; + + /** + * @brief Enable or disable the %BufferManager. + * @param enabled true to enable the %BufferManager, false to disable + * + * If the %BufferManager is changing from enabled to disabled, all + * currently cached memory blocks are returned to the operating system. + */ + void enable(bool enabled); + + /** + * @returns The current per-thread cache byte limit. + */ + size_t getMaxThreadBytes() const; + + /** + * @brief Sets the per-thread cache byte limit. + * @param bytes Maximum cached bytes (per thread). + */ + void setMaxThreadBytes(size_t bytes); + + /** + * @returns The current per-thread cached memory block limit. + */ + size_t getMaxThreadBlocks() const; + + /** + * @brief Sets the per-thread cached memory block limit. + * @param bytes Maximum cached memory blocks (per thread). + */ + void setMaxThreadBlocks(size_t blocks); + + /** + * @returns The current per-thread cache memory block age limit. + */ + size_t getMaxThreadAge() const; + + /** + * @brief Sets the per-thread cache memory block age limit. + * @param bytes Maximum age of cached memory block (per thread). + * + * The age of a memory block is defined in terms of deallocate cycles. + * When a block is returned to the cache, it starts with an age of 0. + * Each time another block of memory is returned to the cache, the age + * of all existing blocks in the cache increases by one. If a block's + * age hits the age limit it is deallocated, allowing infrequently used + * blocks to be returned to the operating system. 
+ */ + void setMaxThreadAge(size_t age); + + /** + * @brief Statistical information about buffer caches. + * + * The %Statistics struct describes the aggregate state of all thread + * caches maintained by %BufferManager. + */ + struct Statistics { + /** + * Number of currently active caches. + */ + size_t caches; + + /** + * Number of times an allocation was satisfied with a memory block + * from the cache. + */ + size_t hits; + + /** + * Number of times the cache was not able to satisfy an allocation + * and a new memory block had to be allocated from the system. + */ + size_t misses; + + /** + * Total number of memory blocks currently cached. + */ + size_t blocks; + + /** + * Total bytes currently cached. + */ + size_t bytes; + + /** + * High water mark for total cached bytes. + */ + size_t highBytes; + }; + + /** + * @brief Returns the current statistical information for all caches. + */ + Statistics getStatistics(); + + private: + /// @cond IMPL + + // BufferManager is a singleton object. This method is inaccessible to + // user code. + BufferManager(); + + // BufferManager is a singleton whose lifetime is managed by the + // library. This method is inaccessible to user code. 
+ ~BufferManager(); + + // Non-copyable + BufferManager(const BufferManager&); + + // Non-assignable + BufferManager& operator=(const BufferManager&); + + class CacheBlock; + friend class CacheBlock; + + class BufferCache; + friend class BufferCache; + + // Round up the given allocation size to the nearest granularity, + // taking the CacheBlock overhead into consideration + size_t _nearestSize(size_t bytes); + + // Acquire a new memory block from the operating system + CacheBlock* _allocate(size_t bytes); + + // Return an existing memory block to the operating system + void _deallocate(CacheBlock* ptr); + + // Associate a new thread's buffer cache with the BufferManager + void _addCache(BufferCache* cache); + + // Remove an existing buffer cache from the BufferManager + void _removeCache(BufferCache* cache); + + // Returns the buffer cache for the current thread + BufferCache* _getCache(); + + // Report an increase in the total cached bytes (also updates high + // water mark if necessary) + void _increaseSize(size_t bytes); + + // Report a decrease in the total cached bytes + void _decreaseSize(size_t bytes); + + // Lock protecting the cache list + boost::mutex _lock; + typedef std::set CacheList; + CacheList _caches; + + // Per-thread association of buffer cache + boost::thread_specific_ptr _threadCache; + + // Policy settings + bool _enabled; + size_t _maxThreadBytes; + size_t _maxThreadBlocks; + size_t _maxThreadAge; + + // Statistical data + size_t _hits; + size_t _misses; + + // Total size tracking; must be updated atomically + volatile size_t _currentBytes; + volatile size_t _highWaterBytes; + + // Singleton instance + static BufferManager _instance; + + /// @endcond + }; + + + template + inline bool operator==(const BufferManager::Allocator& a1, + const BufferManager::Allocator& a2) + { + return true; + } + + template + inline bool operator!=(const BufferManager::Allocator& a1, + const BufferManager::Allocator& a2) + { + return false; + } + +} // namespace 
redhawk + +#endif // OSSIE_THREADCACHE_H diff --git a/redhawk/src/base/include/ossie/Component.h b/redhawk/src/base/include/ossie/Component.h index 54f3f8398..955331637 100644 --- a/redhawk/src/base/include/ossie/Component.h +++ b/redhawk/src/base/include/ossie/Component.h @@ -34,19 +34,24 @@ class Component : public Resource_impl { /* * Return a pointer to the Application that the Resource is deployed on */ - redhawk::ApplicationContainer* getApplication() { - return this->_app; - } + redhawk::ApplicationContainer* getApplication(); + /* * Return the network information that was allocated to this Component (if applicable) */ - redhawk::NetworkContainer* getNetwork() { - return this->_net; - } -private: - redhawk::ApplicationContainer *_app; - redhawk::NetworkContainer *_net; + redhawk::NetworkContainer* getNetwork(); + +protected: + virtual void setCommandLineProperty(const std::string& id, const redhawk::Value& value); + void setApplication(CF::Application_ptr application); + + // Give Resource_impl friend access so it can call setApplication + friend class Resource_impl; + +private: + boost::scoped_ptr _app; + boost::scoped_ptr _net; }; #endif /* COMPONENT_H */ diff --git a/redhawk/src/base/include/ossie/CorbaUtils.h b/redhawk/src/base/include/ossie/CorbaUtils.h index 8fdd32a10..504c45004 100644 --- a/redhawk/src/base/include/ossie/CorbaUtils.h +++ b/redhawk/src/base/include/ossie/CorbaUtils.h @@ -25,6 +25,7 @@ #include #include #include +#include #include "CorbaSequence.h" #include "ossie/debug.h" @@ -231,6 +232,8 @@ namespace ossie { return std::string(static_cast(corbaString)); } + CORBA::TypeCode_ptr unalias(CORBA::TypeCode_ptr type); + bool isValidType (const CORBA::Any& lhs, const CORBA::Any& rhs); inline bool objectExists(CORBA::Object_ptr obj) { @@ -253,6 +256,25 @@ namespace ossie { // Set up a handler for retrying calls to the provided object on a COMM_FAILURE exception. 
void setObjectCommFailureRetries (CORBA::Object_ptr obj, int numRetries); + namespace internal { + // Implementation of reference-to-servant lookup for generic type + PortableServer::ServantBase* getLocalServant(CORBA::Object_ptr object); + } + + // If the object reference is to a servant in this process space, return a pointer to + // the local servant; otherwise, return a null pointer + template + T* getLocalServant(CORBA::Object_ptr object) { + PortableServer::ServantBase* servant = internal::getLocalServant(object); + if (servant) { + return dynamic_cast(servant); + } + return 0; + } + + std::string describeException(const CORBA::SystemException& exc); + std::string describeException(const CORBA::Exception& exc); + // Mapping of C++ types to type codes. template static CORBA::TypeCode_ptr TypeCode (void) @@ -331,6 +353,12 @@ namespace ossie { { return CORBA::_tc_string; } + + template<> + inline CORBA::TypeCode_ptr TypeCode (void) + { + return CORBA::_tc_TypeCode; + } // Instantiates POAs on demand class POACreator : public virtual POA_PortableServer::AdapterActivator @@ -456,6 +484,16 @@ inline void operator <<= (CORBA::Any& _a, const std::string& _s) _a <<= _s.c_str(); } +inline bool operator >>= (const CORBA::Any& _a, CF::UTCTime& _utctime) +{ + const CF::UTCTime* _local_utctime; + if (_a >>= _local_utctime) { + _utctime = *_local_utctime; + return true; + } + return false; +} + inline bool operator >>= (const CORBA::Any& _a, bool& _b) { CORBA::Boolean b; @@ -491,6 +529,7 @@ ANY_VECTOR_OPERATORS(CORBA::LongLong, CORBA::LongLongSeq); ANY_VECTOR_OPERATORS(CORBA::ULongLong, CORBA::ULongLongSeq); ANY_VECTOR_OPERATORS(CORBA::Float, CORBA::FloatSeq); ANY_VECTOR_OPERATORS(CORBA::Double, CORBA::DoubleSeq); +ANY_VECTOR_OPERATORS(CF::UTCTime, CF::UTCTimeSequence); #undef ANY_VECTOR_OPERATORS #define ANY_VECTOR_CONVERT_OPERATORS(T,SEQ) \ diff --git a/redhawk/src/base/include/ossie/Device_impl.h b/redhawk/src/base/include/ossie/Device_impl.h index 322c63b92..58dd87b3a 
100644 --- a/redhawk/src/base/include/ossie/Device_impl.h +++ b/redhawk/src/base/include/ossie/Device_impl.h @@ -88,6 +88,8 @@ class Device_impl: // Set admin state (LOCKED, SHUTTING_DOWN, UNLOCKED) void setAdminState (CF::Device::AdminType _adminType); + void setLogger(rh_logger::LoggerPtr logptr); + protected: // Admin state (LOCKED, SHUTTING_DOWN, UNLOCKED) CF::Device::AdminType _adminState; @@ -229,6 +231,8 @@ class Device_impl: return this->_devMgr; } + rh_logger::LoggerPtr _deviceLog; + private: // Adapter template function for device constructors. This is the only part of // device creation that requires type-specific knowledge. diff --git a/redhawk/src/base/include/ossie/EventTypes.h b/redhawk/src/base/include/ossie/EventTypes.h index 0344496b4..45284dab6 100644 --- a/redhawk/src/base/include/ossie/EventTypes.h +++ b/redhawk/src/base/include/ossie/EventTypes.h @@ -69,6 +69,9 @@ namespace ossie { typedef CF::EventChannelManager::EventChannelReg EventChannelReg; typedef CF::EventChannelManager::EventChannelReg_var EventChannelReg_var; typedef CF::EventChannelManager::EventChannelReg* EventChannelReg_ptr; + typedef CF::EventChannelManager::PublisherReg PublisherReg; + typedef CF::EventChannelManager::PublisherReg_var PublisherReg_var; + typedef CF::EventChannelManager::PublisherReg* PublisherReg_ptr; }; // end of event namespace diff --git a/redhawk/src/base/include/ossie/Events.h b/redhawk/src/base/include/ossie/Events.h index 5e1e90839..8903c6564 100644 --- a/redhawk/src/base/include/ossie/Events.h +++ b/redhawk/src/base/include/ossie/Events.h @@ -390,7 +390,7 @@ namespace events { Manager( Resource_impl *obj ); - static ManagerPtr _Manager; + //static ManagerPtr _Manager; Registrations _registrations; @@ -403,10 +403,14 @@ namespace events { Mutex _mgr_lock; bool _allow; + + static std::map _managers; Resource_impl *_obj; std::string _obj_id; + rh_logger::LoggerPtr _resourceLog; + rh_logger::LoggerPtr _eventManagerLog; }; @@ -443,6 +447,10 @@ namespace 
events { // @param retry_wait number of millisecs to wait between retries Publisher( ossie::events::EventChannel_ptr channel); + void setLogger(rh_logger::LoggerPtr logptr) { + _publisherLog = logptr; + }; + // // // @@ -530,6 +538,7 @@ namespace events { // handle to object that responds to disconnect messages Receiver *_disconnectReceiver; + rh_logger::LoggerPtr _publisherLog; }; @@ -571,6 +580,9 @@ namespace events { }; + void setLogger(rh_logger::LoggerPtr logptr) { + _subscriberLog = logptr; + }; typedef boost::shared_ptr< Subscriber::DataArrivedListener > DataArrivedListenerPtr; @@ -744,6 +756,7 @@ namespace events { void _init( ossie::events::EventChannel_ptr inChannel ); + rh_logger::LoggerPtr _subscriberLog; }; // end of Subscriber diff --git a/redhawk/src/base/include/ossie/ExecutableDevice_impl.h b/redhawk/src/base/include/ossie/ExecutableDevice_impl.h index 208fc9c46..ffae081a9 100644 --- a/redhawk/src/base/include/ossie/ExecutableDevice_impl.h +++ b/redhawk/src/base/include/ossie/ExecutableDevice_impl.h @@ -77,20 +77,25 @@ class ExecutableDevice_impl: (CF::Device::InvalidState, CF::ExecutableDevice::InvalidProcess, CORBA::SystemException); + void setLogger(rh_logger::LoggerPtr logptr); + protected: // Parse the command-line arguments to retrieve the name of the Component that is to be launched static std::string get_component_name_from_exec_params(const CF::Properties& params); // Retrieve the name of the Component from its profile static std::string component_name_from_profile_name(const std::string& profile_name); - + // process affinity options virtual void set_resource_affinity( const CF::Properties& options, const pid_t rsc_pid, const char *rsc_name, const std::vector &bl = std::vector(0) ); - + rh_logger::LoggerPtr _executabledeviceLog; + private: CF::ExecutableDevice::ProcessID_Type PID; + + void _init(); }; #endif diff --git a/redhawk/src/base/include/ossie/ExecutorService.h b/redhawk/src/base/include/ossie/ExecutorService.h new file mode 
100644 index 000000000..24583fe79 --- /dev/null +++ b/redhawk/src/base/include/ossie/ExecutorService.h @@ -0,0 +1,246 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_EXECUTORSERVICE_H +#define REDHAWK_EXECUTORSERVICE_H + +#include + +#include +#include +#include + +namespace redhawk { + + /** + * @brief A class for scheduling functions to run at a later time. + * + * %ExecutorService provides an interface for queueing functions to run on + * another thread at a specified time. This can be used for implementing + * periodic monitors, executing deferred callbacks, or other operations + * that do not need to be performed immediately (and do not require a + * return value). + */ + class ExecutorService { + public: + /** + * @brief Construct an %ExecutorService. + * + * The %ExecutorService is created in a stopped state. To begin + * executing scheduled functions, call start(). + */ + ExecutorService(); + + /** + * @brief Destroys the %ExecutorService. + * + * The executor thread is stopped and all queued functions are purged. + */ + ~ExecutorService(); + + /** + * @brief Starts executing scheduled functions. 
+ * + * If the executor thread is not running, it is started. Any functions + * scheduled for the current time (or earlier) will be run at the next + * possible time. + */ + void start (); + + /** + * @brief Stops executing scheduled functions. + * + * If the executor thread is running, it is stopped. Any remaining + * scheduled functions will not be run until the %ExecutorService is + * started again. + */ + void stop (); + + /** + * @brief Calls a function on the executor thread. + * @param func Callable object. + * + * Queues the callable object @a func to be called on the executor + * thread at the next possible time. This function does not wait for + * @a func to execute. + * + * If the %ExecutorService is not running, @a func will run after the + * next call to start(). + */ + template + void execute (F func) + { + _insertSorted(func); + } + + /** + * @brief Calls a function on the executor thread. + * @param func Callable object. + * @param A1 Argument to pass to callable object. + * + * Queues the callable object @a func to be called with the single + * argument @a A1 on the executor thread at the next possible time. + * This function does not wait for @a func to execute. + * + * If @a func is a class member function, the class instance should be + * passed as @a A1. + * + * If the %ExecutorService is not running, @a func will run after the + * next call to start(). + */ + template + void execute (F func, A1 arg1) + { + _insertSorted(boost::bind(func, arg1)); + } + + /** + * @brief Calls a function on the executor thread. + * @param func Callable object. + * @param A1 First argument to pass to callable object. + * @param A2 Second argument to pass to callable object. + * + * Queues the callable object @a func to be called with the arguments + * @a A1 and @a A2 on the executor thread at the next possible time. + * This function does not wait for @a func to execute. 
+ * + * If @a func is a class member function, the class instance should be + * passed as @a A1. + * + * If the %ExecutorService is not running, @a func will run after the + * next call to start(). + */ + template + void execute (F func, A1 arg1, A2 arg2) + { + _insertSorted(boost::bind(func, arg1, arg2)); + } + + /** + * @brief Schedules a function on the executor thread. + * @param when The time at which to run the function. + * @param func Callable object. + * + * Queues the callable object @a func to be called on the executor + * thread at @a when. The actual time at which it runs is guaranteed to + * be at least @a when, but may be later, depending on the system and + * what the %ExecutorService is doing at the time. + * + * If the %ExecutorService is not running at the scheduled time, + * @a func will run after the next call to start(). + */ + template + void schedule (boost::system_time when, F func) + { + _insertSorted(func, when); + } + + /** + * @brief Schedules a function on the executor thread. + * @param when The time at which to run the function. + * @param func Callable object. + * @param A1 Argument to pass to callable object. + * + * Queues the callable object @a func to be called with the single + * argument @a A1 on the executor thread at @a when. The actual time + * at which it runs is guaranteed to be at least @a when, but may be + * later, depending on the system and what the %ExecutorService is + * doing at the time. + * + * If @a func is a class member function, the class instance should be + * passed as @a A1. + * + * If the %ExecutorService is not running at the scheduled time, + * @a func will run after the next call to start(). + */ + template + void schedule (boost::system_time when, F func, A1 arg1) + { + _insertSorted(boost::bind(func, arg1), when); + } + + /** + * @brief Schedules a function on the executor thread. + * @param when The time at which to run the function. + * @param func Callable object. 
+ * @param A1 First argument to pass to callable object. + * @param A2 Second argument to pass to callable object. + * + * Queues the callable object @a func to be called with the arguments + * @a A1 and @a A2 on the executor thread at @a when. The actual time + * at which it runs is guaranteed to be at least @a when, but may be + * later, depending on the system and what the %ExecutorService is + * doing at the time. + * + * If @a func is a class member function, the class instance should be + * passed as @a A1. + * + * If the %ExecutorService is not running at the scheduled time, + * @a func will run after the next call to start(). + */ + template + void schedule (boost::system_time when, F func, A1 arg1, A2 arg2) + { + _insertSorted(boost::bind(func, arg1, arg2), when); + } + + /** + * @brief Discards all pending functions. + */ + void clear (); + + /** + * @brief Returns the number of queued functions to be run. + */ + size_t pending (); + + private: + /// @cond IMPL + + typedef boost::function func_type; + typedef std::pair task_type; + typedef std::list task_queue; + + // Thread main function. + void _run (); + + // Inserts a callable object into the task queue at the given time, + // defaulting to now (i.e., run at the next possible time). + void _insertSorted (func_type func, boost::system_time when=boost::get_system_time()); + + // Mutex/condvar pair to synchronize access and handle waiting for the + // next scheduled event. + boost::mutex _mutex; + boost::condition_variable _cond; + + // Executor thread and control flag. + boost::thread* _thread; + volatile bool _running; + + // Function queue, sorted by scheduled time such that the first item on + // the queue is the next function to call. 
+ task_queue _queue; + + /// @endcond + }; + +} + +#endif // REDHAWK_EXECUTORSERVICE_H diff --git a/redhawk/src/base/include/ossie/FileStream.h b/redhawk/src/base/include/ossie/FileStream.h index 165e6ea9d..ca887515a 100644 --- a/redhawk/src/base/include/ossie/FileStream.h +++ b/redhawk/src/base/include/ossie/FileStream.h @@ -56,30 +56,14 @@ class File_stream : public std::istream * Opening a stream using this constructor will ensure that the SCA file get's closed automatically * when the file stream is destroyed. */ - explicit File_stream(CF::FileSystem_ptr fsysptr, const char* path) throw(std::ios_base::failure) : std::ios(0), needsClose(true) - { - try { - sb = new File_buffer((CF::File_var)fsysptr->open(path, true)); - this->init(sb); - } catch( ... ) { - throw std::ios_base::failure("exception while opening file"); - } - } + File_stream(CF::FileSystem_ptr fsysptr, const char* path) throw(std::ios_base::failure); /* * Open a stream given a SCA File. * * Note: the caller is responsible for closing the provided file. */ - explicit File_stream(CF::File_ptr fptr) : std::ios(0), needsClose(false) - { - try { - sb = new File_buffer(fptr); - this->init(sb); - } catch( ... 
) { - throw std::ios_base::failure("exception while opening file"); - } - } + explicit File_stream(CF::File_ptr fptr); virtual ~File_stream(); diff --git a/redhawk/src/base/include/ossie/LoadableDevice_impl.h b/redhawk/src/base/include/ossie/LoadableDevice_impl.h index ffb379f6a..dc37a7e24 100644 --- a/redhawk/src/base/include/ossie/LoadableDevice_impl.h +++ b/redhawk/src/base/include/ossie/LoadableDevice_impl.h @@ -170,9 +170,6 @@ class LoadableDevice_impl: LoadableDevice_impl (char*, char*, char*, char*, char*); LoadableDevice_impl (char*, char*, char*, char*, CF::Properties capacities, char*); virtual ~LoadableDevice_impl (); - void configure (const CF::Properties& configProperties) - throw (CF::PropertySet::PartialConfiguration, - CF::PropertySet::InvalidConfiguration, CORBA::SystemException); // Externally visible function call to load a file void load (CF::FileSystem_ptr fs, const char* fileName, @@ -203,6 +200,8 @@ class LoadableDevice_impl: if ( xfersize > 0 ) transferSize=xfersize; } + void setLogger(rh_logger::LoggerPtr logptr); + protected: // Increment the loadedFiles counter @@ -226,6 +225,11 @@ class LoadableDevice_impl: void update_selected_paths(std::vector &paths); // Transfer size when loading files CORBA::LongLong transferSize; // block transfer size when loading files + std::string prependCacheIfAvailable(const std::string &localPath); + + // Returns the base directory in use for the file cache + const std::string& getCacheDirectory(); + rh_logger::LoggerPtr _loadabledeviceLog; private: LoadableDevice_impl(); // No default constructor @@ -233,14 +237,12 @@ class LoadableDevice_impl: void _init(); std::map cacheTimestamps; std::map > duplicate_filenames; + std::string cacheDirectory; void _loadTree(CF::FileSystem_ptr fs, std::string remotePath, boost::filesystem::path& localPath, std::string fileKey); void _deleteTree(const std::string &fileKey); bool _treeIntact(const std::string &fileKey); void _copyFile(CF::FileSystem_ptr fs, const std::string 
&remotePath, const std::string &localPath, const std::string &fileKey); - - - }; #endif diff --git a/redhawk/src/base/include/ossie/Logging_impl.h b/redhawk/src/base/include/ossie/Logging_impl.h index 7abbcd4d2..786d78fd2 100644 --- a/redhawk/src/base/include/ossie/Logging_impl.h +++ b/redhawk/src/base/include/ossie/Logging_impl.h @@ -35,7 +35,8 @@ class Logging_impl { public: - Logging_impl (); + Logging_impl (std::string logger_name); + Logging_impl (rh_logger::LoggerPtr parent_logger); virtual ~Logging_impl() {}; @@ -58,6 +59,15 @@ class Logging_impl // override this method to accept logging configuration information as a string void setLogLevel( const char *logger_id, const CF::LogLevel newLevel ) throw (CF::UnknownIdentifier); + // override this method to accept logging configuration information as a string + CF::LogLevel getLogLevel( const char *logger_id ) throw (CF::UnknownIdentifier); + + // retrieves the list of named loggers associated with the logger + CF::StringSequence* getNamedLoggers(); + + // reset the logger + void resetLog(); + // returns the current logger assigned to the resource, by default it is the root logger LOGGER getLogger(); @@ -111,6 +121,12 @@ class Logging_impl */ void saveLoggingContext( const std::string &url, int loglevel, ossie::logging::ResourceCtxPtr ctx ); + /* + * getExpandedLogConfig + * Apply the macros to the log configuration + * + */ + std::string getExpandedLogConfig(const std::string &logcfg_url); // // RESOLVE: refactor to use boost::function and boost::bind @@ -287,12 +303,24 @@ class Logging_impl // current logging level set for this component via execparams, or LogConfiguration API CF::LogLevel _logLevel; - // current logging object + // static logging object (deprecated) LOGGER _logger; // logging macro defintion table; ossie::logging::MacroTable _loggingMacros; - + + // logger instance + rh_logger::LoggerPtr _baseLog; + + bool haveLogger(const std::string &name); + bool haveLoggerHierarchy(const std::string 
&name); + + public: + + rh_logger::LoggerPtr getBaseLogger() { + return this->_baseLog; + } + private: // logging configuration data, @@ -300,8 +328,14 @@ class Logging_impl std::string _logCfgURL; + std::string _origLogCfgURL; + int _origLogLevel; + ossie::logging::ResourceCtxPtr _origCtx; + ossie::logging::ResourceCtxPtr _loggingCtx; + bool _origLevelSet; + // Event channel to listen for configuration and log level changes boost::shared_ptr< ossie::events::PushEventConsumer > logConfigChannel; diff --git a/redhawk/src/base/include/ossie/Makefile.am b/redhawk/src/base/include/ossie/Makefile.am index 57b84ab4b..0abb88151 100644 --- a/redhawk/src/base/include/ossie/Makefile.am +++ b/redhawk/src/base/include/ossie/Makefile.am @@ -44,6 +44,7 @@ pkginclude_HEADERS = AggregateDevice_impl.h \ ComplexProperties.h \ Service_impl.h \ MessageInterface.h \ + MessageSupplier.h \ type_traits.h \ AnyUtils.h \ RedhawkDefs.h \ @@ -61,9 +62,29 @@ pkginclude_HEADERS = AggregateDevice_impl.h \ OptionalProperty.h \ PropertyMonitor.h \ Autocomplete.h \ - Versions.h + Versions.h \ + refcount_memory.h \ + shared_buffer.h \ + ExecutorService.h \ + UsesPort.h \ + ProvidesPort.h \ + Transport.h \ + BufferManager.h \ + bitops.h \ + bitbuffer.h nobase_pkginclude_HEADERS = internal/equals.h \ + internal/message_traits.h \ logging/rh_logger.h \ logging/LogConfigUriResolver.h \ - logging/loghelpers.h + logging/loghelpers.h \ + debug/check.h \ + debug/checked_allocator.h \ + debug/checked_iterator.h \ + shm/Allocator.h \ + shm/Heap.h \ + shm/HeapClient.h \ + shm/MappedFile.h \ + shm/SuperblockFile.h \ + shm/System.h + diff --git a/redhawk/src/base/include/ossie/MessageInterface.h b/redhawk/src/base/include/ossie/MessageInterface.h index d4ff3c5c5..c2ad0fc90 100644 --- a/redhawk/src/base/include/ossie/MessageInterface.h +++ b/redhawk/src/base/include/ossie/MessageInterface.h @@ -26,16 +26,18 @@ #include #include +#include + #include "CF/ExtendedEvent.h" +#include "CF/QueryablePort.h" #include 
"CF/cf.h" #include "CorbaUtils.h" #include "Port_impl.h" #include "callback.h" +#include "internal/message_traits.h" #include - - /************************************************************************************ Message consumer ************************************************************************************/ @@ -89,7 +91,7 @@ class MessageConsumerPort : public Port_Provides_base_impl public: MessageConsumerPort (std::string port_name); - virtual ~MessageConsumerPort (void) { }; + virtual ~MessageConsumerPort (void); /* * Register a callback function @@ -100,7 +102,8 @@ class MessageConsumerPort : public Port_Provides_base_impl template void registerMessage (const std::string& id, Class* target, void (Class::*func)(const std::string&, const MessageStruct&)) { - callbacks_[id] = new MemberCallback(*target, func); + const char* format = ::redhawk::internal::message_traits::format(); + callbacks_[id] = new MessageCallbackImpl(format, boost::bind(func, target, _1, _2)); } template @@ -126,15 +129,22 @@ class MessageConsumerPort : public Port_Provides_base_impl void fireCallback (const std::string& id, const CORBA::Any& data); - std::string getRepid() const; + std::string getRepid() const; - std::string getDirection() const; - + std::string getDirection() const; protected: + + friend class MessageSupplierPort; + + rh_logger::LoggerPtr _messageconsumerLog; + void addSupplier (const std::string& connectionId, CosEventComm::PushSupplier_ptr supplier); CosEventComm::PushSupplier_ptr removeSupplier (const std::string& connectionId); + + bool hasGenericCallbacks(); + void dispatchGeneric(const std::string& id, const CORBA::Any& data); boost::mutex portInterfaceAccess; std::map consumers; @@ -143,125 +153,81 @@ class MessageConsumerPort : public Port_Provides_base_impl SupplierAdmin_i *supplier_admin; /* - * Abstract interface for message callbacks. + * Abstract untyped interface for message callbacks. 
*/ class MessageCallback { public: - virtual void operator() (const std::string& value, const CORBA::Any& data) = 0; + virtual void dispatch (const std::string& value, const CORBA::Any& data) = 0; + virtual void dispatch (const std::string& value, const void* data) = 0; virtual ~MessageCallback () { } + bool isCompatible (const char* format) + { + if (_format.empty()) { + // Message type has no format descriptor, assume that it cannot + // be passed via void* + return false; + } + // The format descriptors must be identical, otherwise go through + // CORBA::Any + return _format == format; + } + protected: - MessageCallback () { } + MessageCallback(const std::string& format) : + _format(format) + { + } + + const std::string _format; }; /* - * Concrete class for member function property change callbacks. + * Concrete typed class for message callbacks. */ - template - class MemberCallback : public MessageCallback + template + class MessageCallbackImpl : public MessageCallback { public: - typedef void (Class::*MemberFn)(const std::string&, const M&); + typedef redhawk::callback CallbackFunc; - virtual void operator() (const std::string& value, const CORBA::Any& data) + MessageCallbackImpl (const std::string& format, CallbackFunc func) : + MessageCallback(format), + func_(func) { - M message; + } + + virtual void dispatch (const std::string& value, const CORBA::Any& data) + { + Message message; if (data >>= message) { - (target_.*func_)(value, message); + func_(value, message); } } - protected: - // Only allow MessageConsumerPort to instantiate this class. 
- MemberCallback (Class& target, MemberFn func) : - target_(target), - func_(func) + virtual void dispatch (const std::string& value, const void* data) { + const Message* message = reinterpret_cast(data); + func_(value, *message); } - friend class MessageConsumerPort; - - Class& target_; - MemberFn func_; + private: + CallbackFunc func_; }; typedef std::map CallbackTable; CallbackTable callbacks_; + MessageCallback* getMessageCallback(const std::string& msgId); + ossie::notification generic_callbacks_; typedef std::map SupplierTable; SupplierTable suppliers_; }; - -/************************************************************************************ - Message producer -************************************************************************************/ - -class MessageSupplierPort : public Port_Uses_base_impl -#ifdef BEGIN_AUTOCOMPLETE_IGNORE -, public virtual POA_CF::Port -#endif -{ - -public: - MessageSupplierPort (std::string port_name); - virtual ~MessageSupplierPort (void); - - // CF::Port methods - void connectPort(CORBA::Object_ptr connection, const char* connectionId); - void disconnectPort(const char* connectionId); - - void push(const CORBA::Any& data); - - CosEventChannelAdmin::ProxyPushConsumer_ptr removeConsumer(std::string consumer_id); - void extendConsumers(std::string consumer_id, CosEventChannelAdmin::ProxyPushConsumer_ptr proxy_consumer); - - // Send a single message - template - void sendMessage(const Message& message) { - const Message* begin(&message); - const Message* end(&begin[1]); - sendMessages(begin, end); - } - - // Send a sequence of messages - template - void sendMessages(const Sequence& messages) { - sendMessages(messages.begin(), messages.end()); - } - - // Send a set of messages from an iterable set - template - void sendMessages(Iterator first, Iterator last) - { - CF::Properties properties; - properties.length(std::distance(first, last)); - for (CORBA::ULong ii = 0; first != last; ++ii, ++first) { - // Workaround for older 
components whose structs have a non-const, - // non-static member function getId(): determine the type of value - // pointed to by the iterator, and const_cast the dereferenced - // value; this ensures that it works for both bare pointers and - // "true" iterators - typedef typename std::iterator_traits::value_type value_type; - properties[ii].id = const_cast(*first).getId().c_str(); - properties[ii].value <<= *first; - } - CORBA::Any data; - data <<= properties; - push(data); - } - - std::string getRepid() const; - -protected: - boost::mutex portInterfaceAccess; - std::map consumers; - std::map _connections; - -}; +#include "MessageSupplier.h" #endif // MESSAGEINTERFACE_H diff --git a/redhawk/src/base/include/ossie/MessageSupplier.h b/redhawk/src/base/include/ossie/MessageSupplier.h new file mode 100644 index 000000000..7dd89b80f --- /dev/null +++ b/redhawk/src/base/include/ossie/MessageSupplier.h @@ -0,0 +1,135 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef MESSAGESUPPLIER_H +#define MESSAGESUPPLIER_H + +#include + +#include + +#include + +#include "UsesPort.h" +#include "internal/message_traits.h" + +/************************************************************************************ + Message producer +************************************************************************************/ +class MessageSupplierPort : public redhawk::UsesPort +{ + +public: + MessageSupplierPort (const std::string& name); + virtual ~MessageSupplierPort (void); + + /** + * @brief Sends pre-serialized messages. + * @param data Messages serialized to a CORBA::Any. + * @param connectionID Target connection (default: all). + * @throw redhawk::InvalidConnectionId If @p connectionId is not empty and + * does not match any connection + */ + void push(const CORBA::Any& data, const std::string& connectionId=std::string()); + + /** + * @brief Sends a single message. + * @param message Message to send. + * @param connectionID Target connection (default: all). + * @throw redhawk::InvalidConnectionId If @p connectionId is not empty and + * does not match any connection + */ + template + void sendMessage(const Message& message, const std::string& connectionId=std::string()) + { + const Message* begin(&message); + const Message* end(&begin[1]); + sendMessages(begin, end, connectionId); + } + + /** + * @brief Sends a sequence of messages. + * @param messages Container of messages to send. + * @param connectionID Target connection (default: all). + * @throw redhawk::InvalidConnectionId If @p connectionId is not empty and + * does not match any connection + */ + template + void sendMessages(const Sequence& messages, const std::string& connectionId=std::string()) + { + sendMessages(messages.begin(), messages.end(), connectionId); + } + + /** + * @brief Sends a sequence of messages. + * @param first Iterator to first message. + * @param last Iterator to one past last message. + * @param connectionID Target connection (default: all). 
+ * @throw redhawk::InvalidConnectionId If @p connectionId is not empty and + * does not match any connection + */ + template + void sendMessages(Iterator first, Iterator last, const std::string& connectionId=std::string()) + { + boost::mutex::scoped_lock lock(updatingPortsLock); + _checkConnectionId(connectionId); + _beginMessageQueue(std::distance(first, last), connectionId); + for (; first != last; ++first) { + _queueMessage(*first, connectionId); + } + _sendMessageQueue(connectionId); + } + + std::string getRepid() const; + +protected: + virtual void _validatePort(CORBA::Object_ptr object); + virtual redhawk::UsesTransport* _createTransport(CORBA::Object_ptr object, const std::string& connectionId); + + template + inline void _queueMessage(const Message& message, const std::string& connectionId) + { + // Use the traits class to abstract differences between message classes + // based on the REDHAWK version with which they were generated + typedef ::redhawk::internal::message_traits traits; + const std::string messageId = traits::getId(message); + const char* format = traits::format(); + _queueMessage(messageId, format, &message, &traits::serialize, connectionId); + } + + typedef void (*SerializerFunc)(CORBA::Any&,const void*); + + void _beginMessageQueue(size_t count, const std::string& connectionId); + void _queueMessage(const std::string& msgId, const char* format, const void* msgData, + SerializerFunc serializer, const std::string& connectionId); + void _sendMessageQueue(const std::string& connectionId); + + bool _isConnectionSelected(const std::string& connectionId, const std::string& targetId); + void _checkConnectionId(const std::string& connectionId); + void _push(const CORBA::Any& data, const std::string& connectionId=std::string()); + + class MessageTransport; + class CorbaTransport; + class LocalTransport; + + typedef redhawk::UsesPort::TransportIteratorAdapter TransportIterator; +}; + +#endif // MESSAGESUPPLIER_H diff --git 
a/redhawk/src/base/include/ossie/PortSupplier_impl.h b/redhawk/src/base/include/ossie/PortSupplier_impl.h index e691ab7a1..cc5f9c685 100644 --- a/redhawk/src/base/include/ossie/PortSupplier_impl.h +++ b/redhawk/src/base/include/ossie/PortSupplier_impl.h @@ -47,6 +47,8 @@ class PortSupplier_impl // Return an object reference for the named port. CORBA::Object* getPort (const char*) throw (CF::PortSupplier::UnknownPort, CORBA::SystemException); + void setLogger(rh_logger::LoggerPtr logptr); + protected: typedef std::map PortServantMap; PortServantMap _portServants; @@ -77,6 +79,9 @@ class PortSupplier_impl private: void insertPort (const std::string& name, PortBase* servant); void deactivatePort (PortBase* servant); + + rh_logger::LoggerPtr _portsupplierLog; + }; #endif diff --git a/redhawk/src/base/include/ossie/Port_impl.h b/redhawk/src/base/include/ossie/Port_impl.h index 2b6c85b9f..41ca6975b 100644 --- a/redhawk/src/base/include/ossie/Port_impl.h +++ b/redhawk/src/base/include/ossie/Port_impl.h @@ -31,6 +31,7 @@ #include "CF/cf.h" #include "ossie/Autocomplete.h" +#include "ossie/logging/rh_logger.h" namespace _seqVector { @@ -112,6 +113,20 @@ template } } // namespace _seqVector +namespace redhawk { + + class PortCallError : public std::runtime_error { + + public: + PortCallError( const std::string &msg, const std::vector &connectionids ); + + ~PortCallError() throw (); + + private: + static std::string makeMessage(const std::string& msg, const std::vector& connectionids); + }; + +} class Port_impl #ifdef BEGIN_AUTOCOMPLETE_IGNORE @@ -273,7 +288,7 @@ class PortBase virtual void setDescription(const std::string& desc) { - description = desc; + description = desc; } virtual void startPort () @@ -284,6 +299,12 @@ class PortBase { } + // Called when the Port is first activated by a PortSupplier; subclasses + // may override to implement virtual constructor behavior + virtual void initializePort() + { + } + virtual void releasePort() { } @@ -297,13 +318,13 @@ class 
PortBase // Return the Port description virtual std::string getDescription () { - return description; + return description; } // Return the interface that this Port supports virtual std::string getRepid () const { - return "IDL:CORBA/Object:1.0"; + return "IDL:CORBA/Object:1.0"; } // Return the direction (uses/provides) for this Port @@ -312,9 +333,13 @@ class PortBase return "Direction"; } + LOGGER getLogger(); + void setLogger(LOGGER newLogger); + protected: std::string name; std::string description; + LOGGER _portLog; }; class Port_Uses_base_impl : public PortBase @@ -352,7 +377,7 @@ class Port_Uses_base_impl : public PortBase // Return the direction (uses/provides) for this Port virtual std::string getDirection () const { - return "Uses"; + return CF::PortSet::DIRECTION_USES; } protected: @@ -376,7 +401,7 @@ class Port_Provides_base_impl : public PortBase // Return the direction (uses/provides) for this Port virtual std::string getDirection () const { - return "Provides"; + return CF::PortSet::DIRECTION_PROVIDES; } }; diff --git a/redhawk/src/base/include/ossie/ProcessThread.h b/redhawk/src/base/include/ossie/ProcessThread.h index 9a6c44800..f486abcfe 100644 --- a/redhawk/src/base/include/ossie/ProcessThread.h +++ b/redhawk/src/base/include/ossie/ProcessThread.h @@ -24,14 +24,18 @@ #include #include +#include "debug.h" + class ThreadedComponent; namespace ossie { class ProcessThread { + ENABLE_LOGGING; + public: - ProcessThread(ThreadedComponent* target, float delay); + ProcessThread(ThreadedComponent* target, float delay, const std::string& name=std::string()); ~ProcessThread(); // Kicks off the thread @@ -55,6 +59,7 @@ class ProcessThread volatile bool _running; ThreadedComponent* _target; struct timespec _delay; + std::string _name; public: boost::thread*& _mythread; diff --git a/redhawk/src/base/include/ossie/PropertyInterface.h b/redhawk/src/base/include/ossie/PropertyInterface.h index 721bc8be5..2512c21a1 100644 --- 
a/redhawk/src/base/include/ossie/PropertyInterface.h +++ b/redhawk/src/base/include/ossie/PropertyInterface.h @@ -31,6 +31,7 @@ #include "ossie/AnyUtils.h" #include "ossie/CorbaUtils.h" #include "CF/cf.h" +#include "CF/DataType.h" #include "ossie/Port_impl.h" #include "ossie/ComplexProperties.h" @@ -43,6 +44,8 @@ #include #include "PropertyMonitor.h" +#include + /* * @@ -279,10 +282,7 @@ class PropertyWrapper : public PropertyInterface template void setQuery (Target target, Func func) { - if (!isQueryable()) { - throw std::logic_error("property '" + id + "' is not queryable"); - } - ossie::bind(query_, target, func); + setQuery(QueryFunc(target, func)); } template @@ -297,10 +297,7 @@ class PropertyWrapper : public PropertyInterface template void setConfigure (Target target, Func func) { - if (!isConfigurable()) { - throw std::logic_error("property '" + id + "' is not configurable"); - } - ossie::bind(configure_, target, func); + setConfigure(ConfigureFunc(target, func)); } template @@ -309,7 +306,6 @@ class PropertyWrapper : public PropertyInterface pointerListeners_.add(func); } - template void addChangeListener (Target target, R (Base::*func)(A1*, A2*)) { @@ -352,10 +348,7 @@ class PropertyWrapper : public PropertyInterface template void setAllocator (Target target, Func func) { - if (!isAllocatable()) { - throw std::logic_error("property '" + id + "' is not allocatable"); - } - ossie::bind(allocator_, target, func); + setAllocator(AllocateFunc(target, func)); } template @@ -370,10 +363,7 @@ class PropertyWrapper : public PropertyInterface template void setDeallocator (Target target, Func func) { - if (!isAllocatable()) { - throw std::logic_error("property '" + id + "' is not allocatable"); - } - ossie::bind(deallocator_, target, func); + setDeallocator(DeallocateFunc(target, func)); } const std::string getNativeType () const @@ -447,10 +437,10 @@ class PropertyWrapper : public PropertyInterface private: // Delegate function types - typedef boost::function 
QueryFunc; - typedef boost::function ConfigureFunc; - typedef boost::function AllocateFunc; - typedef boost::function DeallocateFunc; + typedef redhawk::callback QueryFunc; + typedef redhawk::callback ConfigureFunc; + typedef redhawk::callback AllocateFunc; + typedef redhawk::callback DeallocateFunc; QueryFunc query_; ConfigureFunc configure_; @@ -475,6 +465,7 @@ typedef PropertyWrapper ULongLongProperty; typedef PropertyWrapper LongLongProperty; typedef PropertyWrapper FloatProperty; typedef PropertyWrapper DoubleProperty; +typedef PropertyWrapper UTCTimeProperty; typedef PropertyWrapper > ComplexFloatProperty; typedef PropertyWrapper > ComplexBooleanProperty; @@ -521,6 +512,27 @@ class SequenceProperty : public PropertyWrapper > } }; +namespace CF { + + CF::UTCTime operator+(const CF::UTCTime& lhs, double seconds); + + CF::UTCTime& operator+=(CF::UTCTime& lhs, double seconds); + + double operator-(const CF::UTCTime& lhs, const CF::UTCTime& rhs); + CF::UTCTime operator-(const CF::UTCTime& lhs, double seconds); + + CF::UTCTime& operator-=(CF::UTCTime& lhs, double seconds); + + bool operator==(const CF::UTCTime& lhs, const CF::UTCTime& rhs); + bool operator!=(const CF::UTCTime& lhs, const CF::UTCTime& rhs); + bool operator<(const CF::UTCTime& lhs, const CF::UTCTime& rhs); + bool operator<=(const CF::UTCTime& lhs, const CF::UTCTime& rhs); + bool operator>(const CF::UTCTime& lhs, const CF::UTCTime& rhs); + bool operator>=(const CF::UTCTime& lhs, const CF::UTCTime& rhs); + + std::ostream& operator<<(std::ostream&, const CF::UTCTime&); +} + typedef SequenceProperty StringSeqProperty; typedef SequenceProperty CharSeqProperty; typedef SequenceProperty BooleanSeqProperty; @@ -533,6 +545,7 @@ typedef SequenceProperty LongLongSeqProperty; typedef SequenceProperty ULongLongSeqProperty; typedef SequenceProperty FloatSeqProperty; typedef SequenceProperty DoubleSeqProperty; +typedef SequenceProperty UTCTimeSeqProperty; typedef SequenceProperty > ComplexFloatSeqProperty; typedef 
SequenceProperty > ComplexDoubleSeqProperty; @@ -658,6 +671,7 @@ class PropertyWrapperFactory static ULongLongProperty* Create (CORBA::ULongLong&); static FloatProperty* Create (CORBA::Float&); static DoubleProperty* Create (CORBA::Double&); + static UTCTimeProperty* Create (CF::UTCTime&); static ComplexBooleanProperty* Create (std::complex&); static ComplexCharProperty* Create (std::complex&); @@ -683,6 +697,7 @@ class PropertyWrapperFactory static ULongLongSeqProperty* Create (std::vector&); static FloatSeqProperty* Create (std::vector&); static DoubleSeqProperty* Create (std::vector&); + static UTCTimeSeqProperty* Create (std::vector&); static ComplexBooleanSeqProperty* Create (std::vector >&); static ComplexCharSeqProperty* Create (std::vector >&); diff --git a/redhawk/src/base/include/ossie/PropertyMap.h b/redhawk/src/base/include/ossie/PropertyMap.h index 384bbdd66..419b3890b 100644 --- a/redhawk/src/base/include/ossie/PropertyMap.h +++ b/redhawk/src/base/include/ossie/PropertyMap.h @@ -60,8 +60,17 @@ namespace redhawk { Value& operator[] (const std::string& id); const Value& operator[] (const std::string& id) const; + const Value& get(const std::string& id, const Value& def=Value()) const; + + bool operator==( const redhawk::PropertyMap &other ) const; + bool operator!=( const redhawk::PropertyMap &other ) const; + + void update(const CF::Properties& properties); + void push_back (const CF::DataType& dt); + void extend(const CF::Properties& properties); + iterator begin(); iterator end(); @@ -74,8 +83,11 @@ namespace redhawk { void erase(const std::string& id); void erase(iterator pos); void erase(iterator first, iterator last); + + std::string toString() const; }; + std::ostream& operator<<(std::ostream& out, const PropertyMap& properties); } #endif // REDHAWK_PROPERTYMAP_H diff --git a/redhawk/src/base/include/ossie/PropertyMonitor.h b/redhawk/src/base/include/ossie/PropertyMonitor.h index f145f32fc..f01f5b347 100644 --- 
a/redhawk/src/base/include/ossie/PropertyMonitor.h +++ b/redhawk/src/base/include/ossie/PropertyMonitor.h @@ -119,6 +119,7 @@ typedef SimpleMonitor ULongLongProperty; typedef SimpleMonitor LongLongProperty; typedef SimpleMonitor FloatProperty; typedef SimpleMonitor DoubleProperty; +typedef SimpleMonitor UTCTimeProperty; typedef SimpleMonitor > ComplexFloatProperty; typedef SimpleMonitor > ComplexBooleanProperty; @@ -189,6 +190,7 @@ typedef SequenceMonitor LongLongSeqProperty; typedef SequenceMonitor ULongLongSeqProperty; typedef SequenceMonitor FloatSeqProperty; typedef SequenceMonitor DoubleSeqProperty; +typedef SequenceMonitor UTCTimeSeqProperty; typedef SequenceMonitor > ComplexFloatSeqProperty; typedef SequenceMonitor > ComplexDoubleSeqProperty; @@ -288,6 +290,7 @@ typedef SequenceMonitor > ComplexULongLongSeqProp static ULongLongProperty* Create (CORBA::ULongLong&); static FloatProperty* Create (CORBA::Float&); static DoubleProperty* Create (CORBA::Double&); + static UTCTimeProperty* Create (CF::UTCTime&); static ComplexBooleanProperty* Create (std::complex&); static ComplexCharProperty* Create (std::complex&); @@ -313,6 +316,7 @@ typedef SequenceMonitor > ComplexULongLongSeqProp static ULongLongSeqProperty* Create (std::vector&); static FloatSeqProperty* Create (std::vector&); static DoubleSeqProperty* Create (std::vector&); + static UTCTimeSeqProperty* Create (std::vector&); static ComplexBooleanSeqProperty* Create (std::vector >&); static ComplexCharSeqProperty* Create (std::vector >&); diff --git a/redhawk/src/base/include/ossie/PropertySet_impl.h b/redhawk/src/base/include/ossie/PropertySet_impl.h index dc9f26754..28f6f974b 100644 --- a/redhawk/src/base/include/ossie/PropertySet_impl.h +++ b/redhawk/src/base/include/ossie/PropertySet_impl.h @@ -35,6 +35,9 @@ #include "ossie/Autocomplete.h" #include "CF/cf.h" +namespace redhawk { + class Value; +} class PropertySet_impl #ifdef BEGIN_AUTOCOMPLETE_IGNORE @@ -88,9 +91,27 @@ class PropertySet_impl // void 
startPropertyChangeMonitor( const std::string &rsc_id); void stopPropertyChangeMonitor(); + + static CF::UTCTime _makeTime(short status, double wsec, double fsec) { + CF::UTCTime _time; + struct timeval tv; + gettimeofday(&tv, NULL); + if (status == -1) { + _time.tcstatus = 1; + _time.twsec = tv.tv_sec; + _time.tfsec = tv.tv_usec/1e6; + } else { + _time.tcstatus = status; + _time.twsec = wsec; + _time.tfsec = fsec; + } + return _time; + }; protected: + virtual void setCommandLineProperty(const std::string& id, const redhawk::Value& value); + /*CF::Properties propertySet;*/ CF::DataType @@ -138,6 +159,22 @@ class PropertySet_impl wrapper->isNil(false); return wrapper; } + + template + PropertyInterface* addProperty (CF::UTCTime& value, + const T2& initial_value, + const std::string& id, + const std::string& name, + const std::string& mode, + const std::string& units, + const std::string& action, + const std::string& kinds) + { + PropertyInterface* wrapper = addProperty(value, id, name, mode, units, action, kinds); + value = redhawk::time::utils::convert(initial_value); + wrapper->isNil(false); + return wrapper; + } template void addPropertyChangeListener (const std::string& id, C* target, void (C::*func)(const T*, const T*)) @@ -319,6 +356,10 @@ class PropertySet_impl // Preferred new-style properties. 
typedef std::map PropertyMap; PropertyMap propTable; + + std::string _propertyQueryTimestamp; + + void setLogger(rh_logger::LoggerPtr logptr); private: template @@ -334,9 +375,11 @@ class PropertySet_impl return wrapper; } - typedef boost::function PropertyCallback; + typedef redhawk::callback PropertyCallback; void setPropertyCallback (const std::string& id, PropertyCallback callback); + rh_logger::LoggerPtr _propertysetLog; + typedef std::map PropertyCallbackMap; PropertyCallbackMap propCallbacks; diff --git a/redhawk/src/base/include/ossie/PropertyType.h b/redhawk/src/base/include/ossie/PropertyType.h index 38be85f8c..fdc9863d5 100644 --- a/redhawk/src/base/include/ossie/PropertyType.h +++ b/redhawk/src/base/include/ossie/PropertyType.h @@ -41,6 +41,9 @@ namespace redhawk { PropertyType(); explicit PropertyType(const CF::DataType& dt); + PropertyType(const std::string& id, const CORBA::Any& value); + explicit PropertyType(const std::string& id, const Value& value=Value()); + PropertyType& operator=(const CF::DataType& dt); void setId(const std::string& identifier); diff --git a/redhawk/src/base/include/ossie/ProvidesPort.h b/redhawk/src/base/include/ossie/ProvidesPort.h new file mode 100644 index 000000000..e082d327d --- /dev/null +++ b/redhawk/src/base/include/ossie/ProvidesPort.h @@ -0,0 +1,66 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef OSSIE_PROVIDESPORT_H +#define OSSIE_PROVIDESPORT_H + +#include +#include + +#include "CF/DataType.h" +#include "CF/NegotiablePort.h" +#include "Port_impl.h" +#include "Autocomplete.h" +#include "Transport.h" + +namespace redhawk { + + class NegotiableProvidesPortBase : public Port_Provides_base_impl +#ifdef BEGIN_AUTOCOMPLETE_IGNORE + , public virtual POA_ExtendedCF::NegotiableProvidesPort +#endif + { + public: + NegotiableProvidesPortBase(const std::string& name); + ~NegotiableProvidesPortBase(); + + virtual void initializePort(); + virtual void releasePort(); + + virtual ExtendedCF::TransportInfoSequence* supportedTransports(); + virtual ExtendedCF::NegotiationResult* negotiateTransport(const char* transportType, + const CF::Properties& transportProperties); + virtual void disconnectTransport(const char* transportId); + + protected: + ProvidesTransportManager* _getTransportManager(const std::string& transportType); + + ProvidesTransport* _getTransport(const std::string identifier); + + boost::mutex _transportMutex; + + typedef std::vector TransportManagerList; + TransportManagerList _transportManagers; + + typedef std::map TransportMap; + TransportMap _transports; + }; +} + +#endif // OSSIE_PROVIDESPORT_H diff --git a/redhawk/src/base/include/ossie/Resource_impl.h b/redhawk/src/base/include/ossie/Resource_impl.h index c1d3a692d..3a62e6ffe 100644 --- a/redhawk/src/base/include/ossie/Resource_impl.h +++ b/redhawk/src/base/include/ossie/Resource_impl.h @@ -78,6 +78,7 @@ class Resource_impl: char* identifier () throw (CORBA::SystemException); CORBA::Boolean started() throw (CORBA::SystemException); char* softwareProfile () throw (CORBA::SystemException); + CF::StringSequence* getNamedLoggers(); virtual void run (); virtual void halt (); @@ 
-89,10 +90,28 @@ class Resource_impl: /* * Return a pointer to the Domain Manager that the Resource is deployed on */ - redhawk::DomainManagerContainer* getDomainManager() { - return this->_domMgr; + redhawk::DomainManagerContainer* getDomainManager(); + + /* + * Register a function for notification when this Resource is released + */ + template + void addReleaseListener(Func func) + { + _resourceReleased.add(func); } + /* + * Register a member function for notification when this Resource is released + */ + template + void addReleaseListener(Target target, Func func) + { + _resourceReleased.add(target, func); + } + + const std::string& getIdentifier() const; + /* * Globally unique identifier for this Resource */ @@ -103,7 +122,14 @@ class Resource_impl: std::string naming_service_name; std::string _parent_id; + void setLogger(rh_logger::LoggerPtr logptr); + protected: + virtual void setCommandLineProperty(const std::string& id, const redhawk::Value& value); + + void setDomainManager(CF::DomainManager_ptr domainManager); + + const std::string& getDeploymentRoot() const; /* * Boolean describing whether or not this Resource is started @@ -130,12 +156,20 @@ class Resource_impl: return component; } + rh_logger::LoggerPtr _resourceLog; + // Generic implementation of start_component, taking a function pointer to // a component constructor (via make_component). 
typedef boost::function ctor_type; static void start_component(ctor_type ctor, int argc, char* argv[]); std::string currentWorkingDirectory; - redhawk::DomainManagerContainer *_domMgr; + boost::scoped_ptr _domMgr; bool _initialized; + std::string _deploymentRoot; + + ossie::notification _resourceReleased; + +public: + static Resource_impl* create_component(ctor_type, const CF::Properties& parameters); }; #endif diff --git a/redhawk/src/base/include/ossie/Service_impl.h b/redhawk/src/base/include/ossie/Service_impl.h index c3a8af713..1d337f6bb 100644 --- a/redhawk/src/base/include/ossie/Service_impl.h +++ b/redhawk/src/base/include/ossie/Service_impl.h @@ -52,6 +52,12 @@ class Service_impl std::string dpath(""); std::string sname(""); + for (int index = 1; index < argc; ++index) { + if (std::string(argv[index]) == std::string("-i")) { + std::cout<<"Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain"< execparams; for (int i = 0; i < argc; i++) { @@ -237,12 +243,15 @@ class Service_impl redhawk::DomainManagerContainer* getDomainManager() { return this->_domMgr; } + rh_logger::LoggerPtr _baseLog; protected: std::string _devMgr_ior; omni_mutex component_running_mutex; omni_condition component_running; + rh_logger::LoggerPtr _serviceLog; + private: void initResources(char*, char*); redhawk::DeviceManagerContainer *_devMgr; diff --git a/redhawk/src/base/include/ossie/ThreadedComponent.h b/redhawk/src/base/include/ossie/ThreadedComponent.h index ff5bb1cc9..76c97ea3d 100644 --- a/redhawk/src/base/include/ossie/ThreadedComponent.h +++ b/redhawk/src/base/include/ossie/ThreadedComponent.h @@ -20,8 +20,8 @@ #ifndef OSSIE_THREADEDCOMPONENT_H #define OSSIE_THREADEDCOMPONENT_H -#include "ossie/ProcessThread.h" -#include "ossie/Autocomplete.h" + +#include "ProcessThread.h" enum { NOOP = 0, @@ -57,7 +57,10 @@ class ThreadedComponent { ossie::ProcessThread* serviceThread; boost::mutex serviceThreadLock; + 
void setThreadName(const std::string& name); + private: + std::string _threadName; float _defaultDelay; }; diff --git a/redhawk/src/base/include/ossie/Transport.h b/redhawk/src/base/include/ossie/Transport.h new file mode 100644 index 000000000..78bb56b3c --- /dev/null +++ b/redhawk/src/base/include/ossie/Transport.h @@ -0,0 +1,225 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef OSSIE_TRANSPORT_H +#define OSSIE_TRANSPORT_H + +#include +#include +#include + +#include + +#include "CF/NegotiablePort.h" +#include "PropertyMap.h" + +namespace redhawk { + + class NegotiableProvidesPortBase; + class UsesPort; + class NegotiableUsesPort; + + class TransportError : public std::runtime_error + { + public: + TransportError(const std::string& message) : + std::runtime_error(message) + { + } + }; + + class TransportTimeoutError : public TransportError + { + public: + TransportTimeoutError(const std::string& message) : + TransportError(message) + { + } + }; + + class FatalTransportError : public TransportError + { + public: + FatalTransportError(const std::string& message) : + TransportError(message) + { + } + }; + + class Transport { + public: + virtual ~Transport() + { + } + + virtual std::string transportType() const = 0; + + virtual CF::Properties transportInfo() const + { + return CF::Properties(); + } + }; + + class TransportManager { + public: + virtual ~TransportManager() + { + } + + virtual std::string transportType() = 0; + + virtual CF::Properties transportProperties() + { + return CF::Properties(); + } + }; + + class UsesTransport : public Transport + { + public: + UsesTransport(UsesPort* port); + virtual ~UsesTransport() { } + + bool isAlive() const; + void setAlive(bool alive); + + virtual void disconnect() { } + + private: + UsesPort* _port; + bool _alive; + }; + + class UsesTransportManager : public TransportManager + { + public: + virtual ~UsesTransportManager() + { + } + + /** + * Return null to abort transport negotiation. + * + * Must not throw an exception. + */ + virtual UsesTransport* createUsesTransport(CORBA::Object_ptr object, + const std::string& connectionId, + const redhawk::PropertyMap& properties) = 0; + + /** + * Must not throw an exception. 
+ */ + virtual redhawk::PropertyMap getNegotiationProperties(UsesTransport*) + { + return redhawk::PropertyMap(); + } + + /** + * May throw a TransportError to abort transport negotiation. + */ + virtual void setNegotiationResult(UsesTransport*, const redhawk::PropertyMap&) + { + } + }; + + class ProvidesTransport : public Transport + { + public: + ProvidesTransport(NegotiableProvidesPortBase* port, const std::string& transportId); + + virtual ~ProvidesTransport() + { + } + + const std::string& transportId() const; + + /** + * May throw a TransportError to abort transport negotiation. + */ + virtual void startTransport() + { + } + + virtual void stopTransport() + { + } + + protected: + NegotiableProvidesPortBase* _port; + const std::string _transportId; + }; + + class ProvidesTransportManager : public TransportManager + { + public: + virtual ~ProvidesTransportManager() + { + } + + /** + * Return null to abort transport negotiation. + * + * Must not throw an exception. + */ + virtual ProvidesTransport* createProvidesTransport(const std::string& transportId, + const redhawk::PropertyMap& properties) = 0; + + /** + * Must not throw an exception. 
+ */ + virtual redhawk::PropertyMap getNegotiationProperties(ProvidesTransport*) + { + return redhawk::PropertyMap(); + } + }; + + class TransportFactory + { + public: + ~TransportFactory() + { + } + + virtual std::string transportType() = 0; + virtual std::string repoId() = 0; + virtual int defaultPriority() = 0; + + virtual ProvidesTransportManager* createProvidesManager(redhawk::NegotiableProvidesPortBase* port) = 0; + virtual UsesTransportManager* createUsesManager(redhawk::NegotiableUsesPort* port) = 0; + }; + + typedef std::vector TransportStack; + + class TransportRegistry + { + public: + static void RegisterTransport(TransportFactory* transport); + static TransportStack GetTransports(const std::string& repoId); + + private: + TransportRegistry(); + + class Impl; + + static Impl& Instance(); + }; +} + +#endif // OSSIE_TRANSPORT_H diff --git a/redhawk/src/base/include/ossie/UsesPort.h b/redhawk/src/base/include/ossie/UsesPort.h new file mode 100644 index 000000000..cf4627e25 --- /dev/null +++ b/redhawk/src/base/include/ossie/UsesPort.h @@ -0,0 +1,216 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef OSSIE_USESPORT_H +#define OSSIE_USESPORT_H + +#include +#include + +#include + +#include "CF/NegotiablePort.h" + +#include "Autocomplete.h" +#include "Port_impl.h" +#include "callback.h" +#include "debug.h" +#include "Transport.h" + +namespace redhawk { + + class UsesPort : public Port_Uses_base_impl +#ifdef BEGIN_AUTOCOMPLETE_IGNORE + , public virtual POA_ExtendedCF::QueryablePort +#endif + { + public: + UsesPort(const std::string& name); + virtual ~UsesPort(); + + // Register the member function 'func' to be called on class instance + // 'target' when a new connection is made. The function receives one + // argument, the connection ID: + // + // void Target::func(const std::string&); + // + template + void addConnectListener (Target target, Func func) + { + _portConnected.add(target, func); + } + + // Unregister the member function 'func' on class instance 'target' + // from further connection notifications. If the pair has not been + // registered previously, it is ignored. + template + void removeConnectListener (Target target, Func func) + { + _portConnected.remove(target, func); + } + + // Register the member function 'func' to be called on class instance + // 'target' when an existing connection is broken. The function + // receives one argument, the connection ID: + // + // void Target::func(const std::string&); + // + template + void addDisconnectListener (Target target, Func func) + { + _portDisconnected.add(target, func); + } + + // Unregister the member function 'func' on class instance 'target' + // from further disconnection notifications. If the pair has not been + // registered previously, it is ignored. 
+ template + void removeDisconnectListener (Target target, Func func) + { + _portDisconnected.remove(target, func); + } + + virtual void connectPort(CORBA::Object_ptr connection, const char* connectionId); + + virtual void disconnectPort(const char* connectionId); + + virtual ExtendedCF::UsesConnectionSequence* connections(); + + protected: + class Connection { + public: + Connection(const std::string& connectionId, CORBA::Object_ptr objref, UsesTransport* transport); + virtual ~Connection(); + + virtual void disconnected(); + + std::string connectionId; + CORBA::Object_var objref; + UsesTransport* transport; + }; + + typedef std::vector ConnectionList; + + typedef std::vector TransportList; + + template + class TransportIteratorAdapter { + public: + typedef ConnectionList::iterator IteratorType; + + TransportIteratorAdapter() + { + } + + TransportIteratorAdapter(IteratorType iter) : + _iterator(iter) + { + } + + inline const std::string& connectionId() + { + return (*_iterator)->connectionId; + } + + inline TransportType* transport() + { + return static_cast((*_iterator)->transport); + } + + inline TransportIteratorAdapter& operator++() + { + ++_iterator; + return *this; + } + + inline TransportIteratorAdapter operator++(int) + { + TransportIteratorAdapter result(*this); + ++(*this); + return result; + } + + inline bool operator==(const TransportIteratorAdapter& other) const + { + return (_iterator == other._iterator); + } + + inline bool operator!=(const TransportIteratorAdapter& other) const + { + return (_iterator != other._iterator); + } + + private: + IteratorType _iterator; + }; + + virtual void _validatePort(CORBA::Object_ptr object); + + ConnectionList::iterator _findConnection(const std::string& connectionId); + + virtual Connection* _createConnection(CORBA::Object_ptr object, const std::string& connectionId); + + virtual UsesTransport* _createTransport(CORBA::Object_ptr object, const std::string& connectionId) = 0; + + bool _hasConnection(const 
std::string& connectionId); + + ConnectionList _connections; + + private: + ossie::notification _portConnected; + ossie::notification _portDisconnected; + }; + + class NegotiableUsesPort : public UsesPort +#ifdef BEGIN_AUTOCOMPLETE_IGNORE + , public virtual POA_ExtendedCF::NegotiableUsesPort +#endif + { + public: + NegotiableUsesPort(const std::string& name); + virtual ~NegotiableUsesPort(); + + virtual void initializePort(); + + virtual ExtendedCF::TransportInfoSequence* supportedTransports(); + + virtual ExtendedCF::ConnectionStatusSequence* connectionStatus(); + + protected: + class NegotiatedConnection; + + virtual Connection* _createConnection(CORBA::Object_ptr object, const std::string& connectionId); + + virtual UsesTransport* _createLocalTransport(PortBase* port, CORBA::Object_ptr object, const std::string& connectionId); + + NegotiatedConnection* _negotiateConnection(ExtendedCF::NegotiableProvidesPort_ptr negotiablePort, + const std::string& connectionId); + + NegotiatedConnection* _negotiateTransport(ExtendedCF::NegotiableProvidesPort_ptr negotiablePort, + const std::string& connectionId, + UsesTransportManager* manager, + const redhawk::PropertyMap& properties); + + typedef std::vector TransportManagerList; + TransportManagerList _transportManagers; + }; +} + +#endif // OSSIE_USESPORT_H diff --git a/redhawk/src/base/include/ossie/Value.h b/redhawk/src/base/include/ossie/Value.h index 74ad28ab0..1a0cea963 100644 --- a/redhawk/src/base/include/ossie/Value.h +++ b/redhawk/src/base/include/ossie/Value.h @@ -31,13 +31,33 @@ namespace redhawk { class Value : public CORBA::Any { public: + enum Type { + TYPE_NONE, + TYPE_STRING, + TYPE_BOOLEAN, + TYPE_FLOAT, + TYPE_DOUBLE, + TYPE_OCTET, + TYPE_SHORT, + TYPE_USHORT, + TYPE_LONG, + TYPE_ULONG, + TYPE_LONGLONG, + TYPE_ULONGLONG, + TYPE_DATATYPE, + TYPE_VALUE, + TYPE_SEQUENCE, + TYPE_VALUE_SEQUENCE, + TYPE_PROPERTIES, + TYPE_OTHER + }; Value(); explicit Value(const CORBA::Any& any); Value(const Value& any); template 
- explicit Value(const T& value) + Value(const T& value) { setValue(value); } @@ -62,6 +82,14 @@ namespace redhawk { return *this; } + static Type GetType(CORBA::TypeCode_ptr typecode); + static bool IsNumeric(Type type); + + Type getType() const; + bool isNumeric() const; + bool isSequence() const; + Type getElementType() const; + std::string toString() const; bool toBoolean() const; float toFloat() const; @@ -94,6 +122,7 @@ namespace redhawk { } }; + std::ostream& operator<<(std::ostream& out, const CORBA::Any& value); class ValueSequence : public CORBA::AnySeq { public: diff --git a/redhawk/src/base/include/ossie/Versions.h b/redhawk/src/base/include/ossie/Versions.h index dcd47425b..85d288c9e 100644 --- a/redhawk/src/base/include/ossie/Versions.h +++ b/redhawk/src/base/include/ossie/Versions.h @@ -29,7 +29,7 @@ namespace redhawk { - int compareVersions(std::string &a, std::string &b); + int compareVersions(const std::string& a, const std::string& b); } diff --git a/redhawk/src/base/include/ossie/bitbuffer.h b/redhawk/src/base/include/ossie/bitbuffer.h new file mode 100644 index 000000000..82da4fc2b --- /dev/null +++ b/redhawk/src/base/include/ossie/bitbuffer.h @@ -0,0 +1,767 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_BITBUFFER_H +#define REDHAWK_BITBUFFER_H + +#include +#include + +#include "refcount_memory.h" +#include "bitops.h" + +namespace redhawk { + + // Forward declaration of read/write bitbuffer class. + class bitbuffer; + + /** + * @brief An immutable, shared container for working with bit data. + * + * The %shared_bitbuffer class provides read-only bit-level access to a + * backing array of byte data that can be shared between many bitbuffer + * instances. This enables the transfer of ownership of data without + * explicit management of references. + * + * shared_bitbuffers have reference semantics. Assignment and copy + * construction do not copy any bits, only the data pointer. A + * %shared_bitbuffer never peforms any memory allocation of its own, but + * can take ownership of an existing array. When the last reference to the + * backing array goes away, the backing array is freed. + * + * For write access and memory allocation, see bitbuffer. + */ + class shared_bitbuffer + { + public: + /// @brief The backing memory data type. + typedef unsigned char data_type; + /// @brief Value used to represent invalid bit indices. + static const size_t npos = static_cast(-1); + + /** + * Construct an empty %shared_bitbuffer. + */ + shared_bitbuffer(); + + /** + * @brief Construct a %shared_bitbuffer with an existing pointer. + * @param ptr Pointer to first byte of bit data. + * @param size Number of bits. + * + * The newly-created %sharedbit_buffer takes ownership of @a ptr. When + * the last %shared_buffer pointing to @a ptr is destroyed, @a ptr will + * be deleted with delete[]. + */ + shared_bitbuffer(data_type* ptr, size_t bits); + + /** + * @brief Construct a %shared_bitbuffer with an existing pointer and a + * custom deleter. + * @param ptr Pointer to first byte of bit data. 
+ * @param size Number of bits. + * @param deleter Callable object. + * + * @a D must by copy-constructible. When the last %shared_bitbuffer + * pointing to @a ptr is destroyed, @a deleter will be called on + * @a ptr. This can be used to define custom release behavior. + */ + template + shared_bitbuffer(data_type* ptr, size_t bits, D deleter) : + _M_memory(ptr, _M_bits_to_bytes(bits), deleter), + _M_base(ptr), + _M_offset(0), + _M_size(bits) + { + } + + /** + * @brief Construct a %shared_bitbuffer with an existing pointer known + * to be allocated from process-shared memory. + * @param ptr Pointer to first byte of bit data. + * @param size Number of bits. + * @param deleter Callable object. + * @param tag Indicates that @a ptr is in process-shared memory. + * + * @warning This constructor is intended for internal use only. + */ + template + shared_bitbuffer(data_type* ptr, size_t bits, D deleter, detail::process_shared_tag tag) : + _M_memory(ptr, _M_bits_to_bytes(bits), deleter, tag), + _M_base(ptr), + _M_offset(0), + _M_size(bits) + { + } + + /** + * Returns the number of bits. + */ + size_t size() const; + + /** + * Returns true if the %shared_bitbuffer is empty. + */ + bool empty() const; + + /** + * Returns a read-only pointer to the backing array. + */ + const data_type* data() const; + + /** + * @brief Returns the index of the first bit in the backing array. + * + * The offset is always in the range [0, 8). Bits are numbered starting + * at the most significant bit. + */ + size_t offset() const; + + /** + * @brief Subscript read access to a bit. + * @param index The index of the desired bit. + * @return The value of the bit (0 or 1). + */ + int operator[] (size_t pos) const; + + /** + * @brief Extracts an integer value. + * @param pos Index of first bit. + * @param bits Number of bits to extract (max 64). + * @return Integer value. + * @throw std::out_of_range If @p pos > size(), or there are fewer than + * @p bits at @p pos. 
+ * @throw std::length_error If @p bits is greater than 64. + * @see redhawk::bitops::getint + */ + uint64_t getint(size_t pos, size_t bits) const; + + /** + * @brief Returns a %shared_bitbuffer containing a subset of bits. + * @param start Index of first bit. + * @param end Index of last bit, exclusive (default end). + * @return The new %shared_bitbuffer. + * @throw std::out_of_range If @p start > size(). + * @throw std::invalid_argument If @p end < @p start. + * + * The new %shared_bitbuffer shares the same backing array. If @a end + * is past the last bit, the slice extends to the last bit. + */ + shared_bitbuffer slice(size_t start, size_t end=npos) const; + + /** + * @brief Adjusts the start and end bits. + * @param start Index of first bit. + * @param end Index of last bit, exclusive (default end). + * @throw std::out_of_range If @p start > size(). + * @throw std::invalid_argument If @p end < @p start. + * + * If @a end is past the last bit, the end index is unchanged. + */ + void trim(size_t start, size_t end=npos); + + /** + * @brief Copies this bit buffer. + * @return A new %bitbuffer with its own copy of the backing array. + */ + bitbuffer copy() const; + + /** + * @brief Copies this bit buffer using a provided allocator. + * @param allocator STL-compliant allocator. + * @return A new bit buffer with its own copy of the backing array. + * + * The new %bitbuffer's backing array is allocated with a copy of + * @a allocator. + */ + template + bitbuffer copy(const Alloc& allocator) const; + + /** + * @brief Performs a take/skip operation into a new bitbuffer. + * @param take Number of bits to copy per iteration. + * @param skip Number of bits to skip per iteration. + * @param start Index of first bit (default 0). + * @param end Index of last bit, exclusive (default end). + * @return New bitbuffer with requested bits. + * @throw std::out_of_range If @p start > size(). + * @throw std::invalid_argument If @p end < @p start. 
+ * @see bitbuffer::takeskip + * + * Alternately copies @a take bits and skips @a skip bits from the + * range [@a start, @a end) into a new bitbuffer. + */ + bitbuffer takeskip(size_t take, size_t skip, size_t start=0, size_t end=npos) const; + + /** + * @brief Swap contents with another bit buffer. + * @param other The %shared_bitbuffer to swap with. + */ + void swap(shared_bitbuffer& other); + + /** + * Returns the population count (number of 1's). + */ + int popcount() const; + + /** + * @brief Determines the Hamming distance from another bit buffer. + * @param other The %shared_bitbuffer to compare with. + * @return Number of bits that are different. + */ + int distance(const shared_bitbuffer& other) const; + + /** + * @brief Finds a pattern in this bit buffer within a maximum Hamming + * distance. + * @param pattern Bit pattern to search for. + * @param maxDistance Maximum allowable Hamming distance. + * @return Bit index of first occurence of @p pattern. + * + * Searches forward for a position at which the Hamming distance + * between this bit buffer and @a pattern is less than or equal to + * @a maxDistance. If found, returns the bit index at which the match + * occurs. If not found, returns npos. + */ + size_t find(const shared_bitbuffer& pattern, int maxDistance) const + { + return find(0, pattern, maxDistance); + } + + /** + * @brief Finds a pattern in this bit buffer within a maximum Hamming + * distance. + * @param start Starting bit index. + * @param pattern Bit pattern to search for. + * @param maxDistance Maximum allowable Hamming distance. + * @return Bit index of first occurrence of @p pattern. + * + * Starting from @a start, searches forward for a position at which the + * Hamming distance between this bit buffer and @a pattern is less than + * or equal to @a maxDistance. If found, returns the bit index at which + * the match occurs. If not found, returns npos. 
+ */ + size_t find(size_t start, const shared_bitbuffer& pattern, int maxDistance) const; + + /** + * Returns a reference to the backing memory object. + * + * @warning This method is intended for internal use only. + */ + const refcount_memory& get_memory() const + { + return _M_memory; + } + + /** + * @brief Creates a transient %shared_bitbuffer. + * @param data Pointer to first byte. + * @param size Number of bits. + * @see make_transient(const data_type*,size_t,size_t) + */ + static inline shared_bitbuffer make_transient(const data_type* data, size_t bits) + { + return make_transient(data, 0, bits); + } + + /** + * @brief Creates a transient %shared_bitbuffer. + * @param data Pointer to first byte. + * @param start Index of first bit. + * @param size Number of bits. + * + * Adapts externally managed memory to work with the %shared_bitbuffer + * API; however, additional care must be taken to ensure that the data + * is copied if it needs to be held past the lifetime of the transient + * %shared_bitbuffer. + */ + static shared_bitbuffer make_transient(const data_type* data, size_t start, size_t bits); + + /** + * @brief Returns true if the backing array's lifetime is not managed. + * + * Transient shared_bitbuffers do not own the underlying data. If the + * receiver of a transient bit buffer needs to hold on to it past the + * lifetime of the call, they must make a copy. + */ + bool transient() const + { + return !(this->_M_memory); + } + + protected: + /// @cond IMPL + + // Internal constructor for use by bitbuffer. The implementation is in + // the header so that it can be inlined, which in premise avoids the + // need to add/remove extra references on the refcount_memory. 
+ shared_bitbuffer(refcount_memory memory, size_t bits) : + _M_memory(memory), + _M_base((data_type*) _M_memory.address()), + _M_offset(0), + _M_size(bits) + { + } + + static inline size_t _M_bits_to_bytes(size_t bits) + { + return (bits + 7) / 8; + } + + /** + * @brief Checks index for validity. + * @param pos Index to check. + * @param size Size of container. + * @param name Name of calling method for exception message. + * @throw std::out_of_range If @p start > @p size. + */ + static void _M_check_pos(size_t pos, size_t size, const char* name); + + /** + * @brief Checks start and end indices for validity. + * @param start Start index. + * @param end End index (in/out). + * @param size Size of container. + * @param name Name of calling method for exception message. + * @throw std::out_of_range If @p start > @p size. + * @throw std::invalid_argument If @p end < @p start. + * @post @p end <= @p size + * + * Checks the indices @a start and @a end for validity against @a size + * and each other, and clamps @a end to be no larger than @a size. + */ + static void _M_check_range(size_t start, size_t& end, size_t size, const char* name); + + static size_t _M_takeskip_size(size_t size, size_t take, size_t skip); + + private: + // Prevent user code from calling swap with a bitbuffer. + void swap(bitbuffer& other); + + refcount_memory _M_memory; + data_type* _M_base; + size_t _M_offset; + size_t _M_size; + /// @endcond + }; + + /** + * @brief A shared container for working with bit data. + * + * The %bitbuffer class extends shared_bitbuffer to provides bit-level + * write access. Multiple bitbuffers and shared_bitbuffers may point to the + * same backing array. + * + * bitbuffers have reference semantics. Assignment and copy construction do + * not copy any bits, only the data pointer. + * + * Unlike %shared_bitbuffer, %bitbuffer has allocating constructors. When + * the last reference to the backing array goes away, the backing array is + * freed. 
+ */ + class bitbuffer : public shared_bitbuffer + { + private: + /** + * @brief Proxy bit reference class. + * + * This class adapts bit indices and data pointers to behave like + * both rvalues and lvalues. + * + * This class is not intended for direct public usage; operator[] can + * return an instance, and syntactically it behaves like a primitive + * reference (mostly), but user code cannot declare one. + */ + class reference { + public: + reference(data_type* data, size_t pos); + operator int () const; + reference& operator= (bool); + reference& operator= (const reference& other); + private: + data_type* _M_data; + size_t _M_pos; + }; + + public: + typedef std::allocator default_allocator; + + /** + * Construct an empty %bitbuffer. + */ + bitbuffer(); + + /** + * @brief Creates a %bitbuffer and allocates a backing array. + * @param bits Number of bits. + * + * Allocates a backing array large enough to hold @a bits bits using + * the default allocator. The memory is not initialized. + */ + explicit bitbuffer(size_t bits) : + shared_bitbuffer(_M_allocate(bits, default_allocator()), bits) + { + } + + /** + * @brief Creates a %bitbuffer and allocates a backing array. + * @param bits Number of bits. + * @param allocator STL-compliant allocator. + * + * Allocates a backing array large enough to hold @a bits bits using + * a copy of @a allocator. The memory is not initialized. + */ + template + bitbuffer(size_t bits, const Alloc& allocator) : + shared_bitbuffer(_M_allocate(bits, allocator), bits) + { + } + + /** + * @brief Convenience function to create a %bitbuffer from an integer. + * @param value Integer value. + * @param bits Number of bits in @p value (max 64). + * @return A new %bitbuffer. + * @throw std::length_error If @p bits is greater than 64. + * + * Allocates and initializes a new %bitbuffer from the least + * significant @a bits bits of @a value. 
+ */ + static inline bitbuffer from_int(uint64_t value, size_t bits) + { + bitbuffer result(bits); + result.setint(0, value, bits); + return result; + } + + /** + * @brief Convenience function to create a new %bitbuffer from a string + * of '0' and '1' characters. + * @param str String to be parsed. + * @return A new %bitbuffer. + * @throw std::invalid_argument If @p str contains any characters + * other than '0' or '1'. + */ + static inline bitbuffer from_string(const std::string& str) + { + bitbuffer result(str.size()); + result._M_parse(str); + return result; + } + + /** + * @brief Convenience function to create a %bitbuffer from an unpacked + * byte array. + * @param unpacked Pointer to first element. + * @param count Number of values to pack. + * @return A new %bitbuffer. + * + * Allocates and initializes a new %bitbuffer by packing the values in + * @a unpacked. Each element of @a unpacked is converted to a bit + * value, where zero becomes 0 bit and any non-zero value is a 1 bit. + */ + static inline bitbuffer from_unpacked(const bitops::byte* unpacked, size_t count) + { + bitbuffer result(count); + bitops::pack(result.data(), result.offset(), unpacked, count); + return result; + } + + /** + * @brief Convenience function to create a %bitbuffer from a byte + * array. + * @param ptr Pointer to first byte. + * @param bits Number of bits. + * @return A new %bitbuffer. + * + * Allocates and initializes a new %bitbuffer using @a bits bits from + * the byte array @a ptr. The new %bitbuffer does not take ownership of + * @a ptr. + */ + static inline bitbuffer from_array(const data_type* ptr, size_t bits) + { + return from_array(ptr, 0, bits); + } + + /** + * @brief Convenience function to create a %bitbuffer from a byte + * array. + * @param ptr Pointer to first byte. + * @param start Index of first bit. + * @param bits Number of bits. + * + * Allocates and initializes a new %bitbuffer using @a bits bits + * starting at bit @a start in the byte array @a ptr. 
The new + * %bitbuffer does not take ownership of @a ptr. + */ + static inline bitbuffer from_array(const data_type* ptr, size_t start, size_t bits) + { + return shared_bitbuffer::make_transient(ptr, start, bits).copy(); + } + + using shared_bitbuffer::data; + + /** + * Returns a read/write pointer to the backing array. + */ + data_type* data(); + + using shared_bitbuffer::operator[]; + + /** + * @brief Subscript read/write access to a bit. + * @param index The index of the desired bit. + * @return A reference to the bit. + * + * Because bits are not directly accessible, a proxy object is returned + * instead of the more typical reference-to-element. This proxy can be + * used as both an rvalue and an lvalue; however, it is not exactly + * equivalent to a primitive reference type. + */ + reference operator[] (size_t pos); + + /** + * @brief Inserts an integer value. + * @param pos Index of first bit. + * @param value Integer value to set. + * @param bits Number of bits in @p value (max 64). + * @throw std::out_of_range If @p pos > size(), or there are fewer than + * @p bits at @p pos. + * @throw std::length_error If @p bits is greater than 64. + * @see redhawk::bitops::setint + */ + void setint(size_t pos, uint64_t value, size_t bits); + + using shared_bitbuffer::slice; + + /** + * @brief Returns a %bitbuffer containing a subset of bits. + * @param start Index of first bit. + * @param end Index of last bit, exclusive (default end). + * @return The new %bitbuffer. + */ + bitbuffer slice(size_t start, size_t end=npos); + + /** + * @brief Fills the bit buffer with a value. + * @param value Bit value to set. + * + * Sets all bits to @a value. + */ + void fill(bool value) + { + fill(0, size(), value); + } + + /** + * @brief Fills a range of bits with a value. + * @param start Index of first bit. + * @param end Index of last bit, exclusive. + * @param value Bit value to set. + * + * Sets the bits in the range [@a start, @a end) to @a value. 
+ */ + void fill(size_t start, size_t end, bool value); + + /** + * @brief Resizes the %bitbuffer to the specified number of bits. + * @param bits Number of bits. + * @see trim + * + * Allocates a new backing buffer large enough to hold @a bits bits + * using the default allocator, preserving existing bit values. If + * @a bits is larger than the current size, new bit values are + * uninitialized. + * + * If @a bits is smaller than the current size, unless a new copy is + * desired, trim is more efficient because it does not perform any + * allocation or copy. + */ + void resize(size_t bits) + { + resize(bits, default_allocator()); + } + + /** + * @brief Resizes the %bitbuffer to the specified number of bits using + * a provided allocator. + * @param bits Number of bits. + * @param allocator STL-compliant allocator. + * + * Allocates a new backing buffer large enough to hold @a bits bits + * using a copy of @a allocator, preserving existing bit values. If + * @a bits is larger than the current size, new bit values are + * uninitialized. + * + * If @a bits is smaller than the current size, unless a new copy is + * desired, trim is more efficient because it does not perform any + * allocation or copy. + */ + template + void resize(size_t bits, const Alloc& allocator) + { + bitbuffer temp(bits, allocator); + _M_resize(temp); + } + + /** + * @brief Replaces bit values. + * @param pos Index of first bit to replace. + * @param bits Number of bits to replace. + * @param src The bit buffer to insert. + * @throw std::out_of_range If @p pos > size(), or there are fewer + * than @p bits at @p pos. + * + * Starting at @a pos, replaces existing bit values with the bit values + * in @a src. + */ + inline void replace(size_t pos, size_t bits, const shared_bitbuffer& src) + { + replace(pos, bits, src, 0); + } + + /** + * @brief Replaces bit values. + * @param pos Index of first bit to replace. + * @param bits Number of bits to replace. + * @param src The bit buffer to insert. 
+ * @param srcpos Index of first bit in @p src. + * @throw std::out_of_range If @p pos > size(), there are fewer than + * @p bits at @p pos, or + * @p srcpos > @p src.size(). + * + * Starting at @a pos, replaces existing bit values with the bit values + * starting at @a srcpos in @a src. + */ + void replace(size_t pos, size_t bits, const shared_bitbuffer& src, size_t srcpos); + + using shared_bitbuffer::takeskip; + + /** + * @brief Performs a take/skip operation into this bitbuffer. + * @param src Source bit buffer. + * @param take Number of bits to copy per iteration. + * @param skip Number of bits to skip per iteration. + * @param start Index of first bit in @p src (default 0). + * @param end Index of last bit in @p src, exclusive (default + * @p src.size()). + * @return Number of bits copied. + * @throw std::out_of_range If @p start > @p src.size(). + * @throw std::invalid_argument If @p end < @p start. + * @throw std::length_error If this bitbuffer is not large enough. + * + * Alternately copies @a take bits and skips @a skip bits into this bit + * buffer from @a src. + */ + size_t takeskip(const shared_bitbuffer& src, size_t take, size_t skip, size_t start=0, size_t end=npos) + { + return takeskip(0, src, take, skip, start, end); + } + + /** + * @brief Performs a take/skip operation into this bitbuffer. + * @param pos Index of first bit to write to. + * @param src Source bit buffer. + * @param take Number of bits to copy per iteration. + * @param skip Number of bits to skip per iteration. + * @param start Index of first bit in @p src (default 0). + * @param end Index of last bit in @p src, exclusive (default + * @p src.size()). + * @return Number of bits copied. + * @throw std::out_of_range If @p start > @p src.size(). + * @throw std::invalid_argument If @p end < @p start. + * @throw std::length_error If this bitbuffer is not large enough. + * + * Alternately copies @a take bits and skips @a skip bits into this bit + * buffer from @a src. 
+ */ + size_t takeskip(size_t pos, const shared_bitbuffer& src, size_t take, size_t skip, size_t start=0, size_t end=npos); + + /** + * @brief Swap contents with another bitbuffer. + * @param other The bitbuffer to swap with. + */ + void swap(bitbuffer& other); + + private: + /// @cond IMPL + // Helper to handle the allocation of reference counted memory to pass + // to the shared_bitbuffer constructor. Inlining it allows the compiler + // to elide extra reference counts, as this is being used to initialize + // the _M_memory member of shared_bitbuffer. + template + static inline refcount_memory _M_allocate(size_t bits, const Alloc& allocator) + { + return refcount_memory(_M_bits_to_bytes(bits), allocator); + } + + // Helper to parse a string into a newly-created bitbuffer. + void _M_parse(const std::string& str); + + void _M_resize(bitbuffer& dest); + /// @endcond + }; + + inline bitbuffer shared_bitbuffer::copy() const + { + // NB: Implementation cannot be done in-line because the buffer class + // is incomplete at that point + return this->copy(bitbuffer::default_allocator()); + } + + template + inline bitbuffer shared_bitbuffer::copy(const Alloc& allocator) const + { + // NB: Implementation cannot be done in-line because the buffer class + // is incomplete at that point + bitbuffer result(size(), allocator); + result.replace(0, result.size(), *this); + return result; + } + + inline bitbuffer shared_bitbuffer::takeskip(size_t take, size_t skip, size_t start, size_t end) const + { + // NB: Implementation cannot be done in-line because the buffer class + // is incomplete at that point + _M_check_range(start, end, size(), "redhawk::shared_bitbuffer::takeskip"); + size_t bits = _M_takeskip_size(end-start, take, skip); + bitbuffer result(bits); + result.takeskip(*this, take, skip, start, end); + return result; + } + + /** + * @brief Bit buffer equality comparison. + * @param lhs First bit buffer. + * @param rhs Second bit buffer. 
+ * @return True iff the size and bits of the bit buffers are equal. + */ + bool operator==(const shared_bitbuffer& lhs, const shared_bitbuffer& rhs); + + /** + * @brief Bit buffer inequality comparison. + * @param lhs First bit buffer. + * @param rhs Second bit buffer. + * @return True iff the size or bits of the bit buffers are not equal. + */ + bool operator!=(const shared_bitbuffer& lhs, const shared_bitbuffer& rhs); +} + +#endif // REDHAWK_BITBUFFER_H diff --git a/redhawk/src/base/include/ossie/bitops.h b/redhawk/src/base/include/ossie/bitops.h new file mode 100644 index 000000000..90dc19e4e --- /dev/null +++ b/redhawk/src/base/include/ossie/bitops.h @@ -0,0 +1,233 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_BITOPS_H +#define REDHAWK_BITOPS_H + +#include +#include + +namespace redhawk { + + namespace bitops { + + // @brief Bits are stored MSB first in bytes + typedef unsigned char byte; + + /** + * @brief Gets a single bit from a bit string. + * @param str Bit string. + * @param pos Index of bit. + * @returns Value of bit number @p pos in @p str. 
+ */ + bool getbit(const byte* str, size_t pos); + + /** + * @brief Sets a single bit in a bit string. + * @param str Bit string. + * @param pos Index of bit. + * @param value Bit value to set. + */ + void setbit(byte* str, size_t pos, bool value); + + /** + * @brief Extracts an integer value from a bit string. + * @param str Bit string. + * @param start Starting bit index. + * @param bits Number of bits to extract (max 64). + * @returns Integer value. + * @throw std::length_error If @p bits is greater than 64. + * + * The value is extracted in big-endian order, and returned right- + * justified (i.e., the least-significant N bits contain the value). + * If fewer than 64 bits are extracted, the most signifiant bits are + * zeroed. + */ + uint64_t getint(const byte* str, size_t start, size_t bits); + + /** + * @brief Inserts an integer value into a bit string. + * @param str Bit string. + * @param start Starting bit index. + * @param value Value to insert. + * @param bits Number of bits in @p value (max 64). + * @throw std::length_error If @p bits is greater than 64. + * + * @a value is inserted in big-endian order, and must be right- + * justified (i.e., the least-significant N bits are inserted). If + * fewer than 64 bits are requested, the most significant bits are + * ignored. + */ + void setint(byte* str, size_t start, uint64_t value, size_t bits); + + /** + * @brief Sets all of the bits in a bit string to a given value. + * @param str Bit string. + * @param start Indef of first bit in @p str. + * @param length Number of bits to set. + * @param value Bit value to set. + */ + void fill(byte* str, size_t start, size_t length, bool value); + + /** + * @brief Packs a byte array into a bit string. + * @param dest Destination bit string. + * @param dstart Index of first bit in @p dest. + * @param src Source byte array. + * @param length Number of elements to pack. + * + * Each byte of @a src is turned into a bit in @a dest. 
If the byte + * value is 0, the corresponding bit is 0; if the byte value is non- + * zero, the corresponding bit is 1. + */ + void pack(byte* dest, size_t dstart, const byte* src, size_t length); + + /** + * @brief Unpacks a bit string into a byte array. + * @param dest Destination byte array. + * @param src Source bit string. + * @param sstart Index of first bit in @p src. + * @param length Number of elements to unpack. + * + * Each bit of @a src is turned into a byte in @a dest. If the bit + * value is 0, the corresponding byte is 0; if the value is 1, the + * corresponding byte is 1. + */ + void unpack(byte* dest, const byte* src, size_t sstart, size_t length); + + /** + * @brief Converts a bit string into a character string. + * @param str Destination character array. + * @param src Source bit string. + * @param sstart Index of first bit in @p src. + * @param length Number of bits in @p src. + * @pre Enough space for @a length characters must have been allocated + * at @a str. + * + * Expands each bit in @a src into a character in @a str, where each + * bit is one of '0' or '1'. No null character terminator is added at + * the end of @a str. + */ + void toString(char* str, const byte* src, size_t sstart, size_t length); + + /** + * @brief Parses a character string into a bit string. + * @param dest Destination bit string. + * @param dstart Index of first bit in @p dest. + * @param src Source character array. + * @param length Number of characters in @p src. + * @returns Number of characters parsed. + * @pre Enough space for @a length bits must have been allocated at + * @a dest, taking @a dstart into account. + * + * Converts the characters in @a str into bits, where each character + * must be one of '0' or '1'. On success, returns @a length. If an + * invalid character is encountered, parsing stops and the number of + * valid characters is returned. 
+ */ + int parseString(byte* dest, size_t dstart, const char* str, size_t length); + + /** + * @brief Copies bits from one bit string to another. + * @param dest Destination bit string. + * @param dstart Index of first bit in @p dest. + * @param src Source bit string. + * @param sstart Index of first bit in @p src. + * @param length Number of bits to copy. + */ + void copy(byte* dest, size_t dstart, const byte* src, size_t sstart, size_t length); + + /** + * @brief Compares two bit strings. + * @param s1 First bit string. + * @param start1 Index of first bit in @p s1. + * @param s2 Second bit string. + * @param start2 Index of first bit in @p s2. + * @param length Number of bits to compare. + * @returns Positive integer, 0, or negative integer. + * + * Returns a positive integer if @a s1 is ordered before @a s2, 0 if + * both bit strings are equivalent, or a negative integer if @a s1 is + * ordered after @a s2. + */ + int compare(const byte* s1, size_t start1, const byte* s2, size_t start2, size_t length); + + /** + * @brief Calculates the population count of a bit string. + * @param str Bit string. + * @param start Index of first bit in @p str. + * @param length Length of @p str. + * @returns Number of 1's in @p str. + */ + int popcount(const byte* str, size_t start, size_t length); + + /** + * @brief Calculates the Hamming distance between two bit strings. + * @param s1 First bit string. + * @param start1 Index of first bit in @p s1. + * @param s2 Second bit string. + * @param start2 Index of first bit in @p s2. + * @param length Number of bits to compare. + * @returns Number of bits that are different between @p s1 and @p s2. + */ + int hammingDistance(const byte* s1, size_t start1, const byte* s2, size_t start2, size_t length); + + /** + * @brief Finds a pattern in a bit string within a maximum Hamming + * distance. + * @param str Bit string in which to search. + * @param sstart Index of first bit in @p str. + * @param slen Length of @p str. 
+ * @param patt Bit pattern to search for. + * @param pstart Index of first bit in @p patt. + * @param plen Length of @p patt. + * @param maxdist Maximum allowable Hamming distance. + * @returns Bit index in @p str where @p patt was found, or -1 if + * @p patt was not found within @p maxdist. + * + * Searches @a str for a position at which the Hamming distance between + * @a str and @a patt is less than or equal to @a maxdist. + */ + int find(const byte* str, size_t sstart, size_t slen, + const byte* patt, size_t pstart, size_t plen, + int maxdist); + + /** + * @brief Performs a take/skip operation. + * @param dest Destination bit string. + * @param dstart Index of first bit in @p dest. + * @param src Source bit string. + * @param sstart Index of first bit in @p src. + * @param slen Length of @p src. + * @param take Number of bits to copy per iteration. + * @param skip Number of bits to skip per iteration. + * @returns Number of bits copied. + * + * Alternately copies @a take bits and skips @a skip bits from @a src + * into @a dest. + */ + size_t takeskip(byte* dest, size_t dstart, + const byte* src, size_t sstart, size_t slen, + size_t take, size_t skip); + } + +} + +#endif // REDHAWK_BITOPS_H diff --git a/redhawk/src/base/include/ossie/callback.h b/redhawk/src/base/include/ossie/callback.h index fb5620001..b3cbbfbec 100644 --- a/redhawk/src/base/include/ossie/callback.h +++ b/redhawk/src/base/include/ossie/callback.h @@ -25,6 +25,858 @@ #include #include +#include "internal/equals.h" + +namespace redhawk { + namespace detail { + // Bring ossie::internal::has_equals into this namespace (in effect) so + // we can specialize it for boost::bind + template + struct has_equals : public ossie::internal::has_equals { }; + + // Special case: boost::bind returns objects whose operator== returns + // another bind object, as opposed to a boolean, which breaks the + // default has_equals template. 
However, it does have one, and can be + // compared for equality using function_equal. + template + struct has_equals > { + static bool const value = true; + }; + + // Helper class to manage an object via void pointers, to avoid + // generating a lot of virtual functions and object type information + // when using callbacks. The object is stored in a buffer directly + // following the void_manager; its type is erased, but the key + // functions of copy/delete and equality comparison are maintained as + // function pointers that are able to cast back to the original object + // type. + struct void_manager { + ~void_manager() + { + _M_deleter(get_object()); + } + + // Returns a pointer to the managed object + inline void* get_object() + { + return &_M_buf[0]; + } + + // Returns a const pointer to the managed object + inline const void* get_object() const + { + return &_M_buf[0]; + } + + // Copies this void_manager and its contained object + void_manager* clone() const + { + // Allocate enough space for the manager plus the object, then + // initialize the manager with placement new + void* buf = _M_allocate(_M_size); + void_manager* manager = new (buf) void_manager(*this); + + // Dispatch to the object copy constructor (via _M_clone) + _M_clone(manager->get_object(), get_object()); + return manager; + } + + // Compares this void_manager's contained object with another's. + bool operator==(const void_manager& other) const + { + // If the object doesn't support equality comparison, consider + // the objects unequal. If the two managers have different + // equality function pointers, the types must differ, so also + // consider the objects unequal; this may be more limiting than + // the compiler might allow (e.g., due to implict conversions + // or overloads), but is the safest approach without more type + // information. 
+ if (!_M_equal || (_M_equal != other._M_equal)) { + return false; + } + return _M_equal(get_object(), other.get_object()); + } + + // Create a new void_manager with a copy of the object + template + static void_manager* create(const T& object) + { + // Allocate enough space for the void_manager and the object, + // then initialize with placement new + void* buf = _M_allocate(sizeof(T)); + void_manager* manager = new (buf) void_manager(&deleter, &clone, get_equals(), sizeof(T)); + + // Clone the input object into the new void_manager + manager->_M_clone(manager->get_object(), &object); + return manager; + } + + private: + // Function types for object management; these adapt the object + // type, which is only known at creation time, to void pointers + typedef void (*deleter_func)(void*); + typedef void (*clone_func)(void* dest, const void* src); + typedef bool (*equal_func)(const void*, const void*); + + // Create a new void manager; can only be used from create() + void_manager(deleter_func deleter, clone_func clone, equal_func equal, size_t size) : + _M_deleter(deleter), + _M_clone(clone), + _M_equal(equal), + _M_size(size) + { + } + + // Allocates enough memory for a void_manager with an object buffer + // of the given size + static void* _M_allocate(size_t size) + { + size_t bytes = sizeof(void_manager) + size - sizeof(_M_buf); + return ::operator new(bytes); + } + + // Deleter function template, instantiated by create (as a function + // pointer) + template + static void deleter(void* data) + { + // Call the stored object's destructor directly instead of + // delete, because the memory is part of the void_manager + // instance + static_cast(data)->~T(); + } + + // Copy function template, instantiated by create (as a function + // pointer) + template + static void clone(void* dest, const void* src) + { + // Call the placement new copy constructor into the destination + // object buffer; the memory itself is managed by the + // void_manager instance + new (dest) 
T(*static_cast(src)); + } + + // Equals function template, instantiated by create (as a function + // pointer), but only if the template type supports testing for + // equality + template + static bool equals(const void* lhs, const void* rhs) + { + return equals(*static_cast(lhs), *static_cast(rhs)); + } + + // Implementation of equality function for types that support it + // normally + template + static inline bool equals(const T& lhs, const T& rhs) + { + return lhs == rhs; + } + + // Overload of equality function for boost::bind objects, which are + // not possible to compare with operator== + template + static inline bool equals(const boost::_bi::bind_t& lhs, const boost::_bi::bind_t& rhs) + { + return function_equal(lhs, rhs); + } + + // Template function to get the function pointer for equals, for + // types that cannot be compared for equality. Unlike the other + // function pointers, equals is looked up in this way to avoid + // instantiating the function template unless it's needed, because + // when used as a function pointer it creates a linker symbol. + template + static inline equal_func get_equals(typename boost::disable_if >::type* unused=0) + { + return 0; + } + + // Template function to get the function pointer for equals, for + // types that can be compared for equality + template + static inline equal_func get_equals(typename boost::enable_if >::type* unused=0) + { + return &equals; + } + + // Object management function pointers; see type definitions above + deleter_func _M_deleter; + clone_func _M_clone; + equal_func _M_equal; + + // The size of the managed object + size_t _M_size; + + // Buffer for storing the managed object; its true size is known at + // allocation time (and stored in _M_size), but it must be at least + // 1. The object storage starts at _M_buf[0] and goes to the end of + // the allocated memory. + char _M_buf[1]; + }; + + // Forward declaration of an "unusable" argument type. 
For simplicity + // of implementation, callback provides zero, one and two argument + // versions of operator(); without variadic templates, we would have to + // create specializations for each argument count. The overloads with + // too many arguments take an "unusable" struct, which will never match + // the argument type, giving an error message. + struct unusable; + + // Traits class to bind a function signature Sig to an invoker function + // type. Callback invokers take a void pointer as the first argument, + // which is a type-erased pointer to a callable object (e.g., function + // pointer, member_function, etc.), followed by the normal arguments. + // This is how the callback class dispatches function calls. + // + // This class must be specialized for each number of arguments (in this + // case zero, one and two). The result type is also included in the + // typedef rather than using Boost's function traits classes because + // it's simple enough to include on our own. + template + struct callback_traits; + + // Specialization for zero-argument invoker function type + template + struct callback_traits { + typedef R result_type; + typedef unusable first_argument_type; + typedef unusable second_argument_type; + typedef R (*invoker_func)(void*); + }; + + // Specialization for two-argument invoker function type + template + struct callback_traits { + typedef R result_type; + typedef A1 first_argument_type; + typedef unusable second_argument_type; + typedef R (*invoker_func)(void*, A1); + }; + + // Specialization for two-argument invoker function type + template + struct callback_traits { + typedef R result_type; + typedef A1 first_argument_type; + typedef A2 second_argument_type; + typedef R (*invoker_func)(void*, A1, A2); + }; + + // Templatized class that adapts the callable type Func to the function + // signature Sig. 
This must be specialized for each number of arguments + // supported (in this case zero, one and two) with a static function + // call() that is used as a function pointer; it must be assignable to + // a callback_traits::invoker_func. The call() function receives + // the callable as a void* (which can then be static cast back to the + // callable type) and any arguments declared in Sig. + // + // For each argument count, this should be further specialized for the + // void return versions to adapt non-void callables to void return + // signatures. The compiler allows statements like "return f();" if + // both functions return void, but if f() returns a value it becomes a + // compilation error. + template + struct function_invoker; + + // Specialization for zero-argument invocation function + template + struct function_invoker { + static R call(void* data) + { + Func* func = static_cast(data); + return (*func)(); + } + }; + + // Specialization for zero-argument void return invocation function + template + struct function_invoker { + static void call(void* data) + { + Func* func = static_cast(data); + (*func)(); + } + }; + + // Specialization for one-argument invocation function + template + struct function_invoker { + static R call(void* data, A1 a1) + { + Func* func = static_cast(data); + return (*func)(a1); + } + }; + + // Specialization for one-argument void return invocation function + template + struct function_invoker { + static void call(void* data, A1 a1) + { + Func* func = static_cast(data); + (*func)(a1); + } + }; + + // Specialization for two-argument invocation function + template + struct function_invoker { + static R call(void* data, A1 a1, A2 a2) + { + Func* func = static_cast(data); + return (*func)(a1, a2); + } + }; + + // Specialization for two-argument void return invocation function + template + struct function_invoker { + static void call(void* data, A1 a1, A2 a2) + { + Func* func = static_cast(data); + (*func)(a1, a2); + } + }; + + // 
Template class to bind together a member function with an object + // instance. The instance may be stored by pointer, value or shared + // pointer; the function must be a member function pointer. + template + struct member_function { + member_function(Target target, Func func) : + target(target), + func(func) + { + } + + Target target; + Func func; + }; + + // If the target object type supports it, define operator== for the + // related member_function type(s). It is assumed that the function + // objects are always comparable. + template + inline typename boost::enable_if,bool>::type + operator==(const member_function& lhs, const member_function& rhs) + { + return (lhs.target == rhs.target) && (lhs.func == rhs.func); + } + + // The get_pointer() function converts the target of a member function + // into a pointer so that any type can be used with operator->* to call + // a member function pointer. This overlaps quite a bit with the Boost + // function of the same name, but supports by-value objects in member + // functions via its default implementation. + template + inline T* get_pointer(T& value) + { + return &value; + } + + // Overload of get_pointer() for types that are already pointers + template + inline T* get_pointer(T* value) + { + return value; + } + + // Overload of get_pointer() for boost::shared_ptr + template + inline T* get_pointer(boost::shared_ptr& value) + { + return value.get(); + } + + // Templatized class that adapts a class instance/member function pair + // to the function signature Sig. See function_invoker for explanation + // about specialization requirements. 
+ template + struct member_invoker; + + // Specialization for zero-argument invocation function + template + struct member_invoker { + static R call(void* data) + { + MemberFunc* func = static_cast(data); + return (get_pointer(func->target)->*(func->func)) (); + } + }; + + // Specialization for zero-argument void return invocation function + template + struct member_invoker { + static void call(void* data) + { + MemberFunc* func = static_cast(data); + (get_pointer(func->target)->*(func->func)) (); + } + }; + + // Specialization for one-argument invocation function + template + struct member_invoker { + static R call(void* data, A1 a1) + { + MemberFunc* func = static_cast(data); + return (get_pointer(func->target)->*(func->func)) (a1); + } + }; + + // Specialization for one-argument void return invocation function + template + struct member_invoker { + static void call(void* data, A1 a1) + { + MemberFunc* func = static_cast(data); + (get_pointer(func->target)->*(func->func)) (a1); + } + }; + + // Specialization for two-argument invocation function + template + struct member_invoker { + static R call(void* data, A1 a1, A2 a2) + { + MemberFunc* func = static_cast(data); + return (get_pointer(func->target)->*(func->func)) (a1, a2); + } + }; + + // Specialization for one-argument void return invocation function + template + struct member_invoker { + static void call(void* data, A1 a1, A2 a2) + { + MemberFunc* func = static_cast(data); + (get_pointer(func->target)->*(func->func)) (a1, a2); + } + }; + } + + /** + * @brief Generic callback class. 
+ * + * %callback provides overlapping functionality with Boost Function/Bind + * and C++11's header (also available as part of TR1 on older + * compilers); however, for the way callbacks are used in REDHAWK, each has + * deficiencies that necessitated the creation of %callback: + * - boost::function creates unique symbols per type that prevent the + * dynamic loader from unloading libraries + * - C++11's std::function does not support operator==, which is used in + * the notification class to unregister a callback + */ + template + struct callback + { + private: + typedef typename detail::callback_traits traits; + typedef typename traits::invoker_func invoker_func; + typedef typename traits::result_type result_type; + typedef typename traits::first_argument_type first_argument_type; + typedef typename traits::second_argument_type second_argument_type; + + // Use a member pointer as the type for boolean-like conversion (so + // that you can do "if (x)"), because it cannot be converted to any + // other type + typedef invoker_func (callback::*unspecified_bool_type); + + public: + /** + * @brief Construct an empty %callback. + */ + callback() : + _M_invoker(0), + _M_type(TYPE_NONE) + { + } + + /** + * @brief Construct a %callback with a function pointer. + * @param func Function pointer. + * + * The signature of @a func must be compatible with the declared return + * type and arguments. + */ + template + callback(Func* func) : + _M_invoker(&detail::function_invoker::call), + _M_type(TYPE_FUNCTION) + { + // Function pointers are simple enough to store in _M_impl, but the + // type doesn't match the function pointer placeholder; rather than + // work around aliasing warnings, use placement new to initialize. + typedef Func* impl_type; + new (&_M_impl) impl_type(func); + } + + /** + * @brief Construct a %callback with a functor by reference. + * @param func Reference to a functor object. 
+ * + * The signature of @a func must be compatible with the declared return + * type and arguments. + */ + template + callback(boost::reference_wrapper func) : + _M_invoker(&detail::function_invoker::call), + _M_type(TYPE_FUNCTOR_REF) + { + _M_impl.functor = func.get_pointer(); + } + + /** + * @brief Construct a %callback with a class instance and member + * function pointer. + * @param target Pointer to the class instance. + * @param func Member function pointer. + * + * The signature of @a func must be compatible with the declared return + * type and arguments. + */ + template + callback(Target* target, Func func) : + _M_type(TYPE_MEMBER) + { + // Like function pointers, member functions with a pointer to an + // object can be stored in _M_impl, but the types don't match. + // Because the member_function template class is intentionally + // laid out to match the member function placeholder, placement + // new can be used to initialize. + typedef detail::member_function impl_type; + new (&_M_impl) impl_type(target, func); + _M_invoker = &detail::member_invoker::call; + } + + /** + * @brief Construct a %callback with a functor. + * @param func A functor object. + * + * @a func must be copy-constructible and support an operator() that is + * compatible with the declared return type and arguments. + */ + template + callback(const Functor& func) : + _M_invoker(&detail::function_invoker::call), + _M_type(TYPE_MANAGED) + { + // Create a new managed object, which also copies the functor + // object argument + _M_impl.managed = detail::void_manager::create(func); + } + + /** + * @brief Construct a %callback with a class instance and member + * function pointer. + * @param target Class instance (or shared pointer to a class + * instance). + * @param func Member function pointer. + * + * The signature of @a func must be compatible with the declared return + * type and arguments. 
+ */ + template + callback(Target target, Func func) : + _M_type(TYPE_MANAGED) + { + // Create a new managed object, with a member_function as the + // contents. This is most likely to be used with shared pointers, + // but can work for other types as well + typedef detail::member_function impl_type; + _M_impl.managed = detail::void_manager::create(impl_type(target, func)); + _M_invoker = &detail::member_invoker::call; + } + + /** + * Copy constructor. + */ + callback(const callback& other) : + _M_invoker(other._M_invoker), + _M_type(other._M_type) + { + switch (other._M_type) { + case TYPE_MANAGED: + // Copy the other callback's implementation (which includes + // the manager) + _M_impl.managed = other._M_impl.managed->clone(); + break; + case TYPE_FUNCTION: + // Copy the function pointer (the type doesn't matter) + _M_impl.func = other._M_impl.func; + break; + case TYPE_FUNCTOR_REF: + // Copy the functor pointer (the type doesn't matter) + _M_impl.functor = other._M_impl.functor; + break; + case TYPE_MEMBER: + // Copy the member function object and pointer (the types don't + // matter--the sizes are the same regardless of the object type + // or function signature) + _M_impl.member = other._M_impl.member; + break; + default: + break; + } + } + + /** + * @brief Destructor. + * + * Any allocated objects are destroyed. + */ + ~callback() + { + // Delete any managed object + clear(); + } + + /** + * Copy assignment. + */ + callback& operator=(const callback& other) + { + if (&other != this) { + // Use the copy constructor, swap and the destructor to handle + // everything (as opposed to re-implementing basically the same + // thing) + callback temp(other); + this->swap(temp); + } + return *this; + } + + /** + * @brief Replace the current target with a class instance and member + * function pointer. + * @param target Class instance (by pointer, shared pointer or value). + * @param func Member function pointer. 
+ * + * The signature of @a func must be compatible with the declared return + * type and arguments. + */ + template + void assign(Target target, Func func) + { + callback temp(target, func); + this->swap(temp); + } + + /** + * @brief Checks whether this %callback is equivalent to another. + * @param other Another %callback. + * @return true if this %callback is equivalent to @a other, false + * otherwise. + * + * Two callbacks are considered equal if their targets can reasonably + * be assumed to be equivalent. The targets must be of the same type to + * be compared: + * - function pointers must be exactly equal + * - member functions must point to the same object and member function + * - functors must support operator== and evalute as equal + */ + bool operator==(const callback& other) const + { + // If the invoker functions are different, the types must be + // different + if (_M_invoker != other._M_invoker) { + return false; + } + switch (_M_type) { + case TYPE_MANAGED: + // Defer to the managed object's equality operator + return (*(_M_impl.managed) == *(other._M_impl.managed)); + case TYPE_FUNCTION: + // Compare standalone function pointers directly (the types + // don't matter) + return (_M_impl.func == other._M_impl.func); + case TYPE_FUNCTOR_REF: + // Compare standalone function pointers directly (the types + // don't matter) + return (_M_impl.functor == other._M_impl.functor); + case TYPE_MEMBER: + // Compare the object and member function pointers directly + // (the types don't matter) + return (_M_impl.member.target == other._M_impl.member.target) && + (_M_impl.member.func == other._M_impl.member.func); + default: + // Empty callbacks are "equal" + return true; + } + } + + /** + * Returns true if this %callback is empty. + */ + bool operator! () const + { + return empty(); + } + + /** + * Evaluates to true in a boolean context if this %callback is non- + * empty, false otherwise. 
+ */ + operator unspecified_bool_type () const + { + return empty()?0:&callback::_M_invoker; + } + + /** + * @brief Returns true if this %callback does not have a target. + */ + bool empty() const + { + return _M_type == TYPE_NONE; + } + + /** + * @brief Resets this %callback to an empty state. + */ + void clear() + { + if (_M_type == TYPE_MANAGED) { + delete _M_impl.managed; + } + _M_invoker = 0; + _M_type = TYPE_NONE; + } + + /** + * @brief Swap contents with another %callback. + * @param other %callback to swap with. + */ + void swap(callback& other) + { + // Copy the raw bytes, since we do not know (nor do we need to, + // really) the other's implementation + impl temp_impl; + memcpy(&temp_impl, &other._M_impl, sizeof(impl)); + memcpy(&other._M_impl, &_M_impl, sizeof(impl)); + memcpy(&_M_impl, &temp_impl, sizeof(impl)); + + std::swap(_M_invoker, other._M_invoker); + std::swap(_M_type, other._M_type); + } + + /** + * @brief Invokes the target function. + * @return The result of the target function (or nothing, if the + * return type of this %callback is void). + * @throws std::runtime_error if this %callback is empty + */ + inline result_type operator() () + { + if (empty()) { + throw std::runtime_error("empty callback"); + } + return _M_invoker(_M_data()); + } + + /** + * @brief Invokes the target function with a single argument. + * @param a1 The first argument. + * @return The result of the target function (or nothing, if the + * return type of this %callback is void). + * @throws std::runtime_error if this %callback is empty + */ + inline result_type operator() (first_argument_type a1) + { + if (empty()) { + throw std::runtime_error("empty callback"); + } + return _M_invoker(_M_data(), a1); + } + + /** + * @brief Invokes the target function with a two arguments. + * @param a1 The first argument. + * @param a2 The second argument. + * @return The result of the target function (or nothing, if the + * return type of this %callback is void). 
+ * @throws std::runtime_error if this %callback is empty + */ + inline result_type operator() (first_argument_type a1, second_argument_type a2) + { + if (empty()) { + throw std::runtime_error("empty callback"); + } + return _M_invoker(_M_data(), a1, a2); + } + + private: + // Poison comparison to callbacks with different signatures + template + bool operator==(const callback&) const; + + /// @cond IMPL + + // Returns a pointer to the callable object, for invocation + void* _M_data() + { + if (_M_type == TYPE_MANAGED) { + // The invoker takes the void_manager's contained object + return _M_impl.managed->get_object(); + } else if (_M_type == TYPE_FUNCTOR_REF) { + return _M_impl.functor; + } else { + // Use the impl object as an alias for the function pointer or + // object/member pair + return &_M_impl; + } + } + + // Discriminated union type to hold the specific implementation of the + // callback. Function pointers, by-reference functors, and object + // pointer/member function pointer pairs can be stored in-place; all + // other types are allocated on the heap. 
+ typedef union { + // Placeholder for a function pointer; all function pointers should + // have the same size, so the specific type is only relevant for + // dispatch, which does not go through this object + void (*func)(); + + // Pointer to a functor object; functors passed by reference (via + // boost::ref) are converted to a pointer and stored here + void* functor; + + // Placeholder for an object and member function pointer; as with + // function pointers, the types are only relevant for dispatch, + // which uses a different interpretation of this object + struct { + callback* target; + void (callback::*func)(); + } member; + + // For all other types, which cannot be stored in place, a heap- + // allocated void_manager holds the callable object + detail::void_manager* managed; + } impl; + + // Discriminant for implementation types + enum impl_type { + TYPE_NONE, + TYPE_FUNCTION, + TYPE_FUNCTOR_REF, + TYPE_MEMBER, + TYPE_MANAGED + }; + + impl _M_impl; + invoker_func _M_invoker; + impl_type _M_type; + /// @endcond + }; + + template + bool operator!=(const callback& lhs, const T& rhs) + { + return !(lhs == rhs); + } +} + namespace ossie { // The functions and classes in this header are designed to provide easy @@ -166,7 +1018,7 @@ namespace ossie { class notification_base { public: - typedef boost::function func_type; + typedef redhawk::callback func_type; /* * Register the callable 'func' to be called when this notification is @@ -189,6 +1041,31 @@ namespace ossie { this->_M_listeners.end()); } + /* + * Register the member function 'func' to be called on the instance + * 'target' when this notification is triggered. + * + * Listeners do not need to match the signature exactly, but the + * notification's argument types must be convertible to the listener's + * argument types. 
+ */ + template + void add(Target target, Func func) + { + this->add(func_type(target, func)); + } + + /* + * Remove the member function 'func' on the instance 'target' from + * further notifications. The pair must have been registered together + * in a prior call to add(). + */ + template + void remove(Target target, Func func) + { + this->remove(func_type(target, func)); + } + bool empty() const { return _M_listeners.empty(); @@ -218,34 +1095,7 @@ namespace ossie { call_each(this->_M_listeners.begin(), this->_M_listeners.end()); } - /* - * Register the member function 'func' to be called on the instance - * 'target' when this notification is triggered. - * - * Listeners do not need to match the signature exactly, but the - * notification's argument types must be convertible to the listener's - * argument types. - */ - template - void add(Target target, Func func) - { - super::add(boost::bind(func, target)); - } - - /* - * Remove the member function 'func' on the instance 'target' from - * further notifications. The pair must have been registered together - * in a prior call to add(). - */ - template - void remove(Target target, Func func) - { - super::remove(boost::bind(func, target)); - } - private: - typedef notification_base super; - template static inline void call_each(Iterator begin, const Iterator end) { for (; begin != end; ++begin) { @@ -269,35 +1119,7 @@ namespace ossie { call_each(this->_M_listeners.begin(), this->_M_listeners.end(), arg1); } - /* - * Register the member function 'func' to be called on the instance - * 'target' when this notification is triggered. - * - * Listeners do not need to match the signature exactly, but the - * notification's argument types must be convertible to the listener's - * argument types. 
- */ - template - void add(Target target, Func func) - { - super::add(boost::bind(&invoker_type::template invoke, target, func, _1)); - } - - /* - * Remove the member function 'func' on the instance 'target' from - * further notifications. The pair must have been registered together - * in a prior call to add(). - */ - template - void remove(Target target, Func func) - { - super::remove(boost::bind(&invoker_type::template invoke, target, func, _1)); - } - private: - typedef notification_base super; - typedef ::ossie::detail::invoker invoker_type; - template static inline void call_each(Iterator begin, const Iterator end, A1 arg1) { for (; begin != end; ++begin) { @@ -321,36 +1143,7 @@ namespace ossie { call_each(this->_M_listeners.begin(), this->_M_listeners.end(), arg1, arg2); } - - /* - * Register the member function 'func' to be called on the instance - * 'target' when this notification is triggered. - * - * Listeners do not need to match the signature exactly, but the - * notification's argument types must be convertible to the listener's - * argument types. - */ - template - void add(Target target, Func func) - { - super::add(boost::bind(&invoker_type::template invoke, target, func, _1, _2)); - } - - /* - * Remove the member function 'func' on the instance 'target' from - * further notifications. The pair must have been registered together - * in a prior call to add(). 
- */ - template - void remove(Target target, Func func) - { - super::remove(boost::bind(&invoker_type::template invoke, target, func, _1, _2)); - } - private: - typedef notification_base super; - typedef ::ossie::detail::invoker invoker_type; - template static inline void call_each(Iterator begin, const Iterator end, A1 arg1, A2 arg2) { for (; begin != end; ++begin) { diff --git a/redhawk/src/base/include/ossie/debug.h b/redhawk/src/base/include/ossie/debug.h index 293ffa830..a866c8c91 100644 --- a/redhawk/src/base/include/ossie/debug.h +++ b/redhawk/src/base/include/ossie/debug.h @@ -178,6 +178,17 @@ LOG_##levelname(classname, expression << "; unknown exception") \ } +#define CATCH_RH_EXCEPTION(logger, expression, levelname) \ + catch( std::exception& ex ) { \ + RH_##levelname(logger, expression << "; std::exception info: " << ex.what()) \ + } \ + catch( CORBA::Exception& ex ) { \ + RH_##levelname(logger, expression << "; CORBA::Exception name: " << ex._name()) \ + } \ + catch( ... ) { \ + RH_##levelname(logger, expression << "; unknown exception") \ + } + #define CATCH_LOG_TRACE(classname, expression) CATCH_LOG_EXCEPTION(classname, expression, TRACE) #define CATCH_LOG_DEBUG(classname, expression) CATCH_LOG_EXCEPTION(classname, expression, DEBUG) #define CATCH_LOG_INFO(classname, expression) CATCH_LOG_EXCEPTION(classname, expression, INFO) @@ -185,6 +196,13 @@ #define CATCH_LOG_ERROR(classname, expression) CATCH_LOG_EXCEPTION(classname, expression, ERROR) #define CATCH_LOG_FATAL(classname, expression) CATCH_LOG_EXCEPTION(classname, expression, FATAL) +#define CATCH_RH_TRACE(logger, expression) CATCH_RH_EXCEPTION(logger, expression, TRACE) +#define CATCH_RH_DEBUG(logger, expression) CATCH_RH_EXCEPTION(logger, expression, DEBUG) +#define CATCH_RH_INFO(logger, expression) CATCH_RH_EXCEPTION(logger, expression, INFO) +#define CATCH_RH_WARN(logger, expression) CATCH_RH_EXCEPTION(logger, expression, WARN) +#define CATCH_RH_ERROR(logger, expression) 
CATCH_RH_EXCEPTION(logger, expression, ERROR) +#define CATCH_RH_FATAL(logger, expression) CATCH_RH_EXCEPTION(logger, expression, FATAL) + /* * Provide standardized exception handling for catching and throwing a new exception. */ @@ -202,6 +220,20 @@ throw(newexception); \ } +#define CATCH_THROW_RH_EXCEPTION(logger, expression, levelname, newexception) \ + catch( std::exception& ex ) { \ + RH_##levelname(logger, expression << "; std::exception info: " << ex.what()) \ + throw(newexception); \ + } \ + catch( CORBA::Exception& ex ) { \ + RH_##levelname(logger, expression << "; CORBA::Exception name: " << ex._name()) \ + throw(newexception); \ + } \ + catch( ... ) { \ + RH_##levelname(logger, expression << "; unknown exception") \ + throw(newexception); \ + } + #define CATCH_THROW_LOG_TRACE(classname, expression, newexception) CATCH_THROW_LOG_EXCEPTION(classname, expression, TRACE, newexception) #define CATCH_THROW_LOG_DEBUG(classname, expression, newexception) CATCH_THROW_LOG_EXCEPTION(classname, expression, DEBUG, newexception) #define CATCH_THROW_LOG_INFO(classname, expression, newexception) CATCH_THROW_LOG_EXCEPTION(classname, expression, INFO, newexception) @@ -209,6 +241,13 @@ #define CATCH_THROW_LOG_ERROR(classname, expression, newexception) CATCH_THROW_LOG_EXCEPTION(classname, expression, ERROR, newexception) #define CATCH_THROW_LOG_FATAL(classname, expression, newexception) CATCH_THROW_LOG_EXCEPTION(classname, expression, FATAL, newexception) +#define CATCH_THROW_RH_TRACE(logger, expression, newexception) CATCH_THROW_RH_EXCEPTION(logger, expression, TRACE, newexception) +#define CATCH_THROW_RH_DEBUG(logger, expression, newexception) CATCH_THROW_RH_EXCEPTION(logger, expression, DEBUG, newexception) +#define CATCH_THROW_RH_INFO(logger, expression, newexception) CATCH_THROW_RH_EXCEPTION(logger, expression, INFO, newexception) +#define CATCH_THROW_RH_WARN(logger, expression, newexception) CATCH_THROW_RH_EXCEPTION(logger, expression, WARN, newexception) +#define 
CATCH_THROW_RH_ERROR(logger, expression, newexception) CATCH_THROW_RH_EXCEPTION(logger, expression, ERROR, newexception) +#define CATCH_THROW_RH_FATAL(logger, expression, newexception) CATCH_THROW_RH_EXCEPTION(logger, expression, FATAL, newexception) + /* * Provide standardized exception handling for catching and rethrowing. */ @@ -226,6 +265,20 @@ throw; \ } +#define CATCH_RETHROW_RH_EXCEPTION(logger, expression, levelname) \ + catch( std::exception& ex ) { \ + RH_##levelname(logger, expression << "; std::exception info: " << ex.what()) \ + throw; \ + } \ + catch( CORBA::Exception& ex ) { \ + RH_##levelname(logger, expression << "; CORBA::Exception name: " << ex._name()) \ + throw; \ + } \ + catch( ... ) { \ + RH_##levelname(logger, expression << "; unknown exception") \ + throw; \ + } + #define CATCH_RETHROW_LOG_TRACE(classname, expression) CATCH_RETHROW_LOG_EXCEPTION(classname, expression, TRACE) #define CATCH_RETHROW_LOG_DEBUG(classname, expression) CATCH_RETHROW_LOG_EXCEPTION(classname, expression, DEBUG) #define CATCH_RETHROW_LOG_INFO(classname, expression) CATCH_RETHROW_LOG_EXCEPTION(classname, expression, INFO) @@ -233,6 +286,13 @@ #define CATCH_RETHROW_LOG_ERROR(classname, expression) CATCH_RETHROW_LOG_EXCEPTION(classname, expression, ERROR) #define CATCH_RETHROW_LOG_FATAL(classname, expression) CATCH_RETHROW_LOG_EXCEPTION(classname, expression, FATAL) +#define CATCH_RETHROW_RH_TRACE(logger, expression) CATCH_RETHROW_RH_EXCEPTION(logger, expression, TRACE) +#define CATCH_RETHROW_RH_DEBUG(logger, expression) CATCH_RETHROW_RH_EXCEPTION(logger, expression, DEBUG) +#define CATCH_RETHROW_RH_INFO(logger, expression) CATCH_RETHROW_RH_EXCEPTION(logger, expression, INFO) +#define CATCH_RETHROW_RH_WARN(logger, expression) CATCH_RETHROW_RH_EXCEPTION(logger, expression, WARN) +#define CATCH_RETHROW_RH_ERROR(logger, expression) CATCH_RETHROW_RH_EXCEPTION(logger, expression, ERROR) +#define CATCH_RETHROW_RH_FATAL(logger, expression) CATCH_RETHROW_RH_EXCEPTION(logger, 
expression, FATAL) + /* * Provide a backwards compatible macro. diff --git a/redhawk/src/base/include/ossie/debug/check.h b/redhawk/src/base/include/ossie/debug/check.h new file mode 100644 index 000000000..484531c94 --- /dev/null +++ b/redhawk/src/base/include/ossie/debug/check.h @@ -0,0 +1,28 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_DEBUG_CHECK_H +#define REDHAWK_DEBUG_CHECK_H + +#include + +#define _RH_DEBUG_CHECK(X) assert(X) + +#endif diff --git a/redhawk/src/base/include/ossie/debug/checked_allocator.h b/redhawk/src/base/include/ossie/debug/checked_allocator.h new file mode 100644 index 000000000..fd9f0ca26 --- /dev/null +++ b/redhawk/src/base/include/ossie/debug/checked_allocator.h @@ -0,0 +1,139 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_DEBUG_CHECKED_ALLOCATOR_H +#define REDHAWK_DEBUG_CHECKED_ALLOCATOR_H + +#include +#include + +#include "check.h" + +namespace redhawk { + + namespace debug { + + /** + * @brief Allocator that checks for buffer overruns and underruns. + * + * A custom allocator to perform additional checking for some classes + * of memory error. Upon allocation, padding is added on the front and + * back of the buffer and filled with a known pattern. When the memory + * is deallocated, the front and back pads are checked to ensure that + * they were not overwritten (or at least, are statistically unlikely + * to have been). If the pattern has been disturbed, the allocator + * aborts the program. + * + * This allocator is intended for development-time use only and should + * never be used in production systems. 
+ */ + template + struct checked_allocator + { + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + + template + struct rebind { + typedef checked_allocator other; + }; + + pointer allocate(size_type count, const void* = 0) + { + // Allocate enough total bytes to accomodate the requested number + // of elements plus the "magic number" pads on the front and back + size_type bytes = count * sizeof(T); + bytes += 2 * MAGIC_COUNT * sizeof(magic_type); + void* data = ::operator new(bytes); + + // Mark the front and back pads with the known pattern and adjust + // the returned pointer to just after the front pad + magic_type* front = static_cast(data); + T* result = _M_mark_block(front); + magic_type* back = _M_get_back(result, count); + _M_mark_block(back); + return result; + } + + void deallocate(pointer ptr, size_type count) + { + // Adjust the pointer to the start of the front pad, which was the + // beginning of the original allocation + magic_type* front = _M_get_front(ptr); + + // Check that neither the front or the back pad has been written to + _RH_DEBUG_CHECK(_M_is_unmodified(front)); + magic_type* back = _M_get_back(ptr, count); + _RH_DEBUG_CHECK(_M_is_unmodified(back)); + + ::operator delete(front); + } + + private: + // Use two 64-bit ints with a known pattern to mark the front and back + // pads; this maintains the original alignment up to 16 bytes + typedef int64_t magic_type; + static const magic_type MAGIC_NUMBER = 0xfeedfacedeadbeef; + static const size_t MAGIC_COUNT = 2; + + // Writes the known pattern to the given pad + T* _M_mark_block(magic_type* ptr) + { + for (size_t ii = 0; ii < MAGIC_COUNT; ++ii) { + *ptr++ = MAGIC_NUMBER; + } + return reinterpret_cast(ptr); + } + + // Recovers the front pad from a data pointer + magic_type* _M_get_front(T* ptr) + { + magic_type* result = 
reinterpret_cast(ptr); + return (result - MAGIC_COUNT); + } + + // Given data pointer and number of elements, returns the rear pad + magic_type* _M_get_back(T* ptr, size_t count) + { + return reinterpret_cast(ptr + count); + } + + // Checks whether the known patter in a pad has been disturbed + bool _M_is_unmodified(magic_type* ptr) + { + for (size_t ii = 0; ii < MAGIC_COUNT; ++ii) { + if (ptr[ii] != MAGIC_NUMBER) { + return false; + } + } + return true; + } + }; + + } // namespace debug + +} // namespace redhawk + +#endif // _CHECKED_ALLOCATOR_HH_ diff --git a/redhawk/src/base/include/ossie/debug/checked_iterator.h b/redhawk/src/base/include/ossie/debug/checked_iterator.h new file mode 100644 index 000000000..d6b0a2532 --- /dev/null +++ b/redhawk/src/base/include/ossie/debug/checked_iterator.h @@ -0,0 +1,389 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +/** + * @file ossie/debug/checked_iterator.h + * + * Checked iterator classes for conditionally enabling additional run-time + * checking, and to an extent stricter compile-time checking. 
+ * + * Inspired by GNU's C++ safe_iterator extensions, but limited in scope to + * avoid imposing any binary changes on the sequences. This allows users to + * selectively enable this feature in a compilation unit without affecting + * other code that may have been compiled with the feature disabled. + * + * Checked iterators are intended for development-time use only and should + * never be used in production systems. + */ + +#ifndef REDHAWK_DEBUG_CHECKED_ITERATOR_H +#define REDHAWK_DEBUG_CHECKED_ITERATOR_H + +#include + +#include "check.h" + +namespace redhawk { + + namespace debug { + + /** + * @brief Base checked iterator wrapper. + * + * Aids in comparing const and non-const iterators, providing a common + * base class and a method to verify both point to the same sequence. + */ + template + class checked_iterator_base { + public: + + typedef Sequence sequence_type; + + /** + * @brief Checks if this iterator can be compared to another. + * @param other Iterator to compare against. + * @return true if the iterators can be compared. + * + * Checked iterators are considered comparable if they belong to + * the same sequence. + */ + bool _M_can_compare(const checked_iterator_base& other) const + { + return _M_sequence == other._M_sequence; + } + + protected: + checked_iterator_base(const sequence_type* sequence) : + _M_sequence(sequence) + { + } + + // Owning sequence + const sequence_type* _M_sequence; + }; + + /** + * @brief Checked iterator wrapper. + * + * Iterator wrapper class that performs runtime validity checks on the + * underlying iterator instance. 
+ */ + template + class checked_iterator : public checked_iterator_base { + + typedef std::iterator_traits _Traits; + + public: + typedef Iterator iterator_type; + typedef Sequence sequence_type; + typedef typename _Traits::difference_type difference_type; + typedef typename _Traits::value_type value_type; + typedef typename _Traits::reference reference; + typedef typename _Traits::pointer pointer; + typedef typename _Traits::iterator_category iterator_category; + + /** + * @brief Create a %checked_iterator. + * @param current Underlying iterator. + * @param sequence Containing sequence. + */ + checked_iterator(iterator_type current, const sequence_type* sequence) : + checked_iterator_base(sequence), + _M_current(current) + { + } + + /** + * @brief Copy constructor from a mutable iterator to a constant + * iterator. + * + * If used in the opposite direction (copy from const to mutable), + * the initialization of _M_current will fail. This is intentional, + * and easier than conditionally disabling this overload. + */ + template + checked_iterator(const checked_iterator& other): + checked_iterator_base(other), + _M_current(other.base()) + { + } + + /** + * @brief Iterator dereference. + * @pre iterator is dereferenceable. + */ + reference operator*() const + { + _RH_DEBUG_CHECK(_M_can_dereference()); + return *_M_current; + } + + /** + * @brief Iterator dereference. + * @pre iterator is dereferenceable. + */ + pointer operator->() const + { + _RH_DEBUG_CHECK(_M_can_dereference()); + return &*_M_current; + } + + /** + * @brief Iterator pre-increment. + * @pre iterator is incrementable. + */ + checked_iterator operator++(int) + { + // Can't increment past the end + _RH_DEBUG_CHECK(!_M_is_end()); + checked_iterator tmp(*this); + ++_M_current; + return tmp; + } + + /** + * @brief Iterator post-increment. + * @pre iterator is incrementable. 
+ */ + checked_iterator& operator++() + { + // Can't increment past the end + _RH_DEBUG_CHECK(!_M_is_end()); + ++_M_current; + return *this; + } + + /** + * @brief Iterator pre-decrement. + * @pre iterator is decrementable. + */ + checked_iterator operator--(int) + { + // Can't decrement past the first element + _RH_DEBUG_CHECK(!_M_is_begin()); + checked_iterator tmp(*this); + --_M_current; + return tmp; + } + + /** + * @brief Iterator post-decrement. + * @pre iterator is decrementable. + */ + checked_iterator& operator--() + { + // Can't decrement past the first element + _RH_DEBUG_CHECK(!_M_is_begin()); + --_M_current; + return *this; + } + + /** + * @brief Indexed iterator dereference. + * @param index Index of element to dereference. + * @pre iterator plus index does not exceed bounds of sequence. + */ + reference operator[](const difference_type& index) const + { + _RH_DEBUG_CHECK(_M_can_increment(index+1)); + return _M_current[index]; + } + + /** + * @brief Iterator in-place add. + * @param offset Number of elements to advance. + * @pre iterator plus offset does not go past the sequence's end(). + */ + checked_iterator& operator+=(const difference_type& offset) + { + _RH_DEBUG_CHECK(_M_can_increment(offset)); + _M_current += offset; + return *this; + } + + /** + * @brief Iterator add. + * @param offset Number of elements to advance. + * @return Iterator advanced by offset elements. + * @pre iterator plus offset does not go past the sequence's end(). + */ + checked_iterator operator+(const difference_type& offset) const + { + checked_iterator result(*this); + result += offset; + return result; + } + + /** + * @brief Iterator in-place subtract. + * @param offset Number of elements to reverse. + * @pre iterator minus offset does not go past the sequence's + * begin(). + */ + checked_iterator& operator-=(const difference_type& offset) + { + _RH_DEBUG_CHECK(_M_can_decrement(offset)); + _M_current -= offset; + return *this; + } + + /** + * @brief Iterator subtract. 
+ * @param offset Number of elements to reverse. + * @return Iterator moved backwards by offset elements. + * @pre iterator minus offset does not go past the sequence's + * begin(). + */ + checked_iterator operator-(const difference_type& offset) const + { + checked_iterator result(*this); + result -= offset; + return result; + } + + /** + * @brief Return the underlying iterator. + */ + iterator_type base() const + { + return _M_current; + } + + /** + * @brief Conversion to underlying iterator type. + * + * Allows implicit conversions in expressions. + */ + operator iterator_type() const + { + return _M_current; + } + + private: + // Predicate to check if this iterator is at the beginning of the + // sequence + bool _M_is_begin() const + { + return *this == this->_M_sequence->begin(); + } + + // Predicate to check if this iterator is at the end of the + // sequence + bool _M_is_end() const + { + return *this == this->_M_sequence->end(); + } + + // Predicate to check if this iterator can be dereferenced (i.e., + // can use operator*); the only condition that is checked is that + // it is not at the end, under the assumption that it cannot be + // outside the range [begin, end), otherwise a prior check would + // have failed + bool _M_can_dereference() const + { + return !_M_is_end(); + } + + // Checks whether this iterator can be incremented by the given + // number of elements + bool _M_can_increment(int count) const + { + typedef typename Sequence::const_iterator const_iterator_type; + return std::distance(*this, this->_M_sequence->end()) >= count; + } + + // Checks whether this iterator can be decremented by the given + // number of elements + bool _M_can_decrement(int count) const + { + typedef typename Sequence::const_iterator const_iterator_type; + return std::distance(this->_M_sequence->begin(), this) >= count; + } + + // Underlying iterator value + iterator_type _M_current; + }; + + // The operators below are templatized for two iterator types instead + // 
of just one to support operations between const and non-const + // iterators. + + template + inline bool operator==(const checked_iterator& lhs, + const checked_iterator& rhs) + { + _RH_DEBUG_CHECK(lhs._M_can_compare(rhs)); + return lhs.base() == rhs.base(); + } + + template + inline bool operator!=(const checked_iterator& lhs, + const checked_iterator& rhs) + { + _RH_DEBUG_CHECK(lhs._M_can_compare(rhs)); + return lhs.base() != rhs.base(); + } + + template + inline bool operator<(const checked_iterator& lhs, + const checked_iterator& rhs) + { + _RH_DEBUG_CHECK(lhs._M_can_compare(rhs)); + return lhs.base() < rhs.base(); + } + + template + inline bool operator<=(const checked_iterator& lhs, + const checked_iterator& rhs) + { + _RH_DEBUG_CHECK(lhs._M_can_compare(rhs)); + return lhs.base() <= rhs.base(); + } + + template + inline bool operator>(const checked_iterator& lhs, + const checked_iterator& rhs) + { + _RH_DEBUG_CHECK(lhs._M_can_compare(rhs)); + return lhs.base() > rhs.base(); + } + + template + inline bool operator>=(const checked_iterator& lhs, + const checked_iterator& rhs) + { + _RH_DEBUG_CHECK(lhs._M_can_compare(rhs)); + return lhs.base() >= rhs.base(); + } + + template + inline typename checked_iterator::difference_type + operator-(const checked_iterator& lhs, + const checked_iterator& rhs) + { + _RH_DEBUG_CHECK(lhs._M_can_compare(rhs)); + return lhs.base() - rhs.base(); + } + + } // namespace debug + +} // namespace redhawk + +#endif // _CHECKED_ITERATOR_HH_ diff --git a/redhawk/src/base/include/ossie/internal/equals.h b/redhawk/src/base/include/ossie/internal/equals.h index 04c0678b9..7b84d7c04 100644 --- a/redhawk/src/base/include/ossie/internal/equals.h +++ b/redhawk/src/base/include/ossie/internal/equals.h @@ -21,6 +21,8 @@ #ifndef OSSIE_INTERNAL_EQUALS_H #define OSSIE_INTERNAL_EQUALS_H +#include + #include /* diff --git a/redhawk/src/base/include/ossie/internal/message_traits.h b/redhawk/src/base/include/ossie/internal/message_traits.h new file mode 
100644 index 000000000..fc3b22972 --- /dev/null +++ b/redhawk/src/base/include/ossie/internal/message_traits.h @@ -0,0 +1,93 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_INTERNAL_MESSAGE_TRAITS_H +#define REDHAWK_INTERNAL_MESSAGE_TRAITS_H + +#include + +namespace redhawk { + + namespace internal { + + // Templatized traits struct to distinguish between structs that have a + // static format() method (REDHAWK 2.1+) and those that do not (2.0 and + // prior. + template + struct has_format + { + typedef ::boost::type_traits::no_type no_type; + typedef ::boost::type_traits::yes_type yes_type; + template struct type_check; + + template + static yes_type& check(type_check*); + + template + static no_type& check(...); + + static bool const value = (sizeof(check(0)) == sizeof(yes_type)); + }; + + // Base message traits for structs that do not include a static + // getFormat() method. The format is always an empty string, to adapt + // old structs to work with newer messaging code. 
+ template + struct message_traits_base + { + static const char* format() + { + return ""; + } + }; + + // Base message traits for structs that have a static getFormat() + // method. + template + struct message_traits_base >::type> + { + static const char* format() + { + return T::getFormat(); + } + }; + + // Traits class to adapt multiple levels of REDHAWK-generated struct + // classes used in messaging. Provides a consistent interface for + // getting a format string or message ID, and serialization. + template + struct message_traits : public message_traits_base { + static void serialize(CORBA::Any& any, const void* data) + { + any <<= *(reinterpret_cast(data)); + } + + static inline std::string getId(const T& message) + { + // Workaround for older components whose structs have a non-const, + // non-static member function getId(): const_cast the value + return const_cast(message).getId(); + } + }; + } + +} + +#endif // REDHAWK_INTERNAL_MESSAGE_TRAITS_H diff --git a/redhawk/src/base/include/ossie/logging/loghelpers.h b/redhawk/src/base/include/ossie/logging/loghelpers.h index 58faa1f14..1ce71827f 100644 --- a/redhawk/src/base/include/ossie/logging/loghelpers.h +++ b/redhawk/src/base/include/ossie/logging/loghelpers.h @@ -39,6 +39,11 @@ namespace ossie namespace logging { + // resolve logging config uri from command line + std::string ResolveLocalUri( const std::string &logfile_uri, + const std::string &rootPath, + std::string &validated_uri ); + std::string GetDeviceMgrPath( const std::string &dm, const std::string &node ); diff --git a/redhawk/src/base/include/ossie/logging/rh_logger.h b/redhawk/src/base/include/ossie/logging/rh_logger.h index 7a0173def..a2962893c 100644 --- a/redhawk/src/base/include/ossie/logging/rh_logger.h +++ b/redhawk/src/base/include/ossie/logging/rh_logger.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -213,6 +214,8 @@ namespace rh_logger { public: + static const std::string USER_LOGS; + static 
const std::string SYSTEM_LOGS; // // Logging record events // @@ -254,6 +257,10 @@ namespace rh_logger { // static LoggerPtr getLogger( const std::string &name ); static LoggerPtr getLogger( const char *name ); + static LoggerPtr getNewHierarchy( const std::string &name ); + + virtual LoggerPtr getInstanceLogger( const std::string &name ); + LoggerPtr getChildLogger( const std::string &logname, const std::string &ns=USER_LOGS ); static LoggerPtr getResourceLogger( const std::string &name ); static const std::string &getResourceLoggerName(); @@ -284,7 +291,6 @@ namespace rh_logger { virtual void debug( const std::string &msg ); virtual void trace( const std::string &msg ); - virtual bool isFatalEnabled() const; virtual bool isErrorEnabled() const; virtual bool isWarnEnabled() const; @@ -313,6 +319,8 @@ namespace rh_logger { virtual void appendLogRecord( const LogRecord &rec); virtual void appendLogRecord( const LevelPtr &level, const std::string &msg); + virtual void configureLogger(const std::string &configuration, bool root_reset=false, int level=-1); + // // Set the logging event history limit // @@ -322,7 +330,13 @@ namespace rh_logger { // Get the logging event history limit // virtual size_t getLogRecordLimit(); - + + virtual std::vector getNamedLoggers(); + + virtual bool isLoggerInHierarchy(const std::string& search_name); + + virtual void* getUnderlyingLogger(); + protected: Logger( const char *name ); diff --git a/redhawk/src/base/include/ossie/prop_helpers.h b/redhawk/src/base/include/ossie/prop_helpers.h index 6e60d4555..17d91be10 100644 --- a/redhawk/src/base/include/ossie/prop_helpers.h +++ b/redhawk/src/base/include/ossie/prop_helpers.h @@ -30,6 +30,35 @@ #endif #include "CF/cf.h" +#include +#include + +namespace redhawk { + namespace time { + namespace utils { + CF::UTCTime create( const double wholeSecs=-1.0, const double fractionalSecs=-1.0 ); + + // year:month:day::hour:minute:second + CF::UTCTime convert( const std::string formatted ); + + /* + * 
Create a time stamp object from the current time of day reported by the system + */ + CF::UTCTime now(); + + /* + * Create a time stamp object from the current time of day reported by the system + */ + CF::UTCTime notSet(); + + /* + * Adjust the whole and fractional portions of a time stamp object to + * ensure there is no fraction in the whole seconds, and vice-versa + */ + void normalize(CF::UTCTime& time); + } + } +} namespace ossie { @@ -53,6 +82,20 @@ namespace ossie } } + class badConversion : public std::runtime_error { + public: + badConversion(std::string value, std::string type) : std::runtime_error("Unable to perform conversion"), _value(value), _type(type) {}; + ~badConversion() throw() {}; + virtual const char* what() const throw() + { + std::ostringstream _msg; + _msg << std::runtime_error::what() << ": '"<<_value<<"' to type '"<<_type << "'"; + return _msg.str().c_str(); + }; + private: + std::string _value, _type; + }; + template double perform_math(double operand, T propval, std::string& math) { @@ -101,6 +144,8 @@ namespace ossie std::string any_to_string(const CORBA::Any& value); CORBA::Any strings_to_any(const std::vector& values, CORBA::TCKind kind); + CORBA::Any strings_to_any(const std::vector& values, CORBA::TCKind kind, CORBA::TypeCode_ptr type); + CORBA::BooleanSeq* strings_to_boolean_sequence(const std::vector &values); CORBA::CharSeq* strings_to_char_sequence(const std::vector &values); CORBA::DoubleSeq* strings_to_double_sequence(const std::vector &values); @@ -113,12 +158,25 @@ namespace ossie CORBA::ULongSeq* strings_to_unsigned_long_sequence(const std::vector &values); CORBA::ULongLongSeq* strings_to_unsigned_long_long_sequence(const std::vector &values); CORBA::StringSeq* strings_to_string_sequence(const std::vector &values); + CF::UTCTimeSequence* strings_to_utctime_sequence(const std::vector &values); + + CF::complexBooleanSeq* strings_to_complex_boolean_sequence(const std::vector &values); + CF::complexCharSeq* 
strings_to_complex_char_sequence(const std::vector &values); + CF::complexDoubleSeq* strings_to_complex_double_sequence(const std::vector &values); + CF::complexFloatSeq* strings_to_complex_float_sequence(const std::vector &values); + CF::complexShortSeq* strings_to_complex_short_sequence(const std::vector &values); + CF::complexLongSeq* strings_to_complex_long_sequence(const std::vector &values); + CF::complexLongLongSeq* strings_to_complex_long_long_sequence(const std::vector &values); + CF::complexOctetSeq* strings_to_complex_octet_sequence(const std::vector &values); + CF::complexUShortSeq* strings_to_complex_unsigned_short_sequence(const std::vector &values); + CF::complexULongSeq* strings_to_complex_unsigned_long_sequence(const std::vector &values); + CF::complexULongLongSeq* strings_to_complex_unsigned_long_long_sequence(const std::vector &values); CORBA::TCKind getTypeKind(std::string type); CORBA::TypeCode_ptr getTypeCode(std::string type); CORBA::TypeCode_ptr getTypeCode(CORBA::TCKind kind, std::string structName); - CF::Properties getNonNilProperties(CF::Properties& originalProperties); - CF::Properties getNonNilConfigureProperties(CF::Properties& originalProperties); + CF::Properties getNonNilProperties(const CF::Properties& originalProperties); + CF::Properties getNonNilConfigureProperties(const CF::Properties& originalProperties); } #endif diff --git a/redhawk/src/base/include/ossie/refcount_memory.h b/redhawk/src/base/include/ossie/refcount_memory.h new file mode 100644 index 000000000..28bd0b736 --- /dev/null +++ b/redhawk/src/base/include/ossie/refcount_memory.h @@ -0,0 +1,395 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_REFCOUNT_MEMORY_H +#define REDHAWK_REFCOUNT_MEMORY_H + +#include "shm/Allocator.h" + +namespace redhawk { + + namespace detail { + // Atomically increments a reference count. + // Abstracts away the platform-specific aspects of atomic operations + // for use in reference counting. + static inline void add_reference(int* counter) + { +#ifdef __ATOMIC_RELAXED + // Adding a reference can use the relaxed memory model because no + // further action needs to be taken based on the reference count + // (at least one reference exists already). + __atomic_add_fetch(counter, 1, __ATOMIC_RELAXED); +#else + // In GCC 4.4, the atomic built-ins are a full memory barrier. + __sync_add_and_fetch(counter, 1); +#endif + } + + // Atomically decrements a reference count, returning the new value. + // Abstracts away the platform-specific aspects of atomic operations + // for use in reference counting. + static inline int remove_reference(int* counter) + { +#ifdef __ATOMIC_ACQ_REL + // Technically, the subtraction just requires a release barrier (to + // ensure no prior operations can be moved after it), with an + // acquire barrier only if the reference count is 0 (so that the + // destructor code is not moved before it). 
In practice, using an + // acquire-release barrier does not appear to make much difference + // (on x86, at least) and is always safe (because it it stricter). + return __atomic_sub_fetch(counter, 1, __ATOMIC_ACQ_REL); +#else + // In GCC 4.4, the atomic built-ins are a full memory barrier. + return __sync_sub_and_fetch(counter, 1); +#endif + } + + // Special tag class for explicitly constructing a buffer that is known + // to have been allocated by REDHAWK's shared memory allocator. + struct process_shared_tag { }; + + // Traits class for determining whether an allocator provides memory + // that can be shared between processes (default: false). + template + struct is_process_shared { + static const bool value = false; + }; + + // Traits class specialization for REDHAWK's shared memory allocator + template + struct is_process_shared< ::redhawk::shm::Allocator > { + static const bool value = true; + }; + + // Traits class specialization for REDHAWK's hybrid memory allocator + template + struct is_process_shared< ::redhawk::shm::HybridAllocator > { + static const bool value = true; + }; + } + + /** + * @brief Reference-counted memory implementation similar to, but simpler + * than, Boost/C++11's shared_ptr. + * + * %refcount_memory is a utility class that manages allocated memory with a + * reference count. When the last reference to a block of allocated memory + * expires, the memory is deallocated. + * + * This class is designed to be used as part of a more sophisticated data + * structure like redhawk::shared_buffer to manage the reference counting + * and customizable deallocation. + */ + class refcount_memory + { + public: + /** + * @brief Construct an empty %refcount_memory. + */ + refcount_memory() : + _M_impl(0) + { + } + + /** + * @brief Construct a %refcount_memory with an existing pointer. + * @param ptr Pointer to first element. + * @param count Number of elements. + * + * The newly-created %refcount_memory takes ownership of @a data. 
When + * the last %refcount_memory pointing to @a data is destroyed, @a data + * will be deleted with delete[]. + */ + template + refcount_memory(T* ptr, size_t count) : + _M_impl(new impl(ptr, count)) + { + } + + /** + * @brief Construct a %refcount_memory and allocate memory. + * @param count Number of elements to allocate. + * @param alloc STL-compliant allocator. + + * Creates an internal copy of @a alloc and allocates @a count elements + * in an exception-safe manner. When the last %refcount_memory pointing + * to the allocated memory is destroyed, the memory will be deallocated + * via the internal copy of @a alloc. + */ + template + refcount_memory(size_t count, const Alloc& alloc) : + _M_impl(_M_allocate(count, alloc)) + { + } + + /** + * @brief Construct a %refcount_memory with an existing pointer and a + * custom deleter. + * @param ptr Pointer to first element. + * @param count Number of elements. + * @param deleter Callable object. + * + * @a D must by copy-constructible. When the last %refcount_memory + * pointing to @a ptr is destroyed, @a deleter will be called on + * @a ptr. This can be used to define custom release behavior. + */ + template + refcount_memory(T* ptr, size_t count, D deleter) : + _M_impl(new func_impl(ptr, count, deleter, false)) + { + } + + /** + * @brief Construct a %refcount_memory with an existing pointer known + * to be allocated from process-shared memory. + * @param ptr Pointer to first element. + * @param count Number of elements. + * @param deleter Callable object. + * @param tag Indicates that @a ptr is in process-shared memory. + * + * @warning This constructor is intended for internal use only. + */ + template + refcount_memory(T* ptr, size_t count, D deleter, detail::process_shared_tag) : + _M_impl(new func_impl(ptr, count, deleter, true)) + { + } + + /** + * @brief %refcount_memory copy constructor. + * @param other Another %refcount_memory. 
+ * + * %refcount_memory has reference semantics; after construction, this + * instance shares the underlying memory, increasing its reference + * count by one. + */ + refcount_memory(const refcount_memory& other) : + _M_impl(other._M_impl) + { + if (_M_impl) { + detail::add_reference(&(_M_impl->refcount)); + } + } + + /** + * The dtor decrements reference count of the allocated memory. If no + * other %refcount_memory points to the memory it is deallocated. + */ + ~refcount_memory() + { + if (_M_impl) { + if (detail::remove_reference(&(_M_impl->refcount)) == 0) { + delete _M_impl; + } + } + } + + /** + * @brief %refcount_memory assignment operator. + * @param other Another %refcount_memory. + * + * %refcount_memory has reference semantics; after assignment, this + * instance shares the underlying memory, increasing its reference + * count by one. The prior memory is released; if this was the last + * reference, the memory is deallocated. + */ + refcount_memory& operator=(const refcount_memory& other) + { + // Use copy constructor and swap to handle reference count + refcount_memory temp(other); + this->swap(temp); + return *this; + } + + /** + * Returns true if the allocated memory is from process-shared memory. + */ + bool is_process_shared() const + { + if (_M_impl) { + return _M_impl->shared; + } else { + return false; + } + } + + /** + * Returns the base address of the allocated memory. + */ + const void* address() const + { + if (_M_impl) { + return _M_impl->addr; + } else { + return 0; + } + } + + /** + * Returns the size of the allocated memory, in bytes. + */ + size_t bytes() const + { + if (_M_impl) { + return _M_impl->bytes; + } else { + return 0; + } + } + + /** + * Validity check, returning true if this object contains no allocated + * memory. + */ + bool operator! () const + { + return !_M_impl; + } + + /** + * @brief Swaps contents with another %refcount_memory. + * @param other %refcount_memory to swap with. 
+ */ + void swap(refcount_memory& other) + { + std::swap(_M_impl, other._M_impl); + } + + private: + /// @cond IMPL + + // Base implementation of the reference counted memory block, storing + // the reference count and memory address along with a function pointer + // to a deleter, which subclasses may provide. Compared with virtual + // functions, this approach greatly reduces the number of symbols + // created per type, and makes the in-memory layout more predicatable + // should cache alignment be a concern. + struct impl { + typedef void (*release_func)(impl*); + + template + impl(T* ptr, size_t count, release_func func=&impl::delete_release, bool shared=false) : + refcount(1), + addr(ptr), + bytes(count * sizeof(T)), + release(func), + shared(shared) + { + } + + ~impl() + { + if (addr) { + release(this); + } + } + + template + static void delete_release(impl* imp) + { + T* ptr = reinterpret_cast(imp->addr); + delete[] ptr; + } + + int refcount; + void* addr; + size_t bytes; + release_func release; + bool shared; + }; + + // Data buffer implementation that uses an arbitrary function to + // release the memory; may be a function pointer or functor. + template + struct func_impl : public impl { + template + func_impl(T* ptr, size_t count, Func func, bool shared) : + impl(ptr, count, &func_impl::func_release, shared), + func(func) + { + } + + template + static void func_release(impl* imp) + { + T* ptr = reinterpret_cast(imp->addr); + static_cast(imp)->func(ptr); + } + + Func func; + }; + + // Data buffer implementation that inherits from an STL-compliant + // allocator class; used below for exception-safe allocation, as well + // as deallocation when the memory is released. 
+ template + struct allocator_impl : public impl, public Alloc + { + typedef typename Alloc::value_type value_type; + + allocator_impl(value_type* ptr, size_t count, const Alloc& allocator) : + impl(ptr, count, &allocator_impl::allocator_release, detail::is_process_shared::value), + Alloc(allocator) + { + } + + static void allocator_release(impl* imp) + { + value_type* ptr = reinterpret_cast(imp->addr); + size_t count = imp->bytes / sizeof(value_type); + allocator_impl* alloc = static_cast(imp); + alloc->deallocate(ptr, count); + } + }; + + // Implementation of allocating constructor; creates an allocator-based + // data buffer implementation in an exception-safe way which can then + // be passed to the base class constructor. + template + static impl* _M_allocate(size_t count, const Alloc& allocator) + { + // Zero-length buffer requires no allocation, so don't bother with + // an implementation in the first place. + if (count == 0) { + return 0; + } + + // Create an empty allocator_impl instance first, then try to + // allocate the memory, using the fact that it inherits from the + // allocator. + typedef allocator_impl impl_type; + impl_type* imp = new impl_type(0, count, allocator); + try { + imp->addr = imp->allocate(count); + } catch (...) { + // If allocation throws an exception (most likely, std::bad_alloc), + // delete the implementation to avoid a memory leak, and rethrow. + delete imp; + throw; + } + return imp; + } + + impl* _M_impl; + /// @endcond + }; +} + +#endif // REDHAWK_REFCOUNT_MEMORY_H diff --git a/redhawk/src/base/include/ossie/shared_buffer.h b/redhawk/src/base/include/ossie/shared_buffer.h new file mode 100644 index 000000000..5f8788708 --- /dev/null +++ b/redhawk/src/base/include/ossie/shared_buffer.h @@ -0,0 +1,957 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_SHARED_BUFFER_H +#define REDHAWK_SHARED_BUFFER_H + +#include +#include + +#include "refcount_memory.h" +#include "shm/Allocator.h" + +#ifdef _RH_SHARED_BUFFER_DEBUG +#include "debug/check.h" +#include "debug/checked_allocator.h" +#include "debug/checked_iterator.h" + +#define _RH_SHARED_BUFFER_CHECK(X) _RH_DEBUG_CHECK(X) +#else // !_RH_SHARED_BUFFER_DEBUG +#define _RH_SHARED_BUFFER_CHECK(X) +#endif + +namespace redhawk { + + // Forward declaration of read/write buffer class. + template + class buffer; + + /** + * @brief An immutable container that can share its data with other + * instances. + * + * The %shared_buffer class provides read-only access to a sized array of + * elements that can be shared between many buffer instances. This enables + * the transfer of ownership of data without explicit management of + * references. + * + * shared_buffers have reference semantics. Assignment and copy construction + * do not copy any elements, only the data pointer. A %shared_buffer never + * peforms any memory allocation of its own, but can take ownership of an + * existing array. When the last reference to the underlying data goes + * away, the data is freed. + * + * For write access and memory allocation, see buffer. 
+ */ + template + class shared_buffer { + public: + /// @brief The element type (T). + typedef T value_type; + /** + * @brief A random access iterator to const value_type. + * + * Note that all access to %shared_buffer is const. This means that + * %iterator and %const_iterator are equivalent. + */ +#ifdef _RH_SHARED_BUFFER_DEBUG + typedef ::redhawk::debug::checked_iterator iterator; +#else + typedef const value_type* iterator; +#endif + /** + * @brief A random access iterator to const value_type. + * + * Note that all access to %shared_buffer is const. This means that + * %iterator and %const_iterator are equivalent. + */ + typedef iterator const_iterator; + + /** + * @brief Construct an empty %shared_buffer. + */ + shared_buffer() : + _M_memory(), + _M_start(0), + _M_finish(0) + { + } + + /** + * @brief Construct a %shared_buffer with an existing pointer. + * @param data Pointer to first element. + * @param size Number of elements. + * + * The newly-created %shared_buffer takes ownership of @a data. When + * the last %shared_buffer pointing to @a data is destroyed, @a data + * will be deleted with delete[]. + */ + shared_buffer(value_type* data, size_t size) : + _M_memory(data, size), + _M_start(data), + _M_finish(data + size) + { + } + + /** + * @brief Construct a %shared_buffer with an existing pointer and a + * custom deleter. + * @param data Pointer to first element. + * @param size Number of elements. + * @param deleter Callable object. + * + * @a D must by copy-constructible. When the last %shared_buffer + * pointing to @a data is destroyed, @a deleter will be called on + * @a data. This can be used to define custom release behavior. + */ + template + shared_buffer(value_type* data, size_t size, D deleter) : + _M_memory(data, size, deleter), + _M_start(data), + _M_finish(data + size) + { + } + + /** + * @brief Construct a %shared_buffer with an existing pointer known to + * be allocated from process-shared memory. + * @param data Pointer to first element. 
+ * @param size Number of elements. + * @param deleter Callable object. + * @param tag Indicates that @a data is in process-shared memory. + * + * @warning This constructor is intended for internal use only. + */ + template + shared_buffer(value_type* data, size_t size, D deleter, detail::process_shared_tag tag) : + _M_memory(data, size, deleter, tag), + _M_start(data), + _M_finish(data + size) + { + } + + /** + * @brief %shared_buffer copy constructor. + * @param other A %shared_buffer of identical element type. + * + * %shared_buffer has reference semantics; after construction, this + * instance shares the underlying data, increasing its reference count + * by one. + */ + shared_buffer(const shared_buffer& other) : + _M_memory(other._M_memory), + _M_start(other._M_start), + _M_finish(other._M_finish) + { + } + + /** + * The dtor releases the %shared_buffer's shared data pointer. If no + * other %shared_buffer points to the data, the data is released. + */ + ~shared_buffer() + { + } + + /** + * @brief %shared_buffer assignment operator. + * @param other A %shared_buffer of identical element type. + * + * %shared_buffer has reference semantics; after assignment, this + * instance shares the underlying data, increasing its reference count + * by one. The prior data pointer is released, deleting the data if + * this was the last reference. + */ + shared_buffer& operator=(const shared_buffer& other) + { + // Use copy constructor and swap to handle reference count + shared_buffer temp(other); + this->swap(temp); + return *this; + } + + /** + * Returns a read-only iterator that points to the first element in the + * %shared_buffer. + */ + iterator begin() const + { + return _M_iterator(this->_M_begin()); + } + + /** + * Returns a read-only iterator that points one past the last element + * in the %shared_buffer. + */ + iterator end() const + { + return _M_iterator(this->_M_end()); + } + + /** + * @brief Subscript access to the data contained in the %shared_buffer. 
+ * @param index The index of the element to access + * @return Read-only reference to element + */ + const value_type& operator[] (size_t index) const + { + return this->_M_index(index); + } + + /** + * Returns the number of elements in the %shared_buffer. + */ + size_t size() const + { + return size_t(this->_M_finish - this->_M_start); + } + + /** + * Returns true if the %shared_buffer is empty. + */ + bool empty() const + { + return this->begin() == this->end(); + } + + /** + * Returns a reference to the backing memory object. + * + * @warning This method is intended for internal use only. + */ + const refcount_memory& get_memory() const + { + return _M_memory; + } + + /** + * Returns a read-only pointer to the first element. + */ + const value_type* data() const + { + return this->_M_start; + } + + /** + * @brief Returns a %shared_buffer containing a subset of elements. + * @param start Index of first element. + * @param end Index of last element, exclusive (default end). + * @return The new %shared_buffer. + */ + shared_buffer slice(size_t start, size_t end=size_t(-1)) const + { + shared_buffer result(*this); + result.trim(start, end); + return result; + } + + /** + * @brief Adjusts the start and end indices of this %shared_buffer. + * @param start Index of first element. + * @param end Index of last element, exclusive (default end). + */ + void trim(size_t start, size_t end=size_t(-1)) + { + if (end == (size_t)-1) { + end = this->size(); + } + this->_M_trim(this->_M_begin() + start, this->_M_begin() + end); + } + + /** + * @brief Adjusts the beginning of this %shared_buffer. + * @param start Iterator to first element. + */ + void trim(iterator first) + { + this->_M_trim(first, end()); + } + + /** + * @brief Adjusts the beginning and end of this %shared_buffer. + * @param start Iterator to first element. + * @param end Iterator to last element, exclusive. 
+ */ + void trim(iterator first, iterator last) + { + this->_M_trim(first, last); + } + + /** + * @brief Returns a copy of this %shared_buffer. + * + * The returned %buffer points to a newly-allocated array. + */ + buffer copy() const + { + buffer result(this->size()); + this->_M_copy(result); + return result; + } + + /** + * @brief Returns a copy of this %shared_buffer. + * @param allocator An allocator object. + * + * The returned %buffer points to a new array allocated with + * @a allocator. @a allocator must be copy-constructible. + */ + template + buffer copy(const Alloc& allocator) const + { + buffer result(this->size(), allocator); + this->_M_copy(result); + return result; + } + + /** + * @brief Swap contents with another %shared_buffer. + * @param other %shared_buffer to swap with. + */ + void swap(shared_buffer& other) + { + this->_M_swap(other); + } + + /** + * @brief Reinterpret a shared_buffer as another data type. + * @param other %shared_buffer to reinterpret. + * @return The new %shared_buffer. + * + * Data is reinterpreted by standard C++ reinterpret_cast semantics. + * The size of the new %shared_buffer is the floor of the size of + * @a other multiplied by the ratio sizeof(U)/sizeof(T). + */ + template + static shared_buffer recast(const shared_buffer& other) + { + return _M_recast(other); + } + + /** + * @brief Returns a transient %shared_buffer. + * @param data Pointer to first element. + * @param size Number of elements. + * + * Adapts externally managed memory to work with the %shared_buffer + * API; however, additional care must be taken to ensure that the data + * is copied if it needs to be held past the lifetime of the transient + * %shared_buffer. + */ + static shared_buffer make_transient(const value_type* data, size_t size) + { + shared_buffer result; + result._M_start = const_cast(data); + result._M_finish = result._M_start + size; + return result; + } + + /** + * @brief Returns true if the array's lifetime is not managed. 
+ * + * Transient shared_buffers do not own the underlying data. If the + * receiver of a transient buffer needs to hold on to it past the + * lifetime of the call, they must make a copy. + */ + bool transient() const + { + return !(this->_M_memory); + } + + protected: + /// @cond IMPL + + // Internal implementation of operator[]; supports const and non-const + // use. + value_type& _M_index(size_t index) const + { + _RH_SHARED_BUFFER_CHECK(index < this->size()); + return this->_M_start[index]; + } + + // Internal implementation of begin; supports const and non-const use. + value_type* _M_begin() const + { + return this->_M_start; + } + + // Internal implementation of end; supports const and non-const use. + value_type* _M_end() const + { + return this->_M_finish; + } + + // Converts a pointer into an iterator; if debug is enabled, the ctor + // requires an additional parameter (a pointer back to the originating + // conatiner), otherwise, it's a no-op + inline iterator _M_iterator(value_type* iter) const + { +#ifdef _RH_SHARED_BUFFER_DEBUG + return iterator(iter, this); +#else + return iterator(iter); +#endif + } + + // Internal implementation of trim to support checked iterators, which + // are different classes for shared_buffer and buffer + void _M_trim(const value_type* first, const value_type* last) + { + _RH_SHARED_BUFFER_CHECK(last >= first); + _RH_SHARED_BUFFER_CHECK(last <= this->_M_finish); + this->_M_start = const_cast(first); + this->_M_finish = const_cast(last); + } + + // Internal implementation of swap. + void _M_swap(shared_buffer& other) + { + this->_M_memory.swap(other._M_memory); + std::swap(this->_M_start, other._M_start); + std::swap(this->_M_finish, other._M_finish); + } + + // Internal implementation of copy. Copies the contents of this buffer + // into another, pre-existing buffer. + void _M_copy(shared_buffer& dest) const + { + std::copy(this->begin(), this->end(), dest._M_begin()); + } + + // Implementation of recast. 
The output type is a template parameter so + // that buffer can use this method as well. + template + static Tout _M_recast(const U& other) + { + // Reinterpret the input class (which is some form of shared_buffer + // or buffer) via a void pointer so that the compiler doesn't + // object about strict aliasing rules, which shouldn't matter here. + // The in-memory layout is always the same irrespective of the + // individual element type (including the data implementation), so + // the important effects of the copy constructor like reference + // counting will still occur. + const void* ptr = &other; + Tout result(*reinterpret_cast(ptr)); + // Truncate any extra bytes from the end of the array so that the + // end pointer is at an integral offset from the start (otherwise + // iterators might not meet). This may be a concern when the output + // type is larger than the input type; e.g., 9 floats yields 4 + // complex floats, with 1 float "lost". Note that size() already + // truncates the number of elements for us. + result._M_finish = result._M_start + result.size(); + return result; + } + + // Internal constructor to allow buffer to request a number of elements + // and an allocator type for the internal buffer to do an allocation. + template + shared_buffer(size_t count, const Alloc& allocator) : + _M_memory(count, allocator), + _M_start((value_type*) _M_memory.address()), + _M_finish(_M_start + count) + { + } + /// @endcond + + private: + // Disallow swap with any other type (mainly, buffer). + template + void swap(U& other); + + refcount_memory _M_memory; + value_type* _M_start; + value_type* _M_finish; + }; + + + /** + * @brief A shared container data type. + * + * The %buffer class extends shared_buffer to provides write access. + * Multiple buffers and shared_buffers may point to the same underlying + * data. + * + * buffers have reference semantics. Assignment and copy construction do + * not copy any elements, only the data pointer. 
+ * + * Unlike %shared_buffer, %buffer has allocating constructors. When the + * last reference to the underlying data goes away, the data is freed. + */ + template + class buffer : public shared_buffer + { + public: + /// @brief The equivalent shared buffer type. + typedef shared_buffer shared_type; + /// @brief The element type (T). + typedef T value_type; + /// @brief A random access iterator to value_type. +#ifdef _RH_SHARED_BUFFER_DEBUG + typedef ::redhawk::debug::checked_iterator iterator; +#else + typedef value_type* iterator; +#endif + /// @brief A random access iterator to const value_type. +#ifdef _RH_SHARED_BUFFER_DEBUG + typedef ::redhawk::debug::checked_iterator const_iterator; +#else + typedef const value_type* const_iterator; +#endif + /// @brief The default allocator class. +#ifdef _RH_SHARED_BUFFER_DEBUG + typedef ::redhawk::debug::checked_allocator default_allocator; +#elif defined(_RH_SHARED_BUFFER_USE_STD_ALLOC) + typedef std::allocator default_allocator; +#else + typedef ::redhawk::shm::HybridAllocator default_allocator; +#endif + + /** + * @brief Construct an empty %buffer. + */ + buffer() : + shared_type() + { + } + + /** + * @brief Construct a %buffer and allocate space. + * @param size Number of elements. + * + * This constructor allocates memory for @a size elements; no + * initialization is performed. + */ + explicit buffer(size_t size) : + shared_type(size, default_allocator()) + { + } + + /** + * @brief Construct a %buffer and allocate space. + * @param size Number of elements. + * @param allocator An allocator. + * + * This constructor allocates memory for @a size elements using + * @a allocator; no initialization is performed. @a allocator must be + * copy-constructible. + */ + template + buffer(size_t size, const Alloc& allocator) : + shared_type(size, allocator) + { + } + + /** + * @brief Construct a %buffer with an existing pointer. + * @param data Pointer to first element. + * @param size Number of elements. 
+ * + * The newly-created %buffer takes ownership of @a data. When the last + * %buffer pointing to @a data is destroyed, @a data will be deleted + * with delete[]. + */ + buffer(value_type* data, size_t size) : + shared_type(data, size) + { + } + + /** + * @brief Construct a %buffer with an existing pointer and a custom + * deleter. + * @param data Pointer to first element. + * @param size Number of elements. + * @param deleter Callable object. + * + * @a D must be copy-constructible. When the last %buffer pointing to + * @a data is destroyed, @a deleter will be called on @a data. This can + * be used to define custom release behavior. + */ + template + buffer(value_type* data, size_t size, D deleter) : + shared_type(data, size, deleter) + { + } + + /** + * @brief Construct a %buffer with an existing pointer known to be + * allocated from process-shared memory. + * @param data Pointer to first element. + * @param size Number of elements. + * @param deleter Callable object. + * @param tag Indicates that @a data is in process-shared memory. + * + * @warning This constructor is intended for internal use only. + */ + template + buffer(value_type* data, size_t size, D deleter, detail::process_shared_tag tag) : + shared_type(data, size, deleter, tag) + { + } + + /** + * The dtor releases the %buffer's shared data pointer. If no other + * buffers point to the data, the data is released. + */ + ~buffer() + { + } + + /** + * Returns a read/write iterator that points to the first element in + * the %buffer. + */ + iterator begin() + { + return _M_iterator(this->_M_begin()); + } + + /** + * Returns a read-only iterator that points to the first element in the + * %buffer. + */ + const_iterator begin() const + { + return _M_const_iterator(this->_M_begin()); + } + + /** + * Returns a read/write iterator that points one past the last element + * in the %buffer. 
+ */ + iterator end() + { + return _M_iterator(this->_M_end()); + } + + /** + * Returns a read-only iterator that points one past the last element + * in the %buffer. + */ + const_iterator end() const + { + return _M_const_iterator(this->_M_end()); + } + + /** + * @brief Subscript access to the data contained in the %buffer. + * @param index The index of the element to access + * @return Read/write reference to element + */ + value_type& operator[] (size_t index) + { + return this->_M_index(index); + } + + /** + * @brief Subscript access to the data contained in the %buffer. + * @param index The index of the element to access + * @return Read-only reference to element + */ + const value_type& operator[] (size_t index) const + { + return this->_M_index(index); + } + + /** + * Returns the number of elements in the %buffer. + */ + size_t size() const + { + return shared_type::size(); + } + + /** + * Returns true if the %buffer is empty. + */ + bool empty() const + { + return shared_type::empty(); + } + + /** + * Returns a read-only pointer to the first element. + */ + const value_type* data() const + { + return this->_M_begin(); + } + + /** + * Returns a read/write pointer to the first element. + */ + value_type* data() + { + return this->_M_begin(); + } + + /** + * @brief Returns a %shared_buffer containing a subset of elements. + * @param start Index of first element. + * @param end Index of last element, exclusive (default end). + * @return The new %shared_buffer. + */ + shared_type slice(size_t start, size_t end=size_t(-1)) const + { + return shared_type::slice(start, end); + } + + /** + * @brief Returns a %buffer containing a subset of elements. + * @param start Index of first element. + * @param end Index of last element, exclusive (default end). + * @return The new %buffer. 
+ */ + buffer slice(size_t start, size_t end=size_t(-1)) + { + buffer result(*this); + result.trim(start, end); + return result; + } + + /** + * @brief Adjusts the start and end indices of this %buffer. + * @param start Index of first element. + * @param end Index of last element, exclusive (default end). + */ + void trim(size_t start, size_t end=size_t(-1)) + { + shared_type::trim(start, end); + } + + /** + * @brief Adjusts the beginning of this %buffer. + * @param start Iterator to first element. + */ + void trim(iterator first) + { + this->_M_trim(first, end()); + } + + /** + * @brief Adjusts the beginning and end of this %buffer. + * @param start Iterator to first element. + * @param end Iterator to last element, exclusive (default end). + */ + void trim(iterator first, iterator last) + { + this->_M_trim(first, last); + } + + /** + * @brief Resizes this %buffer. + * @param size Number of elements. + * + * Allocates new memory using the default allocator. Existing values + * are copied to the new memory. + */ + void resize(size_t size) + { + buffer temp(size); + _M_resize(temp); + } + + /** + * @brief Resizes this %buffer. + * @param size Number of elements. + * @param allocator STL-compliant allocator. + * + * Allocates new memory using @a allocator. Existing values are copied + * to the new memory. + */ + template + void resize(size_t size, Alloc& allocator) + { + buffer temp(size, allocator); + _M_resize(temp); + } + + /** + * @brief Replaces the contents of this %buffer. + * @param pos Index of first element to be replaced. + * @param len Number of elements to replace. + * @param src A shared_buffer from which to copy elements. + */ + void replace(size_t pos, size_t len, const shared_type& src) + { + replace(pos, len, src, 0); + } + + /** + * @brief Replaces the contents of this %buffer. + * @param pos Index of first element to be replaced. + * @param len Number of elements to replace. + * @param src A shared_buffer from which to copy elements. 
+ * @param srcpos Index of first element in @p src to copy. + */ + void replace(size_t pos, size_t len, const shared_type& src, size_t srcpos) + { + typename shared_type::const_iterator start = src.begin() + srcpos; + std::copy(start, start + len, begin() + pos); + } + + /** + * @brief Returns a copy of this %buffer. + * + * The returned %buffer points to a newly-allocated array. + */ + buffer copy() const + { + return shared_type::copy(); + } + + /** + * @brief Returns a copy of this %buffer. + * @param allocator An allocator object. + * + * The returned %buffer points to a new array allocated with + * @a allocator. @a allocator must be copy-constructible. + */ + template + buffer copy(const Alloc& allocator) const + { + return shared_type::copy(allocator); + } + + /** + * @brief Swap contents with another %buffer. + * @param other %buffer to swap with. + */ + void swap(buffer& other) + { + this->_M_swap(other); + } + + /** + * @brief Reinterpret a %buffer as another data type. + * @param other %buffer to reinterpret. + * @return The new %buffer. + * + * Data is reinterpreted by standard C++ reinterpret_cast semantics. + * The size of the new %buffer is the floor of the size of @a other + * multiplied by the ratio sizeof(U)/sizeof(T). + */ + template + static buffer recast(const buffer& other) + { + // Use the base class' template method implementation, with this + // type as the first template parameter (the second is deduced from + // the argument). 
+ return shared_type::template _M_recast(other); + } + + protected: + /// @cond IMPL + + // Converts a pointer into an iterator (see shared_buffer::_M_iterator + // for more explanation) + inline iterator _M_iterator(value_type* iter) + { +#ifdef _RH_SHARED_BUFFER_DEBUG + return iterator(iter, this); +#else + return iterator(iter); +#endif + } + + // Converts a const pointer into an const_iterator (see above) + inline const_iterator _M_const_iterator(const value_type* iter) const + { +#ifdef _RH_SHARED_BUFFER_DEBUG + return const_iterator(iter, this); +#else + return const_iterator(iter); +#endif + } + + // Copies existing elements into a destination buffer then swaps the + // buffers, as used to perform a resize. The allocation is done inside + // the public method to ensure that the correct allocator template is + // used. + inline void _M_resize(buffer& dest) + { + size_t count = std::min(size(), dest.size()); + std::copy(begin(), begin() + count, dest.begin()); + this->swap(dest); + } + /// @endcond + }; + + /** + * @brief Buffer equality comparison. + * @param lhs A %shared_buffer. + * @param rhs A %shared_buffer of the same type as @a lhs. + * @return True iff the size and elements of the shared_buffers are equal. + */ + template + inline bool operator==(const shared_buffer& lhs, const shared_buffer& rhs) + { + if (lhs.size() != rhs.size()) { + // Different sizes always compare unequal + return false; + } else if (lhs.data() == rhs.data()) { + // If the data pointer is the same (the size is already known to be + // the same), no further comparison is required + return true; + } else { + // Perform element-wise comparison + return std::equal(lhs.begin(), lhs.end(), rhs.begin()); + } + } + + /** + * @brief Buffer inequality comparison. + * @param lhs A %shared_buffer. + * @param rhs A %shared_buffer of the same type as @a lhs. + * @return True iff the size or elements of the shared_buffers are not equal. 
+ */ + template + inline bool operator!=(const shared_buffer& lhs, const shared_buffer& rhs) + { + return !(lhs == rhs); + } + + /** + * @brief A convenience wrapper for creating a buffer. + * @param data Pointer to first element. + * @param size Number of elements. + * @return A newly-constructed buffer<> of the appropriate type. + */ + template + inline redhawk::buffer make_buffer(T* data, size_t size) + { + return redhawk::buffer(data, size); + } + + /** + * @brief A convenience wrapper for creating a buffer with a custom deleter. + * @param data Pointer to first element. + * @param size Number of elements. + * @param deleter Callable object. + * @return A newly-constructed buffer<> of the appropriate type. + */ + template + inline redhawk::buffer make_buffer(T* data, size_t size, D deleter) + { + return redhawk::buffer(data, size, deleter); + } + +} // namespace redhawk + +#endif // REDHAWK_SHARED_BUFFER_H diff --git a/redhawk/src/base/include/ossie/shm/Allocator.h b/redhawk/src/base/include/ossie/shm/Allocator.h new file mode 100644 index 000000000..cb2b637ad --- /dev/null +++ b/redhawk/src/base/include/ossie/shm/Allocator.h @@ -0,0 +1,137 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. 
If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_SHM_ALLOCATOR_H +#define REDHAWK_SHM_ALLOCATOR_H + +#include +#include + +namespace redhawk { + + namespace shm { + + class Heap; + + std::string getProcessHeapName(pid_t pid); + + Heap* getProcessHeap(); + bool isEnabled(); + void* allocate(size_t bytes); + void deallocate(void* ptr); + + void* allocateHybrid(size_t bytes); + void deallocateHybrid(void* ptr); + + template + struct Allocator : public std::allocator + { + public: + typedef std::allocator base; + typedef typename base::pointer pointer; + typedef typename base::value_type value_type; + typedef typename base::size_type size_type; + + template + struct rebind { + typedef Allocator other; + }; + + Allocator() throw() : + base() + { + } + + Allocator(const Allocator& other) throw() : + base(other) + { + } + + template + Allocator(const Allocator& other) throw() : + base(other) + { + } + + pointer allocate(size_type count) + { + size_type bytes = count * sizeof(value_type); + void* ptr = redhawk::shm::allocate(bytes); + if (!ptr) { + throw std::bad_alloc(); + } + return static_cast(ptr); + } + + void deallocate(pointer ptr, size_type /*unused*/) + { + redhawk::shm::deallocate(ptr); + } + }; + + template + struct HybridAllocator : public std::allocator + { + public: + typedef std::allocator base; + typedef typename base::pointer pointer; + typedef typename base::value_type value_type; + typedef typename base::size_type size_type; + + template + struct rebind { + typedef HybridAllocator other; + }; + + HybridAllocator() throw() : + base() + { + } + + HybridAllocator(const HybridAllocator& other) throw() : + base(other) + { + } + + template + HybridAllocator(const HybridAllocator& other) throw() : + base(other) + { + } + + pointer allocate(size_type count) + { + size_type bytes = count * sizeof(value_type); + void* ptr = redhawk::shm::allocateHybrid(bytes); + if (!ptr) { + throw std::bad_alloc(); + } + return static_cast(ptr); + } + + void 
deallocate(pointer ptr, size_type /*unused*/) + { + redhawk::shm::deallocateHybrid(ptr); + } + }; + } +} + +#endif // REDHAWK_SHM_ALLOCATOR_H diff --git a/redhawk/src/base/include/ossie/shm/Heap.h b/redhawk/src/base/include/ossie/shm/Heap.h new file mode 100644 index 000000000..c0ba2f30c --- /dev/null +++ b/redhawk/src/base/include/ossie/shm/Heap.h @@ -0,0 +1,82 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef REDHAWK_SHM_HEAP_H +#define REDHAWK_SHM_HEAP_H + +#include +#include + +#include "SuperblockFile.h" + +namespace redhawk { + + namespace shm { + class ThreadState; + + struct MemoryRef { + std::string heap; + size_t superblock; + size_t offset; + + bool operator!() const; + }; + + class Heap { + public: + Heap(const std::string& name); + ~Heap(); + + void* allocate(size_t bytes); + void deallocate(void* ptr); + + static MemoryRef getRef(const void* ptr); + + const std::string& name() const; + + private: + struct PrivateHeap; + + // Non-copyable, non-assignable + Heap(const Heap&); + Heap& operator=(const Heap&); + + // Default to 2MB superblock + static const size_t DEFAULT_SUPERBLOCK_SIZE = 2097152; + + Superblock* _createSuperblock(size_t minSize); + + PrivateHeap* _getPrivateHeap(); + ThreadState* _getThreadState(); + + // Serializes access to all members except thread-specific data + boost::mutex _mutex; + + SuperblockFile _file; + bool _canGrow; + + std::vector _allocs; + + boost::thread_specific_ptr _threadState; + }; + } +} + +#endif // REDHAWK_SHM_HEAP_H diff --git a/redhawk/src/base/include/ossie/shm/HeapClient.h b/redhawk/src/base/include/ossie/shm/HeapClient.h new file mode 100644 index 000000000..5f522f53c --- /dev/null +++ b/redhawk/src/base/include/ossie/shm/HeapClient.h @@ -0,0 +1,57 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_SHM_HEAPCLIENT_H +#define REDHAWK_SHM_HEAPCLIENT_H + +#include +#include + +namespace redhawk { + + namespace shm { + + struct MemoryRef; + class SuperblockFile; + + class HeapClient { + public: + HeapClient(); + ~HeapClient(); + + void* fetch(const MemoryRef& ref); + static void deallocate(void* ptr); + + void detach(); + + private: + // Non-copyable, non-assignable + HeapClient(const HeapClient&); + HeapClient& operator=(const HeapClient&); + + SuperblockFile* _getSuperblockFile(const std::string& name); + + typedef std::map FileMap; + FileMap _files; + }; + } +} + +#endif // REDHAWK_SHM_HEAPCLIENT_H diff --git a/redhawk/src/base/include/ossie/shm/MappedFile.h b/redhawk/src/base/include/ossie/shm/MappedFile.h new file mode 100644 index 000000000..a3503e0fe --- /dev/null +++ b/redhawk/src/base/include/ossie/shm/MappedFile.h @@ -0,0 +1,69 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef REDHAWK_SHM_MAPPEDFILE_H +#define REDHAWK_SHM_MAPPEDFILE_H + +#include +#include + +namespace redhawk { + + namespace shm { + + class MappedFile { + public: + enum mode_e { + READONLY, + READWRITE + }; + + static const size_t PAGE_SIZE; + + MappedFile(const std::string& name); + ~MappedFile(); + + void create(); + void open(); + + const std::string& name() const; + + size_t size() const; + void resize(size_t bytes); + + void* map(size_t bytes, mode_e mode, off_t offset=0); + void* remap(void* oldAddr, size_t oldSize, size_t newSize); + void unmap(void* addr, size_t bytes); + + void close(); + void unlink(); + + private: + // Non-copyable, non-assignable + MappedFile(const MappedFile&); + MappedFile& operator=(const MappedFile&); + + const std::string _name; + int _fd; + }; + } +} + +#endif // REDHAWK_SHM_MAPPEDFILE_H diff --git a/redhawk/src/base/include/ossie/shm/SuperblockFile.h b/redhawk/src/base/include/ossie/shm/SuperblockFile.h new file mode 100644 index 000000000..14173182a --- /dev/null +++ b/redhawk/src/base/include/ossie/shm/SuperblockFile.h @@ -0,0 +1,87 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef REDHAWK_SHM_SUPERBLOCKFILE_H +#define REDHAWK_SHM_SUPERBLOCKFILE_H + +#include + +#include "MappedFile.h" + +namespace redhawk { + + namespace shm { + + class Superblock; + + class SuperblockFile { + public: + struct Statistics + { + size_t size; + size_t used; + size_t superblocks; + size_t unused; + }; + + SuperblockFile(const std::string& name); + ~SuperblockFile(); + + static bool IsSuperblockFile(const std::string& name); + + pid_t creator() const; + int refcount() const; + bool isOrphaned() const; + + Statistics getStatistics(); + + void create(); + void open(bool attach=true); + void close(); + + Superblock* getSuperblock(size_t offset); + Superblock* createSuperblock(size_t bytes); + + const std::string& name() const; + + MappedFile& file(); + + private: + // Non-copyable, non-assignable + SuperblockFile(const SuperblockFile&); + SuperblockFile& operator=(const SuperblockFile&); + + void _detach(); + + Superblock* _mapSuperblock(size_t offset); + + MappedFile _file; + bool _attached; + + struct Header; + Header* _header; + + typedef std::map SuperblockMap; + SuperblockMap _superblocks; + }; + } +} + +#endif // REDHAWK_SHM_SUPERBLOCKFILE_H diff --git a/redhawk/src/base/include/ossie/shm/System.h b/redhawk/src/base/include/ossie/shm/System.h new file mode 100644 index 000000000..a5cd4e26d --- /dev/null +++ b/redhawk/src/base/include/ossie/shm/System.h @@ -0,0 +1,35 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef REDHAWK_SHM_SYSTEM_H +#define REDHAWK_SHM_SYSTEM_H + +#include +#include + +namespace redhawk { + namespace shm { + const char* getSystemPath(); + size_t getSystemTotalMemory(); + size_t getSystemFreeMemory(); + } +} + +#endif // REDHAWK_SHM_SYSTEM_H diff --git a/redhawk/src/configure.ac b/redhawk/src/configure.ac index 5a8628d44..cd13e648f 100644 --- a/redhawk/src/configure.ac +++ b/redhawk/src/configure.ac @@ -19,7 +19,7 @@ # dnl Update this version number immedately after a release -AC_INIT([ossie],[2.0.9]) +AC_INIT([ossie],[2.2.1]) #AM_INIT_AUTOMAKE(nostdinc) # allows filenames over 99 characters long during dist AM_INIT_AUTOMAKE([1.9 tar-pax subdir-objects]) @@ -69,12 +69,17 @@ AX_BOOST_REGEX AX_BOOST_THREAD AX_BOOST_DATE_TIME +m4_include(acinclude/unitdir.m4) +m4_include(acinclude/testdir.m4) + AX_LIB_EXPAT([1.95.8]) if test x"$HAVE_EXPAT" = xno ; then AC_MSG_ERROR([The Expat XML parser library and header files are required]) fi PKG_CHECK_MODULES([OMNIORB], [omniORB4 >= 4.0.0]) +OMNIORB_INCLUDEDIR=`pkg-config --variable=includedir omniORB4` +AC_SUBST(OMNIORB_INCLUDEDIR) PKG_CHECK_EXISTS([omniORB4 >= 4.2.0],[AC_DEFINE([OMNIORB4_2],[1],[])]) PKG_CHECK_MODULES([OMNITHREAD], [omnithread3 >= 4.0.0]) PKG_CHECK_MODULES([OMNIDYNAMIC], [omniDynamic4 >= 4.0.0]) @@ -121,8 +126,8 @@ AC_CHECK_PROG([XMLCATALOG], [xmlcatalog], [xmlcatalog], [no]) AM_CONDITIONAL(HAVE_XMLCATALOG, test $XMLCATALOG != "no") # add numa support to affinity module -AC_ARG_ENABLE([affinity], AS_HELP_STRING([--enable-affinity], [Enable numa affinity processing])) - 
AC_MSG_CHECKING([Adding support for numa affinity processing]) +AC_ARG_ENABLE([affinity], [AS_HELP_STRING([--disable-affinity], [Disable numa affinity processing])],,[enable_affinity=yes]) + AC_MSG_CHECKING([Enabling support for numa affinity processing]) if test "x$enable_affinity" = "xyes"; then dnl determine if numa library support is available AC_SUBST(OSSIE_AFFINITY, "yes") @@ -148,16 +153,6 @@ OSSIE_ENABLE_PERSISTENCE m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) -AC_ARG_ENABLE([xml_validation], AS_HELP_STRING([--disable-xml-validation], [Disable xml validation in the parsers])) -AC_MSG_CHECKING([if xml parsers should perform validation]) -if test "x$enable_xml_validation" != "xno"; then - AC_SUBST(OSSIE_XSDFLAGS, "--generate-validation") - AC_MSG_RESULT([yes]) -else - AC_SUBST(OSSIE_XSDFLAGS, "--suppress-validation") - AC_MSG_RESULT([no]) -fi - # Allow user to explictly prevent JAVA compilation AC_ARG_ENABLE([java], AS_HELP_STRING([--disable-java], [Disable framework java support])) @@ -169,7 +164,7 @@ if test "x$enable_java" != "xno"; then RH_JAVA_HOME # Minimum Java version - java_source_version=1.6 + java_source_version=1.8 # Locate tools we need based on JAVA_HOME RH_PROG_JAVAC([$java_source_version]) @@ -242,6 +237,10 @@ AC_SUBST(OSSIE_CFLAGS, '-I$(top_srcdir)/base/include') AC_SUBST(OSSIE_IDLDIR, '$(top_srcdir)/idl') AC_SUBST(OSSIEIDL_LIBS, '$(top_builddir)/base/framework/idl/libossieidl.la') +# For test components with a generated Makefile.am, set the expected flags +AC_SUBST(PROJECTDEPS_CFLAGS, '$(OSSIE_CFLAGS) -I$(top_srcdir)/base/include/ossie') +AC_SUBST(PROJECTDEPS_LIBS, '$(top_builddir)/base/framework/libossiecf.la $(OSSIEIDL_LIBS)') + if test "$HAVE_JAVASUPPORT" = yes; then AC_SUBST(OSSIE_CLASSPATH, '$(top_srcdir)/base/framework/java/CFInterfaces.jar:$(top_srcdir)/base/framework/java/log4j-1.2.15.jar:$(top_srcdir)/base/framework/java/ossie/ossie.jar') @@ -272,6 +271,13 @@ if test "$HAVE_JAVASUPPORT" = yes; then IDLJNICXX='$(IDL) -p 
$(OMNIJNI_PYTHONDIR) -b omnijni.idljni' fi +# C++ unit testing support. May want to conditionally enable/disable this. +AM_PATH_CPPUNIT(1.12.1) +AS_IF([test "x$HAVE_JAVASUPPORT" == "xyes"], [ + dnl Use RPM location hard-coded for now + AC_SUBST([JUNIT_CLASSPATH], "/usr/share/java/junit4.jar") +]) + # Undefine the PACKAGE_ variables to avoid warnings when omniORB is installed outside of # /usr. OSSIE doesn't use these variables anyways...and even if it did # they would get undefined in any file that included CORBA.h (because ossieconfig.h @@ -295,6 +301,7 @@ AC_CONFIG_FILES(Makefile \ control/sdr/Makefile \ control/sdr/dommgr/Makefile \ control/sdr/devmgr/Makefile \ + control/sdr/ComponentHost/Makefile \ base/Makefile \ base/framework/python/Makefile \ base/framework/java/Makefile \ @@ -310,10 +317,13 @@ AC_CONFIG_FILES(Makefile \ base/plugin/logcfg/Makefile \ idl/Makefile \ xml/Makefile \ - tools/Makefile \ - tools/LogEventAppender/Makefile \ - testing/Makefile \ + tools/Makefile ) + +AM_COND_IF( [BUILD_TESTS], [ +AC_CONFIG_FILES( [testing/Makefile \ testing/_unitTestHelpers/buildconfig.py \ + testing/cpp/Makefile \ + testing/java/Makefile \ testing/sdr/dev/devices/ExecutableDevice/Makefile \ testing/sdr/dev/devices/BasicTestDevice_cpp/BasicTestDevice_cpp_impl1/Makefile \ testing/sdr/dev/devices/BasicTestDevice_java/java/Makefile \ @@ -331,14 +341,20 @@ AC_CONFIG_FILES(Makefile \ testing/sdr/dev/devices/base_programmable/cpp/Makefile \ testing/sdr/dev/devices/base_persona/cpp/Makefile \ testing/sdr/dev/devices/devcpp/cpp/Makefile \ + testing/sdr/dev/devices/log_test_cpp/cpp/Makefile \ + testing/sdr/dev/devices/writeonly_cpp/cpp/Makefile \ testing/sdr/dev/devices/DevC/cpp/Makefile \ testing/sdr/dev/devices/dev_kill_devmgr/cpp/Makefile \ testing/sdr/dev/devices/GPP/cpp/Makefile \ testing/sdr/dev/devices/java_dev/java/Makefile \ + testing/sdr/dev/devices/log_test_java/java/Makefile \ testing/sdr/dev/devices/LongDevice/cpp/Makefile \ + 
testing/sdr/dev/devices/dev_alloc_cpp/cpp/Makefile \ testing/sdr/dev/devices/devj/java/Makefile \ + testing/sdr/dev/devices/writeonly_java/java/Makefile \ testing/sdr/dev/devices/DevC/java/Makefile \ testing/sdr/dev/services/BasicService_java/java/Makefile \ + testing/sdr/dev/services/BasicService_cpp/cpp/Makefile \ testing/sdr/dom/deps/cpp_dep1/cpp/Makefile \ testing/sdr/dom/deps/cpp_dep1/cpp/cpp_dep1.pc \ testing/sdr/dom/deps/cpp_dep2/cpp/Makefile \ @@ -353,13 +369,20 @@ AC_CONFIG_FILES(Makefile \ testing/sdr/dom/components/linkedLibraryTest/Makefile \ testing/sdr/dom/components/TestCppsoftpkgDeps/Makefile \ testing/sdr/dom/components/javaDep/javaDep/Makefile \ + testing/sdr/dom/components/javaSoftpkgJarDep/java/Makefile \ testing/sdr/dom/components/java_comp/java/Makefile \ testing/sdr/dom/components/msg_through_java/java/Makefile \ testing/sdr/dom/components/huge_msg_java/java/Makefile \ + testing/sdr/dom/components/timeprop_java/java/Makefile \ + testing/sdr/dom/components/logger_java/java/Makefile \ + testing/sdr/dom/components/time_ja_now/java/Makefile \ testing/sdr/dom/components/HardLimit/HardLimit_java_impl1/Makefile \ testing/sdr/dom/components/SimpleComponent/SimpleComponent_cpp_impl1/Makefile \ testing/sdr/dom/components/BasicAC/basicac_java_impl1/Makefile \ testing/sdr/dom/components/BasicAC/BasicAC_cpp_impl1/Makefile \ + testing/sdr/dom/components/BasicShared/cpp/Makefile \ + testing/sdr/dom/components/logger/cpp/Makefile \ + testing/sdr/dom/components/alloc_shm/cpp/Makefile \ testing/sdr/dom/components/MessageReceiverCpp/Makefile \ testing/sdr/dom/components/MessageSenderCpp/Makefile \ testing/sdr/dom/components/EventSend/EventSend_java_impl1/Makefile \ @@ -380,19 +403,27 @@ AC_CONFIG_FILES(Makefile \ testing/sdr/dom/components/C2/cpp/Makefile \ testing/sdr/dom/components/ECM_CPP/cpp/Makefile \ testing/sdr/dom/components/ECM_JAVA/java/Makefile \ + testing/sdr/dom/components/EmptyString/cpp/Makefile \ + 
testing/sdr/dom/components/EmptyString/java/Makefile \ testing/sdr/dom/components/PropertyChange_C1/cpp/Makefile \ testing/sdr/dom/components/PropertyChange_J1/java/Makefile \ testing/sdr/dom/components/Property_CPP/cpp/Makefile \ testing/sdr/dom/components/msg_through_cpp/cpp/Makefile \ + testing/sdr/dom/components/busycomp/cpp/Makefile \ + testing/sdr/dom/components/slow_stop_cpp/cpp/Makefile \ testing/sdr/dom/components/commandline_prop/cpp/Makefile \ + testing/sdr/dom/components/timeprop_cpp/cpp/Makefile \ + testing/sdr/dom/components/time_cp_now/cpp/Makefile \ testing/sdr/dom/components/nocommandline_prop/cpp/Makefile \ testing/sdr/dom/components/Property_JAVA/java/Makefile \ testing/sdr/dom/components/foo/bar/comp/cpp/Makefile \ testing/sdr/dom/components/foo/bar/jv/java/Makefile \ testing/sdr/dom/components/huge_msg_cpp/cpp/Makefile \ testing/sdr/dom/components/zero_length/cpp/Makefile \ - testing/sdr/dom/components/svc_error_cpp/cpp/Makefile \ testing/sdr/dom/components/TestCppOptionalProps/cpp/Makefile \ - testing/sdr/dom/components/TestJavaOptionalProps/java/Makefile ) + testing/sdr/dom/components/TestJavaOptionalProps/java/Makefile \ + testing/sdr/dom/components/svc_fn_error_cpp/cpp/Makefile \ + testing/sdr/dom/components/svc_fn_error_java/java/Makefile ]) +]) AC_OUTPUT diff --git a/redhawk/src/control/framework/FileManager_impl.cpp b/redhawk/src/control/framework/FileManager_impl.cpp index 319886771..a6185a180 100644 --- a/redhawk/src/control/framework/FileManager_impl.cpp +++ b/redhawk/src/control/framework/FileManager_impl.cpp @@ -76,22 +76,16 @@ FileManager_impl::FileManager_impl (const char* _fsroot): mountedFileSystems(), mountsLock() { - TRACE_ENTER(FileManager_impl); - TRACE_EXIT(FileManager_impl); } FileManager_impl::~FileManager_impl() { - TRACE_ENTER(FileManager_impl) - TRACE_EXIT(FileManager_impl); } void FileManager_impl::mount (const char* mountPoint, CF::FileSystem_ptr fileSystem) throw (CORBA::SystemException, CF::InvalidFileName, 
CF::FileManager::InvalidFileSystem, CF::FileManager::MountPointAlreadyExists) { - TRACE_ENTER(FileManager_impl); - if (CORBA::is_nil(fileSystem)) { throw CF::FileManager::InvalidFileSystem(); } @@ -114,18 +108,14 @@ void FileManager_impl::mount (const char* mountPoint, CF::FileSystem_ptr fileSys } } - LOG_TRACE(FileManager_impl, "Mounting remote file system on " << mountPath); + RH_TRACE(_fileSysLog, "Mounting remote file system on " << mountPath); mountedFileSystems.push_back(MountPoint(mountPath, fileSystem)); - - TRACE_EXIT(FileManager_impl) } void FileManager_impl::unmount (const char* mountPoint) throw (CORBA::SystemException, CF::FileManager::NonExistentMount) { - TRACE_ENTER(FileManager_impl); - std::string mountPath = normalizeMountPath(mountPoint); // Exclusive access to the mount table is required. @@ -134,10 +124,8 @@ void FileManager_impl::unmount (const char* mountPoint) // Find the mount and remove it. for (MountList::iterator mount = mountedFileSystems.begin(); mount != mountedFileSystems.end(); ++mount) { if (mount->path == mountPath) { - LOG_TRACE(FileManager_impl, "Unmounting remote file system on " << mountPath); + RH_TRACE(_fileSysLog, "Unmounting remote file system on " << mountPath); mountedFileSystems.erase(mount); - - TRACE_EXIT(FileManager_impl); return; } } @@ -150,13 +138,11 @@ void FileManager_impl::unmount (const char* mountPoint) void FileManager_impl::remove (const char* fileName) throw (CORBA::SystemException, CF::FileException, CF::InvalidFileName) { - TRACE_ENTER(FileManager_impl); - if (!ossie::isValidFileName(fileName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid file name"); } - LOG_TRACE(FileManager_impl, "Removing file " << fileName); + RH_TRACE(_fileSysLog, "Removing file " << fileName); // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. 
@@ -165,23 +151,19 @@ void FileManager_impl::remove (const char* fileName) // Check if file is on one of the mounted file systems MountList::iterator mount = getMountForPath(fileName); if (mount == mountedFileSystems.end()) { - LOG_TRACE(FileManager_impl, "Removing local file"); + RH_TRACE(_fileSysLog, "Removing local file"); FileSystem_impl::remove(fileName); } else { std::string filePath = mount->getRelativePath(fileName); - LOG_TRACE(FileManager_impl, "Removing " << filePath << " on remote file system mounted at " << mount->path); + RH_TRACE(_fileSysLog, "Removing " << filePath << " on remote file system mounted at " << mount->path); mount->fs->remove(filePath.c_str()); } - - TRACE_EXIT(FileManager_impl) } void FileManager_impl::copy (const char* sourceFileName, const char* destinationFileName) throw (CORBA::SystemException, CF::InvalidFileName, CF::FileException) { - TRACE_ENTER(FileManager_impl); - // Validate absolute file names if (sourceFileName[0] != '/' || !ossie::isValidFileName(sourceFileName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid source file name"); @@ -191,7 +173,7 @@ void FileManager_impl::copy (const char* sourceFileName, const char* destination throw CF::InvalidFileName(CF::CF_EINVAL, "Destination file name is identical to source file name"); } - LOG_TRACE(FileManager_impl, "Copy " << sourceFileName << " to " << destinationFileName); + RH_TRACE(_fileSysLog, "Copy " << sourceFileName << " to " << destinationFileName); // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. @@ -204,11 +186,11 @@ void FileManager_impl::copy (const char* sourceFileName, const char* destination // Source and destination are on the same file system... if (sourceMount == mountedFileSystems.end()) { // ...which is also the local file system. 
- LOG_TRACE(FileManager_impl, "Copying locally"); + RH_TRACE(_fileSysLog, "Copying locally"); FileSystem_impl::copy(sourceFileName, destinationFileName); } else { // ...which is a remote file system. - LOG_TRACE(FileManager_impl, "Copying locally on remote file system"); + RH_TRACE(_fileSysLog, "Copying locally on remote file system"); const std::string srcPath = sourceMount->getRelativePath(sourceFileName); const std::string dstPath = destMount->getRelativePath(destinationFileName); sourceMount->fs->copy(srcPath.c_str(), dstPath.c_str()); @@ -216,7 +198,7 @@ void FileManager_impl::copy (const char* sourceFileName, const char* destination return; } - LOG_TRACE(FileManager_impl, "Copying between filesystems"); + RH_TRACE(_fileSysLog, "Copying between filesystems"); // Open the source file (may be local). CF::File_var srcFile; @@ -300,7 +282,7 @@ void FileManager_impl::copy (const char* sourceFileName, const char* destination } } catch(...) { - LOG_ERROR(FileManager_impl, eout.str()); + RH_ERROR(_fileSysLog, eout.str()); fe = true; } @@ -322,7 +304,7 @@ void FileManager_impl::copy (const char* sourceFileName, const char* destination throw(CF::FileException()); } } catch(...) { - LOG_ERROR(FileManager_impl, eout.str()); + RH_ERROR(_fileSysLog, eout.str()); fe = true; } @@ -344,23 +326,19 @@ void FileManager_impl::copy (const char* sourceFileName, const char* destination throw(CF::FileException()); } } catch(...) 
{ - LOG_ERROR(FileManager_impl, eout.str()); + RH_ERROR(_fileSysLog, eout.str()); fe = true; } if ( fe ) { throw(CF::FileException()); } - - TRACE_EXIT(FileManager_impl); } void FileManager_impl::move (const char* sourceFileName, const char* destinationFileName) throw (CORBA::SystemException, CF::InvalidFileName, CF::FileException) { - TRACE_ENTER(FileManager_impl); - // Validate absolute file names if (sourceFileName[0] != '/' || !ossie::isValidFileName(sourceFileName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid source file name"); @@ -370,7 +348,7 @@ void FileManager_impl::move (const char* sourceFileName, const char* destination throw CF::InvalidFileName(CF::CF_EINVAL, "Destination file name is identical to source file name"); } - LOG_TRACE(FileManager_impl, "Move " << sourceFileName << " to " << destinationFileName); + RH_TRACE(_fileSysLog, "Move " << sourceFileName << " to " << destinationFileName); // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. @@ -383,11 +361,11 @@ void FileManager_impl::move (const char* sourceFileName, const char* destination // Source and destination are on the same file system... if (sourceMount == mountedFileSystems.end()) { // ...which is also the local file system. - LOG_TRACE(FileManager_impl, "Moving locally"); + RH_TRACE(_fileSysLog, "Moving locally"); FileSystem_impl::move(sourceFileName, destinationFileName); } else { // ...which is a remote file system. 
- LOG_TRACE(FileManager_impl, "Moving locally on remote file system"); + RH_TRACE(_fileSysLog, "Moving locally on remote file system"); const std::string srcPath = sourceMount->getRelativePath(sourceFileName); const std::string dstPath = destMount->getRelativePath(destinationFileName); sourceMount->fs->move(srcPath.c_str(), dstPath.c_str()); @@ -395,7 +373,7 @@ void FileManager_impl::move (const char* sourceFileName, const char* destination return; } - LOG_TRACE(FileManager_impl, "Moving between filesystems"); + RH_TRACE(_fileSysLog, "Moving between filesystems"); // Perform a copy followed by a remove, which is the only way we can move // across file systems. This operation is not atomic, and making it atomic @@ -404,21 +382,17 @@ void FileManager_impl::move (const char* sourceFileName, const char* destination // the domain's FileManager from interfering with this operation. this->copy(sourceFileName, destinationFileName); this->remove(sourceFileName); - - TRACE_EXIT(FileManager_impl); } CORBA::Boolean FileManager_impl::exists (const char* fileName) throw (CORBA::SystemException, CF::InvalidFileName) { - TRACE_ENTER(FileManager_impl); - if (!ossie::isValidFileName(fileName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid file name"); } - LOG_TRACE(FileManager_impl, "Checking for existence of " << fileName); + RH_TRACE(_fileSysLog, "Checking for existence of " << fileName); // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. 
@@ -428,15 +402,14 @@ CORBA::Boolean FileManager_impl::exists (const char* fileName) MountList::iterator mount = getMountForPath(fileName); CORBA::Boolean status; if (mount == mountedFileSystems.end()) { - LOG_TRACE(FileManager_impl, "Checking local file system"); + RH_TRACE(_fileSysLog, "Checking local file system"); status = FileSystem_impl::exists(fileName); } else { std::string filePath = mount->getRelativePath(fileName); - LOG_TRACE(FileManager_impl, "Checking for " << filePath << " on remote file system mounted at " << mount->path); + RH_TRACE(_fileSysLog, "Checking for " << filePath << " on remote file system mounted at " << mount->path); status = mount->fs->exists(filePath.c_str()); } - TRACE_EXIT(FileManager_impl); return status; } @@ -444,9 +417,7 @@ CORBA::Boolean FileManager_impl::exists (const char* fileName) CF::FileSystem::FileInformationSequence* FileManager_impl::list (const char* pattern) throw (CORBA::SystemException, CF::FileException, CF::InvalidFileName) { - TRACE_ENTER(FileManager_impl); - - LOG_TRACE(FileManager_impl, "List files with pattern " << pattern); + RH_TRACE(_fileSysLog, "List files with pattern " << pattern); CF::FileSystem::FileInformationSequence_var result; @@ -456,7 +427,7 @@ CF::FileSystem::FileInformationSequence* FileManager_impl::list (const char* pat MountList::iterator mount = getMountForPath(pattern); if (mount == mountedFileSystems.end()) { - LOG_TRACE(FileManager_impl, "Listing local file system"); + RH_TRACE(_fileSysLog, "Listing local file system"); result = FileSystem_impl::list(pattern); // Check for any mount points that match the pattern. 
@@ -479,7 +450,7 @@ CF::FileSystem::FileInformationSequence* FileManager_impl::list (const char* pat const std::string searchPath = mount->getRelativePath(pattern); if (searchPath.empty()) { // Exact match for mount point - LOG_TRACE(FileManager_impl, "List mount point " << mount->path); + RH_TRACE(_fileSysLog, "List mount point " << mount->path); result = new CF::FileSystem::FileInformationSequence(); result->length(1); result[0].name = CORBA::string_dup(mount->path.substr(1).c_str()); @@ -488,12 +459,11 @@ CF::FileSystem::FileInformationSequence* FileManager_impl::list (const char* pat result[0].fileProperties.length(0); } else { // List contents of mount point - LOG_TRACE(FileManager_impl, "Listing " << searchPath << " on remote file system mounted at " << mount->path); + RH_TRACE(_fileSysLog, "Listing " << searchPath << " on remote file system mounted at " << mount->path); result = mount->fs->list(searchPath.c_str()); } } - TRACE_EXIT(FileManager_impl); return result._retn(); } @@ -501,13 +471,11 @@ CF::FileSystem::FileInformationSequence* FileManager_impl::list (const char* pat CF::File_ptr FileManager_impl::create (const char* fileName) throw (CORBA::SystemException, CF::InvalidFileName, CF::FileException) { - TRACE_ENTER(FileManager_impl); - if (!ossie::isValidFileName(fileName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid file name"); } - LOG_TRACE(FileManager_impl, "Creating file " << fileName) + RH_TRACE(_fileSysLog, "Creating file " << fileName) // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. 
@@ -516,15 +484,14 @@ CF::File_ptr FileManager_impl::create (const char* fileName) MountList::iterator mount = getMountForPath(fileName); CF::File_var file; if (mount == mountedFileSystems.end()) { - LOG_TRACE(FileManager_impl, "Creating local file"); + RH_TRACE(_fileSysLog, "Creating local file"); file = FileSystem_impl::create(fileName); } else { const std::string filePath = mount->getRelativePath(fileName); - LOG_TRACE(FileManager_impl, "Creating " << filePath << " on remote file system mounted at " << mount->path); + RH_TRACE(_fileSysLog, "Creating " << filePath << " on remote file system mounted at " << mount->path); file = mount->fs->create(filePath.c_str()); } - TRACE_EXIT(FileManager_impl); return file._retn(); } @@ -532,13 +499,11 @@ CF::File_ptr FileManager_impl::create (const char* fileName) CF::File_ptr FileManager_impl::open (const char* fileName, CORBA::Boolean read_Only) throw (CORBA::SystemException, CF::InvalidFileName, CF::FileException) { - TRACE_ENTER(FileManager_impl); - if (!ossie::isValidFileName(fileName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid file name"); } - LOG_TRACE(FileManager_impl, "Opening file " << fileName << std::string((read_Only)?" readonly":" readwrite")); + RH_TRACE(_fileSysLog, "Opening file " << fileName << std::string((read_Only)?" readonly":" readwrite")); // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. 
@@ -547,15 +512,14 @@ CF::File_ptr FileManager_impl::open (const char* fileName, CORBA::Boolean read_O MountList::iterator mount = getMountForPath(fileName); CF::File_var file; if (mount == mountedFileSystems.end()) { - LOG_TRACE(FileManager_impl, "Opening local file"); + RH_TRACE(_fileSysLog, "Opening local file"); file = FileSystem_impl::open(fileName, read_Only); } else { const std::string filePath = mount->getRelativePath(fileName); - LOG_TRACE(FileManager_impl, "Opening " << filePath << " on remote file system mounted at " << mount->path); + RH_TRACE(_fileSysLog, "Opening " << filePath << " on remote file system mounted at " << mount->path); file = mount->fs->open(filePath.c_str(), read_Only); } - TRACE_EXIT(FileManager_impl); return file._retn(); } @@ -563,13 +527,11 @@ CF::File_ptr FileManager_impl::open (const char* fileName, CORBA::Boolean read_O void FileManager_impl::mkdir (const char* directoryName) throw (CORBA::SystemException, CF::FileException, CF::InvalidFileName) { - TRACE_ENTER(FileManager_impl); - if (!ossie::isValidFileName(directoryName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid directory name"); } - LOG_TRACE(FileManager_impl, "Making directory " << directoryName) + RH_TRACE(_fileSysLog, "Making directory " << directoryName) // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. 
@@ -577,28 +539,24 @@ void FileManager_impl::mkdir (const char* directoryName) MountList::iterator mount = getMountForPath(directoryName); if (mount == mountedFileSystems.end()) { - LOG_TRACE(FileManager_impl, "Making local directory"); + RH_TRACE(_fileSysLog, "Making local directory"); FileSystem_impl::mkdir(directoryName); } else { const std::string dirPath = mount->getRelativePath(directoryName); - LOG_TRACE(FileManager_impl, "Making directory " << dirPath << " on remote file system mounted at " << mount->path); + RH_TRACE(_fileSysLog, "Making directory " << dirPath << " on remote file system mounted at " << mount->path); mount->fs->mkdir(dirPath.c_str()); } - - TRACE_EXIT(FileManager_impl); } void FileManager_impl::rmdir (const char* directoryName) throw (CORBA::SystemException, CF::FileException, CF::InvalidFileName) { - TRACE_ENTER(FileManager_impl); - if (!ossie::isValidFileName(directoryName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid directory name"); } - LOG_TRACE(FileManager_impl, "Removing directory " << directoryName) + RH_TRACE(_fileSysLog, "Removing directory " << directoryName) // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. 
@@ -606,30 +564,26 @@ void FileManager_impl::rmdir (const char* directoryName) MountList::iterator mount = getMountForPath(directoryName); if (mount == mountedFileSystems.end()) { - LOG_TRACE(FileManager_impl, "Removing local directory"); + RH_TRACE(_fileSysLog, "Removing local directory"); FileSystem_impl::rmdir(directoryName); } else { const std::string dirPath = mount->getRelativePath(directoryName); - LOG_TRACE(FileManager_impl, "Removing directory " << dirPath << " on remote file system mounted at " << mount->path); + RH_TRACE(_fileSysLog, "Removing directory " << dirPath << " on remote file system mounted at " << mount->path); mount->fs->rmdir(dirPath.c_str()); } - - TRACE_EXIT(FileManager_impl); } void FileManager_impl::query (CF::Properties& fileSysProperties) throw (CORBA::SystemException, CF::FileSystem::UnknownFileSystemProperties) { - TRACE_ENTER(FileManager_impl); - CF::Properties unknownProps; // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. 
boost::shared_lock lock(mountsLock); if (fileSysProperties.length () == 0) { - LOG_TRACE(FileManager_impl, "Query all properties (SIZE, AVAILABLE_SPACE)"); + RH_TRACE(_fileSysLog, "Query all properties (SIZE, AVAILABLE_SPACE)"); fileSysProperties.length(2); fileSysProperties[0].id = CORBA::string_dup("SIZE"); CORBA::ULongLong size = getSize(); @@ -660,8 +614,6 @@ void FileManager_impl::query (CF::Properties& fileSysProperties) if (unknownProps.length() > 0) { throw CF::FileSystem::UnknownFileSystemProperties(unknownProps); } - - TRACE_EXIT(FileManager_impl) } CORBA::ULongLong FileManager_impl::getCombinedProperty (const char* propId) @@ -684,8 +636,6 @@ CORBA::ULongLong FileManager_impl::getCombinedProperty (const char* propId) CF::FileManager::MountSequence* FileManager_impl::getMounts () throw (CORBA::SystemException) { - TRACE_ENTER(FileManager_impl); - // Lock the mount table shared to allow others to access the file system, // but prevent changes to the mount table itself. boost::shared_lock lock(mountsLock); @@ -698,7 +648,6 @@ CF::FileManager::MountSequence* FileManager_impl::getMounts () result[index].fs = CF::FileSystem::_duplicate(ii->fs); } - TRACE_EXIT(FileManager_impl); return result._retn(); } diff --git a/redhawk/src/control/framework/FileSystem_impl.cpp b/redhawk/src/control/framework/FileSystem_impl.cpp index 51f95f7cc..d8e6d7b1a 100644 --- a/redhawk/src/control/framework/FileSystem_impl.cpp +++ b/redhawk/src/control/framework/FileSystem_impl.cpp @@ -146,19 +146,14 @@ PREPARE_CF_LOGGING(FileSystem_impl) FileSystem_impl::FileSystem_impl (const char* _root): root(_root) { - TRACE_ENTER(FileSystem_impl); - TRACE_EXIT(FileSystem_impl); } FileSystem_impl::~FileSystem_impl () { - TRACE_ENTER(FileSystem_impl); - TRACE_EXIT(FileSystem_impl); } void FileSystem_impl::remove (const char* fileName) throw (CF::FileException, CF::InvalidFileName, CORBA::SystemException) { - TRACE_ENTER(FileSystem_impl); boost::mutex::scoped_lock lock(interfaceAccess); fs::path 
fname(root / fileName); @@ -178,25 +173,22 @@ void FileSystem_impl::remove (const char* fileName) throw (CF::FileException, CF } std::string searchPattern = BOOST_PATH_STRING(fname.filename()); - LOG_TRACE(FileSystem_impl, "Remove using search pattern " << searchPattern << " in " << dirPath); + RH_TRACE(_fileSysLog, "Remove using search pattern " << searchPattern << " in " << dirPath); const fs::directory_iterator end_itr; // an end iterator (by boost definition) for (fs::directory_iterator itr = fsops.begin(dirPath); itr != end_itr; fsops.increment(itr)) { const std::string& filename = BOOST_PATH_STRING(itr->path().filename()); if (fnmatch(searchPattern.c_str(), filename.c_str(), 0) == 0) { - LOG_TRACE(FileSystem_impl, "Removing file " << itr->path().string()); + RH_TRACE(_fileSysLog, "Removing file " << itr->path().string()); if (!fsops.remove(itr->path())) { throw CF::FileException(CF::CF_EEXIST, "File does not exist"); } } } - - TRACE_EXIT(FileSystem_impl); } void FileSystem_impl::move (const char* sourceFileName, const char* destinationFileName) throw (CORBA::SystemException, CF::InvalidFileName, CF::FileException) { - TRACE_ENTER(FileSystem_impl); boost::mutex::scoped_lock lock(interfaceAccess); // Validate file names @@ -225,17 +217,14 @@ void FileSystem_impl::move (const char* sourceFileName, const char* destinationF } // Perform the actual move; this works for directories as well as files. 
- LOG_TRACE(FileSystem_impl, "Moving local file " << sourcePath << " to " << destPath); + RH_TRACE(_fileSysLog, "Moving local file " << sourcePath << " to " << destPath); if (rename(sourcePath.string().c_str(), destPath.string().c_str())) { throw CF::FileException(CF::CF_EINVAL, "Unexpected failure in move"); } - - TRACE_EXIT(FileSystem_impl); } void FileSystem_impl::copy (const char* sourceFileName, const char* destinationFileName) throw (CORBA::SystemException, CF::InvalidFileName, CF::FileException) { - TRACE_ENTER(FileSystem_impl); boost::mutex::scoped_lock lock(interfaceAccess); // Validate file names @@ -264,25 +253,21 @@ void FileSystem_impl::copy (const char* sourceFileName, const char* destinationF } // Perform the copy. - LOG_TRACE(FileSystem_impl, "Copying local file " << sourcePath << " to " << destPath); + RH_TRACE(_fileSysLog, "Copying local file " << sourcePath << " to " << destPath); fsops.copy_file(sourcePath, destPath, fs::copy_option::overwrite_if_exists); - - TRACE_EXIT(FileSystem_impl); } CORBA::Boolean FileSystem_impl::exists (const char* fileName) throw (CORBA::SystemException, CF::InvalidFileName) { - TRACE_ENTER(FileSystem_impl); boost::mutex::scoped_lock lock(interfaceAccess); - LOG_TRACE(FileSystem_impl, "Checking for existence of SCA file " << fileName); + RH_TRACE(_fileSysLog, "Checking for existence of SCA file " << fileName); if (fileName[0] != '/' || !ossie::isValidFileName(fileName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid file name"); } bool status = _local_exists(fileName); - TRACE_EXIT(FileSystem_impl); return status; } @@ -290,7 +275,7 @@ bool FileSystem_impl::_local_exists (const char* fileName) { fs::path fname(root / fileName); UnreliableFS fsops; - LOG_TRACE(FileSystem_impl, "Checking for existence of local file " << fname.string()); + RH_TRACE(_fileSysLog, "Checking for existence of local file " << fname.string()); try { return fsops.exists(fname); } catch (const CF::FileException& exc) { @@ -302,8 +287,6 @@ 
bool FileSystem_impl::_local_exists (const char* fileName) CF::FileSystem::FileInformationSequence* FileSystem_impl::list (const char* pattern) throw (CORBA::SystemException, CF::FileException, CF::InvalidFileName) { - TRACE_ENTER(FileSystem_impl); - fs::path filePath(root / pattern); fs::path dirPath(filePath.parent_path()); UnreliableFS fsops; @@ -321,7 +304,7 @@ CF::FileSystem::FileInformationSequence* FileSystem_impl::list (const char* patt if ((searchPattern == ".") && (fsops.is_directory(filePath))) { searchPattern = "*"; } - LOG_TRACE(FileSystem_impl, "List using search pattern " << searchPattern << " in " << dirPath); + RH_TRACE(_fileSysLog, "List using search pattern " << searchPattern << " in " << dirPath); CF::FileSystem::FileInformationSequence_var result = new CF::FileSystem::FileInformationSequence; @@ -330,10 +313,10 @@ CF::FileSystem::FileInformationSequence* FileSystem_impl::list (const char* patt const std::string filename = BOOST_PATH_STRING(itr->path().filename()); if (fnmatch(searchPattern.c_str(), filename.c_str(), 0) == 0) { if ((filename.length() > 0) && (filename[0] == '.') && (filename != searchPattern)) { - LOG_TRACE(FileSystem_impl, "Ignoring hidden match " << filename); + RH_TRACE(_fileSysLog, "Ignoring hidden match " << filename); continue; } - LOG_TRACE(FileSystem_impl, "Match in list with " << filename); + RH_TRACE(_fileSysLog, "Match in list with " << filename); CORBA::ULong index = result->length(); result->length(index + 1); @@ -353,7 +336,7 @@ CF::FileSystem::FileInformationSequence* FileSystem_impl::list (const char* patt } catch ( ... 
) { // this file is not good (i.e.: bad link) result->length(index); - LOG_WARN(FileSystem_impl, "File cannot be evaluated, excluding from list: " << filename); + RH_WARN(_fileSysLog, "File cannot be evaluated, excluding from list: " << filename); continue; } } @@ -373,15 +356,12 @@ CF::FileSystem::FileInformationSequence* FileSystem_impl::list (const char* patt } } - TRACE_EXIT(FileSystem_impl); return result._retn(); } CF::File_ptr FileSystem_impl::create (const char* fileName) throw (CORBA::SystemException, CF::InvalidFileName, CF::FileException) { - TRACE_ENTER(FileSystem_impl); - if (!ossie::isValidFileName(fileName)) { throw CF::InvalidFileName (CF::CF_EINVAL, "Invalid file name"); } else if (_local_exists(fileName)) { @@ -397,12 +377,11 @@ CF::File_ptr FileSystem_impl::create (const char* fileName) throw (CORBA::System std::string fileIOR = ossie::corba::objectToString(fileServant); file->setIOR(fileIOR); - TRACE_EXIT(FileSystem_impl); return fileServant._retn(); } void FileSystem_impl::closeAllFiles() { - LOG_TRACE(FileSystem_impl, "Closing all open file handles"); + RH_TRACE(_fileSysLog, "Closing all open file handles"); std::vector iors; { boost::mutex::scoped_lock lock(fileIORCountAccess); @@ -465,7 +444,6 @@ std::vector< std::string > FileSystem_impl::getFileIOR(const std::string& fileNa CF::File_ptr FileSystem_impl::open (const char* fileName, CORBA::Boolean read_Only) throw (CORBA::SystemException, CF::InvalidFileName, CF::FileException) { - TRACE_ENTER(FileSystem_impl); if (!ossie::isValidFileName(fileName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid file name"); } else if (!_local_exists(fileName)) { @@ -484,15 +462,12 @@ CF::File_ptr FileSystem_impl::open (const char* fileName, CORBA::Boolean read_On incrementFileIORCount(strFileName, fileIOR); file->setIOR(fileIOR); - TRACE_EXIT(FileSystem_impl); return fileObj._retn(); } void FileSystem_impl::mkdir (const char* directoryName) throw (CORBA::SystemException, CF::FileException, 
CF::InvalidFileName) { - TRACE_ENTER(FileSystem_impl); - if (!ossie::isValidFileName(directoryName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid file name"); } @@ -502,24 +477,20 @@ void FileSystem_impl::mkdir (const char* directoryName) throw (CORBA::SystemExce UnreliableFS fsops; for (fs::path::iterator walkPath = dirPath.begin(); walkPath != dirPath.end(); ++walkPath) { - LOG_TRACE(FileSystem_impl, "Walking path to create directories, current path " << currentPath.string()); + RH_TRACE(_fileSysLog, "Walking path to create directories, current path " << currentPath.string()); currentPath /= *walkPath; if (!fsops.exists(currentPath)) { - LOG_TRACE(FileSystem_impl, "Creating directory " << currentPath.string()); + RH_TRACE(_fileSysLog, "Creating directory " << currentPath.string()); fsops.create_directory(currentPath); } else if (!fsops.is_directory(currentPath)) { std::string msg = currentPath.string() + " is not a directory"; throw CF::FileException(CF::CF_ENOTDIR, msg.c_str()); } } - - TRACE_EXIT(FileSystem_impl); } void FileSystem_impl::removeDirectory(const fs::path& dirPath, bool doRemove) { - TRACE_ENTER(FileSystem_impl); - UnreliableFS fsops; const fs::directory_iterator end_itr; // past the end for (fs::directory_iterator itr = fsops.begin(dirPath); itr != end_itr; fsops.increment(itr)) { @@ -533,14 +504,10 @@ void FileSystem_impl::removeDirectory(const fs::path& dirPath, bool doRemove) if(doRemove) { fsops.remove(dirPath); } - - TRACE_EXIT(FileSystem_impl); } void FileSystem_impl::rmdir (const char* directoryName) throw (CORBA::SystemException, CF::FileException, CF::InvalidFileName) { - TRACE_ENTER(FileSystem_impl); - if (!ossie::isValidFileName(directoryName)) { throw CF::InvalidFileName(CF::CF_EINVAL, "Invalid directory name"); } @@ -557,17 +524,13 @@ void FileSystem_impl::rmdir (const char* directoryName) throw (CORBA::SystemExce // See the JTAP test for rmdir to understand this removeDirectory(dirPath, false); // Test for only empty 
directories removeDirectory(dirPath, true); // Only empty directories, remove them all - - TRACE_EXIT(FileSystem_impl); } void FileSystem_impl::query (CF::Properties& fileSysProperties) throw (CORBA::SystemException, CF::FileSystem::UnknownFileSystemProperties) { - TRACE_ENTER(FileSystem_impl); - if (fileSysProperties.length () == 0) { - LOG_TRACE(FileSystem_impl, "Query all properties (SIZE, AVAILABLE_SPACE)"); + RH_TRACE(_fileSysLog, "Query all properties (SIZE, AVAILABLE_SPACE)"); fileSysProperties.length(2); fileSysProperties[0].id = CORBA::string_dup("SIZE"); fileSysProperties[0].value <<= getSize(); @@ -584,8 +547,6 @@ void FileSystem_impl::query (CF::Properties& fileSysProperties) throw (CORBA::Sy } } } - - TRACE_EXIT(FileSystem_impl); } std::string FileSystem_impl::getLocalPath (const char* fileName) @@ -599,7 +560,7 @@ CORBA::ULongLong FileSystem_impl::getSize () const fs::space_info space = fs::space(root); return space.capacity; } catch (const fs::filesystem_error& ex) { - LOG_INFO(FileSystem_impl, "Unexpected error querying file system size: " << ex.what()); + RH_INFO(_fileSysLog, "Unexpected error querying file system size: " << ex.what()); return 0; } } @@ -610,7 +571,7 @@ CORBA::ULongLong FileSystem_impl::getAvailableSpace () const fs::space_info space = fs::space(root); return space.available; } catch (const fs::filesystem_error& ex) { - LOG_INFO(FileSystem_impl, "Unexpected error querying file system available space: " << ex.what()); + RH_INFO(_fileSysLog, "Unexpected error querying file system available space: " << ex.what()); return 0; } } diff --git a/redhawk/src/control/framework/File_impl.cpp b/redhawk/src/control/framework/File_impl.cpp index 741630b70..ef4199154 100644 --- a/redhawk/src/control/framework/File_impl.cpp +++ b/redhawk/src/control/framework/File_impl.cpp @@ -30,6 +30,7 @@ PREPARE_CF_LOGGING(File_impl) +rh_logger::LoggerPtr fileLog; File_impl* File_impl::Create (const char* fileName, FileSystem_impl *ptrFs) { @@ -48,9 +49,8 @@ 
File_impl::File_impl (const char* fileName, FileSystem_impl *_ptrFs, bool readOn ptrFs(_ptrFs), fileIOR("") { - TRACE_ENTER(File_impl) - LOG_TRACE(File_impl, "In constructor with " << fileName << " and path " << fullFileName); + RH_TRACE(fileLog, "In constructor with " << fileName << " and path " << fullFileName); int flags = 0; if (create) { @@ -72,16 +72,13 @@ File_impl::File_impl (const char* fileName, FileSystem_impl *_ptrFs, bool readOn throw CF::FileException(CF::CF_EIO, errmsg.c_str()); } - TRACE_EXIT(File_impl) } File_impl::~File_impl () { - TRACE_ENTER(File_impl); - LOG_TRACE(File_impl, "Closing file..... " << fullFileName ); + RH_TRACE(fileLog, "Closing file..... " << fullFileName ); if ( fd > 0 ) ::close(fd); - TRACE_EXIT(File_impl); } void File_impl::setIOR( const std::string &ior) @@ -99,7 +96,6 @@ char* File_impl::fileName () void File_impl::read (CF::OctetSequence_out data, CORBA::ULong length) throw (CORBA::SystemException, CF::File::IOException) { - TRACE_ENTER(File_impl) boost::mutex::scoped_lock lock(interfaceAccess); // GIOP messages cannot exceed a certain (configurable) size, or an unhelpful @@ -111,7 +107,7 @@ void File_impl::read (CF::OctetSequence_out data, CORBA::ULong length) throw CF::File::IOException(CF::CF_EIO, message.str().c_str()); } - LOG_TRACE(File_impl, "Reading " << length << " bytes from " << fName); + RH_TRACE(fileLog, "Reading " << length << " bytes from " << fName); // Pre-allocate a buffer long enough to contain the entire read. CORBA::Octet* buf = CF::OctetSequence::allocbuf(length); @@ -128,17 +124,14 @@ void File_impl::read (CF::OctetSequence_out data, CORBA::ULong length) // Hand the buffer over to a new OctetSequence; if file pointer was already at the end, // it will be a zero-length sequence (which follows the spec). 
- LOG_TRACE(File_impl, "Read " << count << " bytes from " << fName); + RH_TRACE(fileLog, "Read " << count << " bytes from " << fName); data = new CF::OctetSequence(length, count, buf, true); - - TRACE_EXIT(File_impl) } void File_impl::write (const CF::OctetSequence& data) throw (CORBA::SystemException, CF::File::IOException) { - TRACE_ENTER(File_impl) boost::mutex::scoped_lock lock(interfaceAccess); const char* buffer = reinterpret_cast(data.get_buffer()); @@ -151,8 +144,6 @@ void File_impl::write (const CF::OctetSequence& data) buffer += count; todo -= count; } - - TRACE_EXIT(File_impl) } @@ -160,18 +151,15 @@ CORBA::ULong File_impl::sizeOf () throw (CORBA::SystemException, CF::FileException) { boost::mutex::scoped_lock lock(interfaceAccess); - TRACE_ENTER(File_impl); CORBA::ULong size = getSize(); - TRACE_EXIT(File_impl); return size; } void File_impl::close () throw (CORBA::SystemException, CF::FileException) { - TRACE_ENTER(File_impl) boost::mutex::scoped_lock lock(interfaceAccess); try { @@ -210,26 +198,22 @@ void File_impl::close () throw CF::FileException(CF::CF_EIO, "Error closing file"); } - TRACE_EXIT(File_impl) } CORBA::ULong File_impl::filePointer () throw (CORBA::SystemException) { - TRACE_ENTER(File_impl); boost::mutex::scoped_lock lock(interfaceAccess); off_t pos = lseek(fd, 0, SEEK_CUR); - TRACE_EXIT(File_impl); return pos; }; void File_impl::setFilePointer (CORBA::ULong _filePointer) throw (CORBA::SystemException, CF::File::InvalidFilePointer, CF::FileException) { - TRACE_ENTER(File_impl) boost::mutex::scoped_lock lock(interfaceAccess); if (_filePointer > getSize()) { @@ -240,7 +224,6 @@ void File_impl::setFilePointer (CORBA::ULong _filePointer) throw CF::FileException(CF::CF_EIO, "Error setting file pointer for file"); } - TRACE_EXIT(File_impl) } CORBA::ULong File_impl::getSize () diff --git a/redhawk/src/control/framework/Makefile.am b/redhawk/src/control/framework/Makefile.am index 9fa964a63..eaa94cc10 100644 --- 
a/redhawk/src/control/framework/Makefile.am +++ b/redhawk/src/control/framework/Makefile.am @@ -30,14 +30,14 @@ libossiedomain_la_SOURCES = CorbaGC.cpp \ POACreator.cpp \ prop_utils.cpp -libossiedomain_la_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) $(OMNICOS_CFLAGS) $(OMNIORB_CFLAGS) $(LOG4CXX_FLAGS) $(PERSISTENCE_CFLAGS) +libossiedomain_la_CXXFLAGS = -Wall -I$(OMNIORB_INCLUDEDIR)/omniORB4/internal $(BOOST_CPPFLAGS) $(OMNICOS_CFLAGS) $(OMNIORB_CFLAGS) $(LOG4CXX_FLAGS) $(PERSISTENCE_CFLAGS) libossiedomain_la_LIBADD = $(BOOST_LDFLAGS) $(BOOST_FILESYSTEM_LIB) $(BOOST_SERIALIZATION_LIB) $(BOOST_THREAD_LIB) $(BOOST_SYSTEM_LIB) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(LOG4CXX_LIBS) $(PERSISTENCE_LIBS) libossiedomain_la_LDFLAGS = -Wall bin_PROGRAMS = nodeBooter nodeBooter_SOURCES = nodebooter.cpp nodeBooter_CXXFLAGS = $(BOOST_CPPFLAGS) -nodeBooter_LDADD = $(BOOST_LDFLAGS) $(BOOST_FILESYSTEM_LIB) $(BOOST_SYSTEM_LIB) ../parser/libossieparser.la ./libossiedomain.la $(top_builddir)/base/framework/libossiecf.la $(top_builddir)/base/framework/idl/libossieidl.la $(OMNICOS_LIBS) $(OMNIORB_LIBS) +nodeBooter_LDADD = ../parser/libossieparser.la ./libossiedomain.la $(top_builddir)/base/framework/libossiecf.la $(top_builddir)/base/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_FILESYSTEM_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_REGEX_LIB) $(OMNICOS_LIBS) $(OMNIORB_LIBS) nodeBooter_LDFLAGS = -static bin_SCRIPTS = nodeCleanup.py diff --git a/redhawk/src/control/framework/helperFunctions.cpp b/redhawk/src/control/framework/helperFunctions.cpp index b810fae63..7bcaf3726 100644 --- a/redhawk/src/control/framework/helperFunctions.cpp +++ b/redhawk/src/control/framework/helperFunctions.cpp @@ -24,8 +24,19 @@ #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include namespace fs = boost::filesystem; @@ -52,6 +63,114 @@ std::string ossie::generateUUID() return std::string("DCE:") + strbuf; } + +static std::string 
_trim_addr( const std::string &addr, const std::string &exp="(.*):([^:]*)$" ) +{ + std::string ret; + boost::regex expr(exp.c_str()); + boost::smatch what; + if (boost::regex_search(addr, what, expr)) + { + ret = what[1]; + } + return ret; +} + + +static bool _match_remotes( CORBA::Object_ptr aobj, CORBA::Object_ptr bobj) +{ + bool retval=false; + omniIOR *a_ior = aobj->_PR_getobj()->_getIOR(); + omniIOR *b_ior = bobj->_PR_getobj()->_getIOR(); + omniIdentity* a_identity = aobj->_PR_getobj()->_identity(); + omniIdentity* b_identity = bobj->_PR_getobj()->_identity(); + omniRemoteIdentity *a_remote = omniRemoteIdentity::downcast(a_identity); + omniRemoteIdentity *b_remote = omniRemoteIdentity::downcast(b_identity); + if ( a_remote != NULL and b_remote != NULL ) { + omni::Rope *a_rope = a_remote->rope(); + omni::Rope *b_rope = b_remote->rope(); + RH_NL_TRACE("redhawk.corba.internal", "Rope A: " << a_rope << " Rope B: " << b_rope ); + if ( a_rope != NULL and b_rope != NULL and + a_rope == b_rope ) { + RH_NL_TRACE("redhawk.corba.internal", "Identities Same Rope == Same Remote Host."); + retval = true; + } + } + + // last ditch.. try IORInfo address list resolution + if ( !retval and a_ior and b_ior ) { + omniIOR::IORInfo *a_iorinfo = a_ior->getIORInfo(); + omniIOR::IORInfo *b_iorinfo = b_ior->getIORInfo(); + const omni::giopAddressList &a_addrs = a_iorinfo->addresses(); + const omni::giopAddressList &b_addrs = b_iorinfo->addresses(); + + omni::giopAddressList::const_iterator i, i_last, j, j_last, j_first; + i = a_addrs.begin(); + i_last = a_addrs.end(); + j_first= b_addrs.begin(); + j_last = b_addrs.end(); + if ( a_addrs.size() > b_addrs.size() ) { + i = b_addrs.begin(); + i_last = b_addrs.end(); + j_first = a_addrs.begin(); + j_last = a_addrs.end(); + } + + for (; i != i_last; i++) { + // try to match address space.. 
remove port string has + std::string a_addr = _trim_addr((*i)->address()); + j = j_first; + for (; j != j_last; j++) { + std::string b_addr = _trim_addr((*j)->address()); + RH_NL_TRACE("redhawk.corba.internal", "Identities A addr: " << a_addr << " B addr: " << b_addr ); + if ( a_addr == b_addr) { retval=true; return retval; } + } + } + } + + return retval; +} + + +bool ossie::sameHost(CORBA::Object_ptr aobj, CORBA::Object_ptr bobj) +{ + bool retval=false; + // if both identifies are the same + omniIdentity* a_identity = aobj->_PR_getobj()->_identity(); + omniIdentity* b_identity = bobj->_PR_getobj()->_identity(); + if ( a_identity->is_equivalent(b_identity) == true ) { + RH_NL_TRACE("redhawk.corba.internal", "Same identifies, so same host"); + retval = true; + } + else { + // if both identities are in the same process space then + // they are on the same host + if ( a_identity->inThisAddressSpace() and + b_identity->inThisAddressSpace() ) { + RH_NL_TRACE("redhawk.corba.internal", "Identifies Same address space..."); + retval = true; + } + else { + // if both identities processes then are on the same host + omniInProcessIdentity *a_proc = omniInProcessIdentity::downcast(a_identity); + omniInProcessIdentity *b_proc = omniInProcessIdentity::downcast(b_identity); + if ( a_proc != NULL and b_proc != NULL ) { + RH_NL_TRACE("redhawk.corba.internal", "Objects have ProcessIdentities, they are on same LOCAL HOST."); + retval= true; + } + else { + retval=_match_remotes( aobj, bobj ); + if ( retval ) { + RH_NL_TRACE("redhawk.corba.internal", "Remote Identities are on the SAME HOST."); + } + else { + RH_NL_TRACE("redhawk.corba.internal", "Remote Identities are different."); + } + } + } + } + return retval; +} std::string ossie::getCurrentDirName() { std::string retval; diff --git a/redhawk/src/control/framework/nodebooter.cpp b/redhawk/src/control/framework/nodebooter.cpp index c805398b0..c61fb2cf0 100644 --- a/redhawk/src/control/framework/nodebooter.cpp +++ 
b/redhawk/src/control/framework/nodebooter.cpp @@ -57,6 +57,8 @@ namespace fs = boost::filesystem; using namespace std; +#define ENABLE_PERSISTENCE (ENABLE_BDB_PERSISTENCE || ENABLE_GDBM_PERSISTENCE || ENABLE_SQLITE_PERSISTENCE) + CREATE_LOGGER(nodebooter); // Track the DomainManager and DeviceManager pids, if using fork-and-exec. @@ -154,11 +156,27 @@ void loadPRFExecParams (const std::string& prfFile, ExecParams& execParams) const ossie::SimpleProperty* simpleProp; simpleProp = dynamic_cast(*prop); if (!simpleProp) { - LOG_WARN(nodebooter, "Only execparams of type \"simple\" supported"); + LOG_WARN(nodebooter, "Only exec params of type \"simple\" supported"); + continue; + } else if (!simpleProp->getValue()) { + continue; + } + execParams[simpleProp->getID()] = simpleProp->getValue(); + } + + const std::vector& propertyProps = prf.getConstructProperties(); + + for ( prop = propertyProps.begin(); prop != propertyProps.end(); ++prop) { + const ossie::SimpleProperty* simpleProp; + simpleProp = dynamic_cast(*prop); + if (!simpleProp) { + // property properties that are not simples cannot be commandline, so no warning is needed continue; } else if (!simpleProp->getValue()) { continue; } + if (not simpleProp->isCommandLine()) + continue; execParams[simpleProp->getID()] = simpleProp->getValue(); } } @@ -194,7 +212,7 @@ static pid_t launchSPD ( LOG_DEBUG(nodebooter, "Loaded SPD file " << spdFile); LOG_DEBUG(nodebooter, "SPD Id: " << spd.getSoftPkgID()); - LOG_DEBUG(nodebooter, "SPD Name: " << spd.getSoftPkgName()); + LOG_DEBUG(nodebooter, "SPD Name: " << spd.getName()); // Find an implementation that we can run. const ossie::SPD::Implementation* impl = matchImplementation(spd.getImplementations(), deviceProps); @@ -224,6 +242,8 @@ static pid_t launchSPD ( LOG_TRACE(nodebooter, "Loading implementation-specific PRF: " << prfFile); loadPRFExecParams(prfFile, execParams); } + + execParams["SPD"] = spdFile; // Update the execparams with the user-supplied overrides. 
for (ExecParams::const_iterator param = overrideExecParams.begin(); param != overrideExecParams.end(); ++param) { @@ -240,7 +260,7 @@ static pid_t launchSPD ( // Create a C string array of the arguments to the executable from the code file name (0th argument) // and execparams. Note the importance of the final NULL, which terminates the array. std::vector argv; - argv.push_back(impl->getCodeFile()); + argv.push_back(impl->getCodeFile().c_str()); for (ExecParams::const_iterator param = execParams.begin(); param != execParams.end(); ++param) { LOG_TRACE(nodebooter, "EXEC_PARAM: " << param->first << "=\"" << param->second << "\""); argv.push_back(param->first.c_str()); @@ -316,6 +336,7 @@ static void setOwners(const std::string& user, const std::string& group) err << "]: " << strerror(errno); throw std::runtime_error(err.str()); } + setgid(gids[0]); std::cout << "Running as group: [ "; for (unsigned int gr_idx=0; gr_idx < groups.size(); gr_idx++) { @@ -350,6 +371,7 @@ static void setOwners(const std::string& user, const std::string& group) } else { setgroups(ngroups, groups); } + setgid(groups[0]); std::cout << "Running as group: [ "; for (unsigned int gr_idx=0; gr_idx < ngroups; gr_idx++) { std::cout << getgrgid(groups[gr_idx])->gr_name << "(gid=" << groups[gr_idx] << ") "; @@ -449,7 +471,6 @@ void usage() std::cerr << " --useloglib Use libossielogcfg.so to generate LOGGING_CONFIG_URI " << std::endl; std::cerr << " --bindapps Bind application and component registrations to the Domain and not the NamingService (DomainManager only)" << std::endl; std::cerr << " --dburl Store domain state in the following URL" << std::endl; - std::cerr << " --nopersist Disable DomainManager IOR persistence" << std::endl; std::cerr << " --force-rebind Overwrite any existing name binding for the DomainManager" << std::endl; std::cerr << " --daemon Run as UNIX daemon" << std::endl; std::cerr << " --pidfile Save PID in the specified file" << std::endl; @@ -508,7 +529,6 @@ void 
startDomainManager( string& domainName, const fs::path& sdrRootPath, const int& debugLevel, - const bool& noPersist, const bool & bind_apps, const string& logfile_uri, const bool& use_loglib, @@ -559,17 +579,19 @@ void startDomainManager( execParams["DMD_FILE"] = dmdFile; execParams["DOMAIN_NAME"] = domainName; execParams["SDRROOT"] = sdrRootPath.string(); - std::stringstream debugLevel_str; - debugLevel_str << debugLevel; - execParams["DEBUG_LEVEL"] = debugLevel_str.str(); - if (noPersist) { - execParams["PERSISTENCE"] = "false"; + if (debugLevel != -1) { + std::stringstream debugLevel_str; + debugLevel_str << debugLevel; + execParams["DEBUG_LEVEL"] = debugLevel_str.str(); } if (!logfile_uri.empty()) { execParams["LOGGING_CONFIG_URI"] = logfile_uri; } if (!db_uri.empty()) { + execParams["PERSISTENCE"] = "true"; execParams["DB_URL"] = db_uri; + } else { + execParams["PERSISTENCE"] = "false"; } if (!endPoint.empty()) { execParams["-ORBendPoint"] = endPoint; @@ -687,9 +709,11 @@ void startDeviceManager( execParams["DOMAIN_NAME"] = domainName; execParams["SDRROOT"] = sdrRootPath.string(); execParams["SDRCACHE"] = devMgrCache; - std::stringstream debugLevel_str; - debugLevel_str << debugLevel; - execParams["DEBUG_LEVEL"] = debugLevel_str.str(); + if (debugLevel != -1) { + std::stringstream debugLevel_str; + debugLevel_str << debugLevel; + execParams["DEBUG_LEVEL"] = debugLevel_str.str(); + } if (!logfile_uri.empty()) { execParams["LOGGING_CONFIG_URI"] = logfile_uri; } @@ -788,7 +812,7 @@ int main(int argc, char* argv[]) string orb_init_ref; string domainName; string endPoint; - int debugLevel = 3; + int debugLevel = -1; bool bind_apps=false; bool startDeviceManagerRequested = false; @@ -799,9 +823,6 @@ int main(int argc, char* argv[]) std::string user; std::string group; - // If "--nopersist" is asserted, turn off persistent IORs. 
- bool noPersist = false; - // enable/disable use of libossielogcfg.so to resolve LOGGING_CONFIG_URI values bool use_loglib = false; @@ -834,11 +855,8 @@ int main(int argc, char* argv[]) dmdFile = tmpdmdfile; } } - startDomainManagerRequested = true; - } - - if( strcmp( argv[i], "-d" ) == 0 ) { + } else if( strcmp( argv[i], "-d" ) == 0 ) { if( i + 1 < argc && strcmp( argv[i + 1], "--" ) != 0) { dcdFile = argv[i+1]; if( dcdFile.find(".dcd.xml") == string::npos ) { @@ -852,9 +870,7 @@ int main(int argc, char* argv[]) usage(); exit(EXIT_FAILURE); } - } - - if( strcmp( argv[i], "-sdrroot" ) == 0 ) { + } else if( strcmp( argv[i], "-sdrroot" ) == 0 ) { if( i + 1 < argc && strcmp( argv[i + 1], "--" ) != 0) { sdrRoot = argv[i+1]; } else { @@ -862,9 +878,7 @@ int main(int argc, char* argv[]) usage(); exit(EXIT_FAILURE); } - } - - if( strcmp( argv[i], "-sdrcache" ) == 0 ) { + } else if( strcmp( argv[i], "-sdrcache" ) == 0 ) { if( i + 1 < argc && strcmp( argv[i + 1], "--" ) != 0) { sdrCache = argv[i+1]; } else { @@ -872,9 +886,7 @@ int main(int argc, char* argv[]) usage(); exit(EXIT_FAILURE); } - } - - if( strcmp( argv[i], "-domainname" ) == 0 ) { + } else if( strcmp( argv[i], "-domainname" ) == 0 ) { if( i + 1 < argc && strcmp( argv[i + 1], "--" ) != 0) { domainName = argv[i+1]; std::cerr << "[nodeBooter] warning: -domainname is deprecated. 
Please use --domainname\n"; @@ -884,9 +896,7 @@ int main(int argc, char* argv[]) usage(); exit(EXIT_FAILURE); } - } - - if( strcmp( argv[i], "--domainname" ) == 0 ) { + } else if( strcmp( argv[i], "--domainname" ) == 0 ) { if( i + 1 < argc && strcmp( argv[i + 1], "--" ) != 0) { domainName = argv[i+1]; } @@ -895,15 +905,9 @@ int main(int argc, char* argv[]) usage(); exit(EXIT_FAILURE); } - } - - - if (( strcmp( argv[i], "--bindapps" ) == 0 )) { + } else if (( strcmp( argv[i], "--bindapps" ) == 0 )) { bind_apps = true; - } - - - if (( strcmp( argv[i], "-log4cxx" ) == 0 ) || ( strcmp( argv[i], "-logcfgfile" ) == 0 )) { + } else if (( strcmp( argv[i], "-log4cxx" ) == 0 ) || ( strcmp( argv[i], "-logcfgfile" ) == 0 )) { if( i + 1 empty_string_vector; + for (std::vector::iterator idx=idxs.begin();idx!=idxs.end();++idx) { + const ossie::SimpleSequenceProperty* _type = dynamic_cast(&propValue[*idx]); + structval_[*idx].value = ossie::strings_to_any(empty_string_vector, ossie::getTypeKind(_type->getType()), NULL); + } + } dataType.value <<= structval_; return dataType; } @@ -175,18 +205,19 @@ static CF::DataType overrideSimpleSequenceValue(const SimpleSequenceProperty* pr CF::DataType dataType; dataType.id = CORBA::string_dup(prop->getID()); CORBA::TCKind kind = ossie::getTypeKind(static_cast(prop->getType())); - dataType.value = ossie::strings_to_any(values, kind); + CORBA::TypeCode_ptr type = ossie::getTypeCode(static_cast(prop->getType())); + dataType.value = ossie::strings_to_any(values, kind, type); return dataType; } CF::DataType ossie::overridePropertyValue(const SimpleProperty* prop, const ComponentProperty* compprop) { const SimplePropertyRef* simpleref = dynamic_cast(compprop); if (!simpleref) { - LOG_WARN(prop_utils, "ignoring attempt to override simple property " << prop->getID() << " because override definition is not a simpleref"); + RH_WARN(proputilsLog, "ignoring attempt to override simple property " << prop->getID() << " because override definition is not a 
simpleref"); return convertPropertyToDataType(prop); } - LOG_TRACE(prop_utils, "overriding simple property id " << prop->getID()); + RH_TRACE(proputilsLog, "overriding simple property id " << prop->getID()); return overrideSimpleValue(prop, simpleref->getValue()); } @@ -194,34 +225,35 @@ CF::DataType ossie::overridePropertyValue(const SimpleSequenceProperty* prop, co CF::DataType dataType = convertPropertyToDataType(prop); if (dynamic_cast(compprop) != NULL) { const SimpleSequencePropertyRef* simpleseqref = dynamic_cast(compprop); - LOG_TRACE(prop_utils, "overriding simpleseq property id " << dataType.id); + RH_TRACE(proputilsLog, "overriding simpleseq property id " << dataType.id); CORBA::TCKind kind = ossie::getTypeKind(static_cast(prop->getType())); - dataType.value = ossie::strings_to_any(simpleseqref->getValues(), kind); + CORBA::TypeCode_ptr type = ossie::getTypeCode(static_cast(prop->getType())); + dataType.value = ossie::strings_to_any(simpleseqref->getValues(), kind, type); } else { - LOG_WARN(prop_utils, "ignoring attempt to override simple sequence property " << dataType.id << " because override definition is not a simpleseqref"); + RH_WARN(proputilsLog, "ignoring attempt to override simple sequence property " << dataType.id << " because override definition is not a simpleseqref"); } return dataType; } static CF::Properties overrideStructValues(const StructProperty* prop, const ossie::ComponentPropertyMap & values) { - const std::vector& props = prop->getValue(); - LOG_TRACE(prop_utils, "structure has " << props.size() << " elements"); + const PropertyList& props = prop->getValue(); + RH_TRACE(proputilsLog, "structure has " << props.size() << " elements"); CF::Properties structval; structval.length(props.size()); for (CORBA::ULong ii = 0; ii < structval.length(); ++ii) { - const Property* property = props[ii]; + const Property* property = &props[ii]; const std::string id = property->getID(); ossie::ComponentPropertyMap::const_iterator itemoverride = 
values.find(id); if (itemoverride == values.end()) { - LOG_TRACE(prop_utils, "using default value for struct element " << id); + RH_TRACE(proputilsLog, "using default value for struct element " << id); structval[ii] = convertPropertyToDataType(property); } else { if (dynamic_cast(property) != NULL) { - LOG_TRACE(prop_utils, "setting structure element " << id << " to " << (itemoverride->second)[0]); + RH_TRACE(proputilsLog, "setting structure element " << id << " to " << (itemoverride->second)[0]); structval[ii] = overrideSimpleValue(dynamic_cast(property), static_cast(itemoverride->second)->getValue()); } else if (dynamic_cast(property) != NULL) { - LOG_TRACE(prop_utils, "setting structure element " << id); + RH_TRACE(proputilsLog, "setting structure element " << id); structval[ii] = overrideSimpleSequenceValue(dynamic_cast(property), static_cast(itemoverride->second)->getValues()); } } @@ -232,27 +264,27 @@ static CF::Properties overrideStructValues(const StructProperty* prop, const oss static CF::Properties overrideStructValues(const StructProperty* prop, const ossie::ComponentPropertyMap & values, const CF::Properties& configureProperties) { - const std::vector& props = prop->getValue(); - LOG_TRACE(prop_utils, "structure has " << props.size() << " elements"); + const PropertyList& props = prop->getValue(); + RH_TRACE(proputilsLog, "structure has " << props.size() << " elements"); CF::Properties structval; structval.length(props.size()); for (CORBA::ULong ii = 0; ii < structval.length(); ++ii) { - const Property* property = props[ii]; + const Property* property = &props[ii]; const std::string id = property->getID(); ossie::ComponentPropertyMap::const_iterator itemoverride = values.find(id); if (dynamic_cast(itemoverride->second) != NULL) { if (itemoverride == values.end()) { - LOG_TRACE(prop_utils, "using default value for struct element " << id); + RH_TRACE(proputilsLog, "using default value for struct element " << id); structval[ii] = 
convertPropertyToDataType(property); } else { - LOG_TRACE(prop_utils, "setting structure element " << id << " to " << static_cast(itemoverride->second)->getValue()); + RH_TRACE(proputilsLog, "setting structure element " << id << " to " << static_cast(itemoverride->second)->getValue()); std::string value = static_cast(itemoverride->second)->getValue(); if (strncmp(value.c_str(), "__MATH__", 8) == 0) { CF::DataType dataType; const SimpleProperty* simple = dynamic_cast(property); dataType.id = CORBA::string_dup(simple->getID()); CORBA::TCKind kind = ossie::getTypeKind(simple->getType()); - LOG_TRACE(prop_utils, "Invoking custom OSSIE dynamic allocation property support") + RH_TRACE(proputilsLog, "Invoking custom OSSIE dynamic allocation property support") // Turn propvalue into a string for easy parsing std::string mathStatement = value.substr(8); if ((*mathStatement.begin() == '(') && (*mathStatement.rbegin() == ')')) { @@ -261,11 +293,11 @@ static CF::Properties overrideStructValues(const StructProperty* prop, const oss std::vector args; while ((mathStatement.length() > 0) && (mathStatement.find(',') != std::string::npos)) { args.push_back(mathStatement.substr(0, mathStatement.find(','))); - LOG_TRACE(prop_utils, "ARG " << args.back()) + RH_TRACE(proputilsLog, "ARG " << args.back()) mathStatement.erase(0, mathStatement.find(',') + 1); } args.push_back(mathStatement); - LOG_TRACE(prop_utils, "ARG " << args.back()) + RH_TRACE(proputilsLog, "ARG " << args.back()) if (args.size() != 3) { std::ostringstream eout; @@ -273,17 +305,17 @@ static CF::Properties overrideStructValues(const StructProperty* prop, const oss throw ossie::PropertyMatchingError(eout.str()); } - LOG_TRACE(prop_utils, "__MATH__ " << args[0] << " " << args[1] << " " << args[2]) + RH_TRACE(proputilsLog, "__MATH__ " << args[0] << " " << args[1] << " " << args[2]) double operand; operand = strtod(args[0].c_str(), NULL); // See if there is a property in the component - LOG_TRACE(prop_utils, "Attempting to 
find matching property for " << args[1]) + RH_TRACE(proputilsLog, "Attempting to find matching property for " << args[1]) const CF::DataType* matchingCompProp = 0; for (unsigned int j = 0; j < configureProperties.length(); j++) { if (strcmp(configureProperties[j].id, args[1].c_str()) == 0) { - LOG_TRACE(prop_utils, "Matched property for " << args[1]) + RH_TRACE(proputilsLog, "Matched property for " << args[1]) matchingCompProp = &configureProperties[j]; } // See if the property we're looking for is a member of a struct @@ -294,7 +326,7 @@ static CF::Properties overrideStructValues(const StructProperty* prop, const oss if (tmp_ref != NULL) { for (unsigned prop_idx = 0; prop_idxlength(); prop_idx++) { if (strcmp((*tmp_ref)[prop_idx].id, args[1].c_str()) == 0) { - LOG_TRACE(prop_utils, "Matched property for " << args[1]) + RH_TRACE(proputilsLog, "Matched property for " << args[1]) matchingCompProp = &(*tmp_ref)[prop_idx]; } } @@ -322,7 +354,7 @@ static CF::Properties overrideStructValues(const StructProperty* prop, const oss } } else if (dynamic_cast(itemoverride->second) != NULL) { if (itemoverride == values.end()) { - LOG_TRACE(prop_utils, "using default value for struct element " << id); + RH_TRACE(proputilsLog, "using default value for struct element " << id); structval[ii] = convertPropertyToDataType(property); } else { structval[ii] = overrideSimpleSequenceValue(static_cast(property), static_cast(itemoverride->second)->getValues()); @@ -338,10 +370,10 @@ CF::DataType ossie::overridePropertyValue(const StructProperty* prop, const Comp const StructPropertyRef* structref = dynamic_cast(compprop); if (structref) { - LOG_TRACE(prop_utils, "overriding struct property id " << dataType.id); + RH_TRACE(proputilsLog, "overriding struct property id " << dataType.id); dataType.value <<= overrideStructValues(prop, structref->getValue()); } else { - LOG_WARN(prop_utils, "ignoring attempt to override struct property " << dataType.id << " because override definition is not a 
structref"); + RH_WARN(proputilsLog, "ignoring attempt to override struct property " << dataType.id << " because override definition is not a structref"); } return dataType; } @@ -351,10 +383,10 @@ CF::DataType ossie::overridePropertyValue(const StructProperty* prop, const Comp const StructPropertyRef* structref = dynamic_cast(compprop); if (structref) { - LOG_TRACE(prop_utils, "overriding struct property id " << dataType.id << " (supports __MATH__)"); + RH_TRACE(proputilsLog, "overriding struct property id " << dataType.id << " (supports __MATH__)"); dataType.value <<= overrideStructValues(prop, structref->getValue(), configureProperties); } else { - LOG_WARN(prop_utils, "ignoring attempt to override struct property " << dataType.id << " because override definition is not a structref"); + RH_WARN(proputilsLog, "ignoring attempt to override struct property " << dataType.id << " because override definition is not a structref"); } return dataType; } @@ -364,10 +396,10 @@ CF::DataType ossie::overridePropertyValue(const StructSequenceProperty* prop, co const StructSequencePropertyRef* structsequenceref = dynamic_cast(compprop); if (structsequenceref) { - LOG_TRACE(prop_utils, "overriding structsequence property id " << dataType.id); + RH_TRACE(proputilsLog, "overriding structsequence property id " << dataType.id); const StructSequencePropertyRef::ValuesList& overrideValues = structsequenceref->getValues(); - LOG_TRACE(prop_utils, "structsequence has " << overrideValues.size() << " values"); + RH_TRACE(proputilsLog, "structsequence has " << overrideValues.size() << " values"); CORBA::AnySeq values; values.length(overrideValues.size()); @@ -381,7 +413,7 @@ CF::DataType ossie::overridePropertyValue(const StructSequenceProperty* prop, co } dataType.value <<= values; } else { - LOG_WARN(prop_utils, "ignoring attempt to override structsequence property " << dataType.id << " because override definition is not a structsequenceref"); + RH_WARN(proputilsLog, "ignoring attempt to 
override structsequence property " << dataType.id << " because override definition is not a structsequenceref"); } return dataType; } @@ -400,19 +432,19 @@ bool ossie::checkProcessor(const std::vector& processorDeps, const std::string processor = processorDeps[j]; if (processor != "") { matchProcessor = false; - LOG_TRACE(prop_utils, "Attempting to match processor " << processor << " against " << props.size() << " properties") + RH_TRACE(proputilsLog, "Attempting to match processor " << processor << " against " << props.size() << " properties") for (unsigned int i = 0; i < props.size(); i++) { if (dynamic_cast(props[i]) != NULL) { const SimpleProperty* matchingProp = dynamic_cast(props[i]); std::string action = matchingProp->getAction(); - LOG_TRACE(prop_utils, "Checking property " << matchingProp->getID() << " " << matchingProp->getName()) + RH_TRACE(proputilsLog, "Checking property " << matchingProp->getID() << " " << matchingProp->getName()) if (strcmp(matchingProp->getName(), "processor_name") == 0) { const char *tmp_value = matchingProp->getValue(); std::string dev_processor_name(""); if (tmp_value != NULL) { dev_processor_name = tmp_value; } - LOG_TRACE(prop_utils, "Performing comparison operation '" << dev_processor_name << "' " << action << " '" << processor << "'") + RH_TRACE(proputilsLog, "Performing comparison operation '" << dev_processor_name << "' " << action << " '" << processor << "'") matchProcessor = ossie::perform_action(dev_processor_name, processor, action); if (matchProcessor) break; } @@ -445,12 +477,12 @@ bool ossie::checkOs(const std::vector& osDeps, cons if (os != "") { matchOs = false; - LOG_TRACE(prop_utils, "Attempting to match os " << os << " PropertySet Size:" << props.size()); + RH_TRACE(proputilsLog, "Attempting to match os " << os << " PropertySet Size:" << props.size()); for (unsigned int i = 0; i < props.size(); i++) { if (dynamic_cast(props[i]) != NULL) { const SimpleProperty* matchingProp = dynamic_cast(props[i]); std::string 
action = matchingProp->getAction(); - LOG_TRACE(prop_utils, "Examine Property: name: " << matchingProp->getName() << " value:" << + RH_TRACE(proputilsLog, "Examine Property: name: " << matchingProp->getName() << " value:" << matchingProp->getValue() ); if (strcmp(matchingProp->getName(), "os_name") == 0) { const char *tmp_dev_os_name = matchingProp->getValue(); @@ -458,9 +490,9 @@ bool ossie::checkOs(const std::vector& osDeps, cons if (tmp_dev_os_name != NULL) { dev_os_name = tmp_dev_os_name; } - LOG_TRACE(prop_utils, "Performing comparison operation " << dev_os_name << " " << action << " " << os); + RH_TRACE(proputilsLog, "Performing comparison operation " << dev_os_name << " " << action << " " << os); matchOs = ossie::perform_action(dev_os_name, os, action); - LOG_TRACE(prop_utils, "Performing comparison operation " << dev_os_name << " " << action << " " << os << " RESULT:" << matchOs); + RH_TRACE(proputilsLog, "Performing comparison operation " << dev_os_name << " " << action << " " << os << " RESULT:" << matchOs); if (matchOs) { break; } @@ -469,10 +501,10 @@ bool ossie::checkOs(const std::vector& osDeps, cons } } - LOG_TRACE(prop_utils, "Attempting to match os version"); + RH_TRACE(proputilsLog, "Attempting to match os version"); if (osVersion != "") { matchOsVersion = false; - LOG_TRACE(prop_utils, "Attempting to match os version" << osVersion) + RH_TRACE(proputilsLog, "Attempting to match os version" << osVersion) for (unsigned int i = 0; i < props.size(); i++) { if (dynamic_cast(props[i]) != NULL) { const SimpleProperty* matchingProp = dynamic_cast(props[i]); @@ -483,9 +515,9 @@ bool ossie::checkOs(const std::vector& osDeps, cons if (tmp_dev_os_version != NULL) { dev_os_version = tmp_dev_os_version; } - LOG_TRACE(prop_utils, "Performing comparison operation " << dev_os_version << " " << action << " " << osVersion); + RH_TRACE(proputilsLog, "Performing comparison operation " << dev_os_version << " " << action << " " << osVersion); matchOsVersion = 
ossie::perform_action(dev_os_version, osVersion, action); - LOG_TRACE(prop_utils, "Performing comparison operation " << dev_os_version << " " << action << " " << osVersion << " RESULT:" << matchOsVersion); + RH_TRACE(proputilsLog, "Performing comparison operation " << dev_os_version << " " << action << " " << osVersion << " RESULT:" << matchOsVersion); if (matchOsVersion) break; } } @@ -516,7 +548,7 @@ CF::AllocationManager::AllocationResponseType ossie::assembleResponse(std::strin CF::DataType ossie::convertPropertyToDataType(const SimplePropertyRef* prop) { CF::DataType dataType; - dataType.id = CORBA::string_dup(prop->getID()); + dataType.id = prop->getID().c_str(); if (prop->getValue() != NULL) { std::string value(prop->getValue()); @@ -527,21 +559,21 @@ CF::DataType ossie::convertPropertyToDataType(const SimplePropertyRef* prop) { CF::DataType ossie::convertPropertyToDataType(const SimpleSequencePropertyRef* prop) { CF::DataType dataType; - dataType.id = CORBA::string_dup(prop->getID()); - dataType.value = ossie::strings_to_any(prop->getValues(), CORBA::tk_string); + dataType.id = prop->getID().c_str(); + dataType.value = ossie::strings_to_any(prop->getValues(), CORBA::tk_string, NULL); return dataType; } CF::DataType ossie::convertPropertyToDataType(const StructPropertyRef* prop) { CF::DataType dataType; - dataType.id = CORBA::string_dup(prop->getID()); + dataType.id = prop->getID().c_str(); CF::Properties structval_; StructPropertyRef::ValuesMap::const_iterator i; for (i = prop->getValue().begin(); i != prop->getValue().end(); ++i) { CF::DataType dt; dt = convertPropertyRefToDataType((*i).second); - LOG_TRACE(prop_utils, "setting struct item " << (*i).first); + RH_TRACE(proputilsLog, "setting struct item " << (*i).first); ossie::corba::push_back(structval_, dt); } dataType.value <<= structval_; @@ -550,7 +582,7 @@ CF::DataType ossie::convertPropertyToDataType(const StructPropertyRef* prop) { CF::DataType ossie::convertPropertyToDataType(const 
StructSequencePropertyRef* prop) { CF::DataType dataType; - dataType.id = CORBA::string_dup(prop->getID()); + dataType.id = prop->getID().c_str(); const StructSequencePropertyRef::ValuesList propValues = prop->getValues(); CORBA::AnySeq values; @@ -563,7 +595,7 @@ CF::DataType ossie::convertPropertyToDataType(const StructSequencePropertyRef* p for (i = propValues[ii].begin(); i != propValues[ii].end(); ++i) { CF::DataType dt; dt = convertPropertyRefToDataType((*i).second); - LOG_TRACE(prop_utils, "setting struct item " << (*i).first); + RH_TRACE(proputilsLog, "setting struct item " << (*i).first); ossie::corba::push_back(structval_, dt); } tmp_struct.value <<= structval_; @@ -604,7 +636,8 @@ CF::DataType ossie::convertDataTypeToPropertyType(const CF::DataType& value, con CORBA::Any ossie::convertAnyToPropertyType(const CORBA::Any& value, const SimpleSequenceProperty* property) { CORBA::TCKind kind = ossie::getTypeKind(static_cast(property->getType())); - return ossie::strings_to_any(ossie::any_to_strings(value), kind); + CORBA::TypeCode_ptr type = ossie::getTypeCode(static_cast(property->getType())); + return ossie::strings_to_any(ossie::any_to_strings(value), kind, type); } CORBA::Any ossie::convertAnyToPropertyType(const CORBA::Any& value, const StructProperty* property) @@ -613,7 +646,6 @@ CORBA::Any ossie::convertAnyToPropertyType(const CORBA::Any& value, const Struct const CF::Properties *depProps; if (value >>= depProps) { CF::Properties tmp_props; - std::vector structval = property->getValue(); for (unsigned int index = 0; index < depProps->length(); ++index) { const CF::DataType& item = (*depProps)[index]; const std::string propid(item.id); @@ -629,7 +661,17 @@ CORBA::Any ossie::convertAnyToPropertyType(const CORBA::Any& value, const Struct CORBA::Any ossie::convertAnyToPropertyType(const CORBA::Any& value, const StructSequenceProperty* property) { - return CORBA::Any(); + CORBA::Any result; + const CORBA::AnySeq* seq; + if (value >>= seq) { + CORBA::AnySeq 
tmp_seq; + const StructProperty& structdef = property->getStruct(); + for (CORBA::ULong index = 0; index < seq->length(); ++index) { + ossie::corba::push_back(tmp_seq, convertAnyToPropertyType((*seq)[index], &structdef)); + } + result <<= tmp_seq; + } + return result; } @@ -644,6 +686,17 @@ void ossie::convertComponentProperties( const ossie::ComponentPropertyList &cp_p cf_props[cf_props.length()-1] = dt; } } + +void ossie::convertComponentProperties( const ossie::ComponentPropertyList &cp_props, + redhawk::PropertyMap &cf_props ) +{ + ossie::ComponentPropertyList::const_iterator piter = cp_props.begin(); + for ( ; piter != cp_props.end(); piter++ ) { + CF::DataType dt = ossie::convertPropertyRefToDataType( *piter ); + cf_props.push_back(dt); + } +} + std::string ossie::retrieveParserErrorLineNumber(std::string message) { size_t begin_n_line = message.find_first_of(':'); @@ -658,4 +711,84 @@ std::string ossie::retrieveParserErrorLineNumber(std::string message) { ret_message += "."; } return ret_message; -}; +} + +bool ossie::structContainsMixedNilValues(const CF::Properties& properties) +{ + const redhawk::PropertyMap& fields = redhawk::PropertyMap::cast(properties); + bool nils = false; + bool values = false; + for (redhawk::PropertyMap::const_iterator prop = fields.begin(); prop != fields.end(); ++prop) { + if (prop->getValue().isNil()) { + nils = true; + } else { + values = true; + } + if (nils && values) { + return true; + } + } + return false; +} + +CF::Properties ossie::getPartialStructs(const CF::Properties& properties) +{ + CF::Properties partials; + const redhawk::PropertyMap& configProps = redhawk::PropertyMap::cast(properties); + for (redhawk::PropertyMap::const_iterator prop = configProps.begin(); prop != configProps.end(); ++prop) { + redhawk::Value::Type type = prop->getValue().getType(); + if (type == redhawk::Value::TYPE_PROPERTIES) { + // Property is a struct + if (ossie::structContainsMixedNilValues(prop->getValue().asProperties())) { + 
ossie::corba::push_back(partials, *prop); + } + } else if (type == redhawk::Value::TYPE_VALUE_SEQUENCE) { + // Property is a struct sequence + const redhawk::ValueSequence& sequence = prop->getValue().asSequence(); + for (redhawk::ValueSequence::const_iterator item = sequence.begin(); item != sequence.end(); ++item) { + if (item->getType() == redhawk::Value::TYPE_PROPERTIES) { + if (ossie::structContainsMixedNilValues(item->asProperties())) { + ossie::corba::push_back(partials, *prop); + continue; + } + } + } + } + } + return partials; +} + +CF::Properties ossie::getAffinityOptions(const ComponentInstantiation::AffinityProperties& affinityProps) +{ + // Store parsed affinity properties as a static singleton, protecting the + // load with a mutex; if the definitions are not set + static boost::scoped_ptr definitions; + static boost::mutex mutex; + if (!definitions) { + boost::mutex::scoped_lock lock(mutex); + if (!definitions) { + // Set the singleton first, under the assumption that if load fails + // once it will always fail, so that it doesn't re-try every time + definitions.reset(new ossie::Properties()); + try { + std::stringstream xml(redhawk::affinity::get_property_definitions()); + RH_TRACE(proputilsLog, "Loading affinity definitions: " << xml.str()); + definitions->load(xml); + } catch (...) 
{ + RH_WARN(proputilsLog, "Error loading affinity defintions from library"); + } + } + } + + CF::Properties options; + BOOST_FOREACH(const ossie::ComponentProperty& propref, affinityProps) { + const Property* prop = definitions->getProperty(propref.getID()); + if (prop) { + CF::DataType dt = overridePropertyValue(prop, &propref); + ossie::corba::push_back(options, dt); + } else { + RH_WARN(proputilsLog, "Ignoring unknown affinity property " << propref.getID()); + } + } + return options; +} diff --git a/redhawk/src/control/include/ossie/DeviceManagerConfiguration.h b/redhawk/src/control/include/ossie/DeviceManagerConfiguration.h index 17b31040d..ff836f8ef 100644 --- a/redhawk/src/control/include/ossie/DeviceManagerConfiguration.h +++ b/redhawk/src/control/include/ossie/DeviceManagerConfiguration.h @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -30,8 +31,28 @@ #include "ossie/exceptions.h" #include "ossie/ossieparser.h" #include "ossie/componentProfile.h" +#include namespace ossie { + + class DevicePlacement : public ComponentPlacement + { + public: + ossie::optional_value deployOnDeviceID; + ossie::optional_value compositePartOfDeviceID; + ossie::optional_value DPDFile; + + const char* getDeployOnDeviceID() const; + + const char* getCompositePartOfDeviceID() const; + + const std::string getDPDFile() const; + + bool isDeployOn() const; + + bool isCompositePartOf() const; + }; + class DeviceManagerConfiguration { ENABLE_LOGGING @@ -47,7 +68,7 @@ namespace ossie { std::string deviceManagerSoftPkg; std::string domainManagerName; std::vector componentFiles; - std::vector componentPlacements; + std::vector componentPlacements; std::vector connections; }; @@ -57,7 +78,7 @@ namespace ossie { * information from a DCD file. You must call load() before calling any * other functions on this class. 
*/ - DeviceManagerConfiguration() : _dcd(0) {} + DeviceManagerConfiguration() : _dcd() {} /* * Create a DeviceManagerConfiguration, parsing the DCD information provided by input. @@ -76,6 +97,8 @@ namespace ossie { public: void load(std::istream& input) throw (ossie::parser_error); + const bool isLoaded() const; + const char* getID() const; const char* getName() const; @@ -83,10 +106,10 @@ namespace ossie { const char* getDeviceManagerSoftPkg() const; const char* getDomainManagerName() const; - + const std::vector& getComponentFiles(); - const std::vector& getComponentPlacements(); + const std::vector& getComponentPlacements(); const std::vector& getConnections(); @@ -95,7 +118,7 @@ namespace ossie { const ComponentInstantiation& getComponentInstantiationById(std::string id) throw(std::out_of_range); private: - std::auto_ptr _dcd; + boost::shared_ptr _dcd; }; } diff --git a/redhawk/src/control/include/ossie/FileSystem_impl.h b/redhawk/src/control/include/ossie/FileSystem_impl.h index 94a4c68f2..6d027f10c 100644 --- a/redhawk/src/control/include/ossie/FileSystem_impl.h +++ b/redhawk/src/control/include/ossie/FileSystem_impl.h @@ -75,10 +75,16 @@ class FileSystem_impl: public virtual POA_CF::FileSystem void closeAllFiles(); + void setLogger(rh_logger::LoggerPtr logptr) { + _fileSysLog = logptr; + }; + protected: CORBA::ULongLong getSize () const; CORBA::ULongLong getAvailableSpace () const; + rh_logger::LoggerPtr _fileSysLog; + private: FileSystem_impl (const FileSystem_impl& _fsi); FileSystem_impl operator= (FileSystem_impl _fsi); diff --git a/redhawk/src/control/include/ossie/File_impl.h b/redhawk/src/control/include/ossie/File_impl.h index 1ed64c514..d7250c7fe 100644 --- a/redhawk/src/control/include/ossie/File_impl.h +++ b/redhawk/src/control/include/ossie/File_impl.h @@ -33,12 +33,14 @@ #include "ossie/debug.h" class FileSystem_impl; +extern rh_logger::LoggerPtr fileLog; class File_impl: public virtual POA_CF::File { ENABLE_LOGGING public: + static File_impl* 
Create (const char* fileName, FileSystem_impl* ptrFs); static File_impl* Open (const char* fileName, FileSystem_impl* ptrFs, bool readOnly); diff --git a/redhawk/src/control/include/ossie/ParserLogs.h b/redhawk/src/control/include/ossie/ParserLogs.h new file mode 100644 index 000000000..020d573a7 --- /dev/null +++ b/redhawk/src/control/include/ossie/ParserLogs.h @@ -0,0 +1,32 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef __PARSERLOGS_H__ +#define __PARSERLOGS_H__ + +#include + +namespace redhawk { + +void setupParserLoggers(rh_logger::LoggerPtr parent); + +} + +#endif diff --git a/redhawk/src/control/include/ossie/Properties.h b/redhawk/src/control/include/ossie/Properties.h index d50f6491c..c505f77b1 100644 --- a/redhawk/src/control/include/ossie/Properties.h +++ b/redhawk/src/control/include/ossie/Properties.h @@ -49,31 +49,95 @@ namespace ossie { public: friend class Properties; - Property() {} + enum KindType { + KIND_CONFIGURE = 1<<1, + KIND_EXECPARAM = 1<<2, + KIND_ALLOCATION = 1<<3, + KIND_FACTORYPARAM = 1<<4, + KIND_TEST = 1<<5, + KIND_EVENT = 1<<6, + KIND_MESSAGE = 1<<7, + KIND_PROPERTY = 1<<8, + KIND_DEFAULT = KIND_CONFIGURE + }; + + struct Kinds + { + public: + Kinds() : + kinds(0) + { + } + + Kinds(KindType kind) : + kinds(kind) + { + } + + Kinds& operator|= (KindType kind) + { + kinds |= kind; + return *this; + } + + bool operator& (KindType kind) const + { + return (kinds & kind); + } + + Kinds operator| (KindType kind) + { + Kinds result(*this); + result |= kind; + return result; + } + + bool operator! 
() const + { + return (kinds == 0); + } + + private: + int kinds; + }; + + enum ActionType { + ACTION_EXTERNAL, + ACTION_EQ, + ACTION_NE, + ACTION_GT, + ACTION_LT, + ACTION_GE, + ACTION_LE, + ACTION_DEFAULT = ACTION_EXTERNAL + }; + + enum AccessType { + MODE_READWRITE, + MODE_READONLY, + MODE_WRITEONLY, + MODE_DEFAULT = MODE_READWRITE + }; - Property(const std::string& id, - const std::string& name, - const std::string& mode, - const std::string& action, - const std::vector& kinds); + Property() {} Property(const std::string& id, const std::string& name, - const std::string& mode, - const std::string& action, - const std::vector& kinds, - const std::string& cmdline ); + AccessType mode, + ActionType action, + Kinds kinds); virtual ~Property(); bool isReadOnly() const; bool isReadWrite() const; bool isWriteOnly() const; + bool canOverride() const; bool isAllocation() const; bool isConfigure() const; bool isProperty() const; bool isTest() const; - bool isCommandLine() const; + virtual bool isCommandLine() const; bool isExecParam() const; bool isFactoryParam() const; bool isEqual() const; @@ -86,32 +150,42 @@ namespace ossie { const char* getID() const; const char* getName() const; - const char* getMode() const; - const char* getAction() const; - const std::vector& getKinds() const; + AccessType getMode() const; + // NB: getAction() should return an ActionType; however, there are + // several places that use its return as a string for an argument to + // property helper functions in the base library. Before the parsers + // become "public" API, this should be revisited. 
+ std::string getAction() const; + Kinds getKinds() const; std::string mapPrimitiveToComplex(const std::string& type) const; // Pure virtual functions virtual bool isNone() const = 0; virtual const std::string asString() const = 0; - virtual const Property* clone() const = 0; + virtual Property* clone() const = 0; + + virtual void override(const Property* otherProp) = 0; + virtual void override(const ComponentProperty* newValue) = 0; protected: // Common across all property types std::string id; std::string name; - std::string mode; - std::string action; - std::vector kinds; - std::string commandline; - - - // Pure virtual functions - virtual void override(const Property* otherProp) = 0; - virtual void override(const ComponentProperty* newValue) = 0; + AccessType mode; + ActionType action; + Kinds kinds; }; + inline Property* new_clone(const Property& property) { + return property.clone(); + } + + inline Property::Kinds operator|(Property::KindType a, Property::KindType b) + { + return Property::Kinds(a) | b; + } + /* * */ @@ -124,47 +198,39 @@ namespace ossie { SimpleProperty() {} SimpleProperty(const std::string& id, - const std::string& name, - const std::string& type, - const std::string& mode, - const std::string& action, - const std::vector& kinds, - const optional_value& value, - const std::string& complex_, - const std::string& commandline, - const std::string& optional); - - SimpleProperty(const std::string& id, - const std::string& name, - const std::string& type, - const std::string& mode, - const std::string& action, - const std::vector& kinds, - const optional_value& value); + const std::string& name, + const std::string& type, + AccessType mode, + ActionType action, + Kinds kinds, + const optional_value& value, + bool complex=false, + bool commandline=false, + bool optional=false); virtual ~SimpleProperty(); // SimpleProperty specific functions const char* getValue() const; + const std::string& getType() const; + bool isComplex() const; + bool 
isOptional() const; // Implementation of virtual functions + virtual bool isCommandLine() const; virtual bool isNone() const; virtual const std::string asString() const; - virtual const Property* clone() const; - const char* getType() const; - const char* getComplex() const; - const char* getCommandLine() const; - const char* getOptional() const; + virtual Property* clone() const; - protected: virtual void override(const Property* otherProp); virtual void override(const ComponentProperty* newValue); private: std::string type; optional_value value; - std::string _complex; - std::string optional; + bool complex; + bool commandline; + bool optional; }; /* @@ -181,41 +247,32 @@ namespace ossie { SimpleSequenceProperty(const std::string& id, const std::string& name, const std::string& type, - const std::string& mode, - const std::string& action, - const std::vector& kinds, + AccessType mode, + ActionType action, + Kinds kinds, const std::vector& values, - const std::string& complex_, - const std::string& optional); - - SimpleSequenceProperty(const std::string& id, - const std::string& name, - const std::string& type, - const std::string& mode, - const std::string& action, - const std::vector& kinds, - const std::vector& values); + bool complex=false, + bool optional=false); virtual ~SimpleSequenceProperty(); const std::vector& getValues() const; + const std::string& getType() const; + bool isComplex() const; + bool isOptional() const; virtual bool isNone() const; virtual const std::string asString() const; - virtual const Property* clone() const; - const char* getType() const; - const char* getComplex() const; - const char* getOptional() const; + virtual Property* clone() const; - protected: virtual void override(const Property* otherProp); virtual void override(const ComponentProperty* newValue); private: std::string type; std::vector values; - std::string _complex; - std::string optional; + bool complex; + bool optional; }; /* @@ -230,57 +287,29 @@ namespace ossie { 
StructProperty() {} StructProperty(const std::string& id, - const std::string& name, - const std::string& mode, - const std::vector& configurationkinds, - const std::vector& value) : - Property(id, name, mode, "external", configurationkinds) + const std::string& name, + AccessType mode, + Kinds configurationkinds, + const ossie::PropertyList& value) : + Property(id, name, mode, Property::ACTION_EXTERNAL, configurationkinds), + value(value) { - std::vector::const_iterator it; - for(it=value.begin(); it != value.end(); ++it) { - this->value.push_back(const_cast((*it)->clone())); - } } - StructProperty(const std::string& id, - const std::string& name, - const std::string& mode, - const std::vector& configurationkinds, - const ossie::PropertyList & value) : - Property(id, name, mode, "external", configurationkinds) - { - ossie::PropertyList::const_iterator it; - for(it=value.begin(); it != value.end(); ++it) { - this->value.push_back(const_cast(it->clone())); - } - } - - StructProperty(const StructProperty& other) : - Property(other.id, other.name, other.mode, other.action, other.kinds) - { - std::vector::const_iterator it; - for(it=other.value.begin(); it != other.value.end(); ++it) { - this->value.push_back(const_cast((*it)->clone())); - } - } - virtual ~StructProperty(); virtual bool isNone() const; virtual const std::string asString() const; - virtual const Property* clone() const; + virtual Property* clone() const; - StructProperty &operator=(const StructProperty& src); - - const std::vector& getValue() const ; + const PropertyList& getValue() const; const Property* getField(const std::string& id) const; - protected: virtual void override(const Property* otherProp); virtual void override(const ComponentProperty* newValue); private: - std::vector value; + PropertyList value; }; /* @@ -296,11 +325,11 @@ namespace ossie { StructSequenceProperty(const std::string& id, const std::string& name, - const std::string& mode, + AccessType mode, const StructProperty& 
structdef, - const std::vector& configurationkinds, + Kinds configurationkinds, const std::vector& values) : - Property(id, name, mode, "external", configurationkinds), + Property(id, name, mode, Property::ACTION_EXTERNAL, configurationkinds), structdef(structdef), values(values) { @@ -308,23 +337,13 @@ namespace ossie { virtual ~StructSequenceProperty(); - StructSequenceProperty(const StructSequenceProperty &src ) : - Property(src.id, src.name, src.mode, src.action, src.kinds), - structdef(src.structdef), - values(src.values) - { - } - - StructSequenceProperty & operator=( const StructSequenceProperty &src ); - virtual bool isNone() const; virtual const std::string asString() const; - virtual const Property* clone() const; + virtual Property* clone() const; const StructProperty& getStruct() const; const std::vector& getValues() const; - protected: virtual void override(const Property* otherProp); virtual void override(const ComponentProperty* newValue); @@ -333,12 +352,11 @@ namespace ossie { std::vector values; }; - template< typename charT, typename Traits> - std::basic_ostream& operator<<(std::basic_ostream &out, const Property& prop) - { - out << prop.asString(); - return out; - } + std::ostream& operator<<(std::ostream& out, const Property& prop); + std::ostream& operator<<(std::ostream& stream, Property::KindType kind); + std::ostream& operator<<(std::ostream& stream, Property::Kinds kinds); + std::ostream& operator<<(std::ostream& stream, Property::ActionType action); + std::ostream& operator<<(std::ostream& stream, Property::AccessType mode); /* * @@ -405,13 +423,11 @@ namespace ossie { Properties(std::istream& input) throw(ossie::parser_error); - Properties& operator=( const Properties &other); - virtual ~Properties(); const std::vector& getProperties() const; - const Property* getProperty(const std::string& id); + const Property* getProperty(const std::string& id) const; const std::vector& getConfigureProperties() const; diff --git 
a/redhawk/src/control/include/ossie/PropertyRef.h b/redhawk/src/control/include/ossie/PropertyRef.h new file mode 100644 index 000000000..3d3130c63 --- /dev/null +++ b/redhawk/src/control/include/ossie/PropertyRef.h @@ -0,0 +1,79 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef __PROPERTYREF_H__ +#define __PROPERTYREF_H__ + +#include + +namespace ossie { + + class ComponentProperty; + + class DependencyRef { + public: + std::string type; + + virtual const std::string asString() const = 0; + + virtual ~DependencyRef() {}; + }; + + class PropertyRef : public DependencyRef + { + public: + PropertyRef() : + property() + { + } + + PropertyRef(ComponentProperty* prop) : + property(prop) + { + } + + PropertyRef(const ComponentProperty &prop) : + property(prop.clone()) + { + } + + PropertyRef(const PropertyRef& other) + { + if (other.property) { + property.reset(other.property->clone()); + } + } + + boost::shared_ptr property; + + virtual const std::string asString() const; + virtual ~PropertyRef(); + }; + + template< typename charT, typename Traits> + std::basic_ostream& operator<<(std::basic_ostream &out, const DependencyRef& ref) + { + out << ref.asString(); + return out; + } + +} + +#endif diff --git a/redhawk/src/control/include/ossie/SoftPkg.h b/redhawk/src/control/include/ossie/SoftPkg.h index fcbc62a65..fdc6be9f8 100644 --- a/redhawk/src/control/include/ossie/SoftPkg.h +++ b/redhawk/src/control/include/ossie/SoftPkg.h @@ -31,51 +31,19 @@ #include "ossie/ossieparser.h" #include "ossie/componentProfile.h" +#include "PropertyRef.h" +#include "UsesDevice.h" + namespace ossie { + class SoftPkg; + class Properties; + class ComponentDescriptor; + class SPD { public: typedef std::pair NameVersionPair; - class DependencyRef { - public: - std::string type; - - virtual const std::string asString() const = 0; - - virtual ~DependencyRef() {}; - }; - - class PropertyRef : public DependencyRef { - public: - PropertyRef() : - property() - { - } - - PropertyRef(ComponentProperty* prop) : - property(prop) - { - } - - PropertyRef(const ComponentProperty &prop) : - property(prop.clone()) - { - } - - PropertyRef(const PropertyRef& other) - { - if (other.property) { - property.reset(other.property->clone()); - } - } - - boost::shared_ptr< 
ossie::ComponentProperty > property; - - virtual const std::string asString() const; - virtual ~PropertyRef(); - }; - class SoftPkgRef : public DependencyRef { public: std::string localfile; @@ -96,65 +64,32 @@ namespace ossie { optional_value webpage; }; - class UsesDevice { - public: - std::string id; - std::string type; - std::vector dependencies; - std::vector softPkgDependencies; - - const char* getID() const { - return id.c_str(); - } - - const char* getType() const { - return type.c_str(); - } - - const std::vector& getDependencies() const { - return dependencies; - } - - const std::vector& getSoftPkgDependencies() const { - return softPkgDependencies; - } - }; class Code { public: + enum CodeType { + NONE, + EXECUTABLE, + KERNEL_MODULE, + SHARED_LIBRARY, + DRIVER + }; + // Required std::string localfile; // Optional - optional_value type; + CodeType type; optional_value entrypoint; optional_value stacksize; optional_value priority; Code() : localfile(""), - type(""), + type(NONE), entrypoint(), - stacksize((unsigned long long)0), - priority((unsigned long long)0) - {} - - Code(const Code& other) : - localfile(other.localfile), - type(other.type), - entrypoint(other.entrypoint), - stacksize(other.stacksize), - priority(other.priority) + stacksize(), + priority() {} - - Code& operator=(const Code& other) - { - localfile = other.localfile; - type = other.type; - entrypoint = other.entrypoint; - stacksize = other.stacksize; - priority = other.priority; - return *this; - } }; class Implementation { @@ -178,8 +113,8 @@ namespace ossie { NameVersionPair runtime; public: - const char* getID() const { - return implementationID.c_str(); + const std::string& getID() const { + return implementationID; } const std::vector& getProcessors() const { @@ -201,16 +136,12 @@ namespace ossie { } } - const char * getCodeFile() const { - return code.localfile.c_str(); + const std::string& getCodeFile() const { + return code.localfile; } - const char * getCodeType() const { - if 
(code.type.isSet()) { - return code.type->c_str(); - } else { - return 0; - } + Code::CodeType getCodeType() const { + return code.type; } const char * getEntryPoint() const { @@ -221,7 +152,7 @@ namespace ossie { } } - const std::vector& getUsesDevices() const { + const std::vector& getUsesDevices() const { return usesDevice; }; @@ -239,9 +170,14 @@ namespace ossie { // SPD Members public: + SPD() : + type("sca_compliant") + { + } + std::string id; std::string name; - optional_value type; + std::string type; optional_value version; optional_value title; optional_value description; @@ -254,38 +190,24 @@ namespace ossie { class SoftPkg { public: - SoftPkg() : _spd(0), _spdFile("") {} - + SoftPkg(); SoftPkg(std::istream& input, const std::string& _spdFile) throw (ossie::parser_error); - SoftPkg& operator=( SoftPkg other) - { - _spd = other._spd; - _spdFile = other._spdFile; - _spdPath = other._spdPath; - return *this; - } - public: void load(std::istream& input, const std::string& _spdFile) throw (ossie::parser_error); - const char* getSoftPkgID() const { + const std::string& getSoftPkgID() const { assert(_spd.get() != 0); - return _spd->id.c_str(); + return _spd->id; } - const char* getSoftPkgName() const { + const std::string& getName() const { assert(_spd.get() != 0); - return _spd->name.c_str(); + return _spd->name; } - const char* getSoftPkgType() const { - assert(_spd.get() != 0); - if (_spd->type.isSet()) { - return _spd->type->c_str(); - } else { - return "sca_compliant"; - } + const std::string& getSoftPkgType() const { + return _spd->type; } const char* getSoftPkgVersion() const { @@ -315,12 +237,12 @@ namespace ossie { } } - const char* getSPDPath() const { - return _spdPath.c_str(); + const std::string& getSPDPath() const { + return _spdPath; } - const char* getSPDFile() const { - return _spdFile.c_str(); + const std::string& getSPDFile() const { + return _spdFile; } const char* getPRFFile() const { @@ -346,53 +268,48 @@ namespace ossie { return 
_spd->authors; } - //const std::vector& getImplementations() const { const ossie::SPD::Implementations& getImplementations() const { assert(_spd.get() != 0); return _spd->implementations; } - const std::vector& getUsesDevices() const { + const ossie::SPD::Implementation* getImplementation(const std::string& id) const; + + const std::vector& getUsesDevices() const { assert(_spd.get() != 0); return _spd->usesDevice; }; - bool isScaCompliant() { + bool isScaCompliant() const { assert(_spd.get() != 0); - return (strcmp(getSoftPkgType(), "sca_compliant") == 0); + // Assume compliant unless explicitly set to non-compliant + return _spd->type != "sca_non_compliant"; } - bool isScaNonCompliant() { - assert(_spd.get() != 0); - return (strcmp(getSoftPkgType(), "sca_non_compliant") == 0); + const Properties* getProperties() const + { + return _properties.get(); } - + + void loadProperties(std::istream& file); + + const ComponentDescriptor* getDescriptor() const + { + return _descriptor.get(); + } + + void loadDescriptor(std::istream& file); + protected: std::auto_ptr _spd; + boost::shared_ptr _properties; + boost::shared_ptr _descriptor; std::string _spdFile; std::string _spdPath; }; - template< typename charT, typename Traits> - std::basic_ostream& operator<<(std::basic_ostream &out, const SPD::Code& code) - { - out << "localfile: " << code.localfile << " type: " << code.type << " entrypoint: " << code.entrypoint; - return out; - } - - template< typename charT, typename Traits> - std::basic_ostream& operator<<(std::basic_ostream &out, const SPD::DependencyRef& ref) - { - out << ref.asString(); - return out; - } - - template< typename charT, typename Traits> - std::basic_ostream& operator<<(std::basic_ostream &out, const SPD::UsesDevice& usesdev) - { - out << "Uses Device id: " << usesdev.id << " type: " << usesdev.type; - return out; - } + std::ostream& operator<<(std::ostream& out, SPD::Code::CodeType type); + std::ostream& operator<<(std::ostream& out, const SPD::Code& 
code); } #endif diff --git a/redhawk/src/control/include/ossie/SoftwareAssembly.h b/redhawk/src/control/include/ossie/SoftwareAssembly.h index 43d7a7bae..b43cbdfe4 100644 --- a/redhawk/src/control/include/ossie/SoftwareAssembly.h +++ b/redhawk/src/control/include/ossie/SoftwareAssembly.h @@ -27,7 +27,36 @@ #include"ossie/componentProfile.h" #include"ossie/exceptions.h" +#include "PropertyRef.h" +#include "UsesDevice.h" + namespace ossie { + + class Reservation { + public: + std::string kind; + std::string value; + + const std::string& getKind() const { + return kind; + } + + const std::string& getValue() const { + return value; + } + + void overloadValue(std::string& new_value) { + value = new_value; + } + + }; + + inline std::ostream& operator<<(std::ostream& out, const Reservation& resrv) + { + out << "Reservation kind: " << resrv.kind; + return out; + }; + class SoftwareAssembly { public: class HostCollocation { @@ -35,18 +64,34 @@ namespace ossie { std::string id; std::string name; std::vector placements; + std::vector usesdevicerefs; + std::vector reservations; - const char* getID() const { - return id.c_str(); + const std::string& getID() const { + return id; } - const char* getName() const { - return name.c_str(); + const std::string& getName() const { + return name; } const std::vector& getComponents() const { return placements; } + + const std::vector& getUsesDeviceRefs() const { + return usesdevicerefs; + } + + const std::vector& getReservations() const { + return reservations; + } + + void overloadReservation(std::string &value, int idx) { + reservations[idx].overloadValue(value); + } + + const ComponentInstantiation* getInstantiation(const std::string& refid) const; }; class Partitioning { @@ -56,18 +101,27 @@ namespace ossie { }; class Port { - public: - typedef enum { - NONE = 0, - USESIDENTIFIER, - PROVIDESIDENTIFIER, - SUPPORTEDIDENTIFIER - } port_type; - - std::string componentrefid; - std::string identifier; - std::string externalname; - port_type 
type; + public: + typedef enum { + NONE = 0, + USESIDENTIFIER, + PROVIDESIDENTIFIER, + SUPPORTEDIDENTIFIER + } port_type; + + std::string componentrefid; + std::string identifier; + std::string externalname; + port_type type; + + const std::string& getExternalName() const + { + if (externalname.empty()) { + return identifier; + } else { + return externalname; + } + } }; class Property { @@ -75,51 +129,30 @@ namespace ossie { std::string comprefid; std::string propid; std::string externalpropid; - }; - - class PropertyRef { - public: - PropertyRef (ComponentProperty* prop) : - property (prop) - { - } - - PropertyRef(const ComponentProperty &prop) : - property(prop.clone()) - { - } - - PropertyRef (const PropertyRef& copy) : - property (copy.property->clone()) - { - } - virtual ~PropertyRef () + const std::string& getExternalID() const { - + if (externalpropid.empty()) { + return propid; + } else { + return externalpropid; + } } - - std::string refId; - boost::shared_ptr< ossie::ComponentProperty > property; - }; - class UsesDevice { + class Option { public: - std::string id; - std::string type; - std::vector dependencies; - - const char* getId() const { - return id.c_str(); - } + std::string name; + std::string value; - const char* getType() const { - return type.c_str(); + const std::string& getName() const + { + return name; } - const std::vector& getDependencies() const { - return dependencies; + const std::string& getValue() const + { + return value; } }; @@ -133,38 +166,51 @@ namespace ossie { std::vector componentfiles; std::vector externalports; std::vector externalproperties; - std::vector usesdevice; + std::vector options; + std::vector usesdevice; }; - SoftwareAssembly() : _sad(0) {} + SoftwareAssembly(); SoftwareAssembly(std::istream& input) throw (ossie::parser_error); void load(std::istream& input) throw (ossie::parser_error); - const char* getID() const; + const std::string& getID() const; - const char* getName() const; + const std::string& getName() 
const; const std::vector& getComponentFiles() const; std::vector getAllComponents() const; + const std::vector& getComponentPlacements() const; + const std::vector& getHostCollocations() const; const std::vector& getConnections() const; - const char* getSPDById(const char* refid) const; + const ComponentFile* getComponentFile(const std::string& refid) const; + + const std::string& getAssemblyControllerRefId() const; - const char* getAssemblyControllerRefId() const; + const ComponentPlacement* getAssemblyControllerPlacement() const; const std::vector& getExternalPorts() const; const std::vector& getExternalProperties() const; - const std::vector& getUsesDevices() const; + const std::vector& getOptions() const; + + const std::vector& getUsesDevices() const; + + const ComponentInstantiation* getComponentInstantiation(const std::string& refid) const; protected: + void validateComponentPlacements(std::vector& placements); + void validateExternalPorts(std::vector& ports); + void validateExternalProperties(std::vector& properties); + std::auto_ptr _sad; }; } diff --git a/redhawk/src/control/include/ossie/UsesDevice.h b/redhawk/src/control/include/ossie/UsesDevice.h new file mode 100644 index 000000000..0dd47b6bc --- /dev/null +++ b/redhawk/src/control/include/ossie/UsesDevice.h @@ -0,0 +1,74 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef __USESDEVICE_H__ +#define __USESDEVICE_H__ + +#include +#include + +#include "PropertyRef.h" + +namespace ossie { + + class UsesDeviceRef { + public: + std::string id; + + const std::string& getID() const { + return id; + } + + }; + + inline std::ostream& operator<<(std::ostream& out, const UsesDeviceRef& usesdev) + { + out << "UsesDeviceRef id: " << usesdev.id; + return out; + } + + + class UsesDevice { + public: + std::string id; + std::string type; + std::vector dependencies; + + const std::string& getID() const { + return id; + } + + const std::string& getType() const { + return type; + } + + const std::vector& getDependencies() const { + return dependencies; + } + }; + + inline std::ostream& operator<<(std::ostream& out, const UsesDevice& usesdev) + { + out << "Uses Device id: " << usesdev.id << " type: " << usesdev.type; + return out; + } +} + +#endif diff --git a/redhawk/src/control/include/ossie/componentProfile.h b/redhawk/src/control/include/ossie/componentProfile.h index b5ea16222..815bf2745 100644 --- a/redhawk/src/control/include/ossie/componentProfile.h +++ b/redhawk/src/control/include/ossie/componentProfile.h @@ -32,6 +32,9 @@ #include "ossie/ossieparser.h" namespace ossie { + + class SoftPkg; + /* * */ @@ -42,9 +45,9 @@ namespace ossie { std::string type; public: - const char* getFileName() const; + const std::string& getFileName() const; - const char* getID() const; + const std::string& getID() const; }; /* @@ -58,7 +61,7 @@ namespace ossie { virtual ~ComponentProperty() {}; - const char* getID() const; + const std::string& getID() const; ComponentProperty* clone() const { return _clone(); @@ -95,6 +98,7 @@ namespace ossie { return out; } + /* * */ @@ -111,6 +115,12 @@ namespace ossie { const std::string _asString() 
const; }; + + class IdValue : public SimplePropertyRef { + + }; + + /* * */ @@ -166,31 +176,32 @@ namespace ossie { */ class ComponentInstantiation { public: - typedef std::pair LoggingConfig; - typedef ossie::ComponentPropertyList AffinityProperties; + typedef std::pair LoggingConfig; + typedef ossie::ComponentPropertyList AffinityProperties; std::string instantiationId; - ossie::optional_value namingservicename; - ossie::optional_value usageName; + std::string namingservicename; + std::string usageName; ossie::ComponentPropertyList properties; - std::string _startOrder; + ossie::optional_value startOrder; AffinityProperties affinityProperties; LoggingConfig loggingConfig; - public: - ComponentInstantiation(); + ossie::ComponentPropertyList deployerrequires; + ossie::ComponentPropertyList devicerequires; - ComponentInstantiation(const ComponentInstantiation& other); - ComponentInstantiation& operator=(const ComponentInstantiation &other); + public: + ComponentInstantiation(); virtual ~ComponentInstantiation(); public: - const char* getID() const; - - const char* getStartOrder() const; + const std::string& getID() const; + + bool hasStartOrder() const; + int getStartOrder() const; - const char* getUsageName() const; + const std::string& getUsageName() const; const ossie::ComponentPropertyList & getProperties() const; @@ -198,9 +209,14 @@ namespace ossie { const AffinityProperties &getAffinity() const; + const ossie::ComponentPropertyList & getDeployerRequires() const; + + const ossie::ComponentPropertyList & getDeviceRequires() const; + bool isNamingService() const; - const char* getFindByNamingServiceName() const; + const std::string& getFindByNamingServiceName() const; + }; /* @@ -208,30 +224,18 @@ namespace ossie { */ class ComponentPlacement { public: - bool ifDomainManager; std::string _componentFileRef; - ossie::optional_value deployOnDeviceID; - ossie::optional_value compositePartOfDeviceID; - ossie::optional_value DPDFile; std::vector instantiations; + // 
Resolved after parsing is complete + std::string filename; public: - const char* getDeployOnDeviceID() const; - - const char* getCompositePartOfDeviceID() const; - - const std::string getDPDFile() const; - const std::vector& getInstantiations() const; - const char* getFileRefId() const; - - bool isDeployOn() const; - - bool isCompositePartOf() const; + const std::string& getFileRefId() const; - bool isDomainManager() const; + const ComponentInstantiation* getInstantiation(const std::string& refid) const; }; /* diff --git a/redhawk/src/control/include/ossie/ossieSupport.h b/redhawk/src/control/include/ossie/ossieSupport.h index b8b411cd8..b716782b6 100644 --- a/redhawk/src/control/include/ossie/ossieSupport.h +++ b/redhawk/src/control/include/ossie/ossieSupport.h @@ -23,6 +23,7 @@ #define ORBSUPPORT_H #include +#include /* The ossieSupport namespace contains useful functions used throughout the @@ -40,6 +41,8 @@ namespace ossie std::string generateUUID(); + bool sameHost( CORBA::Object_ptr aobj, CORBA::Object_ptr bobj ); + std::string getCurrentDirName(); namespace helpers { diff --git a/redhawk/src/control/include/ossie/prop_utils.h b/redhawk/src/control/include/ossie/prop_utils.h index 05474cd92..cebb89d18 100644 --- a/redhawk/src/control/include/ossie/prop_utils.h +++ b/redhawk/src/control/include/ossie/prop_utils.h @@ -27,6 +27,7 @@ #include #include "ossie/Properties.h" +#include "ossie/PropertyMap.h" #include "ossie/SoftPkg.h" #include "ossie/componentProfile.h" @@ -36,6 +37,8 @@ namespace ossie { + extern rh_logger::LoggerPtr proputilsLog; + CF::DataType convertPropertyToDataType(const Property* prop); CF::DataType convertPropertyRefToDataType(const ComponentProperty* prop); CF::DataType convertPropertyRefToDataType(const ComponentProperty& prop); @@ -73,9 +76,16 @@ namespace ossie void convertComponentProperties( const ossie::ComponentPropertyList &cp_props, CF::Properties &cf_props ); + + void convertComponentProperties( const ossie::ComponentPropertyList 
&cp_props, + redhawk::PropertyMap &cf_props ); std::string retrieveParserErrorLineNumber(std::string message); + bool structContainsMixedNilValues(const CF::Properties& properties); + CF::Properties getPartialStructs(const CF::Properties& properties); + + CF::Properties getAffinityOptions(const ComponentInstantiation::AffinityProperties& affinityProps); } #endif diff --git a/redhawk/src/control/parser/ComponentDescriptor.cpp b/redhawk/src/control/parser/ComponentDescriptor.cpp index 0089c7cb8..bbe30edd5 100644 --- a/redhawk/src/control/parser/ComponentDescriptor.cpp +++ b/redhawk/src/control/parser/ComponentDescriptor.cpp @@ -26,7 +26,6 @@ #include "ossie/ComponentDescriptor.h" #include "internal/scd-parser.h" -using namespace scd; using namespace ossie; ComponentDescriptor::ComponentDescriptor(std::istream& input) throw (ossie::parser_error) { diff --git a/redhawk/src/control/parser/DeviceManagerConfiguration.cpp b/redhawk/src/control/parser/DeviceManagerConfiguration.cpp index da9b6af34..2247d627c 100644 --- a/redhawk/src/control/parser/DeviceManagerConfiguration.cpp +++ b/redhawk/src/control/parser/DeviceManagerConfiguration.cpp @@ -35,6 +35,10 @@ void DeviceManagerConfiguration::load(std::istream& input) throw (ossie::parser_ _dcd = ossie::internalparser::parseDCD(input); } +const bool DeviceManagerConfiguration::isLoaded() const { + return _dcd.get() != 0; +} + const char* DeviceManagerConfiguration::getID() const { assert(_dcd.get() != 0); return _dcd->id.c_str(); @@ -60,7 +64,7 @@ const std::vector& DeviceManagerConfiguration::getComponentFiles( return _dcd->componentFiles; } -const std::vector& DeviceManagerConfiguration::getComponentPlacements() { +const std::vector& DeviceManagerConfiguration::getComponentPlacements() { assert(_dcd.get() != 0); return _dcd->componentPlacements; } @@ -75,8 +79,8 @@ const char* DeviceManagerConfiguration::getFileNameFromRefId(const char* refid) const std::vector& componentFiles = getComponentFiles(); 
std::vector::const_iterator i; for (i = componentFiles.begin(); i != componentFiles.end(); ++i) { - if (strcmp(i->getID(), refid) == 0) { - return i->getFileName(); + if (i->getID() == refid) { + return i->getFileName().c_str(); } } @@ -84,8 +88,8 @@ const char* DeviceManagerConfiguration::getFileNameFromRefId(const char* refid) } const ComponentInstantiation& DeviceManagerConfiguration::getComponentInstantiationById(std::string id) throw(std::out_of_range) { - const std::vector& componentPlacements = getComponentPlacements(); - std::vector::const_iterator i; + const std::vector& componentPlacements = getComponentPlacements(); + std::vector::const_iterator i; for (i = componentPlacements.begin(); i != componentPlacements.end(); ++i) { assert(i->getInstantiations().size() > 0); const ComponentInstantiation& instantiation = i->getInstantiations().at(0); @@ -96,3 +100,39 @@ const ComponentInstantiation& DeviceManagerConfiguration::getComponentInstantiat } throw std::out_of_range("No instantiation with id " + id); } + + +// +// DevicePlacement +// +const char* DevicePlacement::getDeployOnDeviceID() const { + if (deployOnDeviceID.isSet()) { + return deployOnDeviceID->c_str(); + } else { + return 0; + } +} + +const char* DevicePlacement::getCompositePartOfDeviceID() const { + if (compositePartOfDeviceID.isSet()) { + return compositePartOfDeviceID->c_str(); + } else { + return 0; + } +} + +const std::string DevicePlacement::getDPDFile() const { + if (DPDFile.isSet()) { + return DPDFile->c_str(); + } else { + return 0; + } +} + +bool DevicePlacement::isDeployOn() const { + return deployOnDeviceID.isSet(); +} + +bool DevicePlacement::isCompositePartOf() const { + return compositePartOfDeviceID.isSet(); +} diff --git a/redhawk/src/control/parser/DomainManagerConfiguration.cpp b/redhawk/src/control/parser/DomainManagerConfiguration.cpp index fa8966128..b351c4fc9 100644 --- a/redhawk/src/control/parser/DomainManagerConfiguration.cpp +++ 
b/redhawk/src/control/parser/DomainManagerConfiguration.cpp @@ -23,7 +23,6 @@ #include"internal/dmd-parser.h" using namespace ossie; -using namespace dmd; // The implementation of these functions should come from the XSD produced drivers // When the XSD changes you will need to update these functions. diff --git a/redhawk/src/control/parser/Makefile.am b/redhawk/src/control/parser/Makefile.am index da691660f..e36b3aa09 100644 --- a/redhawk/src/control/parser/Makefile.am +++ b/redhawk/src/control/parser/Makefile.am @@ -22,13 +22,20 @@ noinst_LTLIBRARIES = libossieparser.la libossieparser_la_SOURCES = Properties.cpp \ - debug.cpp \ SoftPkg.cpp \ + PropertyRef.cpp \ DomainManagerConfiguration.cpp \ ComponentDescriptor.cpp \ DeviceManagerConfiguration.cpp \ SoftwareAssembly.cpp \ componentProfile.cpp \ + ParserLogs.cpp \ + internal/prf-parser.cpp \ + internal/dmd-parser.cpp \ + internal/dcd-parser.cpp \ + internal/sad-parser.cpp \ + internal/scd-parser.cpp \ + internal/spd-parser.cpp \ internal/prf-pskel.cpp \ internal/dmd-pskel.cpp \ internal/dcd-pskel.cpp \ @@ -64,42 +71,28 @@ CLEANFILES = internal/dcd-pskel.h internal/dcd-pskel.cpp \ internal/scd-pskel.h internal/scd-pskel.cpp \ internal/spd-pskel.h internal/spd-pskel.cpp -libossieparser_la_CXXFLAGS = -Wall $(STDINTF_CFLAGS) $(EXPAT_CFLAGS) $(BOOST_CPPFLAGS) -libossieparser_la_LIBADD = $(STDINTF_LIBS) $(EXPAT_LIBS) -libossieparser_la_LDFLAGS = -Wall $(EXPAT_LDFLAGS) +libossieparser_la_CXXFLAGS = -Wall $(EXPAT_CFLAGS) $(BOOST_CPPFLAGS) +libossieparser_la_LIBADD = $(EXPAT_LIBS) +libossieparser_la_LDFLAGS = $(EXPAT_LDFLAGS) AM_CPPFLAGS = -I../include -I. 
-I$(top_srcdir)/base/include -XSDFLAGS = --hxx-suffix .h --cxx-suffix .cpp --xml-parser expat --output-dir internal $(OSSIE_XSDFLAGS) +XSDFLAGS = --hxx-suffix .h --cxx-suffix .cpp --xml-parser expat --output-dir internal --generate-validation -internal/dmd-pskel.cpp: $(top_srcdir)/xml/xsd/dmd.xsd internal/dmd.map -# We need to keep xmlns items in the XSD, but we have to remove them from validation - -$(XSD) cxx-parser --root-element domainmanagerconfiguration --type-map internal/dmd.map $(XSDFLAGS) $<; sed -i 's/ns == "urn:mil:jpeojtrs:sca:dmd"/ns.empty()/g' internal/dmd-pskel.cpp; sed -i 's/"urn:mil:jpeojtrs:sca:dmd"/""/g' internal/dmd-pskel.cpp - -internal/dcd-pskel.cpp: $(top_srcdir)/xml/xsd/dcd.xsd internal/dcd.map -# We need to keep xmlns items in the XSD, but we have to remove them from validation - -$(XSD) cxx-parser --root-element deviceconfiguration --type-map internal/dcd.map $(XSDFLAGS) $<; sed -i 's/ns == "urn:mil:jpeojtrs:sca:dcd"/ns.empty()/g' internal/dcd-pskel.cpp; sed -i 's/"urn:mil:jpeojtrs:sca:dcd"/""/g' internal/dcd-pskel.cpp - -internal/spd-pskel.cpp: $(top_srcdir)/xml/xsd/spd.xsd internal/spd.map -# We need to keep xmlns items in the XSD, but we have to remove them from validation - -$(XSD) cxx-parser --root-element softpkg --type-map internal/spd.map $(XSDFLAGS) $<; sed -i 's/ns == "urn:mil:jpeojtrs:sca:spd"/ns.empty()/g' internal/spd-pskel.cpp; sed -i 's/"urn:mil:jpeojtrs:sca:spd"/""/g' internal/spd-pskel.cpp - -internal/scd-pskel.cpp: $(top_srcdir)/xml/xsd/scd.xsd internal/scd.map - -$(XSD) cxx-parser --root-element softwarecomponent --type-map internal/scd.map $(XSDFLAGS) $<; sed -i 's/ns == "urn:mil:jpeojtrs:sca:scd"/ns.empty()/g' internal/scd-pskel.cpp; sed -i 's/"urn:mil:jpeojtrs:sca:scd"/""/g' internal/scd-pskel.cpp - -internal/sad-pskel.cpp: $(top_srcdir)/xml/xsd/sad.xsd internal/sad.map -# We need to keep xmlns items in the XSD, but we have to remove them from validation - -$(XSD) cxx-parser --root-element softwareassembly --type-map 
internal/sad.map $(XSDFLAGS) $<; sed -i 's/ns == "urn:mil:jpeojtrs:sca:sad"/ns.empty()/g' internal/sad-pskel.cpp; sed -i 's/"urn:mil:jpeojtrs:sca:sad"/""/g' internal/sad-pskel.cpp - -internal/prf-pskel.cpp: $(top_srcdir)/xml/xsd/prf.xsd internal/prf.map -# We need to keep xmlns items in the XSD, but we have to remove them from validation - -$(XSD) cxx-parser --root-element properties --type-map internal/prf.map $(XSDFLAGS) $<; sed -i 's/ns == "urn:mil:jpeojtrs:sca:prf"/ns.empty()/g' internal/prf-pskel.cpp; sed -i 's/"urn:mil:jpeojtrs:sca:prf"/""/g' internal/prf-pskel.cpp +# Pattern rule to generate *-pskel files using the corresponding .xsd and .map +# files. We need to keep xmlns items in the XSD, but we have to remove them +# from the generated parser's validation (this is what the sed command is for). +internal/%-pskel.cpp: $(top_srcdir)/xml/xsd/%.xsd internal/%.map + $(AM_V_GEN)$(XSD) cxx-parser --type-map internal/$*.map $(XSDFLAGS) $< + $(AM_V_at)$(SED) -i 's/ns == "urn:mil:jpeojtrs:sca:$*"/ns.empty()/g' $@ + $(AM_V_at)$(SED) -i 's/"urn:mil:jpeojtrs:sca:$*"/""/g' $@ +# The DPD and profile parsers aren't used by the framework internal/dpd-pskel.cpp: $(top_srcdir)/xml/xsd/dpd.xsd - -$(XSD) cxx-parser --root-element devicepkg $(XSDFLAGS) $< + $(AM_V_GEN)$(XSD) cxx-parser --root-element devicepkg $(XSDFLAGS) $< internal/profile-pskel.cpp: $(top_srcdir)/xml/xsd/profile.xsd - -$(XSD) cxx-parser --root-element profile $(XSDFLAGS) $< + $(AM_V_GEN)$(XSD) cxx-parser --root-element profile $(XSDFLAGS) $< .PHONY: generate_noop_impl generate_test_driver diff --git a/redhawk/src/control/parser/ParserLogs.cpp b/redhawk/src/control/parser/ParserLogs.cpp new file mode 100644 index 000000000..bef579097 --- /dev/null +++ b/redhawk/src/control/parser/ParserLogs.cpp @@ -0,0 +1,37 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include "internal/spd-pimpl.h" +#include "internal/sad-pimpl.h" +#include "internal/dcd-pimpl.h" +#include "internal/dmd-pimpl.h" +#include "internal/prf-pimpl.h" + +using namespace redhawk; + +void redhawk::setupParserLoggers(rh_logger::LoggerPtr parent) { + spd::parserLog = parent->getChildLogger("spd","parsers"); + sad::parserLog = parent->getChildLogger("sad","parsers"); + prf::parserLog = parent->getChildLogger("prf","parsers"); + dmd::parserLog = parent->getChildLogger("dmd","parsers"); + dcd::parserLog = parent->getChildLogger("dcd","parsers"); +} diff --git a/redhawk/src/control/parser/Properties.cpp b/redhawk/src/control/parser/Properties.cpp index 98ca5eef3..2f333b06f 100644 --- a/redhawk/src/control/parser/Properties.cpp +++ b/redhawk/src/control/parser/Properties.cpp @@ -88,12 +88,6 @@ Properties::~Properties() LOG_TRACE(Properties, "Destruction for properties") } -Properties& Properties::operator=(const Properties &other) { - _prf = other._prf; - return *this; -} - - void Properties::load(std::istream& input) throw (ossie::parser_error) { std::auto_ptr t = ossie::internalparser::parsePRF(input); _prf.reset(t.release()); @@ -169,13 +163,13 @@ void Properties::join(ossie::Properties& props) throw (ossie::parser_error) { void Properties::override(const 
ossie::ComponentPropertyList & values) { - for ( ossie::ComponentPropertyList::const_iterator iter = values.begin(); iter != values.end(); ++iter) { - const ComponentProperty* new_value = &(*iter); - Property* property = const_cast(getProperty(new_value->getID())); + for (ossie::ComponentPropertyList::const_iterator iter = values.begin(); iter != values.end(); ++iter) { + const ComponentProperty* new_value = &(*iter); + const Property* property = const_cast(this)->getProperty(new_value->getID()); if (!property) { LOG_TRACE(Properties, "Skipping override of non-existent property " << new_value->getID()); } else { - property->override(new_value); + const_cast(property)->override(new_value); } } } @@ -186,7 +180,7 @@ const std::vector& Properties::getProperties() const return _prf->_allProperties; } -const Property* Properties::getProperty(const std::string& id) +const Property* Properties::getProperty(const std::string& id) const { assert(_prf.get() != 0); std::map::iterator p = _prf->_properties.find(id); @@ -241,24 +235,112 @@ const std::vector& Properties::getFactoryParamProperties() cons * Property class */ -Property::Property(const std::string& id, - const std::string& name, - const std::string& mode, - const std::string& action, - const std::vector& kinds) : - id(id), name(name), mode(mode), action(action), kinds(kinds), - commandline("false") -{}; +std::ostream& ossie::operator<<(std::ostream& stream, const ossie::Property& property) +{ + stream << property.asString(); + return stream; +} + +std::ostream& ossie::operator<<(std::ostream& stream, ossie::Property::KindType kind) +{ + switch (kind) { + case Property::KIND_CONFIGURE: + stream << "configure"; + break; + case Property::KIND_EXECPARAM: + stream << "execparam"; + break; + case Property::KIND_ALLOCATION: + stream << "allocation"; + break; + case Property::KIND_FACTORYPARAM: + stream << "factoryparam"; + break; + case Property::KIND_TEST: + stream << "test"; + break; + case Property::KIND_EVENT: + stream 
<< "event"; + break; + case Property::KIND_MESSAGE: + stream << "message"; + break; + case Property::KIND_PROPERTY: + stream << "property"; + break; + default: + break; + } + return stream; +} + +std::ostream& ossie::operator<<(std::ostream& stream, ossie::Property::Kinds kinds) +{ + for (int bit = 1; bit <= Property::KIND_PROPERTY; bit <<= 1) { + Property::KindType flag = static_cast(bit); + if (kinds & flag) { + stream << flag << ","; + } + } + return stream; +} + +std::ostream& ossie::operator<<(std::ostream& stream, ossie::Property::ActionType action) +{ + switch (action) { + case ossie::Property::ACTION_GE: + stream << "ge"; + break; + case ossie::Property::ACTION_GT: + stream << "gt"; + break; + case ossie::Property::ACTION_LE: + stream << "le"; + break; + case ossie::Property::ACTION_LT: + stream << "lt"; + break; + case ossie::Property::ACTION_NE: + stream << "ne"; + break; + case ossie::Property::ACTION_EQ: + stream << "eq"; + break; + case ossie::Property::ACTION_EXTERNAL: + stream << "external"; + break; + } + return stream; +} + +std::ostream& ossie::operator<<(std::ostream& stream, ossie::Property::AccessType mode) +{ + switch (mode) { + case ossie::Property::MODE_READWRITE: + stream << "readwrite"; + break; + case ossie::Property::MODE_READONLY: + stream << "readonly"; + break; + case ossie::Property::MODE_WRITEONLY: + stream << "writeonly"; + break; + } + return stream; +} Property::Property(const std::string& id, const std::string& name, - const std::string& mode, - const std::string& action, - const std::vector& kinds, - const std::string& cmdline ): - id(id), name(name), mode(mode), action(action), kinds(kinds), - commandline(cmdline) -{}; + AccessType mode, + ActionType action, + Kinds kinds): + id(id), + name(name), + mode(mode), + action(action), + kinds((!kinds)?KIND_DEFAULT:kinds) +{ +} Property::~Property() { @@ -266,78 +348,32 @@ Property::~Property() bool Property::isAllocation() const { - TRACE_ENTER(Property); - - for (unsigned int i = 
0; i < kinds.size (); i++) { - if (kinds[i] == "allocation") - { return true; } - } - - return false; + return kinds & KIND_ALLOCATION; } bool Property::isConfigure() const { - TRACE_ENTER(Property); - if (kinds.size() == 0) { - return true; - } - for (unsigned int i = 0; i < kinds.size (); i++) { - if (kinds[i] == "configure") - { return true; } - } - return false; + return kinds & KIND_CONFIGURE; } bool Property::isProperty() const { - TRACE_ENTER(Property); - // RESOLVE, could be default behavior with old style properties - //if (kinds.size() == 0) { - //return true; - //} - for (unsigned int i = 0; i < kinds.size (); i++) { - if (kinds[i] == "property") - { return true; } - } - - return false; + return kinds & KIND_PROPERTY; } bool Property::isTest() const { - TRACE_ENTER(Property); - - for (unsigned int i = 0; i < kinds.size (); i++) { - if (kinds[i] == "test") - { return true; } - } - - return false; + return kinds & KIND_TEST; } bool Property::isExecParam() const { - TRACE_ENTER(Property); - - for (unsigned int i = 0; i < kinds.size (); i++) { - if (kinds[i] == "execparam") - { return true; } - } - - return false; + return kinds & KIND_EXECPARAM; } bool Property::isFactoryParam() const { - TRACE_ENTER(Property); - - for (unsigned int i = 0; i < kinds.size (); i++) { - if (kinds[i] == "factoryparam") - { return true; } - } - - return false; + return kinds & KIND_FACTORYPARAM; } const char* Property::getID() const @@ -350,81 +386,91 @@ const char* Property::getName() const return name.c_str(); } -const char* Property::getMode() const +Property::AccessType Property::getMode() const { - return mode.c_str(); + return mode; } -const char* Property::getAction() const +std::string Property::getAction() const { - return action.c_str(); + std::ostringstream out; + out << action; + return out.str(); } -const std::vector & Property::getKinds() const +Property::Kinds Property::getKinds() const { return kinds; } bool Property::isReadOnly() const { - return (mode == 
"readonly"); + return (mode == MODE_READONLY); } bool Property::isCommandLine() const { - return (commandline == "true"); + return false; } bool Property::isReadWrite() const { - return (mode == "readwrite"); + return (mode == MODE_READWRITE); } bool Property::isWriteOnly() const { - return (mode == "writeonly"); + return (mode == MODE_WRITEONLY); +} + +bool Property::canOverride() const +{ + // Only allow overrides for writable or 'property' kind properties + if (isProperty()) { + return true; + } else { + return !isReadOnly(); + } } bool Property::isEqual() const { - return (action == "eq"); + return (action == ACTION_EQ); } bool Property::isNotEqual() const { - return (action == "ne"); + return (action == ACTION_NE); } - bool Property::isGreaterThan() const { - return (action == "gt"); + return (action == ACTION_GT); } bool Property::isLessThan() const { - return (action == "lt"); + return (action == ACTION_LT); } bool Property::isGreaterThanOrEqual() const { - return (action == "ge"); + return (action == ACTION_GE); } - bool Property::isLessThanOrEqual() const { - return (action == "le"); + return (action == ACTION_LE); } bool Property::isExternal() const { - return ((action == "external") || (action == "")); + return (action == ACTION_EXTERNAL); } std::string Property::mapPrimitiveToComplex(const std::string& type) const { - std::string newType; + std::string newType = type; if (type.compare("float") == 0) { newType = "complexFloat"; } else if (type.compare("double") == 0) { @@ -464,20 +510,20 @@ std::string Property::mapPrimitiveToComplex(const std::string& type) const SimpleProperty::SimpleProperty(const std::string& id, const std::string& name, const std::string& type, - const std::string& mode, - const std::string& action, - const std::vector& kinds, + AccessType mode, + ActionType action, + Kinds kinds, const optional_value& value, - const std::string& complex_, - const std::string& commandline_, - const std::string& optional) : - Property(id, name, mode, 
action, kinds, commandline_ ), + bool complex, + bool commandline, + bool optional) : + Property(id, name, mode, action, kinds), value(value), - _complex(complex_), + complex(complex), + commandline(commandline), optional(optional) { - commandline = commandline_; - if (_complex.compare("true") == 0) { + if (complex) { /* * Downstream processing expects complex types * (e.g., complexLong) rather than primitive @@ -493,27 +539,16 @@ SimpleProperty::SimpleProperty(const std::string& id, } } -/* - * A constructor that does not require the specification of - * whether or not the property is complex. If complexity - * is not specified, the property is assumed to be primitive. - */ -SimpleProperty::SimpleProperty(const std::string& id, - const std::string& name, - const std::string& type, - const std::string& mode, - const std::string& action, - const std::vector& kinds, - const optional_value& value) +SimpleProperty::~SimpleProperty() { - SimpleProperty(id, name, type, mode, action, kinds, value, "false", "false", "false"); } -SimpleProperty::~SimpleProperty() + +bool SimpleProperty::isCommandLine() const { + return commandline; } - bool SimpleProperty::isNone() const { return !value.isSet(); } @@ -536,24 +571,19 @@ void SimpleProperty::override(const ComponentProperty* newValue) { } } -const char* SimpleProperty::getType() const +const std::string& SimpleProperty::getType() const { - return type.c_str(); + return type; } -const char* SimpleProperty::getComplex() const +bool SimpleProperty::isComplex() const { - return _complex.c_str(); + return complex; } -const char* SimpleProperty::getCommandLine() const +bool SimpleProperty::isOptional() const { - return commandline.c_str(); -} - -const char* SimpleProperty::getOptional() const -{ - return optional.c_str(); + return optional; } const char* SimpleProperty::getValue() const @@ -568,42 +598,37 @@ const char* SimpleProperty::getValue() const const std::string SimpleProperty::asString() const { std::ostringstream out; out 
<< "Simple Property: <'" << this->id << "' '" << this->name << " " << this->mode << " " << this->type << " '"; - std::vector::const_iterator i; - for (i = kinds.begin(); i != kinds.end(); ++i) { - out << *i << ", "; - } - out << "' "; + out << kinds << "' "; if (value.isSet()) { out << " = '" << *(this->value) << "'>"; } return out.str(); } -const Property* SimpleProperty::clone() const { - return new SimpleProperty(id, name, type, mode, action, kinds, value, _complex, commandline, optional); +Property* SimpleProperty::clone() const { + return new SimpleProperty(*this); } /* * SimpleSequenceProperty class */ -SimpleSequenceProperty::SimpleSequenceProperty( - const std::string& id, - const std::string& name, - const std::string& type, - const std::string& mode, - const std::string& action, - const std::vector& kinds, - const std::vector& values, - const std::string& complex_, - const std::string& optional) : - Property(id, name, mode, action, kinds), - type(type), - values(values), - _complex(complex_), - optional(optional) -{ - if (_complex.compare("true") == 0) { +SimpleSequenceProperty::SimpleSequenceProperty(const std::string& id, + const std::string& name, + const std::string& type, + AccessType mode, + ActionType action, + Kinds kinds, + const std::vector& values, + bool complex, + bool optional) : + Property(id, name, mode, action, kinds), + type(type), + values(values), + complex(complex), + optional(optional) +{ + if (complex) { /* * Downstream processing expects complex types * (e.g., complexLong) rather than primitive @@ -615,31 +640,6 @@ SimpleSequenceProperty::SimpleSequenceProperty( } } -/* - * A constructor that does not require the specification of - * whether or not the property is complex. If complexity - * is not specified, the property is assumed to be primitive. 
- */ -SimpleSequenceProperty::SimpleSequenceProperty( - const std::string& id, - const std::string& name, - const std::string& type, - const std::string& mode, - const std::string& action, - const std::vector& kinds, - const std::vector& values) -{ - SimpleSequenceProperty(id, - name, - type, - mode, - action, - kinds, - values, - "false", - "false"); -} - SimpleSequenceProperty::~SimpleSequenceProperty() { } @@ -667,19 +667,19 @@ void SimpleSequenceProperty::override(const ComponentProperty* newValue) { } } -const char* SimpleSequenceProperty::getType() const +const std::string& SimpleSequenceProperty::getType() const { - return type.c_str(); + return type; } -const char* SimpleSequenceProperty::getComplex() const +bool SimpleSequenceProperty::isComplex() const { - return _complex.c_str(); + return complex; } -const char* SimpleSequenceProperty::getOptional() const +bool SimpleSequenceProperty::isOptional() const { - return optional.c_str(); + return optional; } const std::vector& SimpleSequenceProperty::getValues() const @@ -697,8 +697,8 @@ const std::string SimpleSequenceProperty::asString() const { return out.str(); } -const Property* SimpleSequenceProperty::clone() const { - return new SimpleSequenceProperty(id, name, type, mode, action, kinds, values, _complex, optional); +Property* SimpleSequenceProperty::clone() const { + return new SimpleSequenceProperty(*this); } /* @@ -706,42 +706,12 @@ const Property* SimpleSequenceProperty::clone() const { */ StructProperty::~StructProperty() { - std::vector::iterator i; - for (i = value.begin(); i != value.end(); ++i) { - if ( *i ) delete *i; - } - value.clear(); -} - - -StructProperty& StructProperty::operator=(const StructProperty& src) -{ - id = src.id; - name =src.name; - mode=src.mode; - commandline = src.commandline; - action=src.action; - kinds=src.kinds; - /// clean out my old... 
- std::vector::iterator i; - for (i = value.begin(); i != value.end(); ++i) { - if ( *i ) delete *i; - } - value.clear(); - - // bring in the new... - std::vector::const_iterator it; - for(it=src.value.begin(); it != src.value.end(); ++it) { - this->value.push_back(const_cast((*it)->clone())); - } - - return *this; } bool StructProperty::isNone() const { // it is not possible to set only one of the structure values if (value.size() > 0) - return (value[0]->isNone()); + return (value[0].isNone()); else return true; } @@ -749,8 +719,7 @@ bool StructProperty::isNone() const { void StructProperty::override(const Property* otherProp) { const StructProperty* otherStructProp = dynamic_cast(otherProp); if (otherStructProp != NULL) { - value.clear(); - std::copy(otherStructProp->value.begin(), otherStructProp->value.end(), std::back_inserter(value)); + value = otherStructProp->getValue(); } else { LOG_WARN(StructProperty, "Ignoring override request") } @@ -767,25 +736,24 @@ void StructProperty::override(const ComponentProperty* newValue) { const std::string StructProperty::asString() const { std::ostringstream out; out << "'" << this->id << "' '" << this->name; - std::vector::const_iterator i; - for (i = value.begin(); i != value.end(); ++i) { - out << " " << **i << std::endl; + for (PropertyList::const_iterator i = value.begin(); i != value.end(); ++i) { + out << " " << *i << std::endl; } return out.str(); } -const Property* StructProperty::clone() const { - return new StructProperty(id, name, mode, kinds, value); +Property* StructProperty::clone() const { + return new StructProperty(*this); }; -const std::vector& StructProperty::getValue() const { +const ossie::PropertyList& StructProperty::getValue() const { return value; } const Property* StructProperty::getField(const std::string& fieldId) const { - for (std::vector::const_iterator field = value.begin(); field !=value.end(); ++field) { - if (fieldId == (*field)->getID()) { - return *field; + for 
(PropertyList::const_iterator field = value.begin(); field !=value.end(); ++field) { + if (fieldId == field->getID()) { + return &(*field); } } return 0; @@ -798,20 +766,6 @@ StructSequenceProperty::~StructSequenceProperty() { } -StructSequenceProperty& StructSequenceProperty::operator=( const StructSequenceProperty &src ) -{ - id = src.id; - name =src.name; - mode=src.mode; - commandline = src.commandline; - action=src.action; - kinds=src.kinds; - structdef = src.structdef; - values = values; - return *this; -} - - bool StructSequenceProperty::isNone() const { return (values.size() == 0); } @@ -844,7 +798,7 @@ const std::string StructSequenceProperty::asString() const { return out.str(); } -const Property* StructSequenceProperty::clone() const { +Property* StructSequenceProperty::clone() const { return new StructSequenceProperty(id, name, mode, structdef, kinds, values); } diff --git a/redhawk/src/control/parser/PropertyRef.cpp b/redhawk/src/control/parser/PropertyRef.cpp new file mode 100644 index 000000000..29661695a --- /dev/null +++ b/redhawk/src/control/parser/PropertyRef.cpp @@ -0,0 +1,32 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include +#include + +using namespace ossie; + +PropertyRef::~PropertyRef() { + property.reset(); +} + +const std::string PropertyRef::asString() const { + return property->asString(); +} diff --git a/redhawk/src/control/parser/SoftPkg.cpp b/redhawk/src/control/parser/SoftPkg.cpp index 0c90a147f..d1946ef30 100644 --- a/redhawk/src/control/parser/SoftPkg.cpp +++ b/redhawk/src/control/parser/SoftPkg.cpp @@ -19,11 +19,24 @@ */ #include -#include "ossie/SoftPkg.h" + +#include +#include + +#include +#include +#include + #include "internal/spd-parser.h" using namespace ossie; +SoftPkg::SoftPkg() : + _spd(0), + _spdFile() +{ +} + SoftPkg::SoftPkg(std::istream& input, const std::string& spdFile) throw (ossie::parser_error) { this->load(input, spdFile); } @@ -53,12 +66,28 @@ void SoftPkg::load(std::istream& input, const std::string& spdFile) throw (ossie } } -SPD::PropertyRef::~PropertyRef() { - property.reset(); +void SoftPkg::loadProperties(std::istream& input) +{ + _properties = boost::make_shared(); + _properties->load(input); } -const std::string SPD::PropertyRef::asString() const { - return property->asString(); +void SoftPkg::loadDescriptor(std::istream& input) +{ + _descriptor = boost::make_shared(); + _descriptor->load(input); +} + +const SPD::Implementation* SoftPkg::getImplementation(const std::string& id) const +{ + assert(_spd.get() != 0); + BOOST_FOREACH(const SPD::Implementation& implementation, _spd->implementations) { + if (id == implementation.getID()) { + return &implementation; + } + } + + return 0; } const std::string SPD::SoftPkgRef::asString() const { @@ -66,3 +95,30 @@ const std::string SPD::SoftPkgRef::asString() const { out << "SoftPkgRef localfile: " << this->localfile << " implref: " << this->implref; return out.str(); } + +std::ostream& ossie::operator<<(std::ostream& out, SPD::Code::CodeType type) +{ + switch (type) { + case SPD::Code::EXECUTABLE: + out << "Executable"; + break; + case SPD::Code::KERNEL_MODULE: + out << "KernelModule"; 
+ break; + case SPD::Code::SHARED_LIBRARY: + out << "SharedLibrary"; + break; + case SPD::Code::DRIVER: + out << "Driver"; + break; + default: + break; + } + return out; +} + +std::ostream& ossie::operator<<(std::ostream& out, const SPD::Code& code) +{ + out << "localfile: " << code.localfile << " type: " << code.type << " entrypoint: " << code.entrypoint; + return out; +} diff --git a/redhawk/src/control/parser/SoftwareAssembly.cpp b/redhawk/src/control/parser/SoftwareAssembly.cpp index e88908f78..6d6ec5cca 100644 --- a/redhawk/src/control/parser/SoftwareAssembly.cpp +++ b/redhawk/src/control/parser/SoftwareAssembly.cpp @@ -18,28 +18,97 @@ * along with this program. If not, see http://www.gnu.org/licenses/. */ +#include + #include "ossie/SoftwareAssembly.h" #include "internal/sad-parser.h" using namespace ossie; -SoftwareAssembly::SoftwareAssembly(std::istream& input) throw (ossie::parser_error) { +const ComponentInstantiation* SoftwareAssembly::HostCollocation::getInstantiation(const std::string& refid) const +{ + BOOST_FOREACH(const ComponentPlacement& placement, placements) { + const ComponentInstantiation* instantiation = placement.getInstantiation(refid); + if (instantiation) { + return instantiation; + } + } + return 0; +} + +SoftwareAssembly::SoftwareAssembly() : + _sad(0) +{ +} + +SoftwareAssembly::SoftwareAssembly(std::istream& input) throw (ossie::parser_error) : + _sad(0) +{ this->load(input); } void SoftwareAssembly::load(std::istream& input) throw (ossie::parser_error) { _sad = ossie::internalparser::parseSAD(input); + + // Validate that all componentplacement elements, both in hostcollocation + // elements and in partitioning, have componentfileref values referencing + // valid componentfile elements + BOOST_FOREACH(HostCollocation& collocation, _sad->partitioning.collocations) { + validateComponentPlacements(collocation.placements); + } + validateComponentPlacements(_sad->partitioning.placements); + + // Make sure assemblycontroller is set, and 
references a real component + if (_sad->assemblycontroller.empty()) { + throw ossie::parser_error("assemblycontroller is not set"); + } + if (!getComponentInstantiation(_sad->assemblycontroller)) { + throw ossie::parser_error("assemblycontroller has invalid componentinstantiationref '" + _sad->assemblycontroller + "'"); + } + + validateExternalPorts(_sad->externalports); + validateExternalProperties(_sad->externalproperties); +} + +void SoftwareAssembly::validateComponentPlacements(std::vector& placements) +{ + BOOST_FOREACH(ComponentPlacement& placement, placements) { + const std::string& file_ref = placement.getFileRefId(); + const ComponentFile* file = getComponentFile(file_ref); + if (!file) { + throw ossie::parser_error("componentplacement has invalid componentfileref '" + file_ref + "'"); + } + placement.filename = file->filename; + } +} + +void SoftwareAssembly::validateExternalPorts(std::vector& ports) +{ + BOOST_FOREACH(SoftwareAssembly::Port& port, ports) { + if (!getComponentInstantiation(port.componentrefid)) { + throw ossie::parser_error("external port '" + port.getExternalName() + "' has invalid componentrefid '" + port.componentrefid + "'"); + } + } } -const char* SoftwareAssembly::getID() const { +void SoftwareAssembly::validateExternalProperties(std::vector& properties) +{ + BOOST_FOREACH(SoftwareAssembly::Property& property, properties) { + if (!getComponentInstantiation(property.comprefid)) { + throw ossie::parser_error("external property '" + property.getExternalID() + "' has invalid comprefid '" + property.comprefid + "'"); + } + } +} + +const std::string& SoftwareAssembly::getID() const { assert(_sad.get() != 0); - return _sad->id.c_str(); + return _sad->id; } -const char* SoftwareAssembly::getName() const { +const std::string& SoftwareAssembly::getName() const { assert(_sad.get() != 0); - return _sad->name.c_str(); + return _sad->name; } const std::vector& SoftwareAssembly::getComponentFiles() const { @@ -66,6 +135,12 @@ std::vector 
SoftwareAssembly::getAllComponents() const { return result; } +const std::vector& SoftwareAssembly::getComponentPlacements() const +{ + assert(_sad.get() != 0); + return _sad->partitioning.placements; +} + const std::vector& SoftwareAssembly::getHostCollocations() const { assert(_sad.get() != 0); return _sad->partitioning.collocations; @@ -76,22 +151,22 @@ const std::vector& SoftwareAssembly::getConnections() const { return _sad->connections; } -const char* SoftwareAssembly::getSPDById(const char* refid) const { +const ComponentFile* SoftwareAssembly::getComponentFile(const std::string& refid) const { assert(_sad.get() != 0); const std::vector& componentFiles = getComponentFiles(); std::vector::const_iterator i; for (i = componentFiles.begin(); i != componentFiles.end(); ++i) { - if (strcmp(i->getID(), refid) == 0) { - return i->getFileName(); + if (i->getID() == refid) { + return &(*i); } } return 0; } -const char* SoftwareAssembly::getAssemblyControllerRefId() const { +const std::string& SoftwareAssembly::getAssemblyControllerRefId() const { assert(_sad.get() != 0); - return _sad->assemblycontroller.c_str(); + return _sad->assemblycontroller; } const std::vector& SoftwareAssembly::getExternalPorts() const { @@ -104,7 +179,45 @@ const std::vector& SoftwareAssembly::getExternalProp return _sad->externalproperties; } -const std::vector& SoftwareAssembly::getUsesDevices() const { +const std::vector& SoftwareAssembly::getOptions() const { + assert(_sad.get() != 0); + return _sad->options; +} + +const std::vector& SoftwareAssembly::getUsesDevices() const { assert(_sad.get() != 0); return _sad->usesdevice; } + +const ComponentPlacement * SoftwareAssembly::getAssemblyControllerPlacement() const +{ + BOOST_FOREACH(const ComponentPlacement& placement, _sad->partitioning.placements) { + const ComponentInstantiation* instantiation = placement.getInstantiation(_sad->assemblycontroller); + if (instantiation) return &(placement); + } + + BOOST_FOREACH(HostCollocation& 
collocation, _sad->partitioning.collocations) { + BOOST_FOREACH(const ComponentPlacement& placement, collocation.getComponents()) { + const ComponentInstantiation* instantiation = placement.getInstantiation(_sad->assemblycontroller); + if (instantiation) return &(placement); + } + } + return NULL; +} + +const ComponentInstantiation* SoftwareAssembly::getComponentInstantiation(const std::string& refid) const +{ + BOOST_FOREACH(HostCollocation& collocation, _sad->partitioning.collocations) { + const ComponentInstantiation* instantiation = collocation.getInstantiation(refid); + if (instantiation) { + return instantiation; + } + } + BOOST_FOREACH(const ComponentPlacement& placement, _sad->partitioning.placements) { + const ComponentInstantiation* instantiation = placement.getInstantiation(refid); + if (instantiation) { + return instantiation; + } + } + return 0; +} diff --git a/redhawk/src/control/parser/componentProfile.cpp b/redhawk/src/control/parser/componentProfile.cpp index 5689b15a5..9a5d74a30 100644 --- a/redhawk/src/control/parser/componentProfile.cpp +++ b/redhawk/src/control/parser/componentProfile.cpp @@ -18,6 +18,8 @@ * along with this program. If not, see http://www.gnu.org/licenses/. 
*/ +#include + #include "ossie/componentProfile.h" using namespace ossie; @@ -35,19 +37,19 @@ ComponentProperty *ossie::new_clone(const ComponentProperty &a) { // // ComponentFile // -const char* ComponentFile::getFileName() const { - return filename.c_str(); +const std::string& ComponentFile::getFileName() const { + return filename; }; -const char* ComponentFile::getID() const { - return id.c_str(); +const std::string& ComponentFile::getID() const { + return id; }; // // ComponentProperty // -const char* ComponentProperty::getID() const { - return _id.c_str(); +const std::string& ComponentProperty::getID() const { + return _id; } @@ -122,49 +124,27 @@ const std::string StructSequencePropertyRef::_asString() const { // // ComponentInstantiation // -ComponentInstantiation::ComponentInstantiation() +ComponentInstantiation::ComponentInstantiation() { } -ComponentInstantiation::ComponentInstantiation(const ComponentInstantiation& other) { - instantiationId = other.instantiationId; - _startOrder = other._startOrder; - usageName = other.usageName; - namingservicename = other.namingservicename; - affinityProperties = other.affinityProperties; - loggingConfig = other.loggingConfig; - properties = other.properties; - -} - -ComponentInstantiation& ComponentInstantiation::operator=(const ComponentInstantiation &other) { - instantiationId = other.instantiationId; - _startOrder = other._startOrder; - usageName = other.usageName; - namingservicename = other.namingservicename; - properties = other.properties; - affinityProperties = other.affinityProperties; - loggingConfig = other.loggingConfig; - return *this; +ComponentInstantiation::~ComponentInstantiation() { } -ComponentInstantiation::~ComponentInstantiation() { +const std::string& ComponentInstantiation::getID() const { + return instantiationId; } -const char* ComponentInstantiation::getID() const { - return instantiationId.c_str(); +bool ComponentInstantiation::hasStartOrder() const { + return startOrder.isSet(); } -const 
char* ComponentInstantiation::getStartOrder() const { - return _startOrder.c_str(); +int ComponentInstantiation::getStartOrder() const { + return *startOrder; } -const char* ComponentInstantiation::getUsageName() const { - if (usageName.isSet()) { - return usageName->c_str(); - } else { - return 0; - } +const std::string& ComponentInstantiation::getUsageName() const { + return usageName; } const ossie::ComponentPropertyList & ComponentInstantiation::getProperties() const { @@ -172,15 +152,11 @@ const ossie::ComponentPropertyList & ComponentInstantiation::getProperties() con } bool ComponentInstantiation::isNamingService() const { - return namingservicename.isSet(); + return !namingservicename.empty(); } -const char* ComponentInstantiation::getFindByNamingServiceName() const { - if (namingservicename.isSet()) { - return namingservicename->c_str(); - } else { - return 0; - } +const std::string& ComponentInstantiation::getFindByNamingServiceName() const { + return namingservicename; } @@ -192,50 +168,31 @@ const ComponentInstantiation::LoggingConfig &ComponentInstantiation::getLoggingC return loggingConfig; } - -// -// ComponentPlacement -// -const char* ComponentPlacement::getDeployOnDeviceID() const { - if (deployOnDeviceID.isSet()) { - return deployOnDeviceID->c_str(); - } else { - return 0; - } -} - -const char* ComponentPlacement::getCompositePartOfDeviceID() const { - if (compositePartOfDeviceID.isSet()) { - return compositePartOfDeviceID->c_str(); - } else { - return 0; - } +const ossie::ComponentPropertyList & ComponentInstantiation::getDeployerRequires() const { + return deployerrequires; } -const std::string ComponentPlacement::getDPDFile() const { - if (DPDFile.isSet()) { - return DPDFile->c_str(); - } else { - return 0; - } +const ossie::ComponentPropertyList & ComponentInstantiation::getDeviceRequires() const { + return devicerequires; } +// +// ComponentPlacement +// const std::vector& ComponentPlacement::getInstantiations() const { return 
instantiations; }; -const char* ComponentPlacement::getFileRefId() const { - return _componentFileRef.c_str(); +const std::string& ComponentPlacement::getFileRefId() const { + return _componentFileRef; } -bool ComponentPlacement::isDeployOn() const { - return deployOnDeviceID.isSet(); -} - -bool ComponentPlacement::isCompositePartOf() const { - return compositePartOfDeviceID.isSet(); -} - -bool ComponentPlacement::isDomainManager() const { - return false; +const ComponentInstantiation* ComponentPlacement::getInstantiation(const std::string& refid) const +{ + BOOST_FOREACH(const ComponentInstantiation& instantiation, instantiations) { + if (instantiation.getID() == refid) { + return &instantiation; + } + } + return 0; } diff --git a/redhawk/src/control/parser/debug.cpp b/redhawk/src/control/parser/debug.cpp deleted file mode 100644 index cf682e8e5..000000000 --- a/redhawk/src/control/parser/debug.cpp +++ /dev/null @@ -1,19 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK core. - * - * REDHAWK core is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ diff --git a/redhawk/src/control/parser/internal/dcd-parser.cpp b/redhawk/src/control/parser/internal/dcd-parser.cpp new file mode 100644 index 000000000..5b136a79a --- /dev/null +++ b/redhawk/src/control/parser/internal/dcd-parser.cpp @@ -0,0 +1,219 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include "dcd-parser.h" +#include "dcd-pimpl.h" + +std::auto_ptr +ossie::internalparser::parseDCD(std::istream& input) throw (ossie::parser_error) +{ + try { + // Instantiate individual parsers. 
+ // + ::dcd::deviceconfiguration_pimpl deviceconfiguration_p; + ::xml_schema::string_pimpl string_p; + ::dcd::devicemanagersoftpkg_pimpl devicemanagersoftpkg_p; + ::dcd::localfile_pimpl localfile_p; + ::dcd::componentfiles_pimpl componentfiles_p; + ::dcd::componentfile_pimpl componentfile_p; + ::dcd::partitioning_pimpl partitioning_p; + ::dcd::componentplacement_pimpl componentplacement_p; + ::dcd::componentfileref_pimpl componentfileref_p; + ::dcd::deployondevice_pimpl deployondevice_p; + ::dcd::compositepartofdevice_pimpl compositepartofdevice_p; + ::dcd::devicepkgfile_pimpl devicepkgfile_p; + ::dcd::componentinstantiation_pimpl componentinstantiation_p; + ::dcd::componentproperties_pimpl componentproperties_p; + ::dcd::simpleref_pimpl simpleref_p; + ::dcd::simplesequenceref_pimpl simplesequenceref_p; + ::dcd::values_pimpl values_p; + ::dcd::structref_pimpl structref_p; + ::dcd::structsequenceref_pimpl structsequenceref_p; + ::dcd::structvalue_pimpl structvalue_p; + ::dcd::domainmanager_pimpl domainmanager_p; + ::dcd::namingservice_pimpl namingservice_p; + ::dcd::connections_pimpl connections_p; + ::dcd::connectinterface_pimpl connectinterface_p; + ::dcd::usesport_pimpl usesport_p; + ::dcd::componentinstantiationref_pimpl componentinstantiationref_p; + ::dcd::devicethatloadedthiscomponentref_pimpl devicethatloadedthiscomponentref_p; + ::dcd::deviceusedbythiscomponentref_pimpl deviceusedbythiscomponentref_p; + ::dcd::findby_pimpl findby_p; + ::dcd::domainfinder_pimpl domainfinder_p; + ::dcd::providesport_pimpl providesport_p; + ::dcd::componentsupportedinterface_pimpl componentsupportedinterface_p; + ::dcd::filesystemnames_pimpl filesystemnames_p; + ::dcd::filesystemname_pimpl filesystemname_p; + ::dcd::affinity_pimpl affinity_p; + ::dcd::loggingconfig_pimpl loggingconfig_p; + ::dcd::deployerrequires_pimpl deployerrequires_p; + ::dcd::idvalue_pimpl idvalue_p; + + + // Connect the parsers together. 
+ // + deviceconfiguration_p.parsers (string_p, + devicemanagersoftpkg_p, + componentfiles_p, + partitioning_p, + connections_p, + domainmanager_p, + filesystemnames_p, + string_p, + string_p); + + devicemanagersoftpkg_p.parsers (localfile_p); + + localfile_p.parsers (string_p); + + componentfiles_p.parsers (componentfile_p); + + componentfile_p.parsers (localfile_p, + string_p, + string_p); + + partitioning_p.parsers (componentplacement_p); + + componentplacement_p.parsers (componentfileref_p, + deployondevice_p, + compositepartofdevice_p, + devicepkgfile_p, + componentinstantiation_p); + + componentfileref_p.parsers (string_p); + + deployondevice_p.parsers (string_p); + + compositepartofdevice_p.parsers (string_p); + + devicepkgfile_p.parsers (localfile_p, + string_p); + + componentinstantiation_p.parsers (string_p, + componentproperties_p, + affinity_p, + loggingconfig_p, + deployerrequires_p, + string_p, + string_p); + + affinity_p.parsers (simpleref_p, + simplesequenceref_p, + structref_p, + structsequenceref_p); + + loggingconfig_p.parsers(string_p); + + deployerrequires_p.parsers(idvalue_p); + + componentproperties_p.parsers (simpleref_p, + simplesequenceref_p, + structref_p, + structsequenceref_p); + + idvalue_p.parsers (string_p, + string_p); + + simpleref_p.parsers (string_p, + string_p); + + simplesequenceref_p.parsers (values_p, + string_p); + + values_p.parsers (string_p); + + structref_p.parsers (simpleref_p, + simplesequenceref_p, + string_p); + + structsequenceref_p.parsers (structvalue_p, + string_p); + + structvalue_p.parsers (simpleref_p, + simplesequenceref_p); + + domainmanager_p.parsers (namingservice_p, + string_p); + + namingservice_p.parsers (string_p); + + connections_p.parsers (connectinterface_p); + + connectinterface_p.parsers (usesport_p, + providesport_p, + componentsupportedinterface_p, + findby_p, + string_p); + + usesport_p.parsers (string_p, + componentinstantiationref_p, + devicethatloadedthiscomponentref_p, + 
deviceusedbythiscomponentref_p, + findby_p); + + componentinstantiationref_p.parsers (string_p); + + devicethatloadedthiscomponentref_p.parsers (string_p); + + deviceusedbythiscomponentref_p.parsers (string_p, + string_p); + + findby_p.parsers (namingservice_p, + string_p, + domainfinder_p); + + domainfinder_p.parsers (string_p, + string_p); + + providesport_p.parsers (string_p, + componentinstantiationref_p, + devicethatloadedthiscomponentref_p, + deviceusedbythiscomponentref_p, + findby_p); + + componentsupportedinterface_p.parsers (string_p, + componentinstantiationref_p, + findby_p); + + filesystemnames_p.parsers (filesystemname_p); + + filesystemname_p.parsers (string_p, + string_p); + + // Parse the XML document. + // + ::xml_schema::document doc_p ( + deviceconfiguration_p, + "", + "deviceconfiguration"); + + deviceconfiguration_p.pre (); + doc_p.parse (input); + return (deviceconfiguration_p.post_deviceconfiguration ()); + } catch (const ::xml_schema::exception& e) { + std::ostringstream err; + err << e; + throw ossie::parser_error(err.str()); + } catch (const std::ios_base::failure& e) { + throw ossie::parser_error(e.what()); + } +} diff --git a/redhawk/src/control/parser/internal/dcd-parser.h b/redhawk/src/control/parser/internal/dcd-parser.h index 134f86a84..b7a775a7c 100644 --- a/redhawk/src/control/parser/internal/dcd-parser.h +++ b/redhawk/src/control/parser/internal/dcd-parser.h @@ -21,198 +21,14 @@ #ifndef __DCD_PARSER_H__ #define __DCD_PARSER_H__ -#include -#include -#include"ossie/exceptions.h" -#include "dcd-pimpl.h" +#include -#include +#include +#include namespace ossie { namespace internalparser { - inline std::auto_ptr parseDCD(std::istream& input) throw (ossie::parser_error) { - try { - // Instantiate individual parsers. 
- // - ::dcd::deviceconfiguration_pimpl deviceconfiguration_p; - ::xml_schema::string_pimpl string_p; - ::dcd::devicemanagersoftpkg_pimpl devicemanagersoftpkg_p; - ::dcd::localfile_pimpl localfile_p; - ::dcd::componentfiles_pimpl componentfiles_p; - ::dcd::componentfile_pimpl componentfile_p; - ::dcd::partitioning_pimpl partitioning_p; - ::dcd::componentplacement_pimpl componentplacement_p; - ::dcd::componentfileref_pimpl componentfileref_p; - ::dcd::deployondevice_pimpl deployondevice_p; - ::dcd::compositepartofdevice_pimpl compositepartofdevice_p; - ::dcd::devicepkgfile_pimpl devicepkgfile_p; - ::dcd::componentinstantiation_pimpl componentinstantiation_p; - ::dcd::componentproperties_pimpl componentproperties_p; - ::dcd::simpleref_pimpl simpleref_p; - ::dcd::simplesequenceref_pimpl simplesequenceref_p; - ::dcd::values_pimpl values_p; - ::dcd::structref_pimpl structref_p; - ::dcd::structsequenceref_pimpl structsequenceref_p; - ::dcd::structvalue_pimpl structvalue_p; - ::dcd::domainmanager_pimpl domainmanager_p; - ::dcd::namingservice_pimpl namingservice_p; - ::dcd::connections_pimpl connections_p; - ::dcd::connectinterface_pimpl connectinterface_p; - ::dcd::usesport_pimpl usesport_p; - ::dcd::componentinstantiationref_pimpl componentinstantiationref_p; - ::dcd::devicethatloadedthiscomponentref_pimpl devicethatloadedthiscomponentref_p; - ::dcd::deviceusedbythiscomponentref_pimpl deviceusedbythiscomponentref_p; - ::dcd::findby_pimpl findby_p; - ::dcd::domainfinder_pimpl domainfinder_p; - ::dcd::providesport_pimpl providesport_p; - ::dcd::componentsupportedinterface_pimpl componentsupportedinterface_p; - ::dcd::filesystemnames_pimpl filesystemnames_p; - ::dcd::filesystemname_pimpl filesystemname_p; - ::dcd::affinity_pimpl affinity_p; - ::dcd::loggingconfig_pimpl loggingconfig_p; - - - // Connect the parsers together. 
- // - deviceconfiguration_p.parsers (string_p, - devicemanagersoftpkg_p, - componentfiles_p, - partitioning_p, - connections_p, - domainmanager_p, - filesystemnames_p, - string_p, - string_p); - - devicemanagersoftpkg_p.parsers (localfile_p); - - localfile_p.parsers (string_p); - - componentfiles_p.parsers (componentfile_p); - - componentfile_p.parsers (localfile_p, - string_p, - string_p); - - partitioning_p.parsers (componentplacement_p); - - componentplacement_p.parsers (componentfileref_p, - deployondevice_p, - compositepartofdevice_p, - devicepkgfile_p, - componentinstantiation_p); - - componentfileref_p.parsers (string_p); - - deployondevice_p.parsers (string_p); - - compositepartofdevice_p.parsers (string_p); - - devicepkgfile_p.parsers (localfile_p, - string_p); - - componentinstantiation_p.parsers (string_p, - componentproperties_p, - affinity_p, - loggingconfig_p, - string_p); - - affinity_p.parsers (simpleref_p, - simplesequenceref_p, - structref_p, - structsequenceref_p); - - loggingconfig_p.parsers(string_p); - - componentproperties_p.parsers (simpleref_p, - simplesequenceref_p, - structref_p, - structsequenceref_p); - - simpleref_p.parsers (string_p, - string_p); - - simplesequenceref_p.parsers (values_p, - string_p); - - values_p.parsers (string_p); - - structref_p.parsers (simpleref_p, - simplesequenceref_p, - string_p); - - structsequenceref_p.parsers (structvalue_p, - string_p); - - structvalue_p.parsers (simpleref_p, - simplesequenceref_p); - - domainmanager_p.parsers (namingservice_p, - string_p); - - namingservice_p.parsers (string_p); - - connections_p.parsers (connectinterface_p); - - connectinterface_p.parsers (usesport_p, - providesport_p, - componentsupportedinterface_p, - findby_p, - string_p); - - usesport_p.parsers (string_p, - componentinstantiationref_p, - devicethatloadedthiscomponentref_p, - deviceusedbythiscomponentref_p, - findby_p); - - componentinstantiationref_p.parsers (string_p); - - 
devicethatloadedthiscomponentref_p.parsers (string_p); - - deviceusedbythiscomponentref_p.parsers (string_p, - string_p); - - findby_p.parsers (namingservice_p, - string_p, - domainfinder_p); - - domainfinder_p.parsers (string_p, - string_p); - - providesport_p.parsers (string_p, - componentinstantiationref_p, - devicethatloadedthiscomponentref_p, - deviceusedbythiscomponentref_p, - findby_p); - - componentsupportedinterface_p.parsers (string_p, - componentinstantiationref_p, - findby_p); - - filesystemnames_p.parsers (filesystemname_p); - - filesystemname_p.parsers (string_p, - string_p); - - // Parse the XML document. - // - ::xml_schema::document doc_p ( - deviceconfiguration_p, - "", - "deviceconfiguration"); - - deviceconfiguration_p.pre (); - doc_p.parse (input); - return (deviceconfiguration_p.post_deviceconfiguration ()); - } catch (const ::xml_schema::exception& e) { - std::ostringstream err; - err << e; - throw ossie::parser_error(err.str()); - } catch (const std::ios_base::failure& e) { - throw ossie::parser_error(e.what()); - } - } + std::auto_ptr parseDCD(std::istream& input) throw (ossie::parser_error); } } diff --git a/redhawk/src/control/parser/internal/dcd-pimpl.cpp b/redhawk/src/control/parser/internal/dcd-pimpl.cpp index 6ed35d46c..3b5adc8e1 100644 --- a/redhawk/src/control/parser/internal/dcd-pimpl.cpp +++ b/redhawk/src/control/parser/internal/dcd-pimpl.cpp @@ -30,6 +30,8 @@ CREATE_LOGGER(dcd_parser); +rh_logger::LoggerPtr dcd::parserLog; + namespace dcd { // deviceconfiguration_pimpl @@ -63,9 +65,9 @@ namespace dcd } void deviceconfiguration_pimpl:: - partitioning (const ::std::vector& partitioning) + partitioning (const ::std::vector& partitioning) { - LOG_TRACE(dcd_parser, "set partitioning") + RH_TRACE(dcd::parserLog, "set partitioning") assert(_dcd.get() != 0); _dcd->componentPlacements = partitioning; } @@ -73,7 +75,7 @@ namespace dcd void deviceconfiguration_pimpl:: domainmanager (const ::std::string& domainmanager) { - 
LOG_TRACE(dcd_parser, "set domainmanager") + RH_TRACE(dcd::parserLog, "set domainmanager") assert(_dcd.get() != 0); _dcd->domainManagerName = domainmanager; } @@ -81,7 +83,7 @@ namespace dcd void deviceconfiguration_pimpl:: connections (const ::std::vector& connections) { - LOG_TRACE(dcd_parser, "set connections") + RH_TRACE(dcd::parserLog, "set connections") assert(_dcd.get() != 0); _dcd->connections = connections; } @@ -111,7 +113,7 @@ namespace dcd ::std::auto_ptr deviceconfiguration_pimpl:: post_deviceconfiguration () { - LOG_TRACE(dcd_parser, "post deviceconfiguration") + RH_TRACE(dcd::parserLog, "post deviceconfiguration") return _dcd; } @@ -216,21 +218,21 @@ const ::ossie::ComponentFile &componentfile_pimpl:: void partitioning_pimpl:: pre () { - LOG_TRACE(dcd_parser, "pre partitioning"); + RH_TRACE(dcd::parserLog, "pre partitioning"); componentPlacements.clear(); } void partitioning_pimpl:: - componentplacement (const ::ossie::ComponentPlacement& componentplacement) + componentplacement (const ::ossie::DevicePlacement& componentplacement) { - LOG_TRACE(dcd_parser, "adding component placement " << componentplacement.getFileRefId()); + RH_TRACE(dcd::parserLog, "adding component placement " << componentplacement.getFileRefId()); componentPlacements.push_back(componentplacement); } - ::std::vector partitioning_pimpl:: + ::std::vector partitioning_pimpl:: post_partitioning () { - LOG_TRACE(dcd_parser, "post partitioning"); + RH_TRACE(dcd::parserLog, "post partitioning"); return componentPlacements; } @@ -240,7 +242,7 @@ const ::ossie::ComponentFile &componentfile_pimpl:: void componentplacement_pimpl:: pre () { - componentPlacement = ossie::ComponentPlacement(); + componentPlacement = ossie::DevicePlacement(); } void componentplacement_pimpl:: @@ -258,7 +260,7 @@ const ::ossie::ComponentFile &componentfile_pimpl:: void componentplacement_pimpl:: compositepartofdevice (const ::std::string& compositepartofdevice) { - LOG_TRACE(dcd_parser, "composite part of device 
" << compositepartofdevice); + RH_TRACE(dcd::parserLog, "composite part of device " << compositepartofdevice); componentPlacement.compositePartOfDeviceID = compositepartofdevice; } @@ -270,14 +272,14 @@ const ::ossie::ComponentFile &componentfile_pimpl:: void componentplacement_pimpl:: componentinstantiation (const ::ossie::ComponentInstantiation& componentinstantiation) { - LOG_TRACE(dcd_parser, "adding component instantiation " << componentinstantiation.getID() << " " << componentinstantiation.getUsageName()); + RH_TRACE(dcd::parserLog, "adding component instantiation " << componentinstantiation.getID() << " " << componentinstantiation.getUsageName()); componentPlacement.instantiations.push_back(componentinstantiation); } - const ::ossie::ComponentPlacement& componentplacement_pimpl:: + const ::ossie::DevicePlacement& componentplacement_pimpl:: post_componentplacement () { - LOG_TRACE(dcd_parser, "post componentplacement"); + RH_TRACE(dcd::parserLog, "post componentplacement"); return componentPlacement; } @@ -335,14 +337,14 @@ const ::ossie::ComponentFile &componentfile_pimpl:: void compositepartofdevice_pimpl:: refid (const ::std::string& refid) { - LOG_TRACE(dcd_parser, "compositepartofdevice refid" << compositepartofdevice); + RH_TRACE(dcd::parserLog, "compositepartofdevice refid" << compositepartofdevice); compositepartofdevice = refid; } ::std::string compositepartofdevice_pimpl:: post_compositepartofdevice () { - LOG_TRACE(dcd_parser, "post compositepartofdevice" << compositepartofdevice); + RH_TRACE(dcd::parserLog, "post compositepartofdevice" << compositepartofdevice); return compositepartofdevice; } @@ -383,17 +385,25 @@ const ::ossie::ComponentFile &componentfile_pimpl:: componentInstantiation = ossie::ComponentInstantiation(); } + void componentinstantiation_pimpl:: + startorder (const ::std::string& startorder) + { + // We have to parse the string into an integer here, rather than declaring + // startorder as an integer in the schema, for backwards 
compatibility. + componentInstantiation.startOrder = atoi(startorder.c_str()); + } + void componentinstantiation_pimpl:: usagename (const ::std::string& usagename) { - LOG_TRACE(dcd_parser, "setting usage name" << usagename); + RH_TRACE(dcd::parserLog, "setting usage name" << usagename); componentInstantiation.usageName = usagename; } void componentinstantiation_pimpl:: componentproperties (const ossie::ComponentPropertyList& componentproperties) { - LOG_TRACE(dcd_parser, "component properties"); + RH_TRACE(dcd::parserLog, "component properties"); componentInstantiation.properties = componentproperties; } @@ -406,21 +416,28 @@ const ::ossie::ComponentFile &componentfile_pimpl:: void componentinstantiation_pimpl:: affinity (const ossie::ComponentInstantiation::AffinityProperties& affinityProperties) { - LOG_TRACE(dcd_parser, "affinity properties") + RH_TRACE(dcd::parserLog, "affinity properties") componentInstantiation.affinityProperties = affinityProperties; } void componentinstantiation_pimpl::loggingconfig ( const ossie::ComponentInstantiation::LoggingConfig& log_cfg ) { - LOG_TRACE(dcd_parser, "component instantiation - logging config: " << log_cfg.first << "/" << log_cfg.second ); + RH_TRACE(dcd::parserLog, "component instantiation - logging config: " << log_cfg.first << "/" << log_cfg.second ); componentInstantiation.loggingConfig = log_cfg; } + void componentinstantiation_pimpl::deployerrequires (const ossie::ComponentPropertyList& deployerrequires) + { + RH_TRACE(dcd::parserLog, "deployer requires"); + componentInstantiation.deployerrequires = deployerrequires; + } + + const ::ossie::ComponentInstantiation& componentinstantiation_pimpl:: post_componentinstantiation () { - LOG_TRACE(dcd_parser, "post component instantiation " << componentInstantiation.getID() << " " << componentInstantiation.getUsageName()); + RH_TRACE(dcd::parserLog, "post component instantiation " << componentInstantiation.getID() << " " << componentInstantiation.getUsageName()); return 
componentInstantiation; } @@ -437,28 +454,28 @@ const ::ossie::ComponentInstantiation& componentinstantiation_pimpl:: void affinity_pimpl:: simpleref (const ossie::SimplePropertyRef& simpleref) { - LOG_TRACE(dcd_parser, "Adding simpleref "); + RH_TRACE(dcd::parserLog, "Adding simpleref "); affinityProperties.push_back(simpleref.clone()); } void affinity_pimpl:: simplesequenceref (const ossie::SimpleSequencePropertyRef& simplesequenceref) { - LOG_TRACE(dcd_parser, "Adding simplesequenceref"); + RH_TRACE(dcd::parserLog, "Adding simplesequenceref"); affinityProperties.push_back(simplesequenceref.clone()); } void affinity_pimpl:: structref (const ossie::StructPropertyRef& structref) { - LOG_TRACE(dcd_parser, "Adding structref"); + RH_TRACE(dcd::parserLog, "Adding structref"); affinityProperties.push_back(structref.clone()); } void affinity_pimpl:: structsequenceref (const ossie::StructSequencePropertyRef& structsequenceref) { - LOG_TRACE(dcd_parser, "Adding structsequenceref"); + RH_TRACE(dcd::parserLog, "Adding structsequenceref"); affinityProperties.push_back(structsequenceref.clone()); } @@ -485,10 +502,35 @@ const ::ossie::ComponentInstantiation& componentinstantiation_pimpl:: const ossie::ComponentInstantiation::LoggingConfig& loggingconfig_pimpl::post_loggingconfig ( ) { info.first = this->post_string(); + RH_TRACE(dcd::parserLog, " logging config: first " << info.first << "second " << info.second ); return info; } + // deployerrequires_pimpl + // + + void deployerrequires_pimpl:: + pre () + { + deployerrequires.clear(); + } + + void deployerrequires_pimpl:: + requires (const ossie::IdValue& idvalue) + { + RH_TRACE(dcd::parserLog, "Adding idvalue " << idvalue._id << " value " << idvalue._value ); + deployerrequires.push_back(idvalue.clone()); + } + + const ossie::ComponentPropertyList& deployerrequires_pimpl:: + post_deployerrequires () + { + return deployerrequires; + } + + + // componentproperties_pimpl // @@ -501,28 +543,28 @@ const 
::ossie::ComponentInstantiation& componentinstantiation_pimpl:: void componentproperties_pimpl:: simpleref (const ossie::SimplePropertyRef& simpleref) { - LOG_TRACE(dcd_parser, "Adding simpleref "); + RH_TRACE(dcd::parserLog, "Adding simpleref "); componentProperties.push_back(simpleref.clone()); } void componentproperties_pimpl:: simplesequenceref (const ossie::SimpleSequencePropertyRef& simplesequenceref) { - LOG_TRACE(dcd_parser, "Adding simplesequenceref"); + RH_TRACE(dcd::parserLog, "Adding simplesequenceref"); componentProperties.push_back(simplesequenceref.clone()); } void componentproperties_pimpl:: structref (const ossie::StructPropertyRef& structref) { - LOG_TRACE(dcd_parser, "Adding structref"); + RH_TRACE(dcd::parserLog, "Adding structref"); componentProperties.push_back(structref.clone()); } void componentproperties_pimpl:: structsequenceref (const ossie::StructSequencePropertyRef& structsequenceref) { - LOG_TRACE(dcd_parser, "Adding structsequenceref"); + RH_TRACE(dcd::parserLog, "Adding structsequenceref"); componentProperties.push_back(structsequenceref.clone()); } @@ -580,34 +622,66 @@ const ::ossie::ComponentInstantiation& componentinstantiation_pimpl:: return info; } + // idvalueref_pimpl + // + + void idvalue_pimpl:: + pre () + { + RH_TRACE(dcd::parserLog, "pre idvalue"); + simple = ossie::IdValue(); + } + + void idvalue_pimpl:: + id (const ::std::string& id) + { + RH_TRACE(dcd::parserLog, "idvalue id: " << id); + simple._id = id; + } + + void idvalue_pimpl:: + value (const ::std::string& value) + { + RH_TRACE(dcd::parserLog, "idvalue value: " << value); + simple._value = value; + } + + const ossie::IdValue& idvalue_pimpl:: + post_idvalue () + { + RH_TRACE(dcd::parserLog, "post idvalue"); + return simple; + } + + // simpleref_pimpl // void simpleref_pimpl:: pre () { - LOG_TRACE(dcd_parser, "pre simpleref"); + RH_TRACE(dcd::parserLog, "pre simpleref"); simple = ossie::SimplePropertyRef(); } void simpleref_pimpl:: refid (const ::std::string& 
refid) { - LOG_TRACE(dcd_parser, "simpleref id: " << refid); + RH_TRACE(dcd::parserLog, "simpleref id: " << refid); simple._id = refid; } void simpleref_pimpl:: value (const ::std::string& value) { - LOG_TRACE(dcd_parser, "simpleref value: " << value); + RH_TRACE(dcd::parserLog, "simpleref value: " << value); simple._value = value; } const ossie::SimplePropertyRef& simpleref_pimpl:: post_simpleref () { - LOG_TRACE(dcd_parser, "post simpleref"); + RH_TRACE(dcd::parserLog, "post simpleref"); return simple; } @@ -931,7 +1005,7 @@ const ossie::StructSequencePropertyRef& structsequenceref_pimpl:: void connections_pimpl:: pre () { - LOG_TRACE(dcd_parser, "pre connections"); + RH_TRACE(dcd::parserLog, "pre connections"); connections.clear(); } @@ -944,13 +1018,13 @@ const ossie::StructSequencePropertyRef& structsequenceref_pimpl:: if (strlen(connectinterface.getID()) == 0) { connections.back().connectionId = ossie::generateUUID();; } - LOG_TRACE(dcd_parser, "added connection id " << connections.back().getID() << " type " << connections.back().getType()); + RH_TRACE(dcd::parserLog, "added connection id " << connections.back().getID() << " type " << connections.back().getType()); } ::std::vector connections_pimpl:: post_connections () { - LOG_TRACE(dcd_parser, "post connections"); + RH_TRACE(dcd::parserLog, "post connections"); return connections; } @@ -960,7 +1034,7 @@ const ossie::StructSequencePropertyRef& structsequenceref_pimpl:: void connectinterface_pimpl:: pre () { - LOG_TRACE(dcd_parser, "pre connect interface"); + RH_TRACE(dcd::parserLog, "pre connect interface"); connection.reset(new ossie::Connection()); } @@ -994,14 +1068,14 @@ const ossie::StructSequencePropertyRef& structsequenceref_pimpl:: void connectinterface_pimpl:: id (const ::std::string& id) { - LOG_TRACE(dcd_parser, "connection id " << id); + RH_TRACE(dcd::parserLog, "connection id " << id); connection->connectionId = id; } ::ossie::Connection connectinterface_pimpl:: post_connectinterface () { - 
LOG_TRACE(dcd_parser, "post connect interface"); + RH_TRACE(dcd::parserLog, "post connect interface"); return *connection; } diff --git a/redhawk/src/control/parser/internal/dcd-pimpl.h b/redhawk/src/control/parser/internal/dcd-pimpl.h index 78041f3dc..82047c58a 100644 --- a/redhawk/src/control/parser/internal/dcd-pimpl.h +++ b/redhawk/src/control/parser/internal/dcd-pimpl.h @@ -28,9 +28,12 @@ #define CXX___XML_XSD_DCD_PIMPL_H #include "dcd-pskel.h" +#include namespace dcd { + extern rh_logger::LoggerPtr parserLog; + class deviceconfiguration_pimpl: public deviceconfiguration_pskel { public: @@ -47,7 +50,7 @@ namespace dcd componentfiles (const ::std::vector&); virtual void - partitioning (const ::std::vector&); + partitioning (const ::std::vector&); virtual void domainmanager (const ::std::string&); @@ -148,13 +151,13 @@ namespace dcd pre (); virtual void - componentplacement (const ::ossie::ComponentPlacement&); + componentplacement (const ::ossie::DevicePlacement&); - virtual ::std::vector + virtual ::std::vector post_partitioning (); private: - std::vector componentPlacements; + std::vector componentPlacements; }; class componentplacement_pimpl: public virtual componentplacement_pskel @@ -178,12 +181,12 @@ namespace dcd virtual void componentinstantiation (const ::ossie::ComponentInstantiation&); - virtual const ::ossie::ComponentPlacement& + virtual const ::ossie::DevicePlacement& post_componentplacement (); private: - //std::auto_ptr componentPlacement; - ossie::ComponentPlacement componentPlacement; + //std::auto_ptr componentPlacement; + ossie::DevicePlacement componentPlacement; }; class componentfileref_pimpl: public virtual componentfileref_pskel @@ -265,12 +268,18 @@ namespace dcd virtual void id (const ::std::string&); + virtual void + startorder (const ::std::string&); + virtual void affinity (const ossie::ComponentInstantiation::AffinityProperties& ); virtual void loggingconfig (const ossie::ComponentInstantiation::LoggingConfig& ); + virtual void + 
deployerrequires (const ossie::ComponentPropertyList& ); + virtual const ::ossie::ComponentInstantiation& post_componentinstantiation (); @@ -319,6 +328,24 @@ namespace dcd }; + class deployerrequires_pimpl: public virtual deployerrequires_pskel + { + public: + virtual void + pre (); + + virtual void + requires (const ossie::IdValue &); + + virtual const ossie::ComponentPropertyList& + post_deployerrequires (); + + private: + ossie::ComponentPropertyList deployerrequires; + }; + + + class componentproperties_pimpl: public virtual componentproperties_pskel { public: @@ -379,6 +406,27 @@ namespace dcd std::pair info; }; + + class idvalue_pimpl: public virtual idvalue_pskel + { + public: + virtual void + pre (); + + virtual void + id (const ::std::string&); + + virtual void + value (const ::std::string&); + + virtual const ossie::IdValue& + post_idvalue (); + + private: + ossie::IdValue simple; + }; + + class simpleref_pimpl: public virtual simpleref_pskel { public: diff --git a/redhawk/src/control/parser/internal/dcd.map b/redhawk/src/control/parser/internal/dcd.map index 2e9b544b0..8d7ed6b1f 100644 --- a/redhawk/src/control/parser/internal/dcd.map +++ b/redhawk/src/control/parser/internal/dcd.map @@ -25,8 +25,8 @@ namespace urn:mil:jpeojtrs:sca:dcd { deviceconfiguration "::std::auto_ptr"; componentfiles "const ::std::vector&" "const ::std::vector&"; componentfile "const ::ossie::ComponentFile&" "const ::ossie::ComponentFile&"; - componentplacement "const ::ossie::ComponentPlacement&" "const ::ossie::ComponentPlacement&"; - partitioning "::std::vector"; + componentplacement "const ::ossie::DevicePlacement&" "const ::ossie::DevicePlacement&"; + partitioning "::std::vector"; componentinstantiation "const ::ossie::ComponentInstantiation&" "const ::ossie::ComponentInstantiation&"; connections "::std::vector"; connectinterface "::ossie::Connection"; @@ -56,4 +56,6 @@ namespace urn:mil:jpeojtrs:sca:dcd { deviceusedbythiscomponentref "::std::pair"; affinity "const 
ossie::ComponentInstantiation::AffinityProperties&" "const ossie::ComponentInstantiation::AffinityProperties&"; loggingconfig "const ossie::ComponentInstantiation::LoggingConfig&" "const ossie::ComponentInstantiation::LoggingConfig&"; + deployerrequires "const ossie::ComponentPropertyList&" "const ossie::ComponentPropertyList&"; + idvalue "const ossie::IdValue&" "const ossie::IdValue&"; } diff --git a/redhawk/src/control/parser/internal/dmd-parser.cpp b/redhawk/src/control/parser/internal/dmd-parser.cpp new file mode 100644 index 000000000..84550ee1a --- /dev/null +++ b/redhawk/src/control/parser/internal/dmd-parser.cpp @@ -0,0 +1,85 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include "dmd-parser.h" +#include "dmd-pimpl.h" + +std::auto_ptr +ossie::internalparser::parseDMD(std::istream& input) throw (ossie::parser_error) +{ + using namespace dmd; + + try { + // Instantiate individual parsers. 
+ // + domainmanagerconfiguration_pimpl domainmanagerconfiguration_p; + ::xml_schema::string_pimpl string_p; + domainmanagersoftpkg_pimpl domainmanagersoftpkg_p; + localfile_pimpl localfile_p; + services_pimpl services_p; + service_pimpl service_p; + findby_pimpl findby_p; + namingservice_pimpl namingservice_p; + ::xml_schema::any_simple_type_pimpl any_simple_type_p; + domainfinder_pimpl domainfinder_p; + + // Connect the parsers together. + // + domainmanagerconfiguration_p.parsers (string_p, + domainmanagersoftpkg_p, + services_p, + string_p, + string_p); + + domainmanagersoftpkg_p.parsers (localfile_p); + + localfile_p.parsers (string_p); + + services_p.parsers (service_p); + + service_p.parsers (string_p, + findby_p); + + findby_p.parsers (namingservice_p, + string_p, + domainfinder_p); + + namingservice_p.parsers (any_simple_type_p); + + domainfinder_p.parsers (string_p, + string_p); + + // Parse the XML document. + // + ::xml_schema::document doc_p (domainmanagerconfiguration_p, "", "domainmanagerconfiguration"); + + domainmanagerconfiguration_p.pre (); + doc_p.parse (input); + return (domainmanagerconfiguration_p.post_domainmanagerconfiguration ()); + } catch (const ::xml_schema::exception& e) { + std::ostringstream err; + err << e; + throw ossie::parser_error(err.str()); + } catch (const std::ios_base::failure& e) { + throw ossie::parser_error(e.what()); + } +} diff --git a/redhawk/src/control/parser/internal/dmd-parser.h b/redhawk/src/control/parser/internal/dmd-parser.h index 16d34d188..638762430 100644 --- a/redhawk/src/control/parser/internal/dmd-parser.h +++ b/redhawk/src/control/parser/internal/dmd-parser.h @@ -21,71 +21,14 @@ #ifndef __DMD_PARSER_H__ #define __DMD_PARSER_H__ -#include -#include -#include "dmd-pimpl.h" -#include"ossie/exceptions.h" +#include + +#include +#include namespace ossie { namespace internalparser { - inline std::auto_ptr parseDMD(std::istream& input) throw (ossie::parser_error) { - using namespace dmd; - - try { - // 
Instantiate individual parsers. - // - domainmanagerconfiguration_pimpl domainmanagerconfiguration_p; - ::xml_schema::string_pimpl string_p; - domainmanagersoftpkg_pimpl domainmanagersoftpkg_p; - localfile_pimpl localfile_p; - services_pimpl services_p; - service_pimpl service_p; - findby_pimpl findby_p; - namingservice_pimpl namingservice_p; - ::xml_schema::any_simple_type_pimpl any_simple_type_p; - domainfinder_pimpl domainfinder_p; - - // Connect the parsers together. - // - domainmanagerconfiguration_p.parsers (string_p, - domainmanagersoftpkg_p, - services_p, - string_p, - string_p); - - domainmanagersoftpkg_p.parsers (localfile_p); - - localfile_p.parsers (string_p); - - services_p.parsers (service_p); - - service_p.parsers (string_p, - findby_p); - - findby_p.parsers (namingservice_p, - string_p, - domainfinder_p); - - namingservice_p.parsers (any_simple_type_p); - - domainfinder_p.parsers (string_p, - string_p); - - // Parse the XML document. - // - ::xml_schema::document doc_p (domainmanagerconfiguration_p, "", "domainmanagerconfiguration"); - - domainmanagerconfiguration_p.pre (); - doc_p.parse (input); - return (domainmanagerconfiguration_p.post_domainmanagerconfiguration ()); - } catch (const ::xml_schema::exception& e) { - std::ostringstream err; - err << e; - throw ossie::parser_error(err.str()); - } catch (const std::ios_base::failure& e) { - throw ossie::parser_error(e.what()); - } - } + std::auto_ptr parseDMD(std::istream& input) throw (ossie::parser_error); } } diff --git a/redhawk/src/control/parser/internal/dmd-pimpl.cpp b/redhawk/src/control/parser/internal/dmd-pimpl.cpp index e15e4bb8e..f0019377e 100644 --- a/redhawk/src/control/parser/internal/dmd-pimpl.cpp +++ b/redhawk/src/control/parser/internal/dmd-pimpl.cpp @@ -28,6 +28,8 @@ CREATE_LOGGER(dmd_parser) +rh_logger::LoggerPtr dmd::parserLog; + namespace dmd { // domainmanagerconfiguration_pimpl @@ -36,7 +38,6 @@ namespace dmd void domainmanagerconfiguration_pimpl:: pre () { - 
LOG_TRACE(dmd_parser, "domainmanagerconfiguration pre"); _data.reset(new ossie::DomainManagerConfiguration::DMD()); } @@ -73,7 +74,7 @@ namespace dmd std::auto_ptr domainmanagerconfiguration_pimpl:: post_domainmanagerconfiguration () { - LOG_TRACE(dmd_parser, "domainmanagerconfiguration post"); + RH_TRACE(dmd::parserLog, "domainmanagerconfiguration post"); return _data; } diff --git a/redhawk/src/control/parser/internal/dmd-pimpl.h b/redhawk/src/control/parser/internal/dmd-pimpl.h index fb3f36d48..8bd30c75e 100644 --- a/redhawk/src/control/parser/internal/dmd-pimpl.h +++ b/redhawk/src/control/parser/internal/dmd-pimpl.h @@ -28,9 +28,11 @@ #define CXX___XML_XSD_DMD_PIMPL_H #include "dmd-pskel.h" +#include namespace dmd { + extern rh_logger::LoggerPtr parserLog; class domainmanagerconfiguration_pimpl: public domainmanagerconfiguration_pskel { diff --git a/redhawk/src/control/parser/internal/prf-parser.cpp b/redhawk/src/control/parser/internal/prf-parser.cpp new file mode 100644 index 000000000..6cab4018f --- /dev/null +++ b/redhawk/src/control/parser/internal/prf-parser.cpp @@ -0,0 +1,163 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include + +#include "prf-parser.h" +#include "prf-pimpl.h" + +std::auto_ptr ossie::internalparser::parsePRF(std::istream& input) throw (ossie::parser_error) +{ + using namespace prf; + + try { + // Instantiate individual parsers. + // + properties_pimpl properties_p; + ::xml_schema::string_pimpl string_p; + simple_pimpl simple_p; + Unit_pimpl Unit_p; + range_pimpl range_p; + enumerations_pimpl enumerations_p; + enumeration_pimpl enumeration_p; + kind_pimpl kind_p; + PropertyConfigurationType_pimpl PropertyConfigurationType_p; + action_pimpl action_p; + ActionType_pimpl ActionType_p; + AccessType_pimpl AccessType_p; + PropertyValueType_pimpl PropertyValueType_p; + simpleSequence_pimpl simpleSequence_p; + values_pimpl values_p; + test_pimpl test_p; + inputValue_pimpl inputValue_p; + resultValue_pimpl resultValue_p; + struct_pimpl struct_p; + configurationKind_pimpl configurationKind_p; + StructPropertyConfigurationType_pimpl StructPropertyConfigurationType_p; + structSequence_pimpl structSequence_p; + structValue_pimpl structValue_p; + IsComplex_pimpl IsComplex_p; + IsCommandLine_pimpl IsCommandLine_p; + IsOptional_pimpl IsOptional_p; + simpleRef_pimpl simpleRef_p; + simpleSequenceRef_pimpl simpleSequenceRef_p; + + // Connect the parsers together. 
+ // + properties_p.parsers (string_p, + simple_p, + simpleSequence_p, + test_p, + struct_p, + structSequence_p); + + simple_p.parsers (string_p, + string_p, + Unit_p, + range_p, + enumerations_p, + kind_p, + action_p, + string_p, + AccessType_p, + string_p, + IsComplex_p, + IsCommandLine_p, + IsOptional_p, + PropertyValueType_p); + + range_p.parsers (string_p, + string_p); + + enumerations_p.parsers (enumeration_p); + + enumeration_p.parsers (string_p, + string_p); + + kind_p.parsers (PropertyConfigurationType_p); + + action_p.parsers (ActionType_p); + + simpleSequence_p.parsers (string_p, + values_p, + Unit_p, + range_p, + kind_p, + action_p, + string_p, + AccessType_p, + string_p, + PropertyValueType_p, + IsComplex_p, + IsOptional_p); + + values_p.parsers (string_p); + + test_p.parsers (string_p, + inputValue_p, + resultValue_p, + string_p); + + inputValue_p.parsers (simple_p); + + resultValue_p.parsers (simple_p); + + struct_p.parsers (string_p, + simple_p, + simpleSequence_p, + configurationKind_p, + string_p, + AccessType_p, + string_p); + + configurationKind_p.parsers (StructPropertyConfigurationType_p); + + structSequence_p.parsers (string_p, + struct_p, + structValue_p, + configurationKind_p, + string_p, + AccessType_p, + string_p); + + structValue_p.parsers (simpleRef_p, + simpleSequenceRef_p); + + simpleRef_p.parsers (string_p, + string_p); + + simpleSequenceRef_p.parsers (values_p, + string_p); + + // Parse the XML document. 
+ // + ::xml_schema::document doc_p (properties_p, "properties"); + + properties_p.pre (); + doc_p.parse (input); + return properties_p.post_properties (); + } catch (const ::xml_schema::exception& e) { + std::ostringstream message; + message << e; + throw ossie::parser_error(message.str()); + } catch (const std::ios_base::failure& e) { + throw ossie::parser_error(e.what()); + } +} diff --git a/redhawk/src/control/parser/internal/prf-parser.h b/redhawk/src/control/parser/internal/prf-parser.h index dbfb49807..ff7492163 100644 --- a/redhawk/src/control/parser/internal/prf-parser.h +++ b/redhawk/src/control/parser/internal/prf-parser.h @@ -21,148 +21,14 @@ #ifndef __PRF_PARSER_H__ #define __PRF_PARSER_H__ -#include"prf-pimpl.h" -#include"ossie/exceptions.h" -#include"ossie/Properties.h" +#include + +#include +#include namespace ossie { namespace internalparser { - inline std::auto_ptr parsePRF(std::istream& input) throw (ossie::parser_error) { - using namespace prf; - try { - // Instantiate individual parsers. 
- // - properties_pimpl properties_p; - ::xml_schema::string_pimpl string_p; - simple_pimpl simple_p; - Unit_pimpl Unit_p; - range_pimpl range_p; - enumerations_pimpl enumerations_p; - enumeration_pimpl enumeration_p; - kind_pimpl kind_p; - PropertyConfigurationType_pimpl PropertyConfigurationType_p; - action_pimpl action_p; - ActionType_pimpl ActionType_p; - AccessType_pimpl AccessType_p; - PropertyValueType_pimpl PropertyValueType_p; - simpleSequence_pimpl simpleSequence_p; - values_pimpl values_p; - test_pimpl test_p; - inputValue_pimpl inputValue_p; - resultValue_pimpl resultValue_p; - struct_pimpl struct_p; - configurationKind_pimpl configurationKind_p; - StructPropertyConfigurationType_pimpl StructPropertyConfigurationType_p; - structSequence_pimpl structSequence_p; - structValue_pimpl structValue_p; - IsComplex_pimpl IsComplex_p; - IsCommandLine_pimpl IsCommandLine_p; - IsOptional_pimpl IsOptional_p; - simpleRef_pimpl simpleRef_p; - simpleSequenceRef_pimpl simpleSequenceRef_p; - - // Connect the parsers together. 
- // - properties_p.parsers (string_p, - simple_p, - simpleSequence_p, - test_p, - struct_p, - structSequence_p); - - simple_p.parsers (string_p, - string_p, - Unit_p, - range_p, - enumerations_p, - kind_p, - action_p, - string_p, - AccessType_p, - string_p, - IsComplex_p, - IsCommandLine_p, - IsOptional_p, - PropertyValueType_p); - - range_p.parsers (string_p, - string_p); - - enumerations_p.parsers (enumeration_p); - - enumeration_p.parsers (string_p, - string_p); - - kind_p.parsers (PropertyConfigurationType_p); - - action_p.parsers (ActionType_p); - - simpleSequence_p.parsers (string_p, - values_p, - Unit_p, - range_p, - kind_p, - action_p, - string_p, - AccessType_p, - string_p, - PropertyValueType_p, - IsComplex_p, - IsOptional_p); - - values_p.parsers (string_p); - - test_p.parsers (string_p, - inputValue_p, - resultValue_p, - string_p); - - inputValue_p.parsers (simple_p); - - resultValue_p.parsers (simple_p); - - struct_p.parsers (string_p, - simple_p, - simpleSequence_p, - configurationKind_p, - string_p, - AccessType_p, - string_p); - - configurationKind_p.parsers (StructPropertyConfigurationType_p); - - structSequence_p.parsers (string_p, - struct_p, - structValue_p, - configurationKind_p, - string_p, - AccessType_p, - string_p); - - structValue_p.parsers (simpleRef_p, - simpleSequenceRef_p); - - simpleRef_p.parsers (string_p, - string_p); - - simpleSequenceRef_p.parsers (values_p, - string_p); - - // Parse the XML document. 
- // - ::xml_schema::document doc_p (properties_p, "properties"); - - properties_p.pre (); - doc_p.parse (input); - return properties_p.post_properties (); - } catch (const ::xml_schema::exception& e) { - std::ostringstream message; - message << e; - throw ossie::parser_error(message.str()); - } catch (const std::ios_base::failure& e) { - throw ossie::parser_error(e.what()); - } - } + std::auto_ptr parsePRF(std::istream& input) throw (ossie::parser_error); } } #endif diff --git a/redhawk/src/control/parser/internal/prf-pimpl.cpp b/redhawk/src/control/parser/internal/prf-pimpl.cpp index 3022a6f86..0b275753d 100644 --- a/redhawk/src/control/parser/internal/prf-pimpl.cpp +++ b/redhawk/src/control/parser/internal/prf-pimpl.cpp @@ -34,6 +34,8 @@ using namespace prf; CREATE_LOGGER(prf_parser) +rh_logger::LoggerPtr prf::parserLog; + void Unit_pimpl:: pre () { @@ -55,11 +57,20 @@ pre () { } -::std::string AccessType_pimpl:: +::ossie::Property::AccessType AccessType_pimpl:: post_AccessType () { - const ::std::string& v (post_string ()); - return v; + const std::string& mode = post_string(); + if (mode == "readwrite") { + return ossie::Property::MODE_READWRITE; + } else if (mode == "readonly") { + return ossie::Property::MODE_READONLY; + } else if (mode == "writeonly") { + return ossie::Property::MODE_WRITEONLY; + } else { + RH_WARN(prf::parserLog, "Invalid mode '" << mode << "'"); + return ossie::Property::MODE_DEFAULT; + } } // IsComplex_pimpl @@ -70,11 +81,10 @@ pre () { } -::std::string IsComplex_pimpl:: +bool IsComplex_pimpl:: post_IsComplex () { - const ::std::string& v (post_string ()); - return v; + return post_string() == "true"; } // IsCommandLine_pimpl @@ -85,11 +95,10 @@ pre () { } -::std::string IsCommandLine_pimpl:: +bool IsCommandLine_pimpl:: post_IsCommandLine () { - const ::std::string& v (post_string ()); - return v; + return post_string() == "true"; } // IsOptional_pimpl @@ -100,11 +109,10 @@ pre () { } -::std::string IsOptional_pimpl:: +bool 
IsOptional_pimpl:: post_IsOptional () { - const ::std::string& v (post_string ()); - return v; + return post_string() == "true"; } // action_pimpl @@ -113,15 +121,16 @@ post_IsOptional () void action_pimpl:: pre () { + _type = ossie::Property::ACTION_DEFAULT; } void action_pimpl:: -type (const ::std::string& type) +type (ossie::Property::ActionType type) { _type = type; } -::std::string action_pimpl:: +ossie::Property::ActionType action_pimpl:: post_action () { return _type; @@ -135,11 +144,28 @@ pre () { } -std::string ActionType_pimpl:: +ossie::Property::ActionType ActionType_pimpl:: post_ActionType () { - const ::std::string& v (post_string ()); - return v; + const std::string& action = post_string(); + if (action == "ge") { + return ossie::Property::ACTION_GE; + } else if (action == "gt") { + return ossie::Property::ACTION_GT; + } else if (action == "le") { + return ossie::Property::ACTION_LE; + } else if (action == "lt") { + return ossie::Property::ACTION_LT; + } else if (action == "ne") { + return ossie::Property::ACTION_NE; + } else if (action == "eq") { + return ossie::Property::ACTION_EQ; + } else if (action == "external") { + return ossie::Property::ACTION_EXTERNAL; + } else { + RH_WARN(prf::parserLog, "Invalid action '" << action << "'"); + return ossie::Property::ACTION_DEFAULT; + } } // configurationKind_pimpl @@ -148,16 +174,16 @@ post_ActionType () void configurationKind_pimpl:: pre () { - _kindtype = "configure"; + _kindtype = ossie::Property::KIND_DEFAULT; } void configurationKind_pimpl:: -kindtype (const ::std::string& kindtype) +kindtype (ossie::Property::KindType kindtype) { _kindtype = kindtype; } -std::string configurationKind_pimpl:: +ossie::Property::KindType configurationKind_pimpl:: post_configurationKind () { return _kindtype; @@ -230,16 +256,16 @@ post_inputValue () void kind_pimpl:: pre () { - _type = "configure"; // The default is configure + _type = ossie::Property::KIND_DEFAULT; } void kind_pimpl:: -kindtype (const ::std::string& 
type) +kindtype (ossie::Property::KindType type) { _type = type; } -std::string kind_pimpl:: +ossie::Property::KindType kind_pimpl:: post_kind () { return _type; @@ -253,11 +279,32 @@ pre () { } -std::string PropertyConfigurationType_pimpl:: +ossie::Property::KindType PropertyConfigurationType_pimpl:: post_PropertyConfigurationType () { - const ::std::string& v (post_string ()); - return v; + const std::string& kindtype = post_string(); + RH_TRACE(prf::parserLog, "PropertyConfigurationType = " << kindtype); + if (kindtype == "configure") { + return ossie::Property::KIND_CONFIGURE; + } else if (kindtype == "execparam") { + return ossie::Property::KIND_EXECPARAM; + } else if (kindtype == "allocation") { + return ossie::Property::KIND_ALLOCATION; + } else if (kindtype == "factoryparam") { + return ossie::Property::KIND_FACTORYPARAM; + } else if (kindtype == "test") { + return ossie::Property::KIND_TEST; + } else if (kindtype == "event") { + return ossie::Property::KIND_EVENT; + } else if (kindtype == "message") { + return ossie::Property::KIND_MESSAGE; + } else if (kindtype == "property") { + return ossie::Property::KIND_PROPERTY; + } else { + // The generated part of the parser does not validate that the value is + // in the allowed range; just to be safe, treat it as the default + return ossie::Property::KIND_DEFAULT; + } } // StructPropertyConfigurationType_pimpl @@ -268,10 +315,14 @@ pre () { } -::std::string StructPropertyConfigurationType_pimpl:: +ossie::Property::KindType StructPropertyConfigurationType_pimpl:: post_StructPropertyConfigurationType () { - return post_PropertyConfigurationType (); + ossie::Property::KindType kind = post_PropertyConfigurationType(); + if (kind == ossie::Property::KIND_EXECPARAM) { + RH_WARN(prf::parserLog, "Struct properties cannot have kind 'execparam'"); + } + return kind; } // properties_pimpl @@ -289,19 +340,19 @@ description (const ::std::string& description) } void properties_pimpl:: -simple (const ossie::SimpleProperty& 
simple) +simple (ossie::SimpleProperty* simple) { - LOG_TRACE(prf_parser, "Adding simple property") + RH_TRACE(prf::parserLog, "Adding simple property " << simple->getID()) assert(_prf.get() != 0); - _prf->addProperty(simple.clone()); + _prf->addProperty(simple); } void properties_pimpl:: -simplesequence (const ossie::SimpleSequenceProperty& simplesequence) +simplesequence (ossie::SimpleSequenceProperty* simplesequence) { - LOG_TRACE(prf_parser, "Adding simple sequence property " << simplesequence.getID()); + RH_TRACE(prf::parserLog, "Adding simple sequence property " << simplesequence->getID()); assert(_prf.get() != 0); - _prf->addProperty(simplesequence.clone()); + _prf->addProperty(simplesequence); } void properties_pimpl:: @@ -310,25 +361,25 @@ test () } void properties_pimpl:: -struct_ (const ossie::StructProperty& struct_) +struct_ (ossie::StructProperty* struct_) { - LOG_TRACE(prf_parser, "Adding struct property " << struct_.getID()); + RH_TRACE(prf::parserLog, "Adding struct property " << struct_->getID()); assert(_prf.get() != 0); - _prf->addProperty(struct_.clone()); + _prf->addProperty(struct_); } void properties_pimpl:: -structsequence (const ossie::StructSequenceProperty& structsequence) +structsequence (ossie::StructSequenceProperty* structsequence) { - LOG_TRACE(prf_parser, "Adding struct sequence property " << structsequence.getID()); + RH_TRACE(prf::parserLog, "Adding struct sequence property " << structsequence->getID()); assert(_prf.get() != 0); - _prf->addProperty(structsequence.clone()); + _prf->addProperty(structsequence); } std::auto_ptr properties_pimpl:: post_properties () { - LOG_TRACE(prf_parser, "properties post") + RH_TRACE(prf::parserLog, "properties post") return _prf; } @@ -398,15 +449,15 @@ post_resultValue () void simple_pimpl:: pre () { - _id = ""; - _name = ""; - _type = ""; - _complex = ""; - _mode = ""; - _action = ""; - _optional = ""; - _commandline = ""; - _kinds.clear(); + _id.clear(); + _name.clear(); + _type.clear(); + 
_mode = ossie::Property::MODE_DEFAULT; + _action = ossie::Property::ACTION_DEFAULT; + _kinds = ossie::Property::Kinds(); + _complex = false; + _optional = false; + _commandline = false; _value.reset(); } @@ -420,7 +471,7 @@ description (const ::std::string& description) void simple_pimpl:: value (const ::std::string& value) { - LOG_TRACE(prf_parser, "simple_pimpl value " << value) + RH_TRACE(prf::parserLog, "simple_pimpl value " << value) _value = std::auto_ptr(new std::string(value)); } @@ -440,79 +491,77 @@ enumerations (const ::std::map& enumerations) } void simple_pimpl:: -kind (const ::std::string& kind) +kind (ossie::Property::KindType kind) { - LOG_TRACE(prf_parser, "simple_pimpl kind " << kind) - _kinds.push_back(kind); + RH_TRACE(prf::parserLog, "simple_pimpl kind " << kind) + _kinds |= kind; } void simple_pimpl:: -action (const ::std::string& action) +action (ossie::Property::ActionType action) { - LOG_TRACE(prf_parser, "simple_pimpl action " << action) + RH_TRACE(prf::parserLog, "simple_pimpl action " << action) _action = action; } void simple_pimpl:: id (const ::std::string& id) { - LOG_TRACE(prf_parser, "simple_pimpl id " << id) + RH_TRACE(prf::parserLog, "simple_pimpl id " << id) _id = id; } void simple_pimpl:: -mode (const ::std::string& mode) +mode (ossie::Property::AccessType mode) { - LOG_TRACE(prf_parser, "simple_pimpl mode " << mode) + RH_TRACE(prf::parserLog, "simple_pimpl mode " << mode) _mode = mode; } void simple_pimpl:: name (const ::std::string& name) { - LOG_TRACE(prf_parser, "simple_pimpl name " << name) + RH_TRACE(prf::parserLog, "simple_pimpl name " << name) _name = name; } void simple_pimpl:: type (const ::std::string& type) { - LOG_TRACE(prf_parser, "simple_pimpl type " << type) + RH_TRACE(prf::parserLog, "simple_pimpl type " << type) _type = type; } void simple_pimpl:: -complex (const ::std::string& complex_) +complex (bool complex) { - LOG_TRACE(prf_parser, "simple_pimpl complex " << complex_) - _complex = complex_; + 
RH_TRACE(prf::parserLog, "simple_pimpl complex " << complex) + _complex = complex; } void simple_pimpl:: -optional (const ::std::string& optional) +optional (bool optional) { - LOG_TRACE(prf_parser, "simple_pimpl optional " << optional) + RH_TRACE(prf::parserLog, "simple_pimpl optional " << optional) _optional = optional; } void simple_pimpl:: -commandline (const ::std::string& commandline) +commandline (bool commandline) { - LOG_TRACE(prf_parser, "simple_pimpl _commandline " << _commandline) + RH_TRACE(prf::parserLog, "simple_pimpl commandline " << commandline) _commandline = commandline; } -const ossie::SimpleProperty& simple_pimpl:: +ossie::SimpleProperty* simple_pimpl:: post_simple () { - if (_value.get() != 0) { - LOG_TRACE(prf_parser, "simple_pimpl post " << _id << " " << _name << " " << _value->c_str()); - _prop = ossie::SimpleProperty(_id, _name, _type, _mode, _action, _kinds, _value.get(), _complex, _commandline, _optional); + if (_value.get()) { + RH_TRACE(prf::parserLog, "simple_pimpl post " << _id << " " << _name << " " << _kinds << " " << *_value); } else { - LOG_TRACE(prf_parser, "simple_pimpl post " << _id << " " << _name << " None"); - _prop = ossie::SimpleProperty(_id, _name, _type, _mode, _action, _kinds, 0, _complex, _commandline, _optional); + RH_TRACE(prf::parserLog, "simple_pimpl post " << _id << " " << _name << " " << _kinds << " None"); } - return _prop; + return new ossie::SimpleProperty(_id, _name, _type, _mode, _action, _kinds, _value.get(), _complex, _commandline, _optional); } // simpleRef_pimpl @@ -576,15 +625,14 @@ post_simpleSequenceRef () void simpleSequence_pimpl:: pre () { - _id = ""; - _name = ""; - _type = ""; - _complex = ""; - _mode = ""; - _action = ""; - _optional = ""; - - _kinds.clear(); + _id.clear(); + _name.clear(); + _type.clear(); + _mode = ossie::Property::MODE_DEFAULT; + _action = ossie::Property::ACTION_DEFAULT; + _kinds = ossie::Property::Kinds(); + _complex = false; + _optional = false; _values.clear(); } @@ 
-600,9 +648,8 @@ values (const ::std::vector& values) for (unsigned i=0; i& range) } void simpleSequence_pimpl:: -kind (const ::std::string& kind) +kind (ossie::Property::KindType kind) { - LOG_TRACE(prf_parser, "simpleSequence_pimpl kind " << kind) - _kinds.push_back(kind); + RH_TRACE(prf::parserLog, "simpleSequence_pimpl kind " << kind) + _kinds |= kind; } void simpleSequence_pimpl:: -action (const ::std::string& action) +action (ossie::Property::ActionType action) { - LOG_TRACE(prf_parser, "simpleSequence_pimpl action " << action) + RH_TRACE(prf::parserLog, "simpleSequence_pimpl action " << action) _action = action; } void simpleSequence_pimpl:: id (const ::std::string& id) { - LOG_TRACE(prf_parser, "simpleSequence_pimpl id " << id) + RH_TRACE(prf::parserLog, "simpleSequence_pimpl id " << id) _id = id; } void simpleSequence_pimpl:: -mode (const ::std::string& mode) +mode (ossie::Property::AccessType mode) { - LOG_TRACE(prf_parser, "simple_pimpl mode " << mode) + RH_TRACE(prf::parserLog, "simple_pimpl mode " << mode) _mode = mode; } void simpleSequence_pimpl:: name (const ::std::string& name) { - LOG_TRACE(prf_parser, "simpleSequence_pimpl name " << name) + RH_TRACE(prf::parserLog, "simpleSequence_pimpl name " << name) _name = name; } void simpleSequence_pimpl:: type (const ::std::string& type) { - LOG_TRACE(prf_parser, "simpleSequence_pimpl type " << type) + RH_TRACE(prf::parserLog, "simpleSequence_pimpl type " << type) _type = type; } void simpleSequence_pimpl:: -complex (const ::std::string& complex_) +complex (bool complex) { - LOG_TRACE(prf_parser, "simpleSequence_pimpl complex " << complex_) - _complex = complex_; + RH_TRACE(prf::parserLog, "simpleSequence_pimpl complex " << complex) + _complex = complex; } void simpleSequence_pimpl:: -optional (const ::std::string& optional) +optional (bool optional) { - LOG_TRACE(prf_parser, "simpleSequence_pimpl optional " << optional) + RH_TRACE(prf::parserLog, "simpleSequence_pimpl optional " << optional) _optional = 
optional; } -const ossie::SimpleSequenceProperty& simpleSequence_pimpl:: +ossie::SimpleSequenceProperty* simpleSequence_pimpl:: post_simpleSequence () { - _prop = ossie::SimpleSequenceProperty(_id, - _name, - _type, - _mode, - _action, - _kinds, - _values, - _complex, - _optional); - - return _prop; + return new ossie::SimpleSequenceProperty(_id, + _name, + _type, + _mode, + _action, + _kinds, + _values, + _complex, + _optional); } // struct_pimpl @@ -694,11 +739,10 @@ void struct_pimpl:: pre () { - _id = ""; - _name = ""; - _type = ""; - _mode = ""; - _kinds.clear(); + _id.clear(); + _name.clear(); + _mode = ossie::Property::MODE_DEFAULT; + _kinds = ossie::Property::Kinds(); _value.clear(); } @@ -708,21 +752,21 @@ description (const ::std::string& desciption) } void struct_pimpl:: -simple (const ossie::SimpleProperty& property) +simple (ossie::SimpleProperty* property) { - _value.push_back(const_cast(property.clone())); + _value.push_back(property); } void struct_pimpl:: -simplesequence (const ossie::SimpleSequenceProperty& property) +simplesequence (ossie::SimpleSequenceProperty* property) { - _value.push_back(const_cast(property.clone())); + _value.push_back(property); } void struct_pimpl:: -configurationkind (const ::std::string& kind) +configurationkind (ossie::Property::KindType kind) { - _kinds.push_back(kind); + _kinds |= kind; } void struct_pimpl:: @@ -732,7 +776,7 @@ id (const ::std::string& id) } void struct_pimpl:: -mode (const ::std::string& mode) +mode (ossie::Property::AccessType mode) { _mode = mode; } @@ -743,21 +787,17 @@ name (const ::std::string& name) _name = name; } -const ossie::StructProperty& struct_pimpl:: +ossie::StructProperty* struct_pimpl:: post_struct () { - LOG_TRACE(prf_parser, "struct_pimpl post " << _id << " " << _name); - for (std::vector::const_iterator ii = _kinds.begin(); ii != _kinds.end(); ++ii) { - LOG_TRACE(prf_parser, " kind " << *ii); - } + RH_TRACE(prf::parserLog, "struct_pimpl post " << _id << " " << _name << " kinds " 
<< _kinds); ossie::PropertyList::const_iterator i; for (i = _value.begin(); i != _value.end(); ++i) { - LOG_TRACE(prf_parser, " value " << *i) + RH_TRACE(prf::parserLog, " value " << *i) } - _prop = ossie::StructProperty(_id, _name, _mode, _kinds, _value); - return _prop; + return new ossie::StructProperty(_id, _name, _mode, _kinds, _value); } // structSequence_pimpl @@ -766,13 +806,12 @@ post_struct () void structSequence_pimpl:: pre () { - _id = ""; - _name = ""; - _type = ""; - _mode = ""; - _kinds.clear(); + _id.clear(); + _name.clear(); + _mode = ossie::Property::MODE_DEFAULT; + _kinds = ossie::Property::Kinds(); _values.clear(); - _struct = ossie::StructProperty(); // resets internal values vector + _struct.reset(); // resets internal values vector } void structSequence_pimpl:: @@ -781,66 +820,33 @@ description (const ::std::string& description) } void structSequence_pimpl:: -struct_ (const ossie::StructProperty& structProp) +struct_ (ossie::StructProperty* structProp) { - _struct = structProp; + _struct.reset(structProp); } void structSequence_pimpl:: structvalue (const ossie::ComponentPropertyMap& value) { - std::vector propValue; - std::vector rmprops; - const std::vector& defaults = _struct.getValue(); - for (std::vector::const_iterator prop = defaults.begin(); prop != defaults.end(); ++prop) { - const std::string id = (*prop)->getID(); + assert(_struct.get() != 0); + ossie::PropertyList propValue; + const ossie::PropertyList& defaults = _struct->getValue(); + for (ossie::PropertyList::const_iterator prop = defaults.begin(); prop != defaults.end(); ++prop) { + const std::string id = prop->getID(); ossie::ComponentPropertyMap::const_iterator ii = value.find(id); if (ii != value.end()) { - ossie::Property *p=NULL; - if (dynamic_cast(*prop) != NULL) { - const ossie::SimpleProperty* simp = dynamic_cast(*prop); - std::string val = static_cast(ii->second)->getValue(); - p = new ossie::SimpleProperty(id, - simp->getName(), - simp->getType(), - simp->getMode(), - 
simp->getAction(), - simp->getKinds(), - val, - simp->getComplex(), - simp->getCommandLine(), - simp->getOptional()); - rmprops.push_back(p); - - } else if (dynamic_cast(*prop) != NULL) { - const ossie::SimpleSequenceProperty* simpseq = dynamic_cast(*prop); - std::vector vals = static_cast(ii->second)->getValues(); - p = new ossie::SimpleSequenceProperty(id, - simpseq->getName(), - simpseq->getType(), - simpseq->getMode(), - simpseq->getAction(), - simpseq->getKinds(), - vals, - simpseq->getComplex(), - simpseq->getOptional()); - rmprops.push_back(p); - } else { - p = *prop; - } - - propValue.push_back(p); + ossie::Property* field = prop->clone(); + propValue.push_back(field); + field->override(ii->second); } } - _values.push_back(ossie::StructProperty(_struct.getID(), _struct.getName(), _struct.getMode(), _struct.getKinds(), propValue)); - // clean up unused properties.. - for ( std::vector::iterator i = rmprops.begin(); i != rmprops.end(); ++i) { if ( *i ) delete *i; } + _values.push_back(ossie::StructProperty(_struct->getID(), _struct->getName(), _struct->getMode(), _struct->getKinds(), propValue)); } void structSequence_pimpl:: -configurationkind (const ::std::string& kind) +configurationkind (ossie::Property::KindType kind) { - _kinds.push_back(kind); + _kinds |= kind; } void structSequence_pimpl:: @@ -850,7 +856,7 @@ id (const ::std::string& id) } void structSequence_pimpl:: -mode (const ::std::string& mode) +mode (ossie::Property::AccessType mode) { _mode = mode; } @@ -861,11 +867,10 @@ name (const ::std::string& name) _name = name; } -const ossie::StructSequenceProperty& structSequence_pimpl:: +ossie::StructSequenceProperty* structSequence_pimpl:: post_structSequence () { - _prop = ossie::StructSequenceProperty(_id, _name, _mode, _struct, _kinds, _values); - return _prop; + return new ossie::StructSequenceProperty(_id, _name, _mode, *_struct, _kinds, _values); } // structValue_pimpl diff --git a/redhawk/src/control/parser/internal/prf-pimpl.h 
b/redhawk/src/control/parser/internal/prf-pimpl.h index b3a7d5bab..760f85b30 100644 --- a/redhawk/src/control/parser/internal/prf-pimpl.h +++ b/redhawk/src/control/parser/internal/prf-pimpl.h @@ -29,8 +29,12 @@ #include #include "prf-pskel.h" +#include + namespace prf { + extern rh_logger::LoggerPtr parserLog; + class Unit_pimpl: public virtual Unit_pskel, public ::xml_schema::string_pimpl { @@ -49,7 +53,7 @@ namespace prf virtual void pre (); - virtual ::std::string + virtual ::ossie::Property::AccessType post_AccessType (); }; @@ -60,7 +64,7 @@ namespace prf virtual void pre (); - virtual ::std::string + virtual bool post_IsComplex (); }; @@ -71,7 +75,7 @@ namespace prf virtual void pre (); - virtual ::std::string + virtual bool post_IsCommandLine (); }; @@ -82,7 +86,7 @@ namespace prf virtual void pre (); - virtual ::std::string + virtual bool post_IsOptional (); }; @@ -93,12 +97,12 @@ namespace prf pre (); virtual void - type (const std::string&); + type (ossie::Property::ActionType); - virtual ::std::string + virtual ossie::Property::ActionType post_action (); private: - std::string _type; + ossie::Property::ActionType _type; }; class ActionType_pimpl: public virtual ActionType_pskel, @@ -108,7 +112,7 @@ namespace prf virtual void pre (); - virtual std::string + virtual ossie::Property::ActionType post_ActionType (); }; @@ -119,13 +123,13 @@ namespace prf pre (); virtual void - kindtype (const ::std::string&); + kindtype (ossie::Property::KindType); - virtual ::std::string + virtual ossie::Property::KindType post_configurationKind (); private: - std::string _kindtype; + ossie::Property::KindType _kindtype; }; class enumeration_pimpl: public virtual enumeration_pskel @@ -177,12 +181,13 @@ namespace prf pre (); virtual void - kindtype (const ::std::string&); + kindtype (ossie::Property::KindType); - virtual ::std::string + virtual ossie::Property::KindType post_kind (); + private: - std::string _type; + ossie::Property::KindType _type; }; class 
PropertyConfigurationType_pimpl: public virtual PropertyConfigurationType_pskel, @@ -192,7 +197,7 @@ namespace prf virtual void pre (); - virtual std::string + virtual ossie::Property::KindType post_PropertyConfigurationType (); }; @@ -203,7 +208,7 @@ namespace prf virtual void pre (); - virtual ::std::string + virtual ossie::Property::KindType post_StructPropertyConfigurationType (); }; @@ -217,19 +222,19 @@ namespace prf description (const ::std::string&); virtual void - simple (const ossie::SimpleProperty&); + simple (ossie::SimpleProperty*); virtual void - simplesequence (const ossie::SimpleSequenceProperty&); + simplesequence (ossie::SimpleSequenceProperty*); virtual void test (); virtual void - struct_ (const ossie::StructProperty&); + struct_ (ossie::StructProperty*); virtual void - structsequence (const ossie::StructSequenceProperty&); + structsequence (ossie::StructSequenceProperty*); virtual std::auto_ptr post_properties (); @@ -304,16 +309,16 @@ namespace prf enumerations (const ::std::map&); virtual void - kind (const ::std::string&); + kind (ossie::Property::KindType); virtual void - action (const ::std::string&); + action (ossie::Property::ActionType); virtual void id (const ::std::string&); virtual void - mode (const ::std::string&); + mode (ossie::Property::AccessType); virtual void name (const ::std::string&); @@ -322,29 +327,28 @@ namespace prf type (const ::std::string&); virtual void - complex (const ::std::string&); + complex (bool); virtual void - commandline (const ::std::string&); + commandline (bool); virtual void - optional (const ::std::string&); + optional (bool); - virtual const ossie::SimpleProperty& + virtual ossie::SimpleProperty* post_simple (); private: std::string _id; std::string _name; std::string _type; - std::string _complex; - std::string _mode; - std::string _action; - std::string _commandline; - std::string _optional; - std::vector _kinds; + bool _complex; + ossie::Property::AccessType _mode; + ossie::Property::ActionType 
_action; + bool _commandline; + bool _optional; + ossie::Property::Kinds _kinds; std::auto_ptr _value; - ossie::SimpleProperty _prop; }; class simpleRef_pimpl: public virtual simpleRef_pskel @@ -404,16 +408,16 @@ namespace prf range (const ::std::vector&); virtual void - kind (const ::std::string&); + kind (ossie::Property::KindType); virtual void - action (const ::std::string&); + action (ossie::Property::ActionType); virtual void id (const ::std::string&); virtual void - mode (const ::std::string&); + mode (ossie::Property::AccessType); virtual void name (const ::std::string&); @@ -422,25 +426,24 @@ namespace prf type (const ::std::string&); virtual void - complex (const ::std::string&); + complex (bool); virtual void - optional (const ::std::string&); + optional (bool); - virtual const ossie::SimpleSequenceProperty& + virtual ossie::SimpleSequenceProperty* post_simpleSequence (); private: std::string _id; std::string _name; std::string _type; - std::string _complex; - std::string _mode; - std::string _action; - std::string _optional; - std::vector _kinds; + bool _complex; + ossie::Property::AccessType _mode; + ossie::Property::ActionType _action; + bool _optional; + ossie::Property::Kinds _kinds; std::vector _values; - ossie::SimpleSequenceProperty _prop; }; class struct_pimpl: public virtual struct_pskel @@ -453,34 +456,32 @@ namespace prf description (const ::std::string&); virtual void - simple (const ossie::SimpleProperty&); + simple (ossie::SimpleProperty*); virtual void - simplesequence (const ossie::SimpleSequenceProperty&); + simplesequence (ossie::SimpleSequenceProperty*); virtual void - configurationkind (const ::std::string&); + configurationkind (ossie::Property::KindType); virtual void id (const ::std::string&); virtual void - mode (const ::std::string&); + mode (ossie::Property::AccessType); virtual void name (const ::std::string&); - virtual const ossie::StructProperty& + virtual ossie::StructProperty* post_struct (); private: std::string _id; 
std::string _name; - std::string _type; - std::string _mode; - std::vector _kinds; - ossie::PropertyList _value; - ossie::StructProperty _prop; + ossie::Property::AccessType _mode; + ossie::Property::Kinds _kinds; + ossie::PropertyList _value; }; class structSequence_pimpl: public virtual structSequence_pskel @@ -490,7 +491,7 @@ namespace prf pre (); virtual void - struct_ (const ossie::StructProperty&); + struct_ (ossie::StructProperty*); virtual void description (const ::std::string&); @@ -499,29 +500,27 @@ namespace prf structvalue (const ossie::ComponentPropertyMap&); virtual void - configurationkind (const ::std::string&); + configurationkind (ossie::Property::KindType); virtual void id (const ::std::string&); virtual void - mode (const ::std::string&); + mode (ossie::Property::AccessType); virtual void name (const ::std::string&); - virtual const ossie::StructSequenceProperty& + virtual ossie::StructSequenceProperty* post_structSequence (); private: std::string _id; std::string _name; - std::string _type; - std::string _mode; - std::vector _kinds; - ossie::StructProperty _struct; + ossie::Property::AccessType _mode; + ossie::Property::Kinds _kinds; + std::auto_ptr _struct; std::vector _values; - ossie::StructSequenceProperty _prop; }; class structValue_pimpl: public virtual structValue_pskel diff --git a/redhawk/src/control/parser/internal/prf.map b/redhawk/src/control/parser/internal/prf.map index 76f077e49..64e2c9921 100644 --- a/redhawk/src/control/parser/internal/prf.map +++ b/redhawk/src/control/parser/internal/prf.map @@ -26,28 +26,27 @@ namespace urn:mil:jpeojtrs:sca:prf { include "../../include/ossie/componentProfile.h"; properties "std::auto_ptr"; - simple "const ossie::SimpleProperty&" "const ossie::SimpleProperty&"; - simpleSequence "const ossie::SimpleSequenceProperty&" "const ossie::SimpleSequenceProperty&"; - struct "const ossie::StructProperty&" "const ossie::StructProperty&"; - structSequence "const ossie::StructSequenceProperty&" "const 
ossie::StructSequenceProperty&"; + simple "ossie::SimpleProperty*"; + simpleSequence "ossie::SimpleSequenceProperty*"; + struct "ossie::StructProperty*"; + structSequence "ossie::StructSequenceProperty*"; description "::std::string"; value "::std::string"; values "::std::vector"; units "::std::string"; - action "::std::string"; - kind "::std::string"; - kindtype "::std::string"; - StructPropertyConfigurationType "::std::string"; + action "::ossie::Property::ActionType" "::ossie::Property::ActionType"; + kind "::ossie::Property::KindType" "::ossie::Property::KindType"; + StructPropertyConfigurationType "::ossie::Property::KindType" "::ossie::Property::KindType"; PropertyValueType "::std::string"; - AccessType "::std::string"; - ActionType "::std::string"; - IsComplex "::std::string"; - IsCommandLine "::std::string"; - IsOptional "::std::string"; + AccessType "::ossie::Property::AccessType" "::ossie::Property::AccessType"; + ActionType "::ossie::Property::ActionType" "::ossie::Property::ActionType"; + IsComplex "bool" "bool"; + IsCommandLine "bool" "bool"; + IsOptional "bool" "bool"; Unit "::std::string"; - PropertyConfigurationType "::std::string"; - configurationKind "::std::string"; + PropertyConfigurationType "::ossie::Property::KindType" "::ossie::Property::KindType"; + configurationKind "::ossie::Property::KindType" "::ossie::Property::KindType"; range "::std::pair"; enumerations "::std::map"; enumeration "::std::map::value_type"; diff --git a/redhawk/src/control/parser/internal/sad-parser.cpp b/redhawk/src/control/parser/internal/sad-parser.cpp new file mode 100644 index 000000000..0e65aef98 --- /dev/null +++ b/redhawk/src/control/parser/internal/sad-parser.cpp @@ -0,0 +1,277 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include "sad-parser.h" +#include "sad-pimpl.h" + +std::auto_ptr +ossie::internalparser::parseSAD(std::istream& input) throw (ossie::parser_error) +{ + try { + // Instantiate individual parsers. + // + ::sad::softwareassembly_pimpl softwareassembly_p; + ::xml_schema::string_pimpl string_p; + ::sad::componentfiles_pimpl componentfiles_p; + ::sad::componentfile_pimpl componentfile_p; + ::sad::localfile_pimpl localfile_p; + ::sad::partitioning_pimpl partitioning_p; + ::sad::componentplacement_pimpl componentplacement_p; + ::sad::componentfileref_pimpl componentfileref_p; + ::sad::componentinstantiation_pimpl componentinstantiation_p; + ::sad::componentproperties_pimpl componentproperties_p; + ::sad::simpleref_pimpl simpleref_p; + ::sad::simplesequenceref_pimpl simplesequenceref_p; + ::sad::values_pimpl values_p; + ::sad::structref_pimpl structref_p; + ::sad::structsequenceref_pimpl structsequenceref_p; + ::sad::structvalue_pimpl structvalue_p; + ::sad::findcomponent_pimpl findcomponent_p; + ::sad::componentresourcefactoryref_pimpl componentresourcefactoryref_p; + ::sad::resourcefactoryproperties_pimpl resourcefactoryproperties_p; + ::sad::namingservice_pimpl namingservice_p; + ::sad::hostcollocation_pimpl hostcollocation_p; + ::sad::assemblycontroller_pimpl assemblycontroller_p; + 
::sad::componentinstantiationref_pimpl componentinstantiationref_p; + ::sad::connections_pimpl connections_p; + ::sad::connectinterface_pimpl connectinterface_p; + ::sad::usesport_pimpl usesport_p; + ::sad::devicethatloadedthiscomponentref_pimpl devicethatloadedthiscomponentref_p; + ::sad::deviceusedbythiscomponentref_pimpl deviceusedbythiscomponentref_p; + ::sad::deviceusedbyapplication_pimpl deviceusedbyapplication_p; + ::sad::findby_pimpl findby_p; + ::sad::domainfinder_pimpl domainfinder_p; + ::sad::providesport_pimpl providesport_p; + ::sad::componentsupportedinterface_pimpl componentsupportedinterface_p; + ::sad::externalports_pimpl externalports_p; + ::sad::port_pimpl port_p; + ::sad::externalproperties_pimpl externalproperties_p; + ::sad::property_pimpl property_p; + ::sad::options_pimpl options_p; + ::sad::option_pimpl option_p; + ::sad::usesdevicedependencies_pimpl usesdevicedependencies_p; + ::sad::usesdevice_pimpl usesdevice_p; + ::sad::propertyref_pimpl propertyref_p; + ::sad::affinity_pimpl affinity_p; + ::sad::loggingconfig_pimpl loggingconfig_p; + ::sad::devicerequires_pimpl devicerequires_p; + ::sad::idvalue_pimpl idvalue_p; + ::sad::usesdeviceref_pimpl usesdeviceref_p; + ::sad::reservation_pimpl reservation_p; + + + // Connect the parsers together. 
+ // + softwareassembly_p.parsers (string_p, + componentfiles_p, + partitioning_p, + assemblycontroller_p, + connections_p, + externalports_p, + externalproperties_p, + options_p, + usesdevicedependencies_p, + string_p, + string_p, + string_p); + + componentfiles_p.parsers (componentfile_p); + + componentfile_p.parsers (localfile_p, + string_p, + string_p); + + localfile_p.parsers (string_p); + + partitioning_p.parsers (componentplacement_p, + hostcollocation_p); + + componentplacement_p.parsers (componentfileref_p, + componentinstantiation_p); + + componentfileref_p.parsers (string_p); + + componentinstantiation_p.parsers (string_p, + componentproperties_p, + affinity_p, + loggingconfig_p, + findcomponent_p, + devicerequires_p, + string_p, + string_p); + + affinity_p.parsers(simpleref_p, + simplesequenceref_p, + structref_p, + structsequenceref_p); + + loggingconfig_p.parsers(string_p); + + devicerequires_p.parsers (idvalue_p); + + componentproperties_p.parsers (simpleref_p, + simplesequenceref_p, + structref_p, + structsequenceref_p); + + idvalue_p.parsers (string_p, + string_p); + + simpleref_p.parsers (string_p, + string_p); + + simplesequenceref_p.parsers (values_p, + string_p); + + values_p.parsers (string_p); + + structref_p.parsers (simpleref_p, + simplesequenceref_p, + string_p); + + structsequenceref_p.parsers (structvalue_p, + string_p); + + structvalue_p.parsers (simpleref_p, + simplesequenceref_p); + + findcomponent_p.parsers (componentresourcefactoryref_p, + namingservice_p); + + componentresourcefactoryref_p.parsers (resourcefactoryproperties_p, + string_p); + + resourcefactoryproperties_p.parsers (simpleref_p, + simplesequenceref_p, + structref_p, + structsequenceref_p); + + namingservice_p.parsers (string_p); + + hostcollocation_p.parsers (componentplacement_p, + usesdeviceref_p, + reservation_p, + string_p, + string_p); + + usesdeviceref_p.parsers (string_p); + + reservation_p.parsers (string_p, string_p); + + assemblycontroller_p.parsers 
(componentinstantiationref_p); + + componentinstantiationref_p.parsers (string_p); + + connections_p.parsers (connectinterface_p); + + connectinterface_p.parsers (usesport_p, + providesport_p, + componentsupportedinterface_p, + findby_p, + string_p); + + usesport_p.parsers (string_p, + componentinstantiationref_p, + devicethatloadedthiscomponentref_p, + deviceusedbythiscomponentref_p, + deviceusedbyapplication_p, + findby_p); + + devicethatloadedthiscomponentref_p.parsers (string_p); + + deviceusedbythiscomponentref_p.parsers (string_p, + string_p); + + deviceusedbyapplication_p.parsers(string_p); + + findby_p.parsers (namingservice_p, + string_p, + domainfinder_p); + + domainfinder_p.parsers (string_p, + string_p); + + providesport_p.parsers (string_p, + componentinstantiationref_p, + devicethatloadedthiscomponentref_p, + deviceusedbythiscomponentref_p, + deviceusedbyapplication_p, + findby_p); + + componentsupportedinterface_p.parsers (string_p, + componentinstantiationref_p, + devicethatloadedthiscomponentref_p, + deviceusedbythiscomponentref_p, + deviceusedbyapplication_p, + findby_p); + + externalports_p.parsers (port_p); + + port_p.parsers (string_p, + string_p, + string_p, + string_p, + componentinstantiationref_p, + string_p); + + externalproperties_p.parsers (property_p); + + property_p.parsers (string_p, + string_p, + string_p); + + options_p.parsers (option_p); + + option_p.parsers (string_p, + string_p); + + usesdevicedependencies_p.parsers (usesdevice_p); + + usesdevice_p.parsers (propertyref_p, + simpleref_p, + simplesequenceref_p, + structref_p, + structsequenceref_p, + string_p, + string_p); + + propertyref_p.parsers (string_p, + string_p); + + // Parse the XML document. 
+ // + ::xml_schema::document doc_p ( + softwareassembly_p, + "", + "softwareassembly"); + + softwareassembly_p.pre (); + doc_p.parse (input); + return (softwareassembly_p.post_softwareassembly ()); + } catch (const ::xml_schema::exception& e) { + std::ostringstream err; + err << e; + throw ossie::parser_error(err.str()); + } catch (const std::ios_base::failure& e) { + throw ossie::parser_error(e.what()); + } +} diff --git a/redhawk/src/control/parser/internal/sad-parser.h b/redhawk/src/control/parser/internal/sad-parser.h index be402e194..563242e18 100644 --- a/redhawk/src/control/parser/internal/sad-parser.h +++ b/redhawk/src/control/parser/internal/sad-parser.h @@ -21,241 +21,15 @@ #ifndef __SAD_PARSER_H__ #define __SAD_PARSER_H__ -#include -#include -#include"ossie/exceptions.h" -#include "sad-pimpl.h" +#include +#include -#include +#include +#include namespace ossie { namespace internalparser { - inline std::auto_ptr parseSAD(std::istream& input) throw (ossie::parser_error) { - try { - // Instantiate individual parsers. 
- // - ::sad::softwareassembly_pimpl softwareassembly_p; - ::xml_schema::string_pimpl string_p; - ::sad::componentfiles_pimpl componentfiles_p; - ::sad::componentfile_pimpl componentfile_p; - ::sad::localfile_pimpl localfile_p; - ::sad::partitioning_pimpl partitioning_p; - ::sad::componentplacement_pimpl componentplacement_p; - ::sad::componentfileref_pimpl componentfileref_p; - ::sad::componentinstantiation_pimpl componentinstantiation_p; - ::sad::componentproperties_pimpl componentproperties_p; - ::sad::simpleref_pimpl simpleref_p; - ::sad::simplesequenceref_pimpl simplesequenceref_p; - ::sad::values_pimpl values_p; - ::sad::structref_pimpl structref_p; - ::sad::structsequenceref_pimpl structsequenceref_p; - ::sad::structvalue_pimpl structvalue_p; - ::sad::findcomponent_pimpl findcomponent_p; - ::sad::componentresourcefactoryref_pimpl componentresourcefactoryref_p; - ::sad::resourcefactoryproperties_pimpl resourcefactoryproperties_p; - ::sad::namingservice_pimpl namingservice_p; - ::sad::hostcollocation_pimpl hostcollocation_p; - ::sad::assemblycontroller_pimpl assemblycontroller_p; - ::sad::componentinstantiationref_pimpl componentinstantiationref_p; - ::sad::connections_pimpl connections_p; - ::sad::connectinterface_pimpl connectinterface_p; - ::sad::usesport_pimpl usesport_p; - ::sad::devicethatloadedthiscomponentref_pimpl devicethatloadedthiscomponentref_p; - ::sad::deviceusedbythiscomponentref_pimpl deviceusedbythiscomponentref_p; - ::sad::deviceusedbyapplication_pimpl deviceusedbyapplication_p; - ::sad::findby_pimpl findby_p; - ::sad::domainfinder_pimpl domainfinder_p; - ::sad::providesport_pimpl providesport_p; - ::sad::componentsupportedinterface_pimpl componentsupportedinterface_p; - ::sad::externalports_pimpl externalports_p; - ::sad::port_pimpl port_p; - ::sad::externalproperties_pimpl externalproperties_p; - ::sad::property_pimpl property_p; - ::sad::usesdevicedependencies_pimpl usesdevicedependencies_p; - ::sad::usesdevice_pimpl usesdevice_p; - 
::sad::propertyref_pimpl propertyref_p; - ::sad::affinity_pimpl affinity_p; - ::sad::loggingconfig_pimpl loggingconfig_p; - - - // Connect the parsers together. - // - softwareassembly_p.parsers (string_p, - componentfiles_p, - partitioning_p, - assemblycontroller_p, - connections_p, - externalports_p, - externalproperties_p, - usesdevicedependencies_p, - string_p, - string_p, - string_p); - - componentfiles_p.parsers (componentfile_p); - - componentfile_p.parsers (localfile_p, - string_p, - string_p); - - localfile_p.parsers (string_p); - - partitioning_p.parsers (componentplacement_p, - hostcollocation_p); - - componentplacement_p.parsers (componentfileref_p, - componentinstantiation_p); - - componentfileref_p.parsers (string_p); - - componentinstantiation_p.parsers (string_p, - componentproperties_p, - affinity_p, - loggingconfig_p, - findcomponent_p, - string_p, - string_p); - - affinity_p.parsers(simpleref_p, - simplesequenceref_p, - structref_p, - structsequenceref_p); - - loggingconfig_p.parsers(string_p); - - componentproperties_p.parsers (simpleref_p, - simplesequenceref_p, - structref_p, - structsequenceref_p); - - simpleref_p.parsers (string_p, - string_p); - - simplesequenceref_p.parsers (values_p, - string_p); - - values_p.parsers (string_p); - - structref_p.parsers (simpleref_p, - simplesequenceref_p, - string_p); - - structsequenceref_p.parsers (structvalue_p, - string_p); - - structvalue_p.parsers (simpleref_p, - simplesequenceref_p); - - findcomponent_p.parsers (componentresourcefactoryref_p, - namingservice_p); - - componentresourcefactoryref_p.parsers (resourcefactoryproperties_p, - string_p); - - resourcefactoryproperties_p.parsers (simpleref_p, - simplesequenceref_p, - structref_p, - structsequenceref_p); - - namingservice_p.parsers (string_p); - - hostcollocation_p.parsers (componentplacement_p, - string_p, - string_p); - - assemblycontroller_p.parsers (componentinstantiationref_p); - - componentinstantiationref_p.parsers (string_p); - - 
connections_p.parsers (connectinterface_p); - - connectinterface_p.parsers (usesport_p, - providesport_p, - componentsupportedinterface_p, - findby_p, - string_p); - - usesport_p.parsers (string_p, - componentinstantiationref_p, - devicethatloadedthiscomponentref_p, - deviceusedbythiscomponentref_p, - deviceusedbyapplication_p, - findby_p); - - devicethatloadedthiscomponentref_p.parsers (string_p); - - deviceusedbythiscomponentref_p.parsers (string_p, - string_p); - - deviceusedbyapplication_p.parsers(string_p); - - findby_p.parsers (namingservice_p, - string_p, - domainfinder_p); - - domainfinder_p.parsers (string_p, - string_p); - - providesport_p.parsers (string_p, - componentinstantiationref_p, - devicethatloadedthiscomponentref_p, - deviceusedbythiscomponentref_p, - deviceusedbyapplication_p, - findby_p); - - componentsupportedinterface_p.parsers (string_p, - componentinstantiationref_p, - devicethatloadedthiscomponentref_p, - deviceusedbythiscomponentref_p, - deviceusedbyapplication_p, - findby_p); - - externalports_p.parsers (port_p); - - port_p.parsers (string_p, - string_p, - string_p, - string_p, - componentinstantiationref_p, - string_p); - - externalproperties_p.parsers (property_p); - - property_p.parsers (string_p, - string_p, - string_p); - - usesdevicedependencies_p.parsers (usesdevice_p); - - usesdevice_p.parsers (propertyref_p, - simpleref_p, - simplesequenceref_p, - structref_p, - structsequenceref_p, - string_p, - string_p); - - propertyref_p.parsers (string_p, - string_p); - - // Parse the XML document. 
- // - ::xml_schema::document doc_p ( - softwareassembly_p, - "", - "softwareassembly"); - - softwareassembly_p.pre (); - doc_p.parse (input); - return (softwareassembly_p.post_softwareassembly ()); - } catch (const ::xml_schema::exception& e) { - std::ostringstream err; - err << e; - throw ossie::parser_error(err.str()); - } catch (const std::ios_base::failure& e) { - throw ossie::parser_error(e.what()); - } - } + std::auto_ptr parseSAD(std::istream& input) throw (ossie::parser_error); } } diff --git a/redhawk/src/control/parser/internal/sad-pimpl.cpp b/redhawk/src/control/parser/internal/sad-pimpl.cpp index 4f8c77660..1523e3830 100644 --- a/redhawk/src/control/parser/internal/sad-pimpl.cpp +++ b/redhawk/src/control/parser/internal/sad-pimpl.cpp @@ -31,6 +31,8 @@ CREATE_LOGGER(sad_parser); +rh_logger::LoggerPtr sad::parserLog; + namespace sad { // softwareassembly_pimpl @@ -50,9 +52,9 @@ namespace sad } void softwareassembly_pimpl:: - componentfiles (const ::std::vector& componentfiles) + componentfiles (::std::vector& componentfiles) { - _sad->componentfiles = componentfiles; + _sad->componentfiles.swap(componentfiles); } void softwareassembly_pimpl:: @@ -68,27 +70,33 @@ namespace sad } void softwareassembly_pimpl:: - connections (const ::std::vector& connections) + connections (::std::vector& connections) + { + _sad->connections.swap(connections); + } + + void softwareassembly_pimpl:: + externalports (::std::vector& externalports) { - _sad->connections = connections; + _sad->externalports.swap(externalports); } void softwareassembly_pimpl:: - externalports (const ::std::vector& externalports) + externalproperties (::std::vector& externalproperties) { - _sad->externalports = externalports; + _sad->externalproperties.swap(externalproperties); } void softwareassembly_pimpl:: - externalproperties (const ::std::vector& externalproperties) + options (::std::vector& options) { - _sad->externalproperties = externalproperties; + _sad->options.swap(options); } void 
softwareassembly_pimpl:: - usesdevicedependencies (const ::std::vector& usesdevices) + usesdevicedependencies (::std::vector& usesdevices) { - _sad->usesdevice = usesdevices; + _sad->usesdevice.swap(usesdevices); } void softwareassembly_pimpl:: @@ -124,7 +132,7 @@ namespace sad componentFiles.push_back(componentfile); } - ::std::vector componentfiles_pimpl:: + ::std::vector& componentfiles_pimpl:: post_componentfiles () { return componentFiles; @@ -206,7 +214,7 @@ namespace sad void partitioning_pimpl:: hostcollocation (const ossie::SoftwareAssembly::HostCollocation& hostcollocation) { - LOG_TRACE(sad_parser, "Adding host collocations"); + RH_TRACE(sad::parserLog, "Adding host collocations"); partitioning->collocations.push_back(hostcollocation); } @@ -278,7 +286,9 @@ namespace sad void componentinstantiation_pimpl:: startorder (const ::std::string& startorder) { - componentInstantiation._startOrder = startorder; + // We have to parse the string into an integer here, rather than declaring + // startorder as an integer in the schema, for backwards compatibility. 
+ componentInstantiation.startOrder = atoi(startorder.c_str()); } void componentinstantiation_pimpl:: @@ -288,15 +298,15 @@ namespace sad } void componentinstantiation_pimpl:: - componentproperties ( const ossie::ComponentPropertyList& componentproperties) + componentproperties (ossie::ComponentPropertyList& componentproperties) { - componentInstantiation.properties = componentproperties; + componentInstantiation.properties.swap(componentproperties); } void componentinstantiation_pimpl:: findcomponent (const ::std::string& namingservicename) { - LOG_TRACE(sad_parser, "setting instantiation naming service name " << namingservicename); + RH_TRACE(sad::parserLog, "setting instantiation naming service name " << namingservicename); componentInstantiation.namingservicename = namingservicename; } @@ -307,18 +317,25 @@ namespace sad } void componentinstantiation_pimpl:: - affinity (const ossie::ComponentInstantiation::AffinityProperties& affinityProperties) + affinity (ossie::ComponentInstantiation::AffinityProperties& affinityProperties) { - LOG_TRACE(sad_parser, "affinity properties"); - componentInstantiation.affinityProperties= affinityProperties; + RH_TRACE(sad::parserLog, "affinity properties"); + componentInstantiation.affinityProperties.swap(affinityProperties); } void componentinstantiation_pimpl::loggingconfig ( const ossie::ComponentInstantiation::LoggingConfig& log_cfg ) { componentInstantiation.loggingConfig = log_cfg; + RH_TRACE(sad::parserLog, "componentinstantiation_pimpl logging cfg "<< componentInstantiation.loggingConfig.first.c_str() << " level " << componentInstantiation.loggingConfig.second.c_str() ); + } + + void componentinstantiation_pimpl::devicerequires (ossie::ComponentPropertyList& requiresproperties) + { + componentInstantiation.devicerequires.swap(requiresproperties); } + const ::ossie::ComponentInstantiation& componentinstantiation_pimpl:: post_componentinstantiation () { @@ -334,32 +351,32 @@ namespace sad void affinity_pimpl:: simpleref 
(const ossie::SimplePropertyRef& simpleref) { - LOG_TRACE(sad_parser, "Adding simpleref "); + RH_TRACE(sad::parserLog, "Adding simpleref "); affinityProperties.push_back(simpleref.clone()); } void affinity_pimpl:: simplesequenceref (const ossie::SimpleSequencePropertyRef& simplesequenceref) { - LOG_TRACE(sad_parser, "Adding simplesequenceref"); + RH_TRACE(sad::parserLog, "Adding simplesequenceref"); affinityProperties.push_back(simplesequenceref.clone()); } void affinity_pimpl:: structref (const ossie::StructPropertyRef& structref) { - LOG_TRACE(sad_parser, "Adding structref"); + RH_TRACE(sad::parserLog, "Adding structref"); affinityProperties.push_back(structref.clone()); } void affinity_pimpl:: structsequenceref (const ossie::StructSequencePropertyRef& structsequenceref) { - LOG_TRACE(sad_parser, "Adding structsequenceref"); + RH_TRACE(sad::parserLog, "Adding structsequenceref"); affinityProperties.push_back(structsequenceref.clone()); } - const ossie::ComponentInstantiation::AffinityProperties& affinity_pimpl::post_affinity () + ossie::ComponentInstantiation::AffinityProperties& affinity_pimpl::post_affinity () { return affinityProperties; } @@ -375,14 +392,39 @@ namespace sad void loggingconfig_pimpl::level ( const ::std::string &v ) { info.second=v; + RH_TRACE(sad::parserLog, " loggingconfig : parser found level " << v ); } const ossie::ComponentInstantiation::LoggingConfig& loggingconfig_pimpl::post_loggingconfig ( ) { info.first = this->post_string(); + RH_TRACE(sad::parserLog, " loggingconfig : first " << info.first << "second " << info.second ); return info; } + // devicerequires_pimpl + // + + void devicerequires_pimpl:: + pre () + { + devicerequires.clear(); + } + + void devicerequires_pimpl:: + requires (const ossie::IdValue& idvalue) + { + devicerequires.push_back(idvalue.clone()); + } + + ossie::ComponentPropertyList& devicerequires_pimpl:: + post_devicerequires () + { + return devicerequires; + } + + + // componentproperties_pimpl // @@ -416,7 +458,7 
@@ namespace sad componentProperties.push_back(structsequenceref.clone()); } - const ossie::ComponentPropertyList& componentproperties_pimpl:: + ossie::ComponentPropertyList& componentproperties_pimpl:: post_componentproperties () { return componentProperties; @@ -445,7 +487,7 @@ namespace sad ::std::string findcomponent_pimpl:: post_findcomponent () { - LOG_TRACE(sad_parser, "post findcomponent: " << namingservicename) + RH_TRACE(sad::parserLog, "post findcomponent: " << namingservicename) return namingservicename; } @@ -584,6 +626,39 @@ namespace sad { } + + + // idvalueref_pimpl + // + + void idvalue_pimpl:: + pre () + { + RH_TRACE(sad::parserLog, "pre idvalue"); + simple = ossie::IdValue(); + } + + void idvalue_pimpl:: + id (const ::std::string& id) + { + RH_TRACE(sad::parserLog, "idvalue id: " << id); + simple._id = id; + } + + void idvalue_pimpl:: + value (const ::std::string& value) + { + RH_TRACE(sad::parserLog, "idvalue value: " << value); + simple._value = value; + } + + const ossie::IdValue& idvalue_pimpl:: + post_idvalue () + { + RH_TRACE(sad::parserLog, "post idvalue"); + return simple; + } + // simpleref_pimpl // @@ -867,6 +942,19 @@ namespace sad hostcollocation->placements.push_back(componentplacement); } + + void hostcollocation_pimpl:: + usesdeviceref (const ::ossie::UsesDeviceRef& usesdeviceref) + { + hostcollocation->usesdevicerefs.push_back(usesdeviceref); + } + + void hostcollocation_pimpl:: + reservation (const ::ossie::Reservation& reservation) + { + hostcollocation->reservations.push_back(reservation); + } + void hostcollocation_pimpl:: id (const ::std::string& id) { @@ -885,6 +973,62 @@ namespace sad return *hostcollocation; } + // usesdeviceref_pimpl + // + + void usesdeviceref_pimpl:: + pre () + { + udevref = ossie::UsesDeviceRef(); + } + + void usesdeviceref_pimpl:: + refid (const ::std::string& refid) + { + udevref.id = refid; + } + + const ossie::UsesDeviceRef& usesdeviceref_pimpl:: + post_usesdeviceref () + { + return udevref; + } + 
+ + // reservation_pimpl + // + + void reservation_pimpl:: + pre () + { + resrv = ossie::Reservation(); + } + + void reservation_pimpl:: + kind (const ::std::string& kind) + { + resrv.kind = kind; + } + + void reservation_pimpl:: + value (const ::std::string& value) + { + resrv.value = value; + } + + std::string reservation_pimpl::post_string() + { + resrv.value = this->post_string(); + return resrv.value; + } + + const ossie::Reservation& reservation_pimpl:: + post_reservation () + { + return resrv; + } + + // assemblycontroller_pimpl // @@ -928,10 +1072,10 @@ namespace sad //connname << "connection_" << (connections.size()); //connections.back().connectionId = connname.str(); //} - LOG_TRACE(sad_parser, "added connection id " << connections.back().getID() << " type " << connections.back().getType()); + RH_TRACE(sad::parserLog, "added connection id " << connections.back().getID() << " type " << connections.back().getType()); } - ::std::vector connections_pimpl:: + ::std::vector& connections_pimpl:: post_connections () { return connections; @@ -976,7 +1120,7 @@ namespace sad void connectinterface_pimpl:: id (const ::std::string& id) { - LOG_TRACE(sad_parser, "connection id " << id); + RH_TRACE(sad::parserLog, "connection id " << id); connection->connectionId = id; } @@ -1154,7 +1298,7 @@ namespace sad extPorts.push_back(port); } - ::std::vector externalports_pimpl:: + ::std::vector& externalports_pimpl:: post_externalports () { return extPorts; @@ -1230,7 +1374,7 @@ namespace sad extProps.push_back(prop); } - ::std::vector externalproperties_pimpl:: + ::std::vector& externalproperties_pimpl:: post_externalproperties () { return extProps; @@ -1269,6 +1413,54 @@ namespace sad return *property; } + // options_pimpl + // + + void options_pimpl:: + pre () + { + extOptions.clear(); + } + + void options_pimpl:: + option (const ossie::SoftwareAssembly::Option& option) + { + extOptions.push_back(option); + } + + ::std::vector& options_pimpl:: + post_options () + { + return 
extOptions; + } + + // option_pimpl + // + + void option_pimpl:: + pre () + { + option.reset(new ossie::SoftwareAssembly::Option()); + } + + void option_pimpl:: + name(const ::std::string& name) + { + option->name = name; + } + + void option_pimpl:: + value(const ::std::string& value) + { + option->value = value; + } + + ::ossie::SoftwareAssembly::Option option_pimpl:: + post_option () + { + return *option; + } + // usesdevicedependencies_pimpl // @@ -1279,12 +1471,12 @@ namespace sad } void usesdevicedependencies_pimpl:: - usesdevice (const ossie::SoftwareAssembly::UsesDevice& use) + usesdevice (const ossie::UsesDevice& use) { usesDevices.push_back(use); } - ::std::vector usesdevicedependencies_pimpl:: + ::std::vector& usesdevicedependencies_pimpl:: post_usesdevicedependencies () { return usesDevices; @@ -1296,11 +1488,11 @@ namespace sad void usesdevice_pimpl:: pre () { - uses.reset(new ossie::SoftwareAssembly::UsesDevice()); + uses.reset(new ossie::UsesDevice()); } void usesdevice_pimpl:: - propertyref (const ossie::SoftwareAssembly::PropertyRef& propRef) + propertyref (const ossie::PropertyRef& propRef) { uses->dependencies.push_back(propRef); } @@ -1341,7 +1533,7 @@ namespace sad uses->type = type; } - ossie::SoftwareAssembly::UsesDevice usesdevice_pimpl:: + ossie::UsesDevice usesdevice_pimpl:: post_usesdevice () { return *uses; @@ -1368,10 +1560,10 @@ namespace sad propRef->_value = value; } - ossie::SoftwareAssembly::PropertyRef propertyref_pimpl:: + ossie::PropertyRef propertyref_pimpl:: post_propertyref () { - return ossie::SoftwareAssembly::PropertyRef(propRef->clone()); + return ossie::PropertyRef(propRef->clone()); } } diff --git a/redhawk/src/control/parser/internal/sad-pimpl.h b/redhawk/src/control/parser/internal/sad-pimpl.h index 2e510d86a..3e7207c79 100644 --- a/redhawk/src/control/parser/internal/sad-pimpl.h +++ b/redhawk/src/control/parser/internal/sad-pimpl.h @@ -28,9 +28,12 @@ #define CXX___XML_XSD_SAD_PIMPL_H #include "sad-pskel.h" +#include 
namespace sad { + extern rh_logger::LoggerPtr parserLog; + class softwareassembly_pimpl: public softwareassembly_pskel { public: @@ -41,7 +44,7 @@ namespace sad description (const ::std::string&); virtual void - componentfiles (const ::std::vector&); + componentfiles (::std::vector&); virtual void partitioning (const ossie::SoftwareAssembly::Partitioning&); @@ -50,16 +53,19 @@ namespace sad assemblycontroller (const ::std::string&); virtual void - connections (const ::std::vector&); + connections (::std::vector&); + + virtual void + externalports (::std::vector&); virtual void - externalports (const ::std::vector&); + externalproperties (::std::vector&); virtual void - externalproperties (const ::std::vector&); + options (::std::vector&); virtual void - usesdevicedependencies (const ::std::vector&); + usesdevicedependencies (::std::vector&); virtual void id (const ::std::string&); @@ -83,7 +89,7 @@ namespace sad virtual void componentfile (const ::ossie::ComponentFile&); - virtual ::std::vector + virtual ::std::vector& post_componentfiles (); private: @@ -192,7 +198,7 @@ namespace sad usagename (const ::std::string&); virtual void - componentproperties ( const ossie::ComponentPropertyList&); + componentproperties (ossie::ComponentPropertyList&); virtual void findcomponent (const ::std::string&); @@ -204,11 +210,14 @@ namespace sad startorder (const ::std::string&); virtual void - affinity (const ossie::ComponentInstantiation::AffinityProperties&); + affinity (ossie::ComponentInstantiation::AffinityProperties&); virtual void loggingconfig (const ossie::ComponentInstantiation::LoggingConfig&); + virtual void + devicerequires (ossie::ComponentPropertyList&); + virtual const ::ossie::ComponentInstantiation& post_componentinstantiation (); @@ -234,7 +243,7 @@ namespace sad virtual void structsequenceref (const ossie::StructSequencePropertyRef&); - const ossie::ComponentInstantiation::AffinityProperties& + ossie::ComponentInstantiation::AffinityProperties& post_affinity 
(); private: @@ -257,6 +266,24 @@ namespace sad ossie::ComponentInstantiation::LoggingConfig info; }; + class devicerequires_pimpl: public virtual devicerequires_pskel + { + public: + + virtual void + pre (); + + virtual void + requires (const ossie::IdValue&); + + virtual ossie::ComponentPropertyList& + post_devicerequires (); + + private: + ossie::ComponentPropertyList devicerequires; + }; + + class componentproperties_pimpl: public virtual componentproperties_pskel { @@ -277,7 +304,7 @@ namespace sad virtual void structsequenceref (const ossie::StructSequencePropertyRef&); - virtual const ossie::ComponentPropertyList& + virtual ossie::ComponentPropertyList& post_componentproperties (); private: @@ -394,6 +421,26 @@ namespace sad post_resourcefactoryproperties (); }; + class idvalue_pimpl: public virtual idvalue_pskel + { + public: + virtual void + pre (); + + virtual void + id (const ::std::string&); + + virtual void + value (const ::std::string&); + + virtual const ossie::IdValue& + post_idvalue (); + + private: + ossie::IdValue simple; + }; + + class simpleref_pimpl: public virtual simpleref_pskel { public: @@ -590,6 +637,12 @@ namespace sad virtual void componentplacement (const ::ossie::ComponentPlacement&); + virtual void + usesdeviceref (const ::ossie::UsesDeviceRef&); + + virtual void + reservation (const ::ossie::Reservation&); + virtual void id (const ::std::string&); @@ -603,6 +656,43 @@ namespace sad std::auto_ptr hostcollocation; }; + class usesdeviceref_pimpl: public virtual usesdeviceref_pskel + { + public: + virtual void + pre (); + + virtual void + refid (const ::std::string&); + + virtual const ossie::UsesDeviceRef& + post_usesdeviceref (); + + private: + ossie::UsesDeviceRef udevref; + }; + + class reservation_pimpl: public virtual reservation_pskel + { + public: + virtual void + pre (); + + virtual void + kind (const ::std::string&); + + virtual void + value (const ::std::string&); + + virtual const ossie::Reservation& + post_reservation (); + 
+ virtual std::string post_string (); + + private: + ossie::Reservation resrv; + }; + class assemblycontroller_pimpl: public virtual assemblycontroller_pskel { public: @@ -628,7 +718,7 @@ namespace sad virtual void connectinterface (const ::ossie::Connection&); - virtual ::std::vector + virtual ::std::vector& post_connections (); private: @@ -765,7 +855,7 @@ namespace sad virtual void port (const ossie::SoftwareAssembly::Port&); - virtual ::std::vector + virtual ::std::vector& post_externalports (); private: @@ -812,7 +902,7 @@ namespace sad virtual void property (const ossie::SoftwareAssembly::Property&); - virtual ::std::vector + virtual ::std::vector& post_externalproperties (); private: @@ -841,6 +931,41 @@ namespace sad std::auto_ptr property; }; + class options_pimpl: public virtual options_pskel + { + public: + virtual void + pre (); + + virtual void + option (const ossie::SoftwareAssembly::Option&); + + virtual ::std::vector& + post_options (); + + private: + std::vector extOptions; + }; + + class option_pimpl: public virtual option_pskel + { + public: + virtual void + pre (); + + virtual void + name (const ::std::string&); + + virtual void + value (const ::std::string&); + + virtual ::ossie::SoftwareAssembly::Option + post_option (); + + private: + std::auto_ptr option; + }; + class usesdevicedependencies_pimpl: public virtual usesdevicedependencies_pskel { public: @@ -848,13 +973,13 @@ namespace sad pre (); virtual void - usesdevice (const ossie::SoftwareAssembly::UsesDevice&); + usesdevice (const ossie::UsesDevice&); - virtual ::std::vector + virtual ::std::vector& post_usesdevicedependencies (); private: - std::vector usesDevices; + std::vector usesDevices; }; class usesdevice_pimpl: public virtual usesdevice_pskel @@ -864,7 +989,7 @@ namespace sad pre (); virtual void - propertyref (const ossie::SoftwareAssembly::PropertyRef&); + propertyref (const ossie::PropertyRef&); virtual void simpleref (const ossie::SimplePropertyRef&); @@ -884,11 +1009,11 @@ 
namespace sad virtual void type (const ::std::string&); - virtual ossie::SoftwareAssembly::UsesDevice + virtual ossie::UsesDevice post_usesdevice (); private: - std::auto_ptr uses; + std::auto_ptr uses; }; class propertyref_pimpl: public virtual propertyref_pskel @@ -903,7 +1028,7 @@ namespace sad virtual void value (const ::std::string&); - virtual ::ossie::SoftwareAssembly::PropertyRef + virtual ::ossie::PropertyRef post_propertyref (); private: diff --git a/redhawk/src/control/parser/internal/sad.map b/redhawk/src/control/parser/internal/sad.map index fe4d47730..c26934912 100644 --- a/redhawk/src/control/parser/internal/sad.map +++ b/redhawk/src/control/parser/internal/sad.map @@ -23,13 +23,13 @@ namespace urn:mil:jpeojtrs:sca:sad { include "memory"; include "../include/ossie/SoftwareAssembly.h"; softwareassembly "::std::auto_ptr"; - componentfiles "::std::vector"; + componentfiles "::std::vector &"; componentfile "::ossie::ComponentFile"; componentplacement "const ::ossie::ComponentPlacement&" "const ::ossie::ComponentPlacement&"; partitioning "ossie::SoftwareAssembly::Partitioning"; hostcollocation "::ossie::SoftwareAssembly::HostCollocation"; componentinstantiation "const ::ossie::ComponentInstantiation &" "const ::ossie::ComponentInstantiation &"; - connections "::std::vector"; + connections "::std::vector &"; connectinterface "::ossie::Connection"; usesport "::ossie::UsesPort"; providesport "::ossie::ProvidesPort"; @@ -39,7 +39,7 @@ namespace urn:mil:jpeojtrs:sca:sad { findby "::ossie::FindBy"; namingservice "::std::string"; domainfinder "::std::pair"; - componentproperties "const ossie::ComponentPropertyList &" "const ossie::ComponentPropertyList &"; + componentproperties "ossie::ComponentPropertyList &"; simpleref "const ossie::SimplePropertyRef &" "const ossie::SimplePropertyRef &"; simplesequenceref "const ossie::SimpleSequencePropertyRef &" "const ossie::SimpleSequencePropertyRef &"; structref "const ossie::StructPropertyRef &" "const 
ossie::StructPropertyRef &"; @@ -53,17 +53,23 @@ namespace urn:mil:jpeojtrs:sca:sad { devicethatloadedthiscomponentref "::std::string"; deviceusedbythiscomponentref "::std::pair"; deviceusedbyapplication "::std::string"; - externalports "::std::vector"; + externalports "::std::vector &"; port "ossie::SoftwareAssembly::Port"; usesidentifier "::std::string"; providesidentifier "::std::string"; supportedidentifier "::std::string"; - externalproperties "::std::vector"; + externalproperties "::std::vector &"; property "ossie::SoftwareAssembly::Property"; - usesdevicedependencies "::std::vector"; - usesdevice "ossie::SoftwareAssembly::UsesDevice"; - propertyref "ossie::SoftwareAssembly::PropertyRef"; - affinity "const ossie::ComponentInstantiation::AffinityProperties&" "const ossie::ComponentInstantiation::AffinityProperties&"; + options "::std::vector &"; + option "ossie::SoftwareAssembly::Option"; + usesdevicedependencies "::std::vector &"; + usesdevice "ossie::UsesDevice"; + propertyref "ossie::PropertyRef"; + affinity "ossie::ComponentInstantiation::AffinityProperties&"; loggingconfig "const ossie::ComponentInstantiation::LoggingConfig&" "const ossie::ComponentInstantiation::LoggingConfig&"; + devicerequires "ossie::ComponentPropertyList &" "ossie::ComponentPropertyList &"; + idvalue "const ossie::IdValue&" "const ossie::IdValue&"; + usesdeviceref "const ossie::UsesDeviceRef&" "const ossie::UsesDeviceRef&"; + reservation "const ossie::Reservation&" "const ossie::Reservation&"; } diff --git a/redhawk/src/control/parser/internal/scd-parser.cpp b/redhawk/src/control/parser/internal/scd-parser.cpp new file mode 100644 index 000000000..5cbd5e699 --- /dev/null +++ b/redhawk/src/control/parser/internal/scd-parser.cpp @@ -0,0 +1,107 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include "scd-parser.h" +#include "scd-pimpl.h" + +std::auto_ptr +ossie::internalparser::parseSCD(std::istream& input) throw (ossie::parser_error) +{ + using namespace scd; + + try { + // Instantiate individual parsers. + // + softwarecomponent_pimpl softwarecomponent_p; + ::xml_schema::string_pimpl string_p; + componentRepId_pimpl componentRepId_p; + componentFeatures_pimpl componentFeatures_p; + supportsInterface_pimpl supportsInterface_p; + ports_pimpl ports_p; + provides_pimpl provides_p; + portType_pimpl portType_p; + type_pimpl type_p; + uses_pimpl uses_p; + interfaces_pimpl interfaces_p; + interface_pimpl interface_p; + inheritsInterface_pimpl inheritsInterface_p; + propertyFile_pimpl propertyFile_p; + localFile_pimpl localFile_p; + + // Connect the parsers together. 
+ // + softwarecomponent_p.parsers (string_p, + componentRepId_p, + string_p, + componentFeatures_p, + interfaces_p, + propertyFile_p); + + componentRepId_p.parsers (string_p); + + componentFeatures_p.parsers (supportsInterface_p, + ports_p); + + supportsInterface_p.parsers (string_p, + string_p); + + ports_p.parsers (provides_p, + uses_p); + + provides_p.parsers (string_p, + portType_p, + string_p, + string_p); + + portType_p.parsers (type_p); + + uses_p.parsers (string_p, + portType_p, + string_p, + string_p); + + interfaces_p.parsers (interface_p); + + interface_p.parsers (inheritsInterface_p, + string_p, + string_p); + + inheritsInterface_p.parsers (string_p); + + propertyFile_p.parsers (localFile_p, + string_p); + + localFile_p.parsers (string_p); + + // Parse the XML document. + // + ::xml_schema::document doc_p (softwarecomponent_p, "softwarecomponent"); + + softwarecomponent_p.pre (); + doc_p.parse (input); + return (softwarecomponent_p.post_softwarecomponent ()); + } catch (const ::xml_schema::exception& e) { + throw ossie::parser_error(e.what()); + } catch (const std::ios_base::failure& e) { + throw ossie::parser_error(e.what()); + } +} diff --git a/redhawk/src/control/parser/internal/scd-parser.h b/redhawk/src/control/parser/internal/scd-parser.h index 477b0f50d..6fc4d0354 100644 --- a/redhawk/src/control/parser/internal/scd-parser.h +++ b/redhawk/src/control/parser/internal/scd-parser.h @@ -21,92 +21,14 @@ #ifndef __SCD_PARSER_H__ #define __SCD_PARSER_H__ -#include "scd-pimpl.h" -#include "ossie/exceptions.h" +#include + +#include +#include namespace ossie { namespace internalparser { - inline std::auto_ptr parseSCD(std::istream& input) throw (ossie::parser_error) - { - using namespace scd; - - try { - // Instantiate individual parsers. 
- // - softwarecomponent_pimpl softwarecomponent_p; - ::xml_schema::string_pimpl string_p; - componentRepId_pimpl componentRepId_p; - componentFeatures_pimpl componentFeatures_p; - supportsInterface_pimpl supportsInterface_p; - ports_pimpl ports_p; - provides_pimpl provides_p; - portType_pimpl portType_p; - type_pimpl type_p; - uses_pimpl uses_p; - interfaces_pimpl interfaces_p; - interface_pimpl interface_p; - inheritsInterface_pimpl inheritsInterface_p; - propertyFile_pimpl propertyFile_p; - localFile_pimpl localFile_p; - - // Connect the parsers together. - // - softwarecomponent_p.parsers (string_p, - componentRepId_p, - string_p, - componentFeatures_p, - interfaces_p, - propertyFile_p); - - componentRepId_p.parsers (string_p); - - componentFeatures_p.parsers (supportsInterface_p, - ports_p); - - supportsInterface_p.parsers (string_p, - string_p); - - ports_p.parsers (provides_p, - uses_p); - - provides_p.parsers (string_p, - portType_p, - string_p, - string_p); - - portType_p.parsers (type_p); - - uses_p.parsers (string_p, - portType_p, - string_p, - string_p); - - interfaces_p.parsers (interface_p); - - interface_p.parsers (inheritsInterface_p, - string_p, - string_p); - - inheritsInterface_p.parsers (string_p); - - propertyFile_p.parsers (localFile_p, - string_p); - - localFile_p.parsers (string_p); - - // Parse the XML document. 
- // - ::xml_schema::document doc_p (softwarecomponent_p, "softwarecomponent"); - - softwarecomponent_p.pre (); - doc_p.parse (input); - return (softwarecomponent_p.post_softwarecomponent ()); - } catch (const ::xml_schema::exception& e) { - throw ossie::parser_error(e.what()); - } catch (const std::ios_base::failure& e) { - throw ossie::parser_error(e.what()); - } - } + std::auto_ptr parseSCD(std::istream& input) throw (ossie::parser_error); } } #endif diff --git a/redhawk/src/control/parser/internal/spd-parser.cpp b/redhawk/src/control/parser/internal/spd-parser.cpp new file mode 100644 index 000000000..6b93c3610 --- /dev/null +++ b/redhawk/src/control/parser/internal/spd-parser.cpp @@ -0,0 +1,182 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include "spd-parser.h" +#include "spd-pimpl.h" + +std::auto_ptr ossie::internalparser::parseSPD(std::istream& input) throw (ossie::parser_error) +{ + try { + // Instantiate individual parsers. 
+ // + ::spd::softPkg_pimpl softPkg_p; + ::xml_schema::string_pimpl string_p; + ::spd::author_pimpl author_p; + ::xml_schema::uri_pimpl uri_p; + ::spd::propertyFile_pimpl propertyFile_p; + ::spd::localFile_pimpl localFile_p; + ::spd::descriptor_pimpl descriptor_p; + ::spd::implementation_pimpl implementation_p; + ::spd::code_pimpl code_p; + ::xml_schema::unsigned_long_pimpl unsigned_long_p; + ::spd::codeFileType_pimpl codeFileType_p; + ::spd::compiler_pimpl compiler_p; + ::spd::programmingLanguage_pimpl programmingLanguage_p; + ::spd::humanLanguage_pimpl humanLanguage_p; + ::spd::runtime_pimpl runtime_p; + ::spd::os_pimpl os_p; + ::spd::processor_pimpl processor_p; + ::spd::dependency_pimpl dependency_p; + ::spd::softPkgRef_pimpl softPkgRef_p; + ::spd::implRef_pimpl implRef_p; + ::spd::propertyRef_pimpl propertyRef_p; + ::spd::usesDevice_pimpl usesDevice_p; + ::spd::aepcompliance_pimpl aepcompliance_p; + ::spd::simpleref_pimpl simpleref_p; + ::spd::simplesequenceref_pimpl simplesequenceref_p; + ::spd::structref_pimpl structref_p; + ::spd::structsequenceref_pimpl structsequenceref_p; + ::spd::values_pimpl values_p; + ::spd::structvalue_pimpl structvalue_p; + + // Connect the parsers together. 
+ // + softPkg_p.parsers (string_p, + author_p, + string_p, + propertyFile_p, + descriptor_p, + implementation_p, + usesDevice_p, + string_p, + string_p, + string_p, + string_p); + + author_p.parsers (string_p, + string_p, + uri_p); + + propertyFile_p.parsers (localFile_p, + string_p); + + localFile_p.parsers (string_p); + + descriptor_p.parsers (localFile_p, + string_p); + + implementation_p.parsers (string_p, + propertyFile_p, + code_p, + compiler_p, + programmingLanguage_p, + humanLanguage_p, + runtime_p, + os_p, + processor_p, + dependency_p, + usesDevice_p, + string_p, + aepcompliance_p); + + code_p.parsers (localFile_p, + string_p, + unsigned_long_p, + unsigned_long_p, + codeFileType_p); + + compiler_p.parsers (string_p, + string_p); + + programmingLanguage_p.parsers (string_p, + string_p); + + humanLanguage_p.parsers (string_p); + + runtime_p.parsers (string_p, + string_p); + + os_p.parsers (string_p, + string_p); + + processor_p.parsers (string_p); + + dependency_p.parsers (softPkgRef_p, + propertyRef_p, + simpleref_p, + simplesequenceref_p, + structref_p, + structsequenceref_p, + string_p); + + softPkgRef_p.parsers (localFile_p, + implRef_p); + + implRef_p.parsers (string_p); + + propertyRef_p.parsers (string_p, + string_p); + + usesDevice_p.parsers (propertyRef_p, + simpleref_p, + simplesequenceref_p, + structref_p, + structsequenceref_p, + string_p, + string_p); + + simpleref_p.parsers (string_p, + string_p); + + simplesequenceref_p.parsers (values_p, + string_p); + + values_p.parsers (string_p); + + structref_p.parsers (simpleref_p, + simplesequenceref_p, + string_p); + + structsequenceref_p.parsers (structvalue_p, + string_p); + + structvalue_p.parsers (simpleref_p, + simplesequenceref_p); + + // Parse the XML document. 
+ // + ::xml_schema::document doc_p ( + softPkg_p, + "", + "softpkg"); + + softPkg_p.pre (); + doc_p.parse (input); + return (softPkg_p.post_softPkg ()); + } catch (const ::xml_schema::exception& e) { + std::ostringstream err; + err << e; + throw ossie::parser_error(err.str()); + } catch (const std::ios_base::failure& e) { + throw ossie::parser_error(e.what()); + } +} diff --git a/redhawk/src/control/parser/internal/spd-parser.h b/redhawk/src/control/parser/internal/spd-parser.h index 618030cbb..3559b1fc1 100644 --- a/redhawk/src/control/parser/internal/spd-parser.h +++ b/redhawk/src/control/parser/internal/spd-parser.h @@ -21,173 +21,13 @@ #ifndef __SPD_PARSER_H__ #define __SPD_PARSER_H__ -#include -#include -#include"ossie/exceptions.h" -#include "spd-pimpl.h" - -#include +#include +#include +#include namespace ossie { namespace internalparser { - inline std::auto_ptr parseSPD(std::istream& input) throw (ossie::parser_error) { - using namespace spd; - - try { - // Instantiate individual parsers. 
- // - ::spd::softPkg_pimpl softPkg_p; - ::xml_schema::string_pimpl string_p; - ::spd::author_pimpl author_p; - ::xml_schema::uri_pimpl uri_p; - ::spd::propertyFile_pimpl propertyFile_p; - ::spd::localFile_pimpl localFile_p; - ::spd::descriptor_pimpl descriptor_p; - ::spd::implementation_pimpl implementation_p; - ::spd::code_pimpl code_p; - ::xml_schema::unsigned_long_pimpl unsigned_long_p; - ::spd::codeFileType_pimpl codeFileType_p; - ::spd::compiler_pimpl compiler_p; - ::spd::programmingLanguage_pimpl programmingLanguage_p; - ::spd::humanLanguage_pimpl humanLanguage_p; - ::spd::runtime_pimpl runtime_p; - ::spd::os_pimpl os_p; - ::spd::processor_pimpl processor_p; - ::spd::dependency_pimpl dependency_p; - ::spd::softPkgRef_pimpl softPkgRef_p; - ::spd::implRef_pimpl implRef_p; - ::spd::propertyRef_pimpl propertyRef_p; - ::spd::usesDevice_pimpl usesDevice_p; - ::spd::aepcompliance_pimpl aepcompliance_p; - ::spd::simpleref_pimpl simpleref_p; - ::spd::simplesequenceref_pimpl simplesequenceref_p; - ::spd::structref_pimpl structref_p; - ::spd::structsequenceref_pimpl structsequenceref_p; - ::spd::values_pimpl values_p; - ::spd::structvalue_pimpl structvalue_p; - - // Connect the parsers together. 
- // - softPkg_p.parsers (string_p, - author_p, - string_p, - propertyFile_p, - descriptor_p, - implementation_p, - usesDevice_p, - string_p, - string_p, - string_p, - string_p); - - author_p.parsers (string_p, - string_p, - uri_p); - - propertyFile_p.parsers (localFile_p, - string_p); - - localFile_p.parsers (string_p); - - descriptor_p.parsers (localFile_p, - string_p); - - implementation_p.parsers (string_p, - propertyFile_p, - code_p, - compiler_p, - programmingLanguage_p, - humanLanguage_p, - runtime_p, - os_p, - processor_p, - dependency_p, - usesDevice_p, - string_p, - aepcompliance_p); - - code_p.parsers (localFile_p, - string_p, - unsigned_long_p, - unsigned_long_p, - codeFileType_p); - - compiler_p.parsers (string_p, - string_p); - - programmingLanguage_p.parsers (string_p, - string_p); - - humanLanguage_p.parsers (string_p); - - runtime_p.parsers (string_p, - string_p); - - os_p.parsers (string_p, - string_p); - - processor_p.parsers (string_p); - - dependency_p.parsers (softPkgRef_p, - propertyRef_p, - simpleref_p, - simplesequenceref_p, - structref_p, - structsequenceref_p, - string_p); - - softPkgRef_p.parsers (localFile_p, - implRef_p); - - implRef_p.parsers (string_p); - - propertyRef_p.parsers (string_p, - string_p); - - usesDevice_p.parsers (propertyRef_p, - simpleref_p, - simplesequenceref_p, - structref_p, - structsequenceref_p, - string_p, - string_p); - - simpleref_p.parsers (string_p, - string_p); - - simplesequenceref_p.parsers (values_p, - string_p); - - values_p.parsers (string_p); - - structref_p.parsers (simpleref_p, - simplesequenceref_p, - string_p); - - structsequenceref_p.parsers (structvalue_p, - string_p); - - structvalue_p.parsers (simpleref_p, - simplesequenceref_p); - - // Parse the XML document. 
- // - ::xml_schema::document doc_p ( - softPkg_p, - "", - "softpkg"); - - softPkg_p.pre (); - doc_p.parse (input); - return (softPkg_p.post_softPkg ()); - } catch (const ::xml_schema::exception& e) { - std::ostringstream err; - err << e; - throw ossie::parser_error(err.str()); - } catch (const std::ios_base::failure& e) { - throw ossie::parser_error(e.what()); - } - } + std::auto_ptr parseSPD(std::istream& input) throw (ossie::parser_error); } } #endif diff --git a/redhawk/src/control/parser/internal/spd-pimpl.cpp b/redhawk/src/control/parser/internal/spd-pimpl.cpp index 753c6a66f..925b98b76 100644 --- a/redhawk/src/control/parser/internal/spd-pimpl.cpp +++ b/redhawk/src/control/parser/internal/spd-pimpl.cpp @@ -33,10 +33,11 @@ CREATE_LOGGER(spd_parser); // softPkg_pimpl // +rh_logger::LoggerPtr spd::parserLog; + void softPkg_pimpl:: pre () { - LOG_TRACE(spd_parser, "softpkg pre") _spd.reset(new ossie::SPD()); } @@ -61,28 +62,28 @@ description (const ::std::string& description) void softPkg_pimpl:: propertyfile (const ::std::string& propertyfile) { - LOG_TRACE(spd_parser, "softpkg propertyfile " << propertyfile) + RH_TRACE(spd::parserLog, "softpkg propertyfile " << propertyfile) _spd->properties = propertyfile; } void softPkg_pimpl:: descriptor (const ::std::string& descriptor) { - LOG_TRACE(spd_parser, "softpkg descriptor " << descriptor) + RH_TRACE(spd::parserLog, "softpkg descriptor " << descriptor) _spd->descriptor = descriptor; } void softPkg_pimpl:: implementation (const ossie::SPD::Implementation& implementation) { - LOG_TRACE(spd_parser, "softpkg impl " << implementation.implementationID << " entry point " << implementation.code.entrypoint) + RH_TRACE(spd::parserLog, "softpkg impl " << implementation.implementationID << " entry point " << implementation.code.entrypoint) _spd->implementations.push_back(implementation); } void softPkg_pimpl:: -usesdevice (const ossie::SPD::UsesDevice& usesdev) +usesdevice (const ossie::UsesDevice& usesdev) { - 
LOG_TRACE(spd_parser, "softpkg usesdev " << usesdev) + RH_TRACE(spd::parserLog, "softpkg usesdev " << usesdev) _spd->usesDevice.push_back(usesdev); } @@ -113,7 +114,7 @@ version (const ::std::string& version) std::auto_ptr softPkg_pimpl:: post_softPkg () { - LOG_TRACE(spd_parser, "softpkg post") + RH_TRACE(spd::parserLog, "softpkg post") return _spd; } @@ -233,35 +234,35 @@ post_descriptor () void implementation_pimpl:: pre () { - LOG_TRACE(spd_parser, "implementation pre") + RH_TRACE(spd::parserLog, "implementation pre") implementation.reset(new ossie::SPD::Implementation()); } void implementation_pimpl:: description (const ::std::string& description) { - LOG_TRACE(spd_parser, "implementation description " << description) + RH_TRACE(spd::parserLog, "implementation description " << description) // Ignored } void implementation_pimpl:: propertyfile (const ::std::string& propertyfile) { - LOG_TRACE(spd_parser, "implementation property file " << propertyfile) + RH_TRACE(spd::parserLog, "implementation property file " << propertyfile) implementation->prfFile = propertyfile; } void implementation_pimpl:: code (const ossie::SPD::Code& code) { - LOG_TRACE(spd_parser, "implementation code " << code) + RH_TRACE(spd::parserLog, "implementation code " << code) implementation->code = code; } void implementation_pimpl:: compiler (const ossie::SPD::NameVersionPair& compiler) { - LOG_TRACE(spd_parser, "implementation compiler " << compiler.first << " " << compiler.second) + RH_TRACE(spd::parserLog, "implementation compiler " << compiler.first << " " << compiler.second) implementation->compiler = compiler; } @@ -278,7 +279,7 @@ humanlanguage () void implementation_pimpl:: runtime (const ossie::SPD::NameVersionPair& runtime) { - LOG_TRACE(spd_parser, "implementation runtime " << runtime.first << " " << runtime.second) + RH_TRACE(spd::parserLog, "implementation runtime " << runtime.first << " " << runtime.second) implementation->runtime = runtime; } @@ -291,26 +292,26 @@ os (const 
ossie::SPD::NameVersionPair& os) void implementation_pimpl:: processor (const ::std::string& processor) { - LOG_TRACE(spd_parser, "implementation processor " << processor) + RH_TRACE(spd::parserLog, "implementation processor " << processor) implementation->processorDeps.push_back(processor); } void implementation_pimpl:: -dependency (ossie::SPD::DependencyRef* dep) +dependency (ossie::DependencyRef* dep) { assert(dep != 0); - LOG_TRACE(spd_parser, "add implementation dependencies " << *dep) - if (dynamic_cast(dep) != NULL) { - implementation->dependencies.push_back(*dynamic_cast(dep)); + RH_TRACE(spd::parserLog, "add implementation dependencies " << *dep) + if (dynamic_cast(dep) != NULL) { + implementation->dependencies.push_back(*dynamic_cast(dep)); } else if (dynamic_cast(dep) != NULL) { implementation->softPkgDependencies.push_back(*dynamic_cast(dep)); } } void implementation_pimpl:: -usesdevice (const ossie::SPD::UsesDevice& usesdev) +usesdevice (const ossie::UsesDevice& usesdev) { - LOG_TRACE(spd_parser, "implementation usesdev " << usesdev) + RH_TRACE(spd::parserLog, "implementation usesdev " << usesdev) implementation->usesDevice.push_back(usesdev); } @@ -328,7 +329,7 @@ aepcompliance () ossie::SPD::Implementation implementation_pimpl:: post_implementation () { - LOG_TRACE(spd_parser, "implementation post") + RH_TRACE(spd::parserLog, "implementation post") return *implementation; } @@ -338,49 +339,49 @@ post_implementation () void code_pimpl:: pre () { - LOG_TRACE(spd_parser, "code pre") + RH_TRACE(spd::parserLog, "code pre") code.reset(new ossie::SPD::Code()); } void code_pimpl:: localfile (const ::std::string& localfile) { - LOG_TRACE(spd_parser, "code localfile " << localfile) + RH_TRACE(spd::parserLog, "code localfile " << localfile) code->localfile = localfile; } void code_pimpl:: entrypoint (const ::std::string& entrypoint) { - LOG_TRACE(spd_parser, "code entrypoint " << entrypoint) + RH_TRACE(spd::parserLog, "code entrypoint " << entrypoint) 
code->entrypoint = entrypoint; } void code_pimpl:: stacksize (unsigned long long stacksize) { - LOG_TRACE(spd_parser, "code stacksize " << stacksize) + RH_TRACE(spd::parserLog, "code stacksize " << stacksize) code->stacksize = stacksize; } void code_pimpl:: priority (unsigned long long priority) { - LOG_TRACE(spd_parser, "code priority " << priority) + RH_TRACE(spd::parserLog, "code priority " << priority) code->priority = priority; } void code_pimpl:: -type (const ::std::string& type1) +type (ossie::SPD::Code::CodeType type1) { - LOG_TRACE(spd_parser, "code type " << type1) + RH_TRACE(spd::parserLog, "code type " << type1) code->type = type1; } ossie::SPD::Code code_pimpl:: post_code () { - LOG_TRACE(spd_parser, "code post " << *code) + RH_TRACE(spd::parserLog, "code post " << *code) assert(code.get() != 0); return *code; } @@ -518,43 +519,43 @@ pre () void dependency_pimpl:: softpkgref (const ossie::SPD::SoftPkgRef& ref) { - LOG_TRACE(spd_parser, "softpkg ref dep: " << ref) + RH_TRACE(spd::parserLog, "softpkg ref dep: " << ref) _ref.reset(new ossie::SPD::SoftPkgRef(ref)); } void dependency_pimpl:: -propertyref (const ossie::SPD::PropertyRef& ref) +propertyref (const ossie::PropertyRef& ref) { - LOG_TRACE(spd_parser, "property ref dep: " << ref) - _ref.reset(new ossie::SPD::PropertyRef(ref)); + RH_TRACE(spd::parserLog, "property ref dep: " << ref) + _ref.reset(new ossie::PropertyRef(ref)); } void dependency_pimpl:: simpleref (const ossie::SimplePropertyRef& ref) { - LOG_TRACE(spd_parser, "simple property ref dep: " << ref); - _ref.reset(new ossie::SPD::PropertyRef(ref)); + RH_TRACE(spd::parserLog, "simple property ref dep: " << ref); + _ref.reset(new ossie::PropertyRef(ref)); } void dependency_pimpl:: simplesequenceref (const ossie::SimpleSequencePropertyRef& ref) { - LOG_TRACE(spd_parser, "simple sequence property ref dep: " << ref); - _ref.reset(new ossie::SPD::PropertyRef(ref)); + RH_TRACE(spd::parserLog, "simple sequence property ref dep: " << ref); + 
_ref.reset(new ossie::PropertyRef(ref)); } void dependency_pimpl:: structref (const ossie::StructPropertyRef& ref) { - LOG_TRACE(spd_parser, "struct property ref dep: " << ref); - _ref.reset(new ossie::SPD::PropertyRef(ref)); + RH_TRACE(spd::parserLog, "struct property ref dep: " << ref); + _ref.reset(new ossie::PropertyRef(ref)); } void dependency_pimpl:: structsequenceref (const ossie::StructSequencePropertyRef& ref) { - LOG_TRACE(spd_parser, "struct sequence property ref dep: " << ref); - _ref.reset(new ossie::SPD::PropertyRef(ref)); + RH_TRACE(spd::parserLog, "struct sequence property ref dep: " << ref); + _ref.reset(new ossie::PropertyRef(ref)); } void dependency_pimpl:: @@ -564,11 +565,11 @@ type (const ::std::string& type) // } -ossie::SPD::DependencyRef* dependency_pimpl:: +ossie::DependencyRef* dependency_pimpl:: post_dependency () { assert(_ref.get() != 0); - LOG_TRACE(spd_parser, "dependency post " << *_ref); + RH_TRACE(spd::parserLog, "dependency post " << *_ref); return _ref.get(); } @@ -620,10 +621,10 @@ value (const ::std::string& value) _ref._value = value; } -ossie::SPD::PropertyRef propertyRef_pimpl:: +ossie::PropertyRef propertyRef_pimpl:: post_propertyRef () { - return ossie::SPD::PropertyRef(_ref); + return ossie::PropertyRef(_ref); } // softPkgRef_pimpl @@ -679,11 +680,11 @@ post_implRef () void usesDevice_pimpl:: pre () { - _uses.reset(new ossie::SPD::UsesDevice()); + _uses.reset(new ossie::UsesDevice()); } void usesDevice_pimpl:: -propertyref (const ossie::SPD::PropertyRef& propertyRef) +propertyref (const ossie::PropertyRef& propertyRef) { _uses->dependencies.push_back(propertyRef); } @@ -724,11 +725,11 @@ type (const ::std::string& type) _uses->type = type; } -ossie::SPD::UsesDevice usesDevice_pimpl:: +ossie::UsesDevice usesDevice_pimpl:: post_usesDevice () { assert(_uses.get() != 0); - LOG_TRACE(spd_parser, "post usesdev " << *_uses) + RH_TRACE(spd::parserLog, "post usesdev " << *_uses) return *_uses; } @@ -758,17 +759,22 @@ pre () { } 
-::std::string codeFileType_pimpl:: +ossie::SPD::Code::CodeType codeFileType_pimpl:: post_codeFileType () { - const ::std::string& v (post_nmtoken ()); - if ((v != "Executable") - || (v != "SharedLibrary") - || (v != "KernelModule") - || (v != "Driver")) { - // TODO throw invalid_value(this, v - } - return v; + const std::string& type = post_nmtoken(); + if (type == "Executable") { + return ossie::SPD::Code::EXECUTABLE; + } else if (type == "SharedLibrary") { + return ossie::SPD::Code::SHARED_LIBRARY; + } else if (type == "KernelModule") { + return ossie::SPD::Code::KERNEL_MODULE; + } else if (type == "Driver") { + return ossie::SPD::Code::DRIVER; + } else { + RH_WARN(spd::parserLog, "Invalid code type '" << type << "'"); + return ossie::SPD::Code::NONE; + } } // simpleref_pimpl diff --git a/redhawk/src/control/parser/internal/spd-pimpl.h b/redhawk/src/control/parser/internal/spd-pimpl.h index a6910fd81..0a0e78951 100644 --- a/redhawk/src/control/parser/internal/spd-pimpl.h +++ b/redhawk/src/control/parser/internal/spd-pimpl.h @@ -28,9 +28,12 @@ #define CXX___XML_XSD_SPD_PIMPL_H #include "spd-pskel.h" +#include namespace spd { + extern rh_logger::LoggerPtr parserLog; + class softPkg_pimpl: public softPkg_pskel { public: @@ -56,7 +59,7 @@ namespace spd implementation (const ossie::SPD::Implementation&); virtual void - usesdevice (const ossie::SPD::UsesDevice&); + usesdevice (const ossie::UsesDevice&); virtual void id (const ::std::string&); @@ -187,10 +190,10 @@ namespace spd processor (const ::std::string&); virtual void - dependency (ossie::SPD::DependencyRef*); + dependency (ossie::DependencyRef*); virtual void - usesdevice (const ossie::SPD::UsesDevice&); + usesdevice (const ossie::UsesDevice&); virtual void id (const ::std::string&); @@ -224,7 +227,7 @@ namespace spd priority (unsigned long long); virtual void - type (const ::std::string&); + type (ossie::SPD::Code::CodeType); virtual ossie::SPD::Code post_code (); @@ -326,7 +329,7 @@ namespace spd softpkgref 
(const ossie::SPD::SoftPkgRef& ref); virtual void - propertyref (const ossie::SPD::PropertyRef&); + propertyref (const ossie::PropertyRef&); virtual void simpleref (const ossie::SimplePropertyRef&); @@ -343,11 +346,11 @@ namespace spd virtual void type (const ::std::string&); - virtual ossie::SPD::DependencyRef* + virtual ossie::DependencyRef* post_dependency (); private: - std::auto_ptr _ref; + std::auto_ptr _ref; }; class runtime_pimpl: public virtual runtime_pskel @@ -381,7 +384,7 @@ namespace spd virtual void value (const ::std::string&); - virtual ossie::SPD::PropertyRef + virtual ossie::PropertyRef post_propertyRef (); private: @@ -432,7 +435,7 @@ namespace spd pre (); virtual void - propertyref (const ossie::SPD::PropertyRef&); + propertyref (const ossie::PropertyRef&); virtual void simpleref (const ossie::SimplePropertyRef&); @@ -452,11 +455,11 @@ namespace spd virtual void type (const ::std::string&); - virtual ossie::SPD::UsesDevice + virtual ossie::UsesDevice post_usesDevice (); private: - std::auto_ptr _uses; + std::auto_ptr _uses; }; class aepcompliance_pimpl: public virtual aepcompliance_pskel, @@ -477,7 +480,7 @@ namespace spd virtual void pre (); - virtual ::std::string + virtual ossie::SPD::Code::CodeType post_codeFileType (); }; diff --git a/redhawk/src/control/parser/internal/spd.map b/redhawk/src/control/parser/internal/spd.map index e7b0ca3ce..2045621da 100644 --- a/redhawk/src/control/parser/internal/spd.map +++ b/redhawk/src/control/parser/internal/spd.map @@ -32,21 +32,21 @@ softPkg "std::auto_ptr"; propertyFile "::std::string"; descriptor "::std::string"; author "ossie::SPD::Author"; -usesDevice "ossie::SPD::UsesDevice"; +usesDevice "ossie::UsesDevice"; implementation "ossie::SPD::Implementation"; -dependency "ossie::SPD::DependencyRef*"; -codeFileType "::std::string"; +dependency "ossie::DependencyRef*"; +codeFileType "::ossie::SPD::Code::CodeType" "::ossie::SPD::Code::CodeType"; code "ossie::SPD::Code"; compiler 
"ossie::SPD::NameVersionPair"; humanlangauge "::std::string"; programminglanguage "ossie::SPD::NameVersionPair"; -propertyRef "ossie::SPD::PropertyRef"; +propertyRef "ossie::PropertyRef"; softPkgRef "ossie::SPD::SoftPkgRef"; implRef "::std::string"; os "ossie::SPD::NameVersionPair"; processor "::std::string"; runtime "ossie::SPD::NameVersionPair"; -usesdevice "::std::vector"; +usesdevice "::std::vector"; simpleref "const ossie::SimplePropertyRef &" "const ossie::SimplePropertyRef &"; simplesequenceref "const ossie::SimpleSequencePropertyRef &" "const ossie::SimpleSequencePropertyRef &"; structref "const ossie::StructPropertyRef &" "const ossie::StructPropertyRef &"; diff --git a/redhawk/src/control/sdr/ComponentHost/.gitignore b/redhawk/src/control/sdr/ComponentHost/.gitignore new file mode 100644 index 000000000..30be27505 --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/.gitignore @@ -0,0 +1 @@ +ComponentHost diff --git a/redhawk/src/control/sdr/ComponentHost/ComponentHost.cpp b/redhawk/src/control/sdr/ComponentHost/ComponentHost.cpp new file mode 100644 index 000000000..0a9ea417a --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/ComponentHost.cpp @@ -0,0 +1,260 @@ +/* +* This file is protected by Copyright. Please refer to the COPYRIGHT file +* distributed with this source distribution. +* +* This file is part of REDHAWK core. +* +* REDHAWK core is free software: you can redistribute it and/or modify it +* under the terms of the GNU Lesser General Public License as published by the +* Free Software Foundation, either version 3 of the License, or (at your +* option) any later version. +* +* REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +* for more details. +* +* You should have received a copy of the GNU Lesser General Public License +* along with this program. 
If not, see http://www.gnu.org/licenses/. +*/ + +#include + +#include "ComponentHost.h" + +using namespace redhawk; +namespace fs = boost::filesystem; + +namespace redhawk { + struct ComponentEntry { + boost::scoped_ptr bundle; + Resource_impl* servant; + }; +} + +PREPARE_LOGGING(ComponentHost); + +ComponentHost::ComponentHost(const char* identifier, const char* label) : + Component(identifier, label), + counter(0) +{ + loadProperties(); +} + +ComponentHost::~ComponentHost() +{ + executorService.stop(); +} + +void ComponentHost::loadProperties() +{ + addProperty(preload, + preload, + "preload", + "", + "readwrite", + "", + "external", + "property"); +} + +void ComponentHost::constructor() +{ + executorService.start(); + + // Preload libraries as given in initial configuration (in most cases, this + // will be the PRF value, because ComponentHost is implicitly launched by + // the Domain or the Sandbox). This allows us to prevent common libraries + // like BulkIO from being loaded implicitly by components, which can lead + // to the component library being unable to be unloaded. 
+ LOG_DEBUG(ComponentHost, "Preloading " << preload.size() << " libraries"); + for (std::vector::iterator libname = preload.begin(); libname != preload.end(); ++libname) { + try { + ModuleLoader::Preload(*libname, ModuleLoader::LAZY, ModuleLoader::GLOBAL); + } catch (const std::exception& exc) { + // NB: The library name should be at the front of the error message + LOG_WARN(ComponentHost, "Unable to preload library " << exc.what()); + } + } +} + +CORBA::Boolean ComponentHost::allocateCapacity(const CF::Properties& capacities) +{ + return false; +} + +void ComponentHost::deallocateCapacity(const CF::Properties& capacites) +{ +} + +CF::Device::UsageType ComponentHost::usageState() +{ + return CF::Device::IDLE; +} + +CF::Device::AdminType ComponentHost::adminState() +{ + return CF::Device::UNLOCKED; +} + +void ComponentHost::adminState(CF::Device::AdminType state) +{ +} + +CF::Device::OperationalType ComponentHost::operationalState() +{ + return CF::Device::ENABLED; +} + +char* ComponentHost::label() +{ + return Resource_impl::identifier(); +} + +CF::AggregateDevice_ptr ComponentHost::compositeDevice() +{ + return CF::AggregateDevice::_nil(); +} + +void ComponentHost::load(CF::FileSystem_ptr, const char* fileName, CF::LoadableDevice::LoadType loadKind) +{ +} + +void ComponentHost::unload(const char* fileName) +{ +} + +void ComponentHost::terminate(CF::ExecutableDevice::ProcessID_Type processId) +{ + Resource_impl* component = 0; + { + boost::mutex::scoped_lock lock(loadMutex); + ComponentTable::iterator entry = activeComponents.find(processId); + if (entry == activeComponents.end()) { + throw CF::ExecutableDevice::InvalidProcess(CF::CF_EINVAL, "No such component"); + } + component = entry->second->servant; + } + component->releaseObject(); +} + +CF::ExecutableDevice::ProcessID_Type ComponentHost::execute(const char* name, const CF::Properties& options, const CF::Properties& parameters) +{ + return executeLinked(name, options, parameters, CF::StringSequence()); +} + 
+std::string ComponentHost::getRealPath(const std::string& path) +{ + // Assume that all paths are relative to the deployment root, which is + // given by the launching device (or the Sandbox) + fs::path realpath = fs::path(getDeploymentRoot()) / path; + if (!fs::exists(realpath)) { + std::string message = "File " + path + " does not exist"; + throw CF::InvalidFileName(CF::CF_EEXIST, message.c_str()); + } + return realpath.string(); +} + +CF::ExecutableDevice::ProcessID_Type ComponentHost::executeLinked(const char* name, const CF::Properties& options, const CF::Properties& parameters, const CF::StringSequence& deps) +{ + const std::string path = getRealPath(name); + + boost::scoped_ptr bundle(new ModuleBundle(path)); + + boost::mutex::scoped_lock lock(loadMutex); + for (size_t ii = 0; ii < deps.length(); ++ii) { + const std::string libpath = getRealPath(std::string(deps[ii])); + LOG_DEBUG(ComponentHost, "Loading dependency: " << libpath); + try { + // We don't know which symbols are needed from this library; they + // just need to be accessible to the component entry point. Loading + // them as "local" instead of "global" allows symbol conflicts to + // be resolved correctly (it seems). 
+ if (fs::is_directory(libpath)) { + bundle->loadDirectory(libpath, ModuleLoader::LAZY, ModuleLoader::LOCAL); + } else { + bundle->load(libpath, ModuleLoader::LAZY, ModuleLoader::LOCAL); + } + } catch (const std::exception& exc) { + LOG_ERROR(ComponentHost, "Unable to load dependency: " << exc.what()); + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EINVAL, exc.what()); + } + } + + LOG_DEBUG(ComponentHost, "Loading component module: " << path); + Module* module; + try { + // Resolve all required symbols now so that we can catch the error and + // turn it into an exception, rather than having the process exit at + // point-of-use + module = bundle->load(path, ModuleLoader::NOW, ModuleLoader::LOCAL); + } catch (const std::exception& exc) { + LOG_ERROR(ComponentHost, "Unable to load module: " << exc.what()) + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EINVAL, exc.what()); + } + + typedef Resource_impl* (*ConstructorPtr)(const std::string&, const std::string&); + ConstructorPtr make_component; + try { + LOG_DEBUG(ComponentHost, "Resolving module entry point"); + make_component = reinterpret_cast(module->symbol("make_component")); + } catch (const std::exception& exc) { + LOG_ERROR(ComponentHost, "Unable to load module entry point: " << exc.what()) + throw CF::ExecutableDevice::InvalidFunction(); + } + + LOG_DEBUG(ComponentHost, "Creating component"); + Resource_impl* servant = Resource_impl::create_component(make_component, parameters); + + ComponentEntry* component = new ComponentEntry; + component->bundle.swap(bundle); + component->servant = servant; + + int thread_id = ++counter; + activeComponents[thread_id] = component; + LOG_DEBUG(ComponentHost, "Assigning thread ID " << thread_id); + + servant->addReleaseListener(this, &ComponentHost::componentReleased); + + return thread_id; +} + +void ComponentHost::componentReleased(Resource_impl* component) +{ + LOG_DEBUG(ComponentHost, "Component released: " << component->getIdentifier()); + boost::mutex::scoped_lock 
lock(loadMutex); + ComponentTable::iterator entry; + for (entry = activeComponents.begin(); entry != activeComponents.end(); ++entry) { + if (entry->second->servant == component) { + break; + } + } + + if (entry == activeComponents.end()) { + LOG_DEBUG(ComponentHost, "Received release notification from unmanaged component " + << component->getIdentifier()); + return; + } + + executorService.execute(&ComponentHost::cleanupComponent, this, entry->second); + activeComponents.erase(entry); +} + +void ComponentHost::cleanupComponent(ComponentEntry* component) +{ + // Only if this is the last reference to the servant can we safely unload + // its shared libraries, because we need to know that it has been deleted + if (component->servant->_refcount_value() == 1) { + component->servant->_remove_ref(); + LOG_DEBUG(ComponentHost, "Unloading bundle " << component->bundle->name()); + component->bundle->unload(); + delete component; + return; + } + + // Try again after a small delay + LOG_DEBUG(ComponentHost, "Rescheduling component cleanup"); + boost::system_time when = boost::get_system_time() + boost::posix_time::microseconds(125); + executorService.schedule(when, &ComponentHost::cleanupComponent, this, component); +} + diff --git a/redhawk/src/control/sdr/ComponentHost/ComponentHost.h b/redhawk/src/control/sdr/ComponentHost/ComponentHost.h new file mode 100644 index 000000000..f1e399b7f --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/ComponentHost.h @@ -0,0 +1,83 @@ +/* +* This file is protected by Copyright. Please refer to the COPYRIGHT file +* distributed with this source distribution. +* +* This file is part of REDHAWK core. +* +* REDHAWK core is free software: you can redistribute it and/or modify it +* under the terms of the GNU Lesser General Public License as published by the +* Free Software Foundation, either version 3 of the License, or (at your +* option) any later version. 
+* +* REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +* for more details. +* +* You should have received a copy of the GNU Lesser General Public License +* along with this program. If not, see http://www.gnu.org/licenses/. +*/ + +#ifndef COMPONENTHOST_H +#define COMPONENTHOST_H + +#include +#include + +#include "ModuleLoader.h" + +namespace redhawk { + class ComponentEntry; + + class ComponentHost : public Component, public virtual POA_CF::ExecutableDevice + { + ENABLE_LOGGING; + + public: + ComponentHost(const char* identifier, const char* label); + ~ComponentHost(); + + void constructor(); + + // Device functions + virtual CORBA::Boolean allocateCapacity(const CF::Properties& capacities); + virtual void deallocateCapacity(const CF::Properties& capacites); + virtual CF::Device::UsageType usageState(); + virtual CF::Device::AdminType adminState(); + virtual void adminState(CF::Device::AdminType state); + virtual CF::Device::OperationalType operationalState(); + virtual char* label(); + virtual CF::AggregateDevice_ptr compositeDevice(); + + // Loadable device functions + virtual void load(CF::FileSystem_ptr fs, const char* fileName, CF::LoadableDevice::LoadType loadKind); + virtual void unload(const char* fileName); + + // Executable device functions + virtual void terminate(CF::ExecutableDevice::ProcessID_Type processId); + virtual CF::ExecutableDevice::ProcessID_Type execute(const char* name, const CF::Properties& options, const CF::Properties& parameters); + virtual CF::ExecutableDevice::ProcessID_Type executeLinked(const char* name, const CF::Properties& options, const CF::Properties& parameters, const CF::StringSequence& deps); + + private: + void loadProperties(); + + void componentReleased(Resource_impl* object); + void cleanupComponent(ComponentEntry* entry); + + std::string 
getRealPath(const std::string& path); + + int counter; + + boost::mutex loadMutex; + typedef std::map ComponentTable; + ComponentTable activeComponents; + + // Threaded service for performing cleanup checks + redhawk::ExecutorService executorService; + + /// Property: preload + std::vector preload; + }; +} + +#endif // COMPONENTHOST_H diff --git a/redhawk/src/control/sdr/ComponentHost/ComponentHost.prf.xml b/redhawk/src/control/sdr/ComponentHost/ComponentHost.prf.xml new file mode 100644 index 000000000..186bb177d --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/ComponentHost.prf.xml @@ -0,0 +1,33 @@ + + + + + + List of shared libraries to be preloaded by ComponentHost at startup time, to be made available for all components launched inside of this ComponentHost instance. + + libbulkio-2.2.so + libburstio.so + libfrontend-2.4.so + + + + + diff --git a/redhawk/src/control/sdr/ComponentHost/ComponentHost.scd.xml b/redhawk/src/control/sdr/ComponentHost/ComponentHost.scd.xml new file mode 100644 index 000000000..d32fcdd38 --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/ComponentHost.scd.xml @@ -0,0 +1,64 @@ + + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/control/sdr/ComponentHost/ComponentHost.spd.xml b/redhawk/src/control/sdr/ComponentHost/ComponentHost.spd.xml new file mode 100644 index 000000000..6c5d5f57e --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/ComponentHost.spd.xml @@ -0,0 +1,46 @@ + + + + + + + null + + + + + + + + + Deployable container for launching multiple components in the same process + + + ComponentHost + + + + + + + + + diff --git a/redhawk/src/control/sdr/ComponentHost/Makefile.am b/redhawk/src/control/sdr/ComponentHost/Makefile.am new file mode 100644 index 000000000..91bd9a689 --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/Makefile.am @@ -0,0 +1,33 @@ +# +# This file is protected by Copyright. 
Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +bindir = $(SDR_ROOT)/dom/mgr/rh/ComponentHost +bin_PROGRAMS = ComponentHost + +xmldir = $(SDR_ROOT)/dom/mgr/rh/ComponentHost +dist_xml_DATA = ComponentHost.scd.xml ComponentHost.prf.xml ComponentHost.spd.xml + +ComponentHost_SOURCES = ComponentHost.cpp ModuleLoader.cpp main.cpp + +ComponentHost_LDADD = $(top_builddir)/base/framework/libossiecf.la $(top_builddir)/base/framework/idl/libossieidl.la +ComponentHost_LDADD += $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) +ComponentHost_CPPFLAGS = -I$(top_srcdir)/base/include $(BOOST_CPPFLAGS) +ComponentHost_CXXFLAGS = -Wall + diff --git a/redhawk/src/control/sdr/ComponentHost/ModuleLoader.cpp b/redhawk/src/control/sdr/ComponentHost/ModuleLoader.cpp new file mode 100644 index 000000000..60fa9ae94 --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/ModuleLoader.cpp @@ -0,0 +1,290 @@ +/* +* This file is protected by Copyright. Please refer to the COPYRIGHT file +* distributed with this source distribution. +* +* This file is part of REDHAWK core. 
+* +* REDHAWK core is free software: you can redistribute it and/or modify it +* under the terms of the GNU Lesser General Public License as published by the +* Free Software Foundation, either version 3 of the License, or (at your +* option) any later version. +* +* REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +* for more details. +* +* You should have received a copy of the GNU Lesser General Public License +* along with this program. If not, see http://www.gnu.org/licenses/. +*/ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#if BOOST_FILESYSTEM_VERSION < 3 +#define BOOST_PATH_STRING(x) (x) +#else +#define BOOST_PATH_STRING(x) (x).string() +#endif + +#include "ModuleLoader.h" + +using namespace redhawk; +namespace fs = boost::filesystem; + +namespace { + static std::string real_path(const std::string& filename) + { + char buf[PATH_MAX]; + ::realpath(filename.c_str(), &buf[0]); + return std::string(buf); + } +} + +bool Module::IsLoadable(const std::string& filename) +{ + std::ifstream file(filename.c_str()); + if (!file) { + return false; + } + char ident[EI_NIDENT]; + file.read(ident, EI_NIDENT); + if (!file) { + return false; + } + if (std::strncmp(ident, ELFMAG, SELFMAG)) { + return false; + } + return true; +} + +Module::Module(const std::string& path, void* handle) : + _path(path), + _name(BOOST_PATH_STRING(fs::path(path).filename())), + _handle(handle), + _refcount(1), + _modtime(_getModTime()) +{ +} + +const std::string& Module::path() const +{ + return _path; +} + +const std::string& Module::name() const +{ + return _name; +} + +bool Module::modified() +{ + return (_getModTime() != _modtime); +} + +void Module::incref() +{ + LOG_TRACE(ModuleLoader, "Incrementing reference count for " << _path); + ++_refcount; +} + +bool 
Module::decref() +{ + LOG_TRACE(ModuleLoader, "Decrementing reference count for " << _path); + _refcount--; + return (_refcount > 0); +} + +bool Module::close() +{ + if (dlclose(_handle)) { + LOG_ERROR(ModuleLoader, "Error closing dynamic library " << _path << ": " << dlerror()); + return false; + } + + // Try to reopen the library without loading it (RTLD_NOLOAD); if it is + // successful, the library could not be unloaded, usually due to unique + // symbols in the library (or a dependency) + _handle = dlopen(_path.c_str(), RTLD_LAZY | RTLD_NOLOAD); + return (_handle == 0); +} + +void* Module::symbol(const std::string& name) +{ + // Clear error state; dlysm() can succeed but return null if the symbol is + // supposed to be null, so checking dlerror() is the only reliable way to + // catch failures + dlerror(); + void *symbol = dlsym(_handle, name.c_str()); + if (const char* error = dlerror()) { + throw std::runtime_error(error); + } + return symbol; +} + +time_t Module::_getModTime() +{ + struct stat file_status; + stat(_path.c_str(), &file_status); + return file_status.st_mtime; +} + +PREPARE_LOGGING(ModuleLoader); + +ModuleLoader::ModuleLoader() +{ +} + +ModuleLoader& ModuleLoader::Instance() +{ + static ModuleLoader loader; + return loader; +} + +void ModuleLoader::Preload(const std::string& path, LoadBinding binding, LoadVisibility visibility) +{ + Instance().preload(path, binding, visibility); +} + +Module* ModuleLoader::Load(const std::string& path, LoadBinding binding, LoadVisibility visibility) +{ + return Instance().load(path, binding, visibility); +} + +void ModuleLoader::Unload(Module* module) +{ + Instance().unload(module); +} + +void ModuleLoader::preload(const std::string& filename, LoadBinding binding, LoadVisibility visibility) +{ + // Preloading does not go through the normal module resolution that loading + // does, because the use case is different: + // * filename will usually be just a module name (e.g., "libbulkio-2.1.so") + // that is 
located via LD_LIBRARY_PATH + // * preloaded libraries are not unloaded, so no need to reference count + LOG_DEBUG(ModuleLoader, "Preloading module " << filename); + int flags = binding | visibility; + void* handle = dlopen(filename.c_str(), flags); + if (!handle) { + throw std::runtime_error(dlerror()); + } +} + +Module* ModuleLoader::load(const std::string& filename, LoadBinding binding, LoadVisibility visibility) +{ + // Use the real path (symlinks followed, relative paths removed) to avoid + // loading the same module under different aliases + const std::string path = real_path(filename); + Module* module = findModule(path); + if (!module) { + LOG_DEBUG(ModuleLoader, "Loading module " << path); + int flags = binding | visibility; + void* handle = dlopen(path.c_str(), flags); + if (!handle) { + throw std::runtime_error(dlerror()); + } + module = new Module(path, handle); + _modules[path] = module; + } else { + if (module->modified()) { + throw std::runtime_error(path + ": library is already loaded but has been modified"); + } + module->incref(); + } + return module; +} + +void ModuleLoader::unload(Module* module) +{ + if (module->decref()) { + return; + } + + LOG_DEBUG(ModuleLoader, "Unloading module " << module->path()); + if (module->close()) { + // Close succeeded, clean up + _modules.erase(module->path()); + delete module; + } else { + // Close failed, reset refcount to 1; this library must hang + // around for the life of the process + LOG_WARN(ModuleLoader, "Dynamic library " << module->name() << " could not be unloaded," + << " some symbols may still be in use (" << module->path() << ")"); + module->incref(); + } +} + +Module* ModuleLoader::findModule(const std::string& path) +{ + LoadTable::iterator module = _modules.find(path); + if (module != _modules.end()) { + return module->second; + } + return 0; +} + + +ModuleBundle::ModuleBundle(const std::string& name) : + _name(name) +{ +} + +ModuleBundle::~ModuleBundle() +{ + unload(); +} + +const std::string& 
ModuleBundle::name() const +{ + return _name; +} + +Module* ModuleBundle::load(const std::string& path, ModuleLoader::LoadBinding binding, ModuleLoader::LoadVisibility visibility) +{ + Module* module = ModuleLoader::Load(path, binding, visibility); + _modules.push_back(module); + return module; +} + +void ModuleBundle::loadDirectory(const std::string& path, ModuleLoader::LoadBinding binding, ModuleLoader::LoadVisibility visibility) +{ + // Shared libraries often have multiple symbolic links to the same library + // (e.g., libxxx.so->libxxx.so.0) that can lead to a lot of excess loads. + // By remembering the real path of everything, we can load only unique + // libraries. + std::set known_files; + + for (fs::directory_iterator entry = fs::directory_iterator(path); entry != fs::directory_iterator(); ++entry) { + if (!fs::is_directory(entry->symlink_status())) { + // Get the filename and run it through realpath to follow symlinks, + // relative paths (e.g., "../") and extra slashes + std::string filename = real_path(entry->path().string()); + + // Skip files that we've already seen + if (!known_files.insert(filename).second) { + continue; + } + + if (Module::IsLoadable(filename)) { + load(filename, binding, visibility); + } + } + } +} + +void ModuleBundle::unload() +{ + // Unload modules in reverse order of loading + std::for_each(_modules.rbegin(), _modules.rend(), &ModuleLoader::Unload); + _modules.clear(); +} diff --git a/redhawk/src/control/sdr/ComponentHost/ModuleLoader.h b/redhawk/src/control/sdr/ComponentHost/ModuleLoader.h new file mode 100644 index 000000000..07af4f993 --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/ModuleLoader.h @@ -0,0 +1,122 @@ +/* +* This file is protected by Copyright. Please refer to the COPYRIGHT file +* distributed with this source distribution. +* +* This file is part of REDHAWK core. 
+* +* REDHAWK core is free software: you can redistribute it and/or modify it +* under the terms of the GNU Lesser General Public License as published by the +* Free Software Foundation, either version 3 of the License, or (at your +* option) any later version. +* +* REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +* for more details. +* +* You should have received a copy of the GNU Lesser General Public License +* along with this program. If not, see http://www.gnu.org/licenses/. +*/ + +#ifndef MODULELOADER_H +#define MODULELOADER_H + +#include +#include +#include + +#include + +#include + +namespace redhawk { + + class Module { + public: + const std::string& name() const; + const std::string& path() const; + void* symbol(const std::string& name); + + bool modified(); + + static bool IsLoadable(const std::string& path); + + private: + Module(const std::string& path, void* handle); + + void incref(); + bool decref(); + + bool close(); + + time_t _getModTime(); + + friend class ModuleLoader; + + const std::string _path; + const std::string _name; + void* _handle; + int _refcount; + time_t _modtime; + }; + + class ModuleLoader { + + ENABLE_LOGGING; + + public: + enum LoadBinding { + LAZY = RTLD_LAZY, + NOW = RTLD_NOW + }; + + enum LoadVisibility { + GLOBAL = RTLD_GLOBAL, + LOCAL = RTLD_LOCAL + }; + + static void Preload(const std::string& path, LoadBinding binding, LoadVisibility visibility); + + static Module* Load(const std::string& path, LoadBinding binding, LoadVisibility visibility); + static void Unload(Module* module); + + private: + ModuleLoader(); + + static ModuleLoader& Instance(); + + void preload(const std::string& path, LoadBinding binding, LoadVisibility visibility); + + Module* load(const std::string& path, LoadBinding binding, LoadVisibility visibility); + void 
unload(Module* module); + + Module* findModule(const std::string& path); + + friend class Module; + + typedef std::map LoadTable; + LoadTable _modules; + }; + + class ModuleBundle { + public: + ModuleBundle(const std::string& name); + + ~ModuleBundle(); + + const std::string& name() const; + + Module* load(const std::string& path, ModuleLoader::LoadBinding binding, ModuleLoader::LoadVisibility visibility); + void loadDirectory(const std::string& path, ModuleLoader::LoadBinding binding, ModuleLoader::LoadVisibility visibility); + + void unload(); + + private: + const std::string _name; + typedef std::vector ModuleList; + ModuleList _modules; + }; + +} + +#endif // MODULELOADER_H diff --git a/redhawk/src/control/sdr/ComponentHost/main.cpp b/redhawk/src/control/sdr/ComponentHost/main.cpp new file mode 100644 index 000000000..bb5e22f87 --- /dev/null +++ b/redhawk/src/control/sdr/ComponentHost/main.cpp @@ -0,0 +1,28 @@ +/* +* This file is protected by Copyright. Please refer to the COPYRIGHT file +* distributed with this source distribution. +* +* This file is part of REDHAWK core. +* +* REDHAWK core is free software: you can redistribute it and/or modify it +* under the terms of the GNU Lesser General Public License as published by the +* Free Software Foundation, either version 3 of the License, or (at your +* option) any later version. +* +* REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +* for more details. +* +* You should have received a copy of the GNU Lesser General Public License +* along with this program. If not, see http://www.gnu.org/licenses/. 
+*/ + +#include "ComponentHost.h" + +int main(int argc, char* argv[]) +{ + redhawk::ComponentHost* servant; + Component::start_component(servant, argc, argv); + return 0; +} diff --git a/redhawk/src/control/sdr/Makefile.am b/redhawk/src/control/sdr/Makefile.am index 8f6cdf1d8..80a6fb7c3 100644 --- a/redhawk/src/control/sdr/Makefile.am +++ b/redhawk/src/control/sdr/Makefile.am @@ -32,7 +32,7 @@ servicesdir = $(devdir)/services dist_domain_DATA = domain/DomainManager.dmd.xml \ domain/DomainManager.dmd.xml.template -SUBDIRS = dommgr devmgr +SUBDIRS = dommgr devmgr ComponentHost install-exec-hook: $(mkdir_p) $(DESTDIR)$(depsdir) diff --git a/redhawk/src/control/sdr/devmgr/DeviceManager.prf.xml b/redhawk/src/control/sdr/devmgr/DeviceManager.prf.xml index a6b5f8bae..ac348dfe1 100644 --- a/redhawk/src/control/sdr/devmgr/DeviceManager.prf.xml +++ b/redhawk/src/control/sdr/devmgr/DeviceManager.prf.xml @@ -74,4 +74,11 @@ with this program. If not, see http://www.gnu.org/licenses/. + + + How often the Device Manager should check to see if the Domain contains a reference to this Device Manager (in seconds). 
This applies to persistence only + + 10 + + diff --git a/redhawk/src/control/sdr/devmgr/DeviceManager_DeployerSupport.cpp b/redhawk/src/control/sdr/devmgr/DeviceManager_DeployerSupport.cpp index 27623fd8d..f2f328c5d 100644 --- a/redhawk/src/control/sdr/devmgr/DeviceManager_DeployerSupport.cpp +++ b/redhawk/src/control/sdr/devmgr/DeviceManager_DeployerSupport.cpp @@ -57,7 +57,7 @@ static bool checkPath(const std::string& envpath, const std::string& pattern, ch } void DeviceManager_impl::createDeviceThreadAndHandleExceptions( - const ossie::ComponentPlacement& componentPlacement, + const ossie::DevicePlacement& componentPlacement, local_spd::ProgramProfile *compProfile, const std::string& componentType, const std::string& codeFilePath, @@ -65,17 +65,18 @@ void DeviceManager_impl::createDeviceThreadAndHandleExceptions( const std::string& compositeDeviceIOR ){ try { - std::string devcache; - std::string usageName; - createDeviceCacheLocation(devcache, usageName, instantiation); - // proces any instance overrides from DCD componentproperties const ossie::ComponentPropertyList& overrideProps = instantiation.getProperties(); for (unsigned int j = 0; j < overrideProps.size (); j++) { - LOG_TRACE(DeviceManager_impl, "Override Properties prop id " << overrideProps[j].getID()); + RH_TRACE(this->_baseLog, "Override Properties prop id " << overrideProps[j].getID()); compProfile->overrideProperty( overrideProps[j] ); } + std::string devcache; + std::string devcwd; + std::string usageName; + createDeviceCacheLocation(devcache, devcwd, usageName, compProfile, instantiation); + // these variables will cleanup path and environment from package mods that might have failed ProcessEnvironment restoreState; @@ -85,23 +86,24 @@ void DeviceManager_impl::createDeviceThreadAndHandleExceptions( codeFilePath, instantiation, devcache, + devcwd, usageName, compositeDeviceIOR ); } catch (std::runtime_error& ex) { - LOG_ERROR(DeviceManager_impl, + RH_ERROR(this->_baseLog, "The following runtime 
exception occurred: "<_baseLog, "The following standard exception occurred: "<_baseLog, "The following CORBA exception occurred: "<_baseLog, "Launching Device file failed with an unknown exception") throw; } @@ -110,39 +112,73 @@ void DeviceManager_impl::createDeviceThreadAndHandleExceptions( void DeviceManager_impl::createDeviceCacheLocation( std::string& devcache, + std::string& devcwd, std::string& usageName, - const ossie::ComponentInstantiation& instantiation) + local_spd::ProgramProfile *compProfile, + const ossie::ComponentInstantiation& instantiation ) { - // No log messages within this method as it is called between exec and fork - - // create device cache location - std::string baseDevCache = _cacheroot + "/." + _label; - if (instantiation.getUsageName() == 0) { + if (instantiation.getUsageName().empty()) { // no usage name was given, so create one. By definition, the instantiation id must be unique usageName = instantiation.instantiationId; } else { usageName = instantiation.getUsageName(); } - devcache = baseDevCache + "/" + usageName; + RH_DEBUG(this->_baseLog, "Getting Cache/Working Directories for: " << instantiation.instantiationId ); + redhawk::PropertyMap tmpProps = redhawk::PropertyMap::cast( compProfile->getNonNilConstructProperties() ); + std::string pcache; + std::string pcwd; + + if (tmpProps.find("cacheDirectory")!=tmpProps.end()) { + pcache = tmpProps["cacheDirectory"].toString(); + } + if (tmpProps.find("workingDirectory")!=tmpProps.end()) { + pcwd = tmpProps["workingDirectory"].toString(); + } + + if (pcache.empty()) { + std::string baseDevCache = _cacheroot + "/." 
+ _label; + devcache = baseDevCache + "/" + usageName; + } else { + devcache = pcache; + } + + devcwd = pcwd; + + // create device cache location bool retval = this->makeDirectory(devcache); if (not retval) { - LOG_ERROR(DeviceManager_impl, "Unable to create the Device cache: " << devcache) - exit(-1); + std::ostringstream emsg; + emsg << "Unable to create the Device cache directory: " << devcache << " for device : " << usageName; + RH_ERROR(this->_baseLog, emsg.str() ); + std::runtime_error( emsg.str().c_str() ); + } + + // create device cwd location if needed + if (not devcwd.empty()) { + retval = this->makeDirectory(devcwd); + if (not retval) { + std::ostringstream emsg; + emsg << "Unable to create the Device working directory: " << devcache << " for device : " << usageName; + RH_ERROR(this->_baseLog, emsg.str() ); + std::runtime_error( emsg.str().c_str() ); + } } } + void DeviceManager_impl::createDeviceThread( - const ossie::ComponentPlacement& componentPlacement, + const ossie::DevicePlacement& componentPlacement, local_spd::ProgramProfile *compProfile, const std::string& componentType, const std::string& codeFilePath, const ossie::ComponentInstantiation& instantiation, const std::string& devcache, + const std::string& devcwd, const std::string& usageName, const std::string& compositeDeviceIOR ){ - LOG_DEBUG(DeviceManager_impl, "Launching " << componentType << " file " + RH_DEBUG(this->_baseLog, "Launching " << componentType << " file " << codeFilePath << " Usage name " << instantiation.getUsageName()); @@ -162,7 +198,7 @@ void DeviceManager_impl::createDeviceThread( if (CORBA::is_nil(_aggDev_obj)) { std::ostringstream emsg; emsg << "Failed to deploy '" << usageName << "': Invalid composite device IOR: " << compositeDeviceIOR; - LOG_ERROR(DeviceManager_impl, emsg.str() ); + RH_ERROR(this->_baseLog, emsg.str() ); return; } @@ -172,7 +208,7 @@ void DeviceManager_impl::createDeviceThread( std::ostringstream emsg; emsg <<"Failed to deploy '" << usageName << "': 
DeployOnDevice must be an Executable Device: Unable to narrow to Executable Device"; - LOG_ERROR(DeviceManager_impl, emsg.str()); + RH_ERROR(this->_baseLog, emsg.str()); return; } @@ -182,9 +218,9 @@ void DeviceManager_impl::createDeviceThread( // all conditions are met for a persona // Load shared library into device using load mechanism std::string execDevId = ossie::corba::returnString(execDevice->identifier()); - LOG_DEBUG(DeviceManager_impl, "Loading '" << codeFilePath << "' to parent device: " << execDevId ); + RH_DEBUG(this->_baseLog, "Loading '" << codeFilePath << "' to parent device: " << execDevId ); execDevice->load(_fileSys, codeFilePath.c_str(), CF::LoadableDevice::SHARED_LIBRARY); - LOG_DEBUG(DeviceManager_impl, "Load complete on device: " << execDevId); + RH_DEBUG(this->_baseLog, "Load complete on device: " << execDevId); const std::string realCompType = "device"; @@ -199,7 +235,7 @@ void DeviceManager_impl::createDeviceThread( // Pack execparams into CF::Properties to send to the parent CF::Properties personaProps; for (ExecparamList::iterator arg = execparams.begin(); arg != execparams.end(); ++arg) { - LOG_DEBUG(DeviceManager_impl, arg->first << "=\"" << arg->second << "\""); + RH_DEBUG(this->_baseLog, arg->first << "=\"" << arg->second << "\""); CF::DataType argument; argument.id = arg->first.c_str(); argument.value <<= arg->second; @@ -216,9 +252,9 @@ void DeviceManager_impl::createDeviceThread( } // Tell parent device to execute shared library using execute mechanism - LOG_DEBUG(DeviceManager_impl, "Execute '" << codeFilePath << "' on parent device: " << execDevice->identifier()); + RH_DEBUG(this->_baseLog, "Execute '" << codeFilePath << "' on parent device: " << execDevice->identifier()); execDevice->executeLinked(codeFilePath.c_str(), options, personaProps, dep_seq); - LOG_DEBUG(DeviceManager_impl, "Execute complete"); + RH_DEBUG(this->_baseLog, "Execute complete"); } else { @@ -239,14 +275,14 @@ void DeviceManager_impl::createDeviceThread( 
// check that our executable is good if (access(codeFilePath.c_str(), R_OK | X_OK) == -1) { std::string errMsg = "Missing read or execute file permissions on file: " + codeFilePath; - LOG_ERROR(DeviceManager_impl, errMsg ); + RH_ERROR(this->_baseLog, errMsg ); return; } // convert std:string to char * for execv std::vector argv(new_argv.size() + 1, NULL); for (std::size_t i = 0; i < new_argv.size(); ++i) { - LOG_DEBUG(DeviceManager_impl, "ARG: " << i << " VALUE " << new_argv[i] ); + RH_DEBUG(this->_baseLog, "ARG: " << i << " VALUE " << new_argv[i] ); argv[i] = const_cast (new_argv[i].c_str()); } @@ -259,7 +295,7 @@ void DeviceManager_impl::createDeviceThread( int pid = fork(); if (pid > 0) { // parent process: pid is the process ID of the child - LOG_TRACE(DeviceManager_impl, "Resulting PID: " << pid); + RH_TRACE(this->_baseLog, "Resulting PID: " << pid); // Add the new device/service to the pending list. When it registers, the remaining // fields will be filled out and it will be moved to the registered list. @@ -316,7 +352,14 @@ void DeviceManager_impl::createDeviceThread( ////////////////////////////////////////////////////////////// // switch to working directory - chdir(devcache.c_str()); + if (not devcwd.empty()) { + int retval = chdir(devcwd.c_str()); + if (retval) { + RH_ERROR(__logger, "Unable to change the current working directory to : " << devcwd); + } + } else { + chdir(devcache.c_str()); + } // honor affinity requests try { @@ -351,7 +394,7 @@ void DeviceManager_impl::createDeviceThread( // The most likely cause is the operating system running out of // threads, in which case the system is in bad shape. Exit // with an error to allow the system to recover. 
- LOG_ERROR(DeviceManager_impl, "[DeviceManager::execute] Cannot create device thread: " << strerror(errno)); + RH_ERROR(this->_baseLog, "[DeviceManager::execute] Cannot create device thread: " << strerror(errno)); throw; } } @@ -365,7 +408,7 @@ void DeviceManager_impl::createDeviceThread( */ void DeviceManager_impl::createDeviceExecStatement( std::vector< std::string >& new_argv, - const ossie::ComponentPlacement& componentPlacement, + const ossie::DevicePlacement& componentPlacement, local_spd::ProgramProfile* compProfile, const std::string& componentType, const std::string& codeFilePath, @@ -423,7 +466,7 @@ void DeviceManager_impl::createDeviceExecStatement( dpath = _domainName + "/" + _label; new_argv.push_back("DOM_PATH"); new_argv.push_back(dpath); - LOG_DEBUG(DeviceManager_impl, "DOM_PATH: arg: " << new_argv[new_argv.size()-2] << " value:" << dpath); + RH_DEBUG(this->_baseLog, "DOM_PATH: arg: " << new_argv[new_argv.size()-2] << " value:" << dpath); // check exec params... place all of them here.. allow for instance props to override the params... 
CF::Properties eParams = compProfile->getPopulatedExecParameters(); @@ -432,9 +475,9 @@ void DeviceManager_impl::createDeviceExecStatement( std::string p2; p1 = ossie::corba::returnString(eParams[i].id); p2 = ossie::any_to_string(eParams[i].value); - LOG_TRACE(DeviceManager_impl, "createDevicExecStatement id= " << p1 << " value= " << p2 ); + RH_TRACE(this->_baseLog, "createDevicExecStatement id= " << p1 << " value= " << p2 ); CORBA::TypeCode_var etype=eParams[i].value.type(); - LOG_TRACE(DeviceManager_impl, " createDeviceExecParams id= " << p1 << " type " << etype->kind() << " tk_bool " << CORBA::tk_boolean ); + RH_TRACE(this->_baseLog, " createDeviceExecParams id= " << p1 << " type " << etype->kind() << " tk_bool " << CORBA::tk_boolean ); if ( etype->kind() == CORBA::tk_boolean ) { std::string v(""); resolveBoolExec( p1, v, compProfile, instantiation); @@ -462,14 +505,14 @@ void DeviceManager_impl::resolveBoolExec( const std::string& const ossie::ComponentInstantiation& instantiation ){ const std::vector& eprop = compProfile->prf.getExecParamProperties(); - LOG_TRACE(DeviceManager_impl, "resolveBoolExec exec params size " << eprop.size() ); + RH_TRACE(this->_baseLog, "resolveBoolExec exec params size " << eprop.size() ); for (unsigned int j = 0; j < eprop.size(); j++) { std::string prop_id = eprop[j]->getID(); if ( prop_id == id ){ - LOG_TRACE(DeviceManager_impl, "resolveBoolExec exec id == instantiation prop " << id << " = " << prop_id ); + RH_TRACE(this->_baseLog, "resolveBoolExec exec id == instantiation prop " << id << " = " << prop_id ); const SimpleProperty* tmp = dynamic_cast(eprop[j]); if (tmp) { - LOG_TRACE(DeviceManager_impl, "Default exec boolean with PRF value: " << tmp->getValue()); + RH_TRACE(this->_baseLog, "Default exec boolean with PRF value: " << tmp->getValue()); if (tmp->getValue()) { std::string v(tmp->getValue()); if ( v.size() != 0 ) { @@ -481,14 +524,14 @@ void DeviceManager_impl::resolveBoolExec( const std::string& } const std::vector& 
cprop = compProfile->prf.getConstructProperties(); - LOG_TRACE(DeviceManager_impl, "resolveBoolExec ctor params size: " << cprop.size() ); + RH_TRACE(this->_baseLog, "resolveBoolExec ctor params size: " << cprop.size() ); for (unsigned int j = 0; j < cprop.size(); j++) { std::string prop_id = cprop[j]->getID(); if ( prop_id == id ){ - LOG_TRACE(DeviceManager_impl, "resolveBoolExec ctor id == instantiation prop " << id << " = " << prop_id ); + RH_TRACE(this->_baseLog, "resolveBoolExec ctor id == instantiation prop " << id << " = " << prop_id ); const SimpleProperty* tmp = dynamic_cast(cprop[j]); if (tmp) { - LOG_TRACE(DeviceManager_impl, "Default exec boolean with PRF value: " << tmp->getValue()); + RH_TRACE(this->_baseLog, "Default exec boolean with PRF value: " << tmp->getValue()); if (tmp->getValue()) { std::string v(tmp->getValue()); if ( v.size() != 0 ) { @@ -504,10 +547,10 @@ void DeviceManager_impl::resolveBoolExec( const std::string& for (unsigned int j = 0; j < overrideProps.size (); j++) { std::string prop_id = overrideProps[j].getID(); if ( prop_id == id ){ - LOG_TRACE(DeviceManager_impl, "resolveBoolExec instance id == instantiation prop " << id << " = " << prop_id ); + RH_TRACE(this->_baseLog, "resolveBoolExec instance id == instantiation prop " << id << " = " << prop_id ); const SimplePropertyRef* ref = dynamic_cast(&overrideProps[j]); if ( ref ) { - LOG_TRACE(DeviceManager_impl, "Overriding exec boolean with instance value: " << ref->getValue()); + RH_TRACE(this->_baseLog, "Overriding exec boolean with instance value: " << ref->getValue()); if (ref->getValue()) { std::string v(ref->getValue()); value = v; @@ -522,7 +565,7 @@ void DeviceManager_impl::resolveBoolExec( const std::string& DeviceManager_impl::ExecparamList DeviceManager_impl::createDeviceExecparams( - const ossie::ComponentPlacement& componentPlacement, + const ossie::DevicePlacement& componentPlacement, local_spd::ProgramProfile *compProfile, const std::string& componentType, const 
std::string& codeFilePath, @@ -582,9 +625,9 @@ DeviceManager_impl::ExecparamList DeviceManager_impl::createDeviceExecparams( std::string p2; p1 = ossie::corba::returnString(eParams[i].id); p2 = ossie::any_to_string(eParams[i].value); - LOG_DEBUG(DeviceManager_impl, " createDeviceExecParams id= " << p1 << " value= " << p2 ); + RH_DEBUG(this->_baseLog, " createDeviceExecParams id= " << p1 << " value= " << p2 ); CORBA::TypeCode_var etype=eParams[i].value.type(); - LOG_DEBUG(DeviceManager_impl, " createDeviceExecParams id= " << p1 << " type " << etype->kind() ); + RH_DEBUG(this->_baseLog, " createDeviceExecParams id= " << p1 << " type " << etype->kind() ); if ( etype->kind() == CORBA::tk_boolean ) { std::string v(""); resolveBoolExec( p1, v, compProfile, instantiation); @@ -635,19 +678,19 @@ void DeviceManager_impl::resolveLoggingConfiguration( const std::string & const ossie::ComponentPropertyList& instanceprops = instantiation.getProperties(); ossie::ComponentPropertyList::const_iterator iprops_iter; for (iprops_iter = instanceprops.begin(); iprops_iter != instanceprops.end(); iprops_iter++) { - if ((strcmp(iprops_iter->getID(), "LOGGING_CONFIG_URI") == 0) + if ((strcmp(iprops_iter->getID().c_str(), "LOGGING_CONFIG_URI") == 0) && (dynamic_cast(&(*iprops_iter)) != NULL)) { const SimplePropertyRef* simpleref = dynamic_cast(&(*iprops_iter)); logging_uri = simpleref->getValue(); - LOG_DEBUG(DeviceManager_impl, "resolveLoggingConfig: property log config:" << logging_uri); + RH_DEBUG(this->_baseLog, "resolveLoggingConfig: property log config:" << logging_uri); continue; } - if ((strcmp(iprops_iter->getID(), "LOG_LEVEL") == 0) + if ((strcmp(iprops_iter->getID().c_str(), "LOG_LEVEL") == 0) && (dynamic_cast(&(*iprops_iter)) != NULL)) { const SimplePropertyRef* simpleref = dynamic_cast(&(*iprops_iter)); debug_level = resolveDebugLevel(simpleref->getValue()); - LOG_DEBUG(DeviceManager_impl, "resolveLoggingConfig: property log level:" << debug_level); + RH_DEBUG(this->_baseLog, 
"resolveLoggingConfig: property log level:" << debug_level); continue; } } @@ -656,19 +699,19 @@ void DeviceManager_impl::resolveLoggingConfiguration( const std::string & ossie::ComponentInstantiation::LoggingConfig log_config=instantiation.getLoggingConfig(); if ( !log_config.first.empty()) { logging_uri = log_config.first; - LOG_DEBUG(DeviceManager_impl, "resolveLoggingConfig: loggingconfig log config:" << logging_uri); + RH_DEBUG(this->_baseLog, "resolveLoggingConfig: loggingconfig log config:" << logging_uri); } // check if debug value provided if ( !log_config.second.empty() ) { debug_level = resolveDebugLevel( log_config.second ); - LOG_DEBUG(DeviceManager_impl, "resolveLoggingConfig: loggingconfig debug_level:" << debug_level); + RH_DEBUG(this->_baseLog, "resolveLoggingConfig: loggingconfig debug_level:" << debug_level); } if ( getUseLogConfigResolver() ) { ossie::logging::LogConfigUriResolverPtr logcfg_resolver = ossie::logging::GetLogConfigUriResolver(); if ( logcfg_resolver ) { std::string t_uri = logcfg_resolver->get_uri( logcfg_path ); - LOG_DEBUG(DeviceManager_impl, "Resolve LOGGING_CONFIG_URI: key:" << logcfg_path << " value <" << t_uri << ">" ); + RH_DEBUG(this->_baseLog, "Resolve LOGGING_CONFIG_URI: key:" << logcfg_path << " value <" << t_uri << ">" ); if ( !t_uri.empty() ) logging_uri = t_uri; } } @@ -684,21 +727,21 @@ void DeviceManager_impl::resolveLoggingConfiguration( const std::string & } new_argv.push_back("LOGGING_CONFIG_URI"); new_argv.push_back(logging_uri); - LOG_DEBUG(DeviceManager_impl, "RSC: " << usageName << " LOGGING PARAM:VALUE " << new_argv[new_argv.size()-2] << ":" <_baseLog, "RSC: " << usageName << " LOGGING PARAM:VALUE " << new_argv[new_argv.size()-2] << ":" <getLevel()); + + if (debug_level == -1) { + debug_level = _initialDebugLevel; } - // Convert the numeric level directly into its ASCII equivalent. 
- std::string dlevel=""; - dlevel.push_back(char(0x30 + debug_level)); - new_argv.push_back( "DEBUG_LEVEL"); - new_argv.push_back(dlevel); - LOG_DEBUG(DeviceManager_impl, "DEBUG_LEVEL: arg: " << new_argv[new_argv.size()-2] << " value:" << dlevel ); - + if (debug_level != -1) { + // Convert the numeric level directly into its ASCII equivalent. + std::string dlevel=""; + dlevel.push_back(char(0x30 + debug_level)); + new_argv.push_back( "DEBUG_LEVEL"); + new_argv.push_back(dlevel); + RH_DEBUG(this->_baseLog, "DEBUG_LEVEL: arg: " << new_argv[new_argv.size()-2] << " value:" << dlevel ); + } } @@ -707,7 +750,7 @@ void DeviceManager_impl::setEnvironment( ProcessEnvironment &env, const PackageMods &pkgMods ) { - LOG_TRACE(DeviceManager_impl, "setEnvironment setting enviroment deps:" << deps.size() ); + RH_TRACE(this->_baseLog, "setEnvironment setting enviroment deps:" << deps.size() ); // // get modifications for each of the dependency files that were loaded @@ -715,11 +758,11 @@ void DeviceManager_impl::setEnvironment( ProcessEnvironment &env, PackageModList mod_list; for (unsigned int i=0; i_baseLog, "setEnvironment looking for dep: " << dep ); if ( pkgMods.find(dep) == pkgMods.end()) { // it is not a loaded package } else { - LOG_TRACE(DeviceManager_impl, "adding to Mod List dep: " << dep ); + RH_TRACE(this->_baseLog, "adding to Mod List dep: " << dep ); mod_list.push_back(sharedPkgs[dep]); } } @@ -745,7 +788,7 @@ void DeviceManager_impl::setEnvironment( ProcessEnvironment &env, if (!current_path.empty()) { path_mod += ":" + current_path; } - LOG_TRACE(DeviceManager_impl, "setEnvironment env: " << mod->first << " value:" << path_mod ); + RH_TRACE(this->_baseLog, "setEnvironment env: " << mod->first << " value:" << path_mod ); env.setenv(mod->first, path_mod); current_path = path_mod; } @@ -765,13 +808,13 @@ void DeviceManager_impl::loadDependencies( local_spd::ProgramProfile *component, if (!implementation) { std::ostringstream emsg; emsg << "No implementation selected 
for dependency " << (*dep)->getName(); - LOG_ERROR(DeviceManager_impl, emsg.str()); + RH_ERROR(this->_baseLog, emsg.str()); throw std::runtime_error( emsg.str().c_str() ); } // Recursively load dependencies std::string pkgId = (*dep)->getName(); - LOG_TRACE(DeviceManager_impl, "Loading dependencies for soft package " << pkgId ); + RH_TRACE(this->_baseLog, "Loading dependencies for soft package " << pkgId ); loadDependencies(component, device, implementation->getSoftPkgDependencies()); // Determine absolute path of dependency's local file @@ -788,7 +831,7 @@ void DeviceManager_impl::loadDependencies( local_spd::ProgramProfile *component, } const std::string fileName = codeLocalFile.string(); - LOG_DEBUG(DeviceManager_impl, "Loading dependency local file " << fileName); + RH_DEBUG(this->_baseLog, "Loading dependency local file " << fileName); try { if ( !CORBA::is_nil(device) ) { device->load(_fileSys, fileName.c_str(), codeType); @@ -799,7 +842,7 @@ void DeviceManager_impl::loadDependencies( local_spd::ProgramProfile *component, } catch (...) 
{ std::ostringstream emsg ; emsg << "Failure loading file " << fileName; - LOG_ERROR(DeviceManager_impl, emsg.str()); + RH_ERROR(this->_baseLog, emsg.str()); throw std::runtime_error( emsg.str().c_str() ); } @@ -822,7 +865,7 @@ void DeviceManager_impl::do_load ( CF::FileSystem_ptr fs, if ((loadKind != CF::LoadableDevice::EXECUTABLE) && (loadKind != CF::LoadableDevice::SHARED_LIBRARY)) { std::ostringstream emsg; emsg << "File: " << fileName << " is not CF::LoadableDevice::EXECUTABLE or CF::LoadableDevice::SHARED_LIBRARY"; - LOG_ERROR(DeviceManager_impl, emsg.str() ); + RH_ERROR(this->_baseLog, emsg.str() ); throw std::runtime_error( emsg.str().c_str() ); } @@ -838,14 +881,14 @@ void DeviceManager_impl::do_load ( CF::FileSystem_ptr fs, if (!fs::exists (workpath)) { std::ostringstream emsg; emsg << "File : " << workingFileName << " does not exist"; - LOG_ERROR(DeviceManager_impl, emsg.str()); + RH_ERROR(this->_baseLog, emsg.str()); throw std::runtime_error(emsg.str().c_str()); } } catch(...){ std::ostringstream emsg; emsg << "Unknown exception when testing file : " << workingFileName ; - LOG_ERROR(DeviceManager_impl, emsg.str()); + RH_ERROR(this->_baseLog, emsg.str()); throw std::runtime_error(emsg.str().c_str()); } @@ -860,7 +903,7 @@ void DeviceManager_impl::do_load ( CF::FileSystem_ptr fs, // // Check to see if it's a C library // - LOG_DEBUG(DeviceManager_impl, "do_load check read elf file: " << workingFileName ); + RH_DEBUG(this->_baseLog, "do_load check read elf file: " << workingFileName ); std::string command = "readelf -h "; command += workingFileName; command += std::string(" 2>&1"); // redirect stdout to /dev/null @@ -879,7 +922,7 @@ void DeviceManager_impl::do_load ( CF::FileSystem_ptr fs, // Check to see if it's a Python module if (!CLibrary) { - LOG_DEBUG(DeviceManager_impl, "do_load checking python module file: " << workingFileName ); + RH_DEBUG(this->_baseLog, "do_load checking python module file: " << workingFileName ); currentPath = 
ossie::getCurrentDirName(); std::string::size_type lastSlash = workingFileName.find_last_of("/"); if (lastSlash == std::string::npos) { // there are no slashes in the name @@ -895,7 +938,7 @@ void DeviceManager_impl::do_load ( CF::FileSystem_ptr fs, std::string command = "python -c \"import "; command += fileOrDirectoryName; command += std::string("\" 2>&1"); // redirect stdout and stderr to /dev/null - LOG_DEBUG(DeviceManager_impl, "do_load check python module (.py) cmd= " << command << + RH_DEBUG(this->_baseLog, "do_load check python module (.py) cmd= " << command << " workingFileName: " << workingFileName << " currentDirectory: " << currentPath); FILE *fileCheck = popen(command.c_str(), "r"); @@ -925,7 +968,7 @@ void DeviceManager_impl::do_load ( CF::FileSystem_ptr fs, std::string command = "python -c \"import "; command += fileOrDirectoryName; command += std::string("\" 2>&1"); // redirect stdout and stderr to /dev/null - LOG_DEBUG(DeviceManager_impl, "cmd= " << command << + RH_DEBUG(this->_baseLog, "cmd= " << command << " relativeFileName: " << relativeFileName << " relativePath: " << relativePath); FILE *fileCheck = popen(command.c_str(), "r"); @@ -944,7 +987,7 @@ void DeviceManager_impl::do_load ( CF::FileSystem_ptr fs, // Check to see if it's a Java package if (!CLibrary and !PythonPackage) { - LOG_DEBUG(DeviceManager_impl, "do_load trying JAVA test for : " << workingFileName ); + RH_DEBUG(this->_baseLog, "do_load trying JAVA test for : " << workingFileName ); if ( ossie::helpers::is_jarfile( workingFileName ) == 0 ) { env_changes.addJavaPath(workingFileName); JavaJar = true; @@ -954,7 +997,7 @@ void DeviceManager_impl::do_load ( CF::FileSystem_ptr fs, // It doesn't match anything, assume that it's a set of libraries if (!(CLibrary || PythonPackage || JavaJar)) { const std::string additionalPath = workingFileName; - LOG_DEBUG(DeviceManager_impl, "do_load Adding (PATH) Directory ") + RH_DEBUG(this->_baseLog, "do_load Adding (PATH) Directory ") 
env_changes.addLibPath(additionalPath); env_changes.addJavaPath(additionalPath); env_changes.addOctavePath(additionalPath); diff --git a/redhawk/src/control/sdr/devmgr/DeviceManager_impl.cpp b/redhawk/src/control/sdr/devmgr/DeviceManager_impl.cpp index 375f45a49..0a358dc66 100644 --- a/redhawk/src/control/sdr/devmgr/DeviceManager_impl.cpp +++ b/redhawk/src/control/sdr/devmgr/DeviceManager_impl.cpp @@ -25,7 +25,9 @@ #include #include #include +#include +#include #include #include #include @@ -54,9 +56,14 @@ DeviceManager_impl::DeviceManager_impl( const struct utsname &uname, bool useLogCfgResolver, const char *cpuBlackList, - bool* internalShutdown) : + bool* internalShutdown, + const std::string &spdFile, + int initialDebugLevel ): + Logging_impl("DeviceManager"), + DomainWatchThread(NULL), _registeredDevices(), - devmgr_info(0) + devmgr_info(0), + _initialDebugLevel(initialDebugLevel) { __logger = rh_logger::Logger::getResourceLogger("DeviceManager_impl"); @@ -69,9 +76,26 @@ DeviceManager_impl::DeviceManager_impl( _internalShutdown = internalShutdown; _useLogConfigUriResolver = useLogCfgResolver; + _spdFile = spdFile; + // save os and processor when matching deployments - processor_name = _uname.machine; - os_name = _uname.sysname; + addProperty(processor_name, + _uname.machine, + "DCE:fefb9c66-d14a-438d-ad59-2cfd1adb272b", + "processor_name", + "readonly", + "", + "eq", + "property,allocation"); + + addProperty(os_name, + _uname.sysname, + "DCE:4a23ad60-0b25-4121-a630-68803a498f75", + "os_name", + "readonly", + "", + "eq", + "property,allocation"); // resolve local sdr root fs::path tsdr = fs::path(_rootfs); @@ -144,6 +168,15 @@ DeviceManager_impl::DeviceManager_impl( "millisec", "external", "property"); + + addProperty(DOMAIN_REFRESH, + 10, + "DOMAIN_REFRESH", + "DOMAIN_REFRESH", + "readwrite", + "seconds", + "external", + "property"); // translate cpuBlackList to cpu ids try { @@ -161,7 +194,9 @@ DeviceManager_impl::DeviceManager_impl( gethostname(_hostname, 
1024); std::string hostname(_hostname); HOSTNAME = hostname; - this->_dmnMgr = CF::DomainManager::_nil(); + this->_dmnMgr = CF::DomainManager::_nil(); + domain_persistence = false; + this->DOMAIN_REFRESH = 0; } @@ -198,34 +233,158 @@ void DeviceManager_impl::killPendingDevices (int signal, int timeout) { } +int DeviceManager_impl::checkDomain() +{ + CF::DomainManager::DeviceManagerSequence_var devMgrs; + try { + devMgrs = this->_dmnMgr->deviceManagers(); + } catch ( ... ) { + if ((this->startDomainWarn.tv_sec == 0) and (this->startDomainWarn.tv_usec == 0)) { + RH_WARN(this->_baseLog, "Unable to contact the Domain Manager"); + gettimeofday(&startDomainWarn, NULL); + return DomainCheckThread::NOOP; + } + struct timeval now; + gettimeofday(&now, NULL); + float minutes = 15; + if ((now.tv_sec - startDomainWarn.tv_sec) >= (minutes * 60)) { + RH_WARN(this->_baseLog, "Unable to contact the Domain Manager"); + gettimeofday(&startDomainWarn, NULL); + return DomainCheckThread::NOOP; + } + return DomainCheckThread::NOOP; + } + + for (unsigned int i=0; ilength(); i++) { + if (devMgrs[i]->_is_equivalent(this->_this())) { + return DomainCheckThread::NOOP; + } + } + + this->reset(); + + return DomainCheckThread::NOOP; +} + +void DeviceManager_impl::domainRefreshChanged(float oldValue, float newValue) +{ + if ((not this->domain_persistence) and (newValue != 0)) { + this->DOMAIN_REFRESH = 0; + std::string message("DOMAIN_REFRESH can only be set when the Domain Manager persistence is enabled"); + redhawk::PropertyMap query_props; + query_props["DOMAIN_REFRESH"] = redhawk::Value(newValue); + throw(CF::PropertySet::InvalidConfiguration(message.c_str(), query_props)); + } + this->DOMAIN_REFRESH = newValue; + this->DomainWatchThread->updateDelay(this->DOMAIN_REFRESH); +} + +void DeviceManager_impl::reset() +{ + if (_adminState == DEVMGR_SHUTTING_DOWN) + return; + + // release all devices and services + clean_registeredServices(); + clean_externalServices(); + clean_registeredDevices(); 
+ + try { + deleteFileSystems(); + } catch ( ... ) { + } + + // try to get the reference + bool done = false; + while (not done) { + try { + getDomainManagerReference (_domainName.c_str()); + usleep(500); + done = true; + } catch ( ... ) { + } + } + // call postContructor + postConstructor(_domainName.c_str()); +} + +void DeviceManager_impl::setLogLevel( const char *logger_id, const CF::LogLevel newLevel ) throw (CF::UnknownIdentifier) +{ + BOOST_FOREACH(DeviceNode* _device, _registeredDevices) { + CF::Device_var device_ref = _device->device; + try { + device_ref->setLogLevel(logger_id, newLevel); + return; + } catch (const CF::UnknownIdentifier& ex) { + } + } + Logging_impl::setLogLevel(logger_id, newLevel); +} + +CF::LogLevel DeviceManager_impl::getLogLevel( const char *logger_id ) throw (CF::UnknownIdentifier) +{ + BOOST_FOREACH(DeviceNode* _device, _registeredDevices) { + CF::Device_var device_ref = _device->device; + try { + CF::LogLevel level = device_ref->getLogLevel(logger_id); + return level; + } catch (const CF::UnknownIdentifier& ex) { + } + } + return Logging_impl::getLogLevel(logger_id); +} + +CF::StringSequence* DeviceManager_impl::getNamedLoggers() +{ + CF::StringSequence_var retval = Logging_impl::getNamedLoggers(); + BOOST_FOREACH(DeviceNode* _device, _registeredDevices) { + CF::Device_var device_ref = _device->device; + CF::StringSequence_var device_logger_list = device_ref->getNamedLoggers(); + for (unsigned int i=0; ilength(); i++) { + ossie::corba::push_back(retval, CORBA::string_dup(device_logger_list[i])); + } + } + return retval._retn(); +} + +void DeviceManager_impl::resetLog() +{ + BOOST_FOREACH(DeviceNode* _device, _registeredDevices) { + CF::Device_var device_ref = _device->device; + device_ref->resetLog(); + } + Logging_impl::resetLog(); +} + + void DeviceManager_impl::parseDeviceConfigurationProfile(const char *overrideDomainName){ - LOG_TRACE(DeviceManager_impl, "Using DCD profile " << _deviceConfigurationProfile); + 
RH_TRACE(this->_baseLog, "Using DCD profile " << _deviceConfigurationProfile); try { _fileSys->exists(_deviceConfigurationProfile.c_str()); } catch( CF::InvalidFileName& _ex ) { std::ostringstream emsg; emsg <<"Terminating device manager; DCD file " << _deviceConfigurationProfile << " does not exist; " << _ex.msg; - LOG_TRACE(DeviceManager_impl, emsg.str()); + RH_TRACE(this->_baseLog, emsg.str()); throw std::runtime_error(emsg.str().c_str()); } catch ( std::exception& ex ) { std::ostringstream emsg; emsg << "The following standard exception occurred: "<_baseLog, emsg.str()); throw std::runtime_error(emsg.str().c_str()); } catch ( CORBA::Exception& ex ) { std::ostringstream emsg; emsg << "The following CORBA exception occurred: "<_baseLog, emsg.str()); throw std::runtime_error(emsg.str().c_str()); } catch( ... ) { std::ostringstream emsg; emsg << "Terminating device manager; unknown exception checking if " << _deviceConfigurationProfile << " exists; "; - LOG_TRACE(DeviceManager_impl, emsg.str()); + RH_TRACE(this->_baseLog, emsg.str()); throw std::runtime_error(emsg.str().c_str()); } - LOG_TRACE(DeviceManager_impl, "Parsing DCD profile") + RH_TRACE(this->_baseLog, "Parsing DCD profile") try { File_stream _dcd(_fileSys, _deviceConfigurationProfile.c_str()); node_dcd.load(_dcd); @@ -235,34 +394,34 @@ void DeviceManager_impl::parseDeviceConfigurationProfile(const char *overrideDom std::ostringstream eout; eout << "Exiting device manager; failure parsing DCD: " << _deviceConfigurationProfile << ". 
" << parser_error_line << " The XML parser returned the following error: " << e.what(); - LOG_TRACE(DeviceManager_impl, eout.str()); + RH_TRACE(this->_baseLog, eout.str()); throw std::runtime_error(e.what()); } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } catch ( CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } catch ( ... ) { - LOG_TRACE(DeviceManager_impl, "Exiting device manager; Unexpected failure parsing DCD: " << _deviceConfigurationProfile ); + RH_TRACE(this->_baseLog, "Exiting device manager; Unexpected failure parsing DCD: " << _deviceConfigurationProfile ); throw std::runtime_error("unexpected error"); } _identifier = node_dcd.getID(); _label = node_dcd.getName(); - LOG_TRACE(DeviceManager_impl, "DeviceManager id: " << node_dcd.getID() << " name: " << node_dcd.getName()); + RH_TRACE(this->_baseLog, "DeviceManager id: " << node_dcd.getID() << " name: " << node_dcd.getName()); if (overrideDomainName == NULL) { - LOG_TRACE(DeviceManager_impl, "Reading domainname from DCD file") + RH_TRACE(this->_baseLog, "Reading domainname from DCD file") CORBA::String_var tmp_domainManagerName = node_dcd.getDomainManagerName(); _domainManagerName = (char *)tmp_domainManagerName; _domainName = _domainManagerName.substr(0, _domainManagerName.find_first_of("/")); } else { - LOG_TRACE(DeviceManager_impl, "Overriding domainname from DCD file") + RH_TRACE(this->_baseLog, "Overriding domainname from DCD file") _domainName = overrideDomainName; _domainManagerName = _domainName + "/" + _domainName; } @@ -287,7 +446,7 @@ void DeviceManager_impl::parseSpd() { devmgr_info = local_spd::ProgramProfile::LoadProfile( _fileSys, devmgrsoftpkg.c_str(), _local_dom_filesys ); } catch( std::runtime_error &ex ) { - 
LOG_TRACE(DeviceManager_impl, ex.what() ); + RH_TRACE(this->_baseLog, ex.what() ); throw; } @@ -324,11 +483,11 @@ void DeviceManager_impl::setupImplementationForHost() { pref._id = "DCE:fefb9c66-d14a-438d-ad59-2cfd1adb272b"; pref._value = processor_name; _my_host.push_back( pref.clone() ); - LOG_INFO(DeviceManager_impl, "adding in property for :" << pref._id << " value : " << processor_name ); + RH_INFO(this->_baseLog, "adding in property for :" << pref._id << " value : " << processor_name ); pref._id = "DCE:4a23ad60-0b25-4121-a630-68803a498f75"; pref._value = os_name; _my_host.push_back( pref.clone() ); - LOG_INFO(DeviceManager_impl, "adding in property for :" << pref._id << " value : " << os_name ); + RH_INFO(this->_baseLog, "adding in property for :" << pref._id << " value : " << os_name ); host_props.override(_my_host); local_spd::ImplementationInfo::List _allDevManImpls; @@ -336,7 +495,7 @@ void DeviceManager_impl::setupImplementationForHost() { local_spd::ImplementationInfo::List::const_iterator itr; if (_allDevManImpls.size() == 0) { std::string emsg="Device manager SPD has no implementations to match against."; - LOG_ERROR(DeviceManager_impl, emsg); + RH_ERROR(this->_baseLog, emsg); throw std::runtime_error(emsg); } @@ -353,10 +512,10 @@ void DeviceManager_impl::setupImplementationForHost() { if (!found_impl){ std::ostringstream oss; oss << "Unable to find device manager implementation to match processor: " << _uname.machine; - LOG_TRACE(DeviceManager_impl, oss.str() ); + RH_TRACE(this->_baseLog, oss.str() ); throw std::runtime_error( oss.str().c_str()); } - LOG_TRACE(DeviceManager_impl, "Using device manager implementation " << devmgr_info->getID()); + RH_TRACE(this->_baseLog, "Using device manager implementation " << devmgr_info->getID()); } void DeviceManager_impl::resolveNamingContext(){ @@ -366,12 +525,12 @@ void DeviceManager_impl::resolveNamingContext(){ try { CORBA::Object_var obj = ossie::corba::InitialNamingContext()->resolve(base_context); 
rootContext = CosNaming::NamingContext::_narrow(obj); - LOG_TRACE(DeviceManager_impl, "Connected"); + RH_TRACE(this->_baseLog, "Connected"); break; } catch ( ... ) { if (!warnedMissing) { warnedMissing = true; - LOG_WARN(DeviceManager_impl, "Unable to find naming context " << _domainManagerName << "; retrying"); + RH_WARN(this->_baseLog, "Unable to find naming context " << _domainManagerName << "; retrying"); } } // Sleep for a tenth of a second to give the DomainManager a chance to @@ -380,11 +539,11 @@ void DeviceManager_impl::resolveNamingContext(){ // If a shutdown occurs while waiting, turn it into an exception. if (*_internalShutdown) { - LOG_TRACE(DeviceManager_impl, "Interrupted when waiting to locate DomainManager naming context"); + RH_TRACE(this->_baseLog, "Interrupted when waiting to locate DomainManager naming context"); throw std::runtime_error("Interrupted when waiting to locate DomainManager naming context"); } } - LOG_TRACE(DeviceManager_impl, "Resolved DomainManager naming context"); + RH_TRACE(this->_baseLog, "Resolved DomainManager naming context"); } /* @@ -407,29 +566,29 @@ bool DeviceManager_impl::getCodeFilePath( FileSystem_impl*& fs_servant, bool useLocalFileSystem ) { - LOG_TRACE(DeviceManager_impl, "getCodeFile: spdPath: " << SPDParser.getSPDPath() ); - LOG_TRACE(DeviceManager_impl, "getCodeFile: localFileName: " << matchedDeviceImpl.getLocalFileName()); - LOG_TRACE(DeviceManager_impl, "getCodeFile: entryPoint: " << matchedDeviceImpl.getEntryPoint()); + RH_TRACE(this->_baseLog, "getCodeFile: spdPath: " << SPDParser.getSPDPath() ); + RH_TRACE(this->_baseLog, "getCodeFile: localFileName: " << matchedDeviceImpl.getLocalFileName()); + RH_TRACE(this->_baseLog, "getCodeFile: entryPoint: " << matchedDeviceImpl.getEntryPoint()); // get code file (the path to the device that must be run) fs::path codeFile = fs::path(matchedDeviceImpl.getLocalFileName()); if (!codeFile.has_root_directory()) { codeFile = fs::path(SPDParser.getSPDPath()) / codeFile; 
- LOG_TRACE(DeviceManager_impl, "code localfile had relative path; absolute path: " << codeFile); + RH_TRACE(this->_baseLog, "code localfile had relative path; absolute path: " << codeFile); } codeFile = codeFile.normalize(); fs::path entryPoint; if (matchedDeviceImpl.getEntryPoint().size() != 0) { - LOG_TRACE(DeviceManager_impl, "Using provided entry point: " << matchedDeviceImpl.getEntryPoint()) + RH_TRACE(this->_baseLog, "Using provided entry point: " << matchedDeviceImpl.getEntryPoint()) entryPoint = fs::path(matchedDeviceImpl.getEntryPoint()); if (!entryPoint.has_root_directory()) { entryPoint = fs::path(SPDParser.getSPDPath()) / entryPoint; - LOG_TRACE(DeviceManager_impl, "code entrypoint had relative path; absolute path: " << entryPoint); + RH_TRACE(this->_baseLog, "code entrypoint had relative path; absolute path: " << entryPoint); } entryPoint = entryPoint.normalize(); } else if (matchedDeviceImpl.getEntryPoint().size() == 0) { - LOG_ERROR(DeviceManager_impl, "not instantiating device; no entry point provided"); + RH_ERROR(this->_baseLog, "not instantiating device; no entry point provided"); return false; } @@ -442,17 +601,17 @@ bool DeviceManager_impl::getCodeFilePath( } if (codeFilePath.length() == 0) { - LOG_WARN(DeviceManager_impl, "Invalid device file. Could not find executable for " << codeFile) + RH_WARN(this->_baseLog, "Invalid device file. 
Could not find executable for " << codeFile) return false; } if (access(localFilePath.c_str(), F_OK) == -1) { std::string errMsg = "Unable to access local filesystem file: " + localFilePath; - LOG_ERROR(DeviceManager_impl, errMsg ); + RH_ERROR(this->_baseLog, errMsg ); return false; } - LOG_TRACE(DeviceManager_impl, "Code file path: " << codeFilePath) + RH_TRACE(this->_baseLog, "Code file path: " << codeFilePath) return true; } @@ -467,21 +626,21 @@ void DeviceManager_impl::bindNamingContext() { try { devMgrContext = rootContext->bind_new_context(devMgrContextName); } catch (CosNaming::NamingContext::AlreadyBound&) { - LOG_WARN(DeviceManager_impl, "Device manager name already bound") + RH_WARN(this->_baseLog, "Device manager name already bound") rootContext->unbind(devMgrContextName); devMgrContext = rootContext->bind_new_context(devMgrContextName); } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } catch ( CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } catch( ... 
) { - LOG_FATAL(DeviceManager_impl, "Unable to create device manager context") + RH_FATAL(this->_baseLog, "Unable to create device manager context") throw std::runtime_error("unexpected error"); } } @@ -495,26 +654,26 @@ void DeviceManager_impl::bindNamingContext() { */ void DeviceManager_impl::getDomainManagerReferenceAndCheckExceptions() { - LOG_INFO(DeviceManager_impl, "Connecting to Domain Manager " << _domainManagerName) + RH_INFO(this->_baseLog, "Connecting to Domain Manager " << _domainManagerName) try { getDomainManagerReference(_domainManagerName); } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } catch ( CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } catch ( ... ) { - LOG_FATAL(DeviceManager_impl, "[DeviceManager::post_constructor] Unable to get a reference to the DomainManager"); + RH_FATAL(this->_baseLog, "[DeviceManager::post_constructor] Unable to get a reference to the DomainManager"); throw std::runtime_error("unexpected error"); } if (CORBA::is_nil(_dmnMgr)) { - LOG_FATAL(DeviceManager_impl, "Failure getting Domain Manager") + RH_FATAL(this->_baseLog, "Failure getting Domain Manager") throw std::runtime_error("unexpected error"); } } @@ -522,7 +681,7 @@ void DeviceManager_impl::getDomainManagerReferenceAndCheckExceptions() { void DeviceManager_impl::registerDeviceManagerWithDomainManager( CF::DeviceManager_var& my_object_var) { - LOG_TRACE(DeviceManager_impl, "Registering with DomainManager"); + RH_TRACE(this->_baseLog, "Registering with DomainManager"); int64_t cnt=0; while (true) { if (*_internalShutdown) { @@ -534,52 +693,52 @@ void DeviceManager_impl::registerDeviceManagerWithDomainManager( break; } catch (const CORBA::TRANSIENT& ex) { // The DomainManager isn't currently reachable, but it may 
become accessible again. - if ( !(++cnt % 10) ) {LOG_WARN(DeviceManager_impl, "DomainManager not available, TRANSIENT condition: retry cnt" << cnt); } + if ( !(++cnt % 10) ) {RH_WARN(this->_baseLog, "DomainManager not available, TRANSIENT condition: retry cnt" << cnt); } usleep(100000); } catch (const CORBA::OBJECT_NOT_EXIST& ex) { // This error occurs while the DomainManager is still being constructed - if ( !(++cnt % 10) ) {LOG_WARN(DeviceManager_impl, "DomainManager not available, DOES NOT EXIST condition: retry cnt" << cnt); } + if ( !(++cnt % 10) ) {RH_WARN(this->_baseLog, "DomainManager not available, DOES NOT EXIST condition: retry cnt" << cnt); } usleep(100000); } catch (const CF::DomainManager::RegisterError& e) { - LOG_ERROR(DeviceManager_impl, "Failed to register with domain manager due to: " << e.msg); + RH_ERROR(this->_baseLog, "Failed to register with domain manager due to: " << e.msg); throw std::runtime_error("Error registering with Domain Manager"); } catch (const CF::InvalidObjectReference& _ex) { - LOG_FATAL(DeviceManager_impl, "While registering DevMgr with DomMgr: " << _ex.msg); + RH_FATAL(this->_baseLog, "While registering DevMgr with DomMgr: " << _ex.msg); throw std::runtime_error("Error registering with Domain Manager"); } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } catch ( CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } catch (...) 
{ - LOG_FATAL(DeviceManager_impl, "While registering DevMgr with DomMgr: Unknown Exception"); + RH_FATAL(this->_baseLog, "While registering DevMgr with DomMgr: Unknown Exception"); throw std::runtime_error("Error registering with Domain Manager"); } } } void DeviceManager_impl::getCompositeDeviceIOR( - std::string& compositeDeviceIOR, - const std::vector& componentPlacements, - const ossie::ComponentPlacement& componentPlacementInst) { + std::string& compositeDeviceIOR, + const std::vector& componentPlacements, + const ossie::DevicePlacement& componentPlacementInst) { //see if component is composite part of device - LOG_TRACE(DeviceManager_impl, "Checking composite part of device"); + RH_TRACE(this->_baseLog, "Checking composite part of device"); if (componentPlacementInst.isCompositePartOf()) { std::string parentDeviceRefid = componentPlacementInst.getCompositePartOfDeviceID(); - LOG_TRACE(DeviceManager_impl, "CompositePartOfDevice: <" << parentDeviceRefid << ">"); + RH_TRACE(this->_baseLog, "CompositePartOfDevice: <" << parentDeviceRefid << ">"); //find parent ID and stringify the IOR for (unsigned int cp_idx = 0; cp_idx < componentPlacements.size(); cp_idx++) { // must match to a particular instance for (unsigned int ci_idx = 0; ci_idx < componentPlacements[cp_idx].getInstantiations().size(); ci_idx++) { - const char* instanceID = componentPlacements[cp_idx].instantiations[ci_idx].getID(); - if (strcmp(instanceID, parentDeviceRefid.c_str()) == 0) { - LOG_TRACE(DeviceManager_impl, "CompositePartOfDevice: Found parent device instance <" + const std::string& instanceID = componentPlacements[cp_idx].instantiations[ci_idx].getID(); + if (instanceID == parentDeviceRefid) { + RH_TRACE(this->_baseLog, "CompositePartOfDevice: Found parent device instance <" << componentPlacements[cp_idx].getInstantiations()[ci_idx].getID() << "> for child device <" << componentPlacementInst.getFileRefId() << ">"); // now get the associated IOR @@ -587,7 +746,7 @@ void 
DeviceManager_impl::getCompositeDeviceIOR( std::string tmpior = getIORfromID(instanceID); if (!tmpior.empty()) { compositeDeviceIOR = tmpior; - LOG_TRACE(DeviceManager_impl, "CompositePartOfDevice: Found parent device IOR <" << compositeDeviceIOR << ">"); + RH_TRACE(this->_baseLog, "CompositePartOfDevice: Found parent device IOR <" << compositeDeviceIOR << ">"); break; } usleep(100); @@ -605,12 +764,12 @@ CF::Properties DeviceManager_impl::getResourceOptions( const ossie::ComponentIns CF::Properties affinity_options; const ossie::ComponentInstantiation::AffinityProperties c_props = instantiation.getAffinity(); if ( c_props.size() > 0 ){ - LOG_DEBUG(DeviceManager_impl, "Converting AFFINITY properties, resource: " << instantiation.getUsageName()); + RH_DEBUG(this->_baseLog, "Converting AFFINITY properties, resource: " << instantiation.getUsageName()); ossie::convertComponentProperties( instantiation.getAffinity(), affinity_options ); // Pass all afinity settings under single options list for ( uint32_t i=0; i < affinity_options.length(); i++ ) { CF::DataType dt = affinity_options[i]; - LOG_DEBUG(DeviceManager_impl, "Found Affinity Property: directive id:" << dt.id << "/" << ossie::any_to_string( dt.value )) ; + RH_DEBUG(this->_baseLog, "Found Affinity Property: directive id:" << dt.id << "/" << ossie::any_to_string( dt.value )) ; } } @@ -619,7 +778,7 @@ CF::Properties DeviceManager_impl::getResourceOptions( const ossie::ComponentIns options.length(options.length()+1); options[options.length()-1].id = CORBA::string_dup("AFFINITY"); options[options.length()-1].value <<= affinity_options; - LOG_DEBUG(DeviceManager_impl,"Extending Options property set with Affinity properties, nprops: " << affinity_options.length()); + RH_DEBUG(this->_baseLog,"Extending Options property set with Affinity properties, nprops: " << affinity_options.length()); } return options; } @@ -638,7 +797,7 @@ bool DeviceManager_impl::getDeviceOrService( bool supported = false; type = 
comp->scd.getComponentType(); - LOG_TRACE(DeviceManager_impl, "Softpkg type " << type) + RH_TRACE(this->_baseLog, "Softpkg type " << type) // Normalize type into either device or service // This is contrary to the spec, but existing devices/service may depend @@ -653,16 +812,13 @@ bool DeviceManager_impl::getDeviceOrService( supported = true; } else { - LOG_ERROR(DeviceManager_impl, + RH_ERROR(this->_baseLog, "Attempt to launch unsupported component type " << type) } return supported; } - - - /* * Parsing constructor * @@ -680,11 +836,14 @@ void DeviceManager_impl::postConstructor ( { myObj = _this(); + PropertySet_impl::setLogger(this->_baseLog->getChildLogger("PropertySet", "")); + PortSupplier_impl::setLogger(this->_baseLog->getChildLogger("PortSupplier", "")); // Create the device file system in the DeviceManager POA. - LOG_TRACE(DeviceManager_impl, "Creating device file system") + RH_TRACE(this->_baseLog, "Creating device file system") FileSystem_impl* fs_servant = new FileSystem_impl(_fsroot.c_str()); PortableServer::POA_var poa = ossie::corba::RootPOA()->find_POA("DeviceManager", 1); PortableServer::ObjectId_var oid = poa->activate_object(fs_servant); + fs_servant->setLogger(_baseLog->getChildLogger("FileSystem", "")); fs_servant->_remove_ref(); _fileSys = fs_servant->_this(); fileSysIOR = ossie::corba::objectToString(_fileSys); @@ -692,9 +851,27 @@ void DeviceManager_impl::postConstructor ( // create filesystem for local dom root.. 
used for softpkgs FileSystem_impl *local_dom_fs = new FileSystem_impl(_local_domroot.c_str()); oid = poa->activate_object(local_dom_fs); + local_dom_fs->setLogger(_baseLog->getChildLogger("localFileSystem", "")); local_dom_fs->_remove_ref(); _local_dom_filesys = local_dom_fs->_this(); + ossie::proputilsLog = _baseLog->getChildLogger("proputils",""); + ossie::SpdSupport::spdSupportLog = _baseLog->getChildLogger("spdSupport",""); + fileLog = _baseLog->getChildLogger("File",""); + + std::string std_logconfig_uri; + if (!logging_config_uri.empty()) { + std_logconfig_uri = ossie::logging::ResolveLocalUri(logging_config_uri, _fsroot, logging_config_uri); + } + std::string expanded_config = getExpandedLogConfig(std_logconfig_uri); + this->_baseLog->configureLogger(expanded_config, true); + if (_initialDebugLevel != -1) { + rh_logger::LevelPtr level = ossie::logging::ConvertCFLevelToRHLevel(ossie::logging::ConvertDebugToCFLevel(_initialDebugLevel)); + this->_baseLog->setLevel(level); + } + + redhawk::setupParserLoggers(this->_baseLog); + // // setup DeviceManager context from dcd, software profile, find matching implementation // and allocation properties @@ -735,33 +912,33 @@ void DeviceManager_impl::postConstructor ( // try fallback method idm_registration->channel = ossie::events::connectToEventChannel(rootContext, "IDM_Channel"); if (CORBA::is_nil(idm_registration->channel)) { - LOG_INFO(DeviceManager_impl, "IDM channel not found. Continuing without using the IDM channel"); + RH_INFO(this->_baseLog, "IDM channel not found. Continuing without using the IDM channel"); } else { IDM_IOR = ossie::corba::objectToString(idm_registration->channel); } } } catch(...){ - LOG_INFO(DeviceManager_impl, "IDM channel not found. Continuing without using the IDM channel"); + RH_INFO(this->_baseLog, "IDM channel not found. Continuing without using the IDM channel"); } _adminState = DEVMGR_REGISTERED; // create device manager cache location std::string devmgrcache(_cacheroot + "/." 
+ _label); - LOG_TRACE(DeviceManager_impl, "Creating DevMgr cache: " << devmgrcache) + RH_TRACE(this->_baseLog, "Creating DevMgr cache: " << devmgrcache) bool retval = this->makeDirectory(devmgrcache); if (not retval) { std::ostringstream eout; eout << "Unable to create the Device Manager cache: " << devmgrcache; - LOG_ERROR(DeviceManager_impl, eout.str()) + RH_ERROR(this->_baseLog, eout.str()) throw std::runtime_error(eout.str().c_str()); } //Parse local components from DCD files - LOG_TRACE(DeviceManager_impl, "Grabbing component placements") - const std::vector& componentPlacements = node_dcd.getComponentPlacements(); - LOG_TRACE(DeviceManager_impl, "ComponentPlacement size is " << componentPlacements.size()) + RH_TRACE(this->_baseLog, "Grabbing component placements") + const std::vector& componentPlacements = node_dcd.getComponentPlacements(); + RH_TRACE(this->_baseLog, "ComponentPlacement size is " << componentPlacements.size()) //////////////////////////////////////////////////////////////////////////// // Split component placements by compositePartOf tag @@ -771,20 +948,20 @@ void DeviceManager_impl::postConstructor ( // - Iterate and launch all deployOnDevice compPlacements DeploymentList standaloneComponentPlacements; DeploymentList compositePartDeviceComponentPlacements; - ComponentPlacements::const_iterator constCompPlaceIter; + DevicePlacements::const_iterator constCompPlaceIter; for (constCompPlaceIter = componentPlacements.begin(); constCompPlaceIter != componentPlacements.end(); constCompPlaceIter++) { local_spd::ProgramProfile *newResource = 0; - const ComponentPlacement &componentPlacement = *constCompPlaceIter; + const DevicePlacement &componentPlacement = *constCompPlaceIter; std::string compId(constCompPlaceIter->instantiations[0].getID()); std::ostringstream emsg; emsg << "Skipping instantiation of device " << compId; try { // load up device/service software profile - LOG_TRACE(DeviceManager_impl, "Getting file name for refid " << 
componentPlacement.getFileRefId()); - const char* spdFile = node_dcd.getFileNameFromRefId(componentPlacement.getFileRefId()); + RH_TRACE(this->_baseLog, "Getting file name for refid " << componentPlacement.getFileRefId()); + const char* spdFile = node_dcd.getFileNameFromRefId(componentPlacement.getFileRefId().c_str()); newResource = local_spd::ProgramProfile::LoadProfile( _fileSys, spdFile, _local_dom_filesys ); // check if we have matching implementation @@ -816,13 +993,13 @@ void DeviceManager_impl::postConstructor ( } catch ( std::runtime_error &ex ) { - LOG_ERROR(DeviceManager_impl, ex.what() ); - LOG_ERROR(DeviceManager_impl, emsg.str() ); + RH_ERROR(this->_baseLog, ex.what() ); + RH_ERROR(this->_baseLog, emsg.str() ); if (newResource) delete newResource; continue; } catch ( ... ) { - LOG_ERROR(DeviceManager_impl, emsg.str() ); + RH_ERROR(this->_baseLog, emsg.str() ); if (newResource) delete newResource; continue; } @@ -835,18 +1012,18 @@ void DeviceManager_impl::postConstructor ( cIter != standaloneComponentPlacements.end(); cIter++) { - const ComponentPlacement &compPlacement = cIter->first; + const DevicePlacement &compPlacement = cIter->first; local_spd::ProgramProfile *compProfile = cIter->second; const local_spd::ImplementationInfo *matchingImpl = compProfile->getSelectedImplementation(); std::string compId(compPlacement.instantiations[0].getID()); - LOG_INFO(DeviceManager_impl, "Placing Component CompId: " << compId << " ProfileName : " << compProfile->getName() ); + RH_INFO(this->_baseLog, "Placing Component CompId: " << compId << " ProfileName : " << compProfile->getName() ); // should not happen if (!matchingImpl) continue; ossie::Properties deviceProperties; if (!addDeviceImplProperties( compProfile, *matchingImpl )) { - LOG_INFO(DeviceManager_impl, "Skipping instantiation of device '" << compProfile->getInstantiationIdentifier() << + RH_INFO(this->_baseLog, "Skipping instantiation of device '" << compProfile->getInstantiationIdentifier() << ", 
failed to merge properties "); continue; } @@ -862,7 +1039,7 @@ void DeviceManager_impl::postConstructor ( cpInstIter++) { const ComponentInstantiation instantiation = *cpInstIter; - LOG_TRACE(DeviceManager_impl, "Placing component id: " << instantiation.getID()); + RH_TRACE(this->_baseLog, "Placing component id: " << instantiation.getID()); // setup profile with instantiation context recordComponentInstantiationId(instantiation, matchingImpl->getId()); @@ -911,7 +1088,7 @@ void DeviceManager_impl::postConstructor ( compPlaceIter != compositePartDeviceComponentPlacements.end(); compPlaceIter++) { - const ComponentPlacement &compPlacement = compPlaceIter->first; + const DevicePlacement &compPlacement = compPlaceIter->first; local_spd::ProgramProfile *compProfile = compPlaceIter->second; std::string compId("UT OHHH"); // get Device Manager implementation @@ -923,17 +1100,17 @@ void DeviceManager_impl::postConstructor ( compId = compPlacement.instantiations[0].getID(); } else { - LOG_FATAL(DeviceManager_impl, "Missing Instantiaion for Placing Composite ParentCompId: " << compositePartDeviceID << " ProfileName : " << compProfile->getName() ); + RH_FATAL(this->_baseLog, "Missing Instantiaion for Placing Composite ParentCompId: " << compositePartDeviceID << " ProfileName : " << compProfile->getName() ); } - LOG_INFO(DeviceManager_impl, "Placing Composite ParentCompId: " << compositePartDeviceID << " ProfileName : " << compProfile->getName() << " CompID " << compId ); + RH_INFO(this->_baseLog, "Placing Composite ParentCompId: " << compositePartDeviceID << " ProfileName : " << compProfile->getName() << " CompID " << compId ); bool foundCompositeDeployed = false; for (cIter = standaloneComponentPlacements.begin(); cIter != standaloneComponentPlacements.end(); cIter++) { - const ComponentPlacement &parentPlacement = cIter->first; + const DevicePlacement &parentPlacement = cIter->first; local_spd::ProgramProfile *parentProfile = cIter->second; const std::vector 
&parentInstantiations = parentPlacement.getInstantiations(); @@ -969,20 +1146,20 @@ void DeviceManager_impl::postConstructor ( } if (foundCompositeDeployed == false) { - LOG_ERROR(DeviceManager_impl, + RH_ERROR(this->_baseLog, "Unable to locate ComppositeParent '" << compositePartDeviceID << " for '" << compositePartDeviceID << "'... Skipping instantiation of '" << compId ); continue; } if (matchingImpl == NULL) { - LOG_ERROR(DeviceManager_impl, + RH_ERROR(this->_baseLog, "Skipping instantiation of device '" << compId << "' - '" << compProfile->spd.getSoftPkgID() << "; " << "no available device implementations match device manager properties") continue; } if (parentImpl == NULL) { - LOG_ERROR(DeviceManager_impl, + RH_ERROR(this->_baseLog, "Skipping instantiation of device '" << compId << "' - '" << compProfile->spd.getSoftPkgID() << "; " << "Composite parent has no matching implementations") continue; @@ -990,7 +1167,7 @@ void DeviceManager_impl::postConstructor ( // store the matchedDeviceImpl's implementation ID in a map for use with "getComponentImplementationId" if (!addDeviceImplProperties(compProfile, *matchingImpl)) { - LOG_ERROR(DeviceManager_impl,"Skipping instantiation of device '" << compId << "' - '" << compProfile->spd.getSoftPkgID() << "'"); + RH_ERROR(this->_baseLog,"Skipping instantiation of device '" << compId << "' - '" << compProfile->spd.getSoftPkgID() << "'"); continue; } @@ -1040,12 +1217,68 @@ void DeviceManager_impl::postConstructor ( } } } + + + if ( _spdFile.empty() ) return; + + File_stream devMgrSpdStream(_fileSys, _spdFile.c_str()); + ossie::SoftPkg parsedSpd; + ossie::Properties parsedPrf; + parsedSpd.load(devMgrSpdStream, _spdFile); + + if (parsedSpd.getPRFFile()) { + File_stream prf(_fileSys, parsedSpd.getPRFFile()); + parsedPrf.load(prf); + } + + redhawk::PropertyMap query_props; + query_props["PERSISTENCE"] = redhawk::Value(); + this->_dmnMgr->query(query_props); + domain_persistence = query_props["PERSISTENCE"].toBoolean(); + + 
redhawk::PropertyMap set_props; + std::vector props = parsedPrf.getConstructProperties(); + for (unsigned int i=0; iisCommandLine()) + continue; + if (props[i]->getMode() == ossie::Property::MODE_READONLY) + continue; + std::string prop_id(props[i]->getID()); + if ((prop_id == "DOMAIN_REFRESH") and (not this->domain_persistence)) + continue; + set_props[props[i]->getID()] = convertPropertyToDataType(props[i]).value; + } + + props = parsedPrf.getConfigureProperties(); + for (unsigned int i=0; igetMode() == ossie::Property::MODE_READONLY) + continue; + set_props[props[i]->getID()] = convertPropertyToDataType(props[i]).value; + } + + if (set_props.size() != 0) { + this->configure(set_props); + } + + if (domain_persistence) { + DomainWatchThread = new DomainCheckThread(this); + DomainWatchThread->updateDelay(this->DOMAIN_REFRESH); + this->startDomainWarn.tv_sec = 0; + this->startDomainWarn.tv_usec = 0; + DomainWatchThread->start(); + } else { + DomainWatchThread = NULL; + } + addPropertyListener(DOMAIN_REFRESH, this, &DeviceManager_impl::domainRefreshChanged); + } + const std::vector& DeviceManager_impl::getAllocationProperties() { return devmgr_info->prf.getAllocationProperties(); } + bool DeviceManager_impl::resolveSoftpkgDependencies( local_spd::ImplementationInfo *implementation ) { return resolveSoftpkgDependencies( implementation, devmgr_info->prf ); } @@ -1062,9 +1295,9 @@ bool DeviceManager_impl::resolveSoftpkgDependencies( local_spd::ImplementationIn local_spd::ImplementationInfo* spdImplInfo = resolveDependencyImplementation(*pkg, host_props); if (spdImplInfo) { const_cast< local_spd::SoftpkgInfo* >(pkg)->setSelectedImplementation(spdImplInfo); - LOG_DEBUG(DeviceManager_impl, "resolveSoftpkgDependencies: selected: " << pkg->getName()); + RH_DEBUG(this->_baseLog, "resolveSoftpkgDependencies: selected: " << pkg->getName()); } else { - LOG_DEBUG(DeviceManager_impl, "resolveSoftpkgDependencies: implementation match not found between soft package dependency and 
device"); + RH_DEBUG(this->_baseLog, "resolveSoftpkgDependencies: implementation match not found between soft package dependency and device"); implementation->clearSelectedDependencyImplementations(); return false; } @@ -1105,7 +1338,7 @@ bool DeviceManager_impl::resolveImplementation( local_spd::ProgramProfile *rsc ) local_spd::ImplementationInfo::List::iterator itr = impls.begin(); for ( ; itr != impls.end(); itr++ ) { local_spd::ImplementationInfo *impl = *itr; - LOG_TRACE(DeviceManager_impl, + RH_TRACE(this->_baseLog, "Attempting to match device " << rsc->getName() << " implementation id: " << impl->getId() << " to device manager " << devmgr_info->getInstantiationIdentifier() ); @@ -1113,14 +1346,14 @@ bool DeviceManager_impl::resolveImplementation( local_spd::ProgramProfile *rsc ) if ( impl->checkProcessorAndOs( devmgr_info->prf ) ) { rsc->setSelectedImplementation( impl ); result = true; - LOG_TRACE(DeviceManager_impl, + RH_TRACE(this->_baseLog, "found matching processing device implementation, device " << rsc->getName() << " implementation id: " << impl->getId() ); break; } } - LOG_TRACE(DeviceManager_impl, "Done finding matching device implementation"); + RH_TRACE(this->_baseLog, "Done finding matching device implementation"); return result; } @@ -1135,46 +1368,46 @@ bool DeviceManager_impl::addDeviceImplProperties (local_spd::ProgramProfile *com ossie::Properties devProps; const std::string prfFile = deviceImpl.getPropertyFile(); if (prfFile.size()) { - LOG_TRACE(DeviceManager_impl, "deviceImplProps: Joining implementation-specific PRF file " << prfFile); + RH_TRACE(this->_baseLog, "deviceImplProps: Joining implementation-specific PRF file " << prfFile); if (!joinPRFProperties(prfFile, devProps )) { return false; } } else { - LOG_TRACE(DeviceManager_impl, "deviceImplProps: Device does not provide implementation-specific PRF file"); + RH_TRACE(this->_baseLog, "deviceImplProps: Device does not provide implementation-specific PRF file"); } // merge props 
together... compProfile->prf.join(devProps); // - LOG_TRACE(DeviceManager_impl, "deviceImplProps: Adding factory params"); + RH_TRACE(this->_baseLog, "deviceImplProps: Adding factory params"); const std::vector& fprop = devProps.getFactoryParamProperties(); for (unsigned int i = 0; i < fprop.size(); i++) { compProfile->addFactoryParameter(convertPropertyToDataType(fprop[i])); } - LOG_TRACE(DeviceManager_impl, "deviceImpProps: Adding exec params"); + RH_TRACE(this->_baseLog, "deviceImpProps: Adding exec params"); const std::vector& eprop = devProps.getExecParamProperties(); for (unsigned int i = 0; i < eprop.size(); i++) { - if (std::string(eprop[i]->getMode()) != "readonly") { - LOG_TRACE(DeviceManager_impl, "deviceImplProps: Adding exec param " << eprop[i]->getID() << " " << eprop[i]->getName()); + if ( !eprop[i]->isReadOnly() ) { + RH_TRACE(this->_baseLog, "deviceImplProps: Adding exec param " << eprop[i]->getID() << " " << eprop[i]->getName()); compProfile->addExecParameter(convertPropertyToDataType(eprop[i])); } else { - LOG_TRACE(DeviceManager_impl, "deviceImplProps: Ignoring readonly exec param " << eprop[i]->getID() << " " << eprop[i]->getName()); + RH_TRACE(this->_baseLog, "deviceImplProps: Ignoring readonly exec param " << eprop[i]->getID() << " " << eprop[i]->getName()); } } const std::vector& prop = devProps.getConfigureProperties(); for (unsigned int i = 0; i < prop.size(); i++) { if (!prop[i]->isReadOnly()) { - LOG_TRACE(DeviceManager_impl, "deviceImplProps: Adding configure prop " << prop[i]->getID() << " " << prop[i]->getName() << " " << prop[i]->isReadOnly()) + RH_TRACE(this->_baseLog, "deviceImplProps: Adding configure prop " << prop[i]->getID() << " " << prop[i]->getName() << " " << prop[i]->isReadOnly()) compProfile->addConfigureProperty(convertPropertyToDataType(prop[i])); } } const std::vector& cprop = devProps.getConstructProperties(); for (unsigned int i = 0; i < cprop.size(); i++) { - LOG_TRACE(DeviceManager_impl, "deviceImplProps: Adding 
construct prop " << cprop[i]->getID() << " " << cprop[i]->getName() << " " << cprop[i]->isReadOnly()); + RH_TRACE(this->_baseLog, "deviceImplProps: Adding construct prop " << cprop[i]->getID() << " " << cprop[i]->getName() << " " << cprop[i]->isReadOnly()); if (cprop[i]->isCommandLine()) { compProfile->addExecParameter(convertPropertyToDataType(cprop[i])); } else { @@ -1191,19 +1424,19 @@ bool DeviceManager_impl::joinPRFProperties (const std::string& prfFile, ossie::P try { // Check for the existence of the PRF file first so we can give a more meaningful error message. if (!_fileSys->exists(prfFile.c_str())) { - LOG_ERROR(DeviceManager_impl, "PRF file " << prfFile << " does not exist"); + RH_ERROR(this->_baseLog, "PRF file " << prfFile << " does not exist"); } else { - LOG_TRACE(DeviceManager_impl, "Loading PRF file " << prfFile); + RH_TRACE(this->_baseLog, "Loading PRF file " << prfFile); File_stream prfStream(_fileSys, prfFile.c_str()); properties.join(prfStream); - LOG_TRACE(DeviceManager_impl, "Loaded PRF file " << prfFile); + RH_TRACE(this->_baseLog, "Loaded PRF file " << prfFile); prfStream.close(); return true; } } catch (const ossie::parser_error& ex) { std::string parser_error_line = ossie::retrieveParserErrorLineNumber(ex.what()); - LOG_ERROR(DeviceManager_impl, "Error parsing PRF: " << prfFile << ". " << parser_error_line << " The XML parser returned the following error: " << ex.what()); - } CATCH_LOG_ERROR(DeviceManager_impl, "Failure parsing PRF: " << prfFile); + RH_ERROR(this->_baseLog, "Error parsing PRF: " << prfFile << ". 
" << parser_error_line << " The XML parser returned the following error: " << ex.what()); + } CATCH_RH_ERROR(this->_baseLog, "Failure parsing PRF: " << prfFile); return false; } @@ -1220,19 +1453,19 @@ DeviceManager_impl::getDomainManagerReference (const std::string& domainManagerN } catch (const CosNaming::NamingContext::NotFound&) { if (!warned) { warned = true; - LOG_WARN(DeviceManager_impl, "DomainManager not registered with NameService; retrying"); + RH_WARN(this->_baseLog, "DomainManager not registered with NameService; retrying"); } } catch( CORBA::SystemException& se ) { - LOG_ERROR(DeviceManager_impl, "[DeviceManager::getDomainManagerReference] \"get_object_from_name\" failed with CORBA::SystemException") + RH_ERROR(this->_baseLog, "[DeviceManager::getDomainManagerReference] \"get_object_from_name\" failed with CORBA::SystemException") throw; } catch ( std::exception& ex ) { - LOG_ERROR(DeviceManager_impl, "The following standard exception occurred: "<_baseLog, "The following standard exception occurred: "<_baseLog, "The following CORBA exception occurred: "<_baseLog, "[DeviceManager::getDomainManagerReference] \"get_object_from_name\" failed with Unknown Exception") throw; } @@ -1248,15 +1481,15 @@ DeviceManager_impl::getDomainManagerReference (const std::string& domainManagerN try { _dmnMgr = CF::DomainManager::_narrow (obj); - LOG_TRACE(DeviceManager_impl, "Accessing DomainManager : " << domainManagerName); + RH_TRACE(this->_baseLog, "Accessing DomainManager : " << domainManagerName); } catch ( std::exception& ex ) { - LOG_ERROR(DeviceManager_impl, "The following standard exception occurred: "<_baseLog, "The following standard exception occurred: "<_baseLog, "The following CORBA exception occurred: "<_baseLog, "[DeviceManager::getDomainManagerReference] \"CF:DomainManager::_narrow\" failed with Unknown Exception") throw; } } @@ -1272,8 +1505,7 @@ throw (CORBA::SystemException) CF::FileSystem_ptr DeviceManager_impl::fileSys ()throw (CORBA:: 
SystemException) { - CF::FileSystem_var result = _fileSys; - return result._retn(); + return CF::FileSystem::_duplicate(_fileSys); } @@ -1319,13 +1551,12 @@ DeviceManager_impl::registeredServices ()throw (CORBA::SystemException) return result._retn(); } - void DeviceManager_impl::registerDevice (CF::Device_ptr registeringDevice) throw (CORBA::SystemException, CF::InvalidObjectReference) { if (CORBA::is_nil (registeringDevice)) { - LOG_WARN(DeviceManager_impl, "Attempted to register NIL device") + RH_WARN(this->_baseLog, "Attempted to register NIL device") throw (CF::InvalidObjectReference("[DeviceManager::registerDevice] Cannot register Device. registeringDevice is a nil reference.")); } @@ -1335,12 +1566,12 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) ossie::corba::overrideBlockingCall(registeringDevice,getClientWaitTime()); std::string deviceLabel = ossie::corba::returnString(registeringDevice->label()); std::string device_id = ossie::corba::returnString(registeringDevice->identifier()); - LOG_INFO(DeviceManager_impl, "Registering device " << deviceLabel << " device id " << device_id << " on Device Manager " << _label); + RH_INFO(this->_baseLog, "Registering device " << deviceLabel << " device id " << device_id << " on Device Manager " << _label); if ( deviceIsRegistered( registeringDevice ) == true ) { std::ostringstream eout; eout << "Device is already registred: "<< deviceLabel; - LOG_WARN(DeviceManager_impl, eout.str()); + RH_WARN(this->_baseLog, eout.str()); return; } @@ -1351,7 +1582,7 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) catch(...) 
{ std::ostringstream eout; eout << "Loading Device's SPD failed, device:" << deviceLabel; - LOG_ERROR(DeviceManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } @@ -1366,6 +1597,8 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) // due to a lack of threads (which would result in blocking // the mutex lock, which would prevent shutdown from killing // this). + bool allregistered = false; + { boost::recursive_mutex::scoped_lock lock(registeredDevicesmutex); //Get properties from SPD @@ -1373,7 +1606,7 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) std::string spd_name = spdinfo->getName(); std::string spd_id = spdinfo->getID(); std::string deviceid = ossie::corba::returnString(registeringDevice->identifier()); - LOG_INFO(DeviceManager_impl, "Device LABEL: " << deviceLabel << " SPD loaded: " << spd_name << "' - '" << spd_id ); + RH_INFO(this->_baseLog, "Device LABEL: " << deviceLabel << " SPD loaded: " << spd_name << "' - '" << spd_id ); // @@ -1382,10 +1615,10 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) if (spdinfo->isConfigurable ()) { try { // - LOG_DEBUG(DeviceManager_impl, "Initialize properties for spd/device label: " << spd_name << "/" << deviceLabel); + RH_DEBUG(this->_baseLog, "Initialize properties for spd/device label: " << spd_name << "/" << deviceLabel); const CF::Properties cprops = spdinfo->getNonNilConstructProperties(); for (unsigned int j = 0; j < cprops.length (); j++) { - LOG_DEBUG(DeviceManager_impl, "initializeProperties prop id " << cprops[j].id ); + RH_DEBUG(this->_baseLog, "initializeProperties prop id " << cprops[j].id ); } // Try to set the initial values for the component's properties registeringDevice->initializeProperties(cprops); @@ -1393,61 +1626,61 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) std::ostringstream eout; eout << "Device '" << deviceLabel << "' - '" << spd_id << "' may not have been 
initialized correctly; " << "Call to initializeProperties() resulted in InvalidConfiguration exception. Device registration with Device Manager failed"; - LOG_ERROR(DeviceManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } catch(CF::PropertySet::PartialConfiguration& e) { std::ostringstream eout; eout << "Device '" << deviceLabel << "' - '" << spd_id << "' may not have been configured correctly; " << "Call to initializeProperties() resulted in PartialConfiguration exception."; - LOG_ERROR(DeviceManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } catch ( const CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } catch( ... ) { std::ostringstream eout; eout << "Failed to initialize device properties: '"; eout << deviceLabel << "' with device id: '" << spd_id; eout << "'initializeProperties' failed with Unknown Exception" << "Device registration with Device Manager failed "; - LOG_ERROR(DeviceManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } } - LOG_DEBUG(DeviceManager_impl, "Initializing device " << deviceLabel << " on Device Manager " << _label); + RH_DEBUG(this->_baseLog, "Initializing device " << deviceLabel << " on Device Manager " << _label); try { registeringDevice->initialize(); } catch (CF::LifeCycle::InitializeError& ex) { std::ostringstream eout; eout << "Device "<< deviceLabel << " threw a CF::LifeCycle::InitializeError exception"<<". 
Device registration with Device Manager failed"; - LOG_ERROR(DeviceManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } catch ( const CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } //configure properties try { - LOG_DEBUG(DeviceManager_impl, "Configuring device " << deviceLabel << " on Device Manager " << _label); + RH_DEBUG(this->_baseLog, "Configuring device " << deviceLabel << " on Device Manager " << _label); const CF::Properties cprops = spdinfo->getNonNilConfigureProperties(); - LOG_TRACE(DeviceManager_impl, "Listing configuration properties"); + RH_TRACE(this->_baseLog, "Listing configuration properties"); for (unsigned int j=0; j_baseLog, "Prop id " << cprops[j].id ); } if (cprops.length() != 0) registeringDevice->configure (cprops); @@ -1455,23 +1688,23 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) std::ostringstream eout; eout << "Device '" << deviceLabel << "' - '" << spd_id << "' may not have been configured correctly; " << "Call to configure() resulted in PartialConfiguration exception."; - LOG_ERROR(DeviceManager_impl, eout.str()) + RH_ERROR(this->_baseLog, eout.str()) throw(CF::InvalidObjectReference(eout.str().c_str())); } catch (CF::PropertySet::InvalidConfiguration& ex) { std::ostringstream eout; eout << "Device '" << deviceLabel << "' - '" << spd_id << "' may not have been configured correctly; " << "Call to configure() resulted in InvalidConfiguration exception. 
Device registration with Device Manager failed"; - LOG_ERROR(DeviceManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } catch ( const CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()); throw(CF::InvalidObjectReference(eout.str().c_str())); } @@ -1479,7 +1712,7 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) // registered if (!deviceIsRegistered (registeringDevice)) { // if the device is not registered, then add it to the naming context - LOG_TRACE(DeviceManager_impl, "Binding device to name " << deviceLabel) + RH_TRACE(this->_baseLog, "Binding device to name " << deviceLabel) CosNaming::Name_var device_name = ossie::corba::stringToName(deviceLabel.c_str()); try { devMgrContext->bind(device_name, registeringDevice); @@ -1487,12 +1720,12 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) // there is already something bound to that name // from the perspective of this framework implementation, the multiple names are not acceptable // consider this a registered device - LOG_WARN(DeviceManager_impl, "Device is already registered"); + RH_WARN(this->_baseLog, "Device is already registered"); return; } increment_registeredDevices(registeringDevice); } else { - LOG_WARN(DeviceManager_impl, "Device is already registered"); + RH_WARN(this->_baseLog, "Device is already registered"); return; } @@ -1500,253 +1733,29 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) // the new device with the Domain Manager if (_adminState == DEVMGR_REGISTERED) { try { - LOG_INFO(DeviceManager_impl, "Registering device " << deviceLabel << " on Domain Manager " << _domainName ); + RH_INFO(this->_baseLog, "Registering device " << 
deviceLabel << " on Domain Manager " << _domainName ); _dmnMgr->registerDevice (registeringDevice, myObj); } catch( CF::DomainManager::RegisterError& e ) { - LOG_ERROR(DeviceManager_impl, "Failed to register device to domain manager due to: " << e.msg); + RH_ERROR(this->_baseLog, "Failed to register device to domain manager due to: " << e.msg); } catch ( std::exception& ex ) { - LOG_ERROR(DeviceManager_impl, "The following standard exception occurred: "<_baseLog, "The following standard exception occurred: "<_baseLog, "Failed to register device to domain manager due to: " << e._name()); } } else { - LOG_WARN(DeviceManager_impl, "Skipping DomainManager registerDevice because the device manager isn't registered") + RH_WARN(this->_baseLog, "Skipping DomainManager registerDevice because the device manager isn't registered") } - LOG_TRACE(DeviceManager_impl, "Done registering device " << deviceLabel); - - //The registerDevice operation shall write a FAILURE_ALARM log record to a - //DomainManagers Log, upon unsuccessful registration of a Device to the DeviceManagers - //registeredDevices. -} - - - - -void DeviceManager_impl::registerRogueDevice (CF::Device_ptr registeringDevice) -{ - - //Get properties from SPD - std::string spdFile = ossie::corba::returnString(registeringDevice->softwareProfile()); - std::string deviceLabel = ossie::corba::returnString(registeringDevice->label()); - std::string device_id = ossie::corba::returnString(registeringDevice->identifier()); - - // Open the SPD file using the SCA FileSystem - LOG_TRACE(DeviceManager_impl, "Building DRogue Device Info From SPD File"); - std::auto_ptr< local_spd::ProgramProfile > spdinfo; - try { - spdinfo = local_spd::ProgramProfile::LoadProgramProfile(_fileSys, spdFile.c_str(),_local_dom_filesys ); - - } - catch(...) 
{ - std::ostringstream eout; - eout << "Loading Device's SPD failed, device:" << ossie::corba::returnString(registeringDevice->label()); - LOG_ERROR(DeviceManager_impl, eout.str()); - throw(CF::InvalidObjectReference(eout.str().c_str())); - } - - std::string spd_name = spdinfo->getName(); - std::string spd_id = spdinfo->getID(); - LOG_INFO(DeviceManager_impl, "Device LABEL: " << deviceLabel << " SPD loaded: " << spd_name << "' - '" << spd_id ); - - CF::Properties componentProperties; - DeviceManagerConfiguration DCDParser; - try { - File_stream _dcd(_fileSys, _deviceConfigurationProfile.c_str()); - DCDParser.load(_dcd); - _dcd.close(); - } catch ( std::exception& ex ) { - std::ostringstream eout; - eout << "The following standard exception occurred: "<_baseLog, "Done registering device " << deviceLabel); + allregistered = verifyAllRegistered(); } - - // get properties from device PRF that matches the registering device - std::string deviceid = ossie::corba::returnString(registeringDevice->identifier()); - try { - const ComponentInstantiation& instantiation = DCDParser.getComponentInstantiationById(deviceid); - if (instantiation.getUsageName() != NULL) - std::string tmp_name = instantiation.getUsageName(); // this is here to get rid of a warning - } catch (std::out_of_range& e) { - std::ostringstream eout; - eout << "[DeviceManager::registerDevice] Failed to parse DCD"; - LOG_ERROR(DeviceManager_impl, eout.str()); - throw(CF::InvalidObjectReference(eout.str().c_str())); - } catch ( std::exception& ex ) { - std::ostringstream eout; - eout << "The following standard exception occurred: "<overrideProperty( overrideProps[j] ); + if (allregistered) { + startOrder(); } - // - // call resource's initializeProperties method to handle any properties required for construction - // - if (spdinfo->isConfigurable ()) { - try { - // - LOG_DEBUG(DeviceManager_impl, "Initialize properties for spd/device label: " << spd_name << "/" << deviceLabel); - const CF::Properties cprops = 
spdinfo->getNonNilConstructProperties(); - for (unsigned int j = 0; j < cprops.length (); j++) { - LOG_DEBUG(DeviceManager_impl, "initializeProperties prop id " << cprops[j].id ); - } - // Try to set the initial values for the component's properties - registeringDevice->initializeProperties(cprops); - } catch(CF::PropertySet::InvalidConfiguration& e) { - std::ostringstream eout; - eout << "Device '" << deviceLabel << "' - '" << spd_id << "' may not have been initialized correctly; " - << "Call to initializeProperties() resulted in InvalidConfiguration exception. Device registration with Device Manager failed"; - LOG_ERROR(DeviceManager_impl, eout.str()); - throw(CF::InvalidObjectReference(eout.str().c_str())); - } catch(CF::PropertySet::PartialConfiguration& e) { - std::ostringstream eout; - eout << "Device '" << deviceLabel << "' - '" << spd_id << "' may not have been configured correctly; " - << "Call to initializeProperties() resulted in PartialConfiguration exception."; - LOG_ERROR(DeviceManager_impl, eout.str()); - throw(CF::InvalidObjectReference(eout.str().c_str())); - } catch ( std::exception& ex ) { - std::ostringstream eout; - eout << "The following standard exception occurred: "<initialize(); - } catch (CF::LifeCycle::InitializeError& ex) { - std::ostringstream eout; - eout << "Device "<< deviceLabel << " threw a CF::LifeCycle::InitializeError exception"<<". 
Device registration with Device Manager failed"; - LOG_ERROR(DeviceManager_impl, eout.str()); - throw(CF::InvalidObjectReference(eout.str().c_str())); - } catch ( std::exception& ex ) { - std::ostringstream eout; - eout << "The following standard exception occurred: "<getNonNilConfigureProperties(); - LOG_TRACE(DeviceManager_impl, "Listing configuration properties"); - for (unsigned int j=0; jconfigure (cprops); - } catch (CF::PropertySet::PartialConfiguration& ex) { - std::ostringstream eout; - eout << "Device '" << deviceLabel << "' - '" << spd_id << "' may not have been configured correctly; " - << "Call to configure() resulted in PartialConfiguration exception."; - LOG_ERROR(DeviceManager_impl, eout.str()) - throw(CF::InvalidObjectReference(eout.str().c_str())); - } catch (CF::PropertySet::InvalidConfiguration& ex) { - std::ostringstream eout; - eout << "Device '" << deviceLabel << "' - '" << spd_id << "' may not have been configured correctly; " - << "Call to configure() resulted in InvalidConfiguration exception. Device registration with Device Manager failed"; - LOG_ERROR(DeviceManager_impl, eout.str()); - throw(CF::InvalidObjectReference(eout.str().c_str())); - } catch ( std::exception& ex ) { - std::ostringstream eout; - eout << "The following standard exception occurred: "<bind(device_name, registeringDevice); - } catch ( ... 
) { - // there is already something bound to that name - // from the perspective of this framework implementation, the multiple names are not acceptable - // consider this a registered device - LOG_WARN(DeviceManager_impl, "Device is already registered"); - return; - } - increment_registeredDevices(registeringDevice); - } else { - LOG_WARN(DeviceManager_impl, "Device is already registered"); - return; - } - - // If this Device Manager is registered with a Domain Manager, register - // the new device with the Domain Manager - if (_adminState == DEVMGR_REGISTERED) { - try { - LOG_INFO(DeviceManager_impl, "Registering device " << deviceLabel << " on Domain Manager"); - _dmnMgr->registerDevice (registeringDevice, myObj); - } catch( CF::DomainManager::RegisterError& e ) { - LOG_ERROR(DeviceManager_impl, "Failed to register device to domain manager due to: " << e.msg); - } catch ( std::exception& ex ) { - LOG_ERROR(DeviceManager_impl, "The following standard exception occurred: "<_baseLog, "Attempt to unregister nil device") throw (CF::InvalidObjectReference("Cannot unregister Device. registeringDevice is a nil reference.")); } @@ -1792,10 +1800,10 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) dev_id = ossie::corba::returnString(registeredDevice->identifier()); dev_name = ossie::corba::returnString(registeredDevice->label()); } catch ( std::exception& ex ) { - LOG_ERROR(DeviceManager_impl, "The following standard exception occurred: "<_baseLog, "The following standard exception occurred: "<_baseLog, "The following CORBA exception occurred: "<_baseLog, "Cannot unregister Device. registeringDevice was not registered.") throw (CF::InvalidObjectReference("Cannot unregister Device. 
registeringDevice was not registered.")); } - - TRACE_EXIT(DeviceManager_impl); } void DeviceManager_impl::deleteFileSystems() @@ -1820,20 +1826,59 @@ void DeviceManager_impl::deleteFileSystems() _fileSys = CF::FileSystem::_nil(); } +void DeviceManager_impl::stopOrder() +{ + unsigned long timeout = 3; // seconds; + for (std::vector >::reverse_iterator item=start_order.rbegin(); item!=start_order.rend();++item) { + bool started = false; + for(DeviceList::iterator _dev=_registeredDevices.begin(); _dev!=_registeredDevices.end(); ++_dev) { + if ((*_dev)->identifier == item->first) { + try { + omniORB::setClientCallTimeout((*_dev)->device, timeout * 1000); + (*_dev)->device->stop(); + } catch ( ... ) { + } + started = true; + break; + } + } + if (started) + continue; + for(ServiceList::iterator _svc=_registeredServices.begin(); _svc!=_registeredServices.end(); ++_svc) { + if ((*_svc)->identifier == item->first) { + try { + CF::Resource_ptr res = CF::Resource::_narrow((*_svc)->service); + if (not CORBA::is_nil(res)) { + omniORB::setClientCallTimeout(res, timeout * 1000); + res->stop(); + } + } catch ( ... ) { + } + break; + } + } + } +} + void DeviceManager_impl::shutdown () throw (CORBA::SystemException) { + if (DomainWatchThread) + this->DomainWatchThread->stop(); + *_internalShutdown = true; - LOG_DEBUG(DeviceManager_impl, "SHUTDOWN START........." << *_internalShutdown) + RH_DEBUG(this->_baseLog, "SHUTDOWN START........." << *_internalShutdown) if ((_adminState == DEVMGR_SHUTTING_DOWN) || (_adminState == DEVMGR_SHUTDOWN)) { - LOG_DEBUG(DeviceManager_impl, "SHUTTIING DOWN NOW......" ); + RH_DEBUG(this->_baseLog, "SHUTTIING DOWN NOW......" ); return; } _adminState = DEVMGR_SHUTTING_DOWN; + stopOrder(); + // SR:501 // The shutdown operation shall unregister the DeviceManager from the DomainManager. 
// Although unclear, a failure here should NOT prevent us from trying to clean up @@ -1842,7 +1887,7 @@ throw (CORBA::SystemException) CF::DeviceManager_var self = _this(); if ( !CORBA::is_nil(_dmnMgr ) ) { _dmnMgr->unregisterDeviceManager(self); - LOG_DEBUG(DeviceManager_impl, "SHUTDOWN ......... unregisterDeviceManager "); + RH_DEBUG(this->_baseLog, "SHUTDOWN ......... unregisterDeviceManager "); } } catch( ... ) { } @@ -1854,10 +1899,10 @@ throw (CORBA::SystemException) if ( !CORBA::is_nil(_dmnMgr ) ) { CF::EventChannelManager_var ecm = _dmnMgr->eventChannelMgr(); if ( CORBA::is_nil(ecm) == false && idm_registration.operator->() != NULL ){ - LOG_INFO(DeviceManager_impl, "Unregister IDM CHANNEL:" << idm_registration->reg.reg_id); + RH_INFO(this->_baseLog, "Unregister IDM CHANNEL:" << idm_registration->reg.reg_id); ecm->unregister( idm_registration->reg ); } - LOG_DEBUG(DeviceManager_impl, "SHUTDOWN ......... Unregister IDM_CHANNEL"); + RH_DEBUG(this->_baseLog, "SHUTDOWN ......... Unregister IDM_CHANNEL"); } }catch(...){ } @@ -1871,7 +1916,7 @@ throw (CORBA::SystemException) clean_externalServices(); clean_registeredDevices(); - LOG_DEBUG(DeviceManager_impl, "SHUTDOWN ......... Unbinding device manager context"); + RH_DEBUG(this->_baseLog, "SHUTDOWN ......... Unbinding device manager context"); try { CosNaming::Name devMgrContextName; devMgrContextName.length(1); @@ -1893,7 +1938,7 @@ throw (CORBA::SystemException) } catch ( ... ) { } - LOG_DEBUG(DeviceManager_impl, "SHUTDOWN ......... completed"); + RH_DEBUG(this->_baseLog, "SHUTDOWN ......... 
completed"); } void @@ -1902,7 +1947,7 @@ DeviceManager_impl::registerService (CORBA::Object_ptr registeringService, throw (CORBA::SystemException, CF::InvalidObjectReference) { boost::recursive_mutex::scoped_lock lock(registeredDevicesmutex); - LOG_INFO(DeviceManager_impl, "Registering service " << name) + RH_INFO(this->_baseLog, "Registering service " << name) if (CORBA::is_nil (registeringService)) { throw (CF::InvalidObjectReference("Cannot register service, registeringService is a nil reference.")); @@ -1912,29 +1957,16 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) // Register the service with the Device manager, unless it is already // registered - if (!serviceIsRegistered(name)) { - // Per the specification, service usagenames are not optional and *MUST* be - // unique per each service type. Therefore, a domain cannot have two - // services of the same usagename. - LOG_TRACE(DeviceManager_impl, "Binding service to name " << name); - CosNaming::Name_var service_name = ossie::corba::stringToName(name); - try { - rootContext->rebind(service_name, registeringService); - } catch ( ... ) { - // there is already something bound to that name - // from the perspective of this framework implementation, the multiple names are not acceptable - // consider this a registered device - LOG_WARN(DeviceManager_impl, "Service is already registered") - return; - } - - increment_registeredServices(registeringService, name); - - } else { - LOG_WARN(DeviceManager_impl, "Service is already registered") + if (serviceIsRegistered(name)) { + RH_WARN(this->_baseLog, "Service " << name << " is already registered") return; } + // + // If the service support's any of the redhawk resource startup interfaces. 
+ // + tryResourceStartup( registeringService, name ); + //The registerService operation shall register the registeringService with the DomainManager //when the DeviceManager has already registered to the DomainManager and the //registeringService has been successfully added to the DeviceManagers registeredServices @@ -1942,15 +1974,34 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) if (_adminState == DEVMGR_REGISTERED) { try { _dmnMgr->registerService(registeringService, myObj, name); + } catch (const CF::DomainManager::RegisterError& error) { + RH_ERROR(this->_baseLog, "Unable to register service '" << name << "' with domain manager: " + << error.msg); + // If we know the PID, try to terminate the service + ServiceNode* service = _getPendingService(name); + if (service && (service->pid != 0)) { + _terminateProcessThreaded(service->pid); + } + return; } catch ( ... ) { - CosNaming::Name_var service_name = ossie::corba::stringToName(name); - rootContext->unbind(service_name); - _registeredServices.pop_back(); - LOG_ERROR(DeviceManager_impl, "Failed to register service to the domain manager; unregistering the service from the device manager") + RH_ERROR(this->_baseLog, "Failed to register service to the domain manager; unregistering the service from the device manager") throw; } } + // If we've made it this far, move the service to from the pending list to + // the registered list + increment_registeredServices(registeringService, name); + + bool allregistered = verifyAllRegistered(); + + // Release the lock, and if all devices and services are registered, start + // them + lock.unlock(); + if (allregistered) { + startOrder(); + } + //The registerService operation shall write a FAILURE_ALARM log record, upon unsuccessful //registration of a Service to the DeviceManagers registeredServices. 
//The registerService operation shall raise the CF InvalidObjectReference exception when the @@ -1962,7 +2013,7 @@ DeviceManager_impl::unregisterService (CORBA::Object_ptr registeredService, const char* name) throw (CORBA::SystemException, CF::InvalidObjectReference) { - LOG_INFO(DeviceManager_impl, "Unregistering service " << name) + RH_INFO(this->_baseLog, "Unregistering service " << name) if (CORBA::is_nil (registeredService)) { /*writeLogRecord(FAILURE_ALARM,invalid reference input parameter.); */ @@ -1974,6 +2025,13 @@ throw (CORBA::SystemException, CF::InvalidObjectReference) if (serviceFound) return; + // Ignore potential unregistration from a service on the pending list, + // which usually indicates that registering with the domain failed and we + // sent it a termination signal + ServiceNode* service = _getPendingService(name); + if (service) { + return; + } //If it didn't find registeredDevice, then throw an exception /*writeLogRecord(FAILURE_ALARM,invalid reference input parameter.);*/ @@ -1992,10 +2050,10 @@ local_spd::ProgramProfile *DeviceManager_impl::findProfile (const std::string &c DeploymentList::iterator iter; for( iter=deployed_comps.begin(); iter != deployed_comps.end(); iter++ ) { std::string cid = iter->second->getInstantiationIdentifier(); - LOG_TRACE(DeviceManager_impl, "Looking for Profile match: RegisteringInstanceID/ProfileInstanceId: " << componentInstantiationId << + RH_TRACE(this->_baseLog, "Looking for Profile match: RegisteringInstanceID/ProfileInstanceId: " << componentInstantiationId << " / " << iter->second->getInstantiationIdentifier() ); if ( componentInstantiationId == cid ) { - LOG_TRACE(DeviceManager_impl, "Looking for Profile FOUND MATCH " << + RH_TRACE(this->_baseLog, "Looking for Profile FOUND MATCH " << iter->second->getInstantiationIdentifier() ); ret=iter->second; break; @@ -2005,6 +2063,34 @@ local_spd::ProgramProfile *DeviceManager_impl::findProfile (const std::string &c } +local_spd::ProgramProfile 
*DeviceManager_impl::findProfile (const std::string &usageName, + const std::string &componentInstantiationId) +{ + SCOPED_LOCK(componentImplMapmutex); + local_spd::ProgramProfile *ret=0; + DeploymentList::iterator iter; + for( iter=deployed_comps.begin(); iter != deployed_comps.end(); iter++ ) { + std::string cid = iter->second->getInstantiationIdentifier(); + std::string cname = iter->second->getUsageName(); + RH_TRACE(this->_baseLog, "Looking for Profile match (InstanceID): Registering/Profile: " << componentInstantiationId << + " / " << cid ); + if ( componentInstantiationId == cid ) { + RH_TRACE(this->_baseLog, "Looking for Profile FOUND MATCH (instantiation) " << cid ); + ret=iter->second; + break; + } + + RH_TRACE(this->_baseLog, "Looking for Profile match (UsageName): Registering/Profile: " << usageName << "/" << cname ); + if ( cname == usageName ) { + RH_TRACE(this->_baseLog, "Looking for Profile FOUND MATCH (usageName) " << cname ); + ret=iter->second; + break; + } + } + return ret; +} + + char * DeviceManager_impl::getComponentImplementationId (const char* componentInstantiationId) @@ -2069,41 +2155,41 @@ bool DeviceManager_impl::makeDirectory(std::string path) int retval = mkdir(initialDir.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); if (retval == -1) { if (errno == ENOENT) { - LOG_WARN(DeviceManager_impl, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". Non-existent root directory.") + RH_WARN(this->_baseLog, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". Non-existent root directory.") success = false; } else if (errno == EEXIST) { - LOG_TRACE(DeviceManager_impl, "Directory (from " << workingFileName << ") " << initialDir <<" already exists. No need to make a new one.") + RH_TRACE(this->_baseLog, "Directory (from " << workingFileName << ") " << initialDir <<" already exists. 
No need to make a new one.") } else if (errno == EACCES) { - LOG_WARN(DeviceManager_impl, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". Please check your write permissions.") + RH_WARN(this->_baseLog, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". Please check your write permissions.") success = false; } else if (errno == ENOTDIR) { - LOG_WARN(DeviceManager_impl, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". One of the components of the path is not a directory.") + RH_WARN(this->_baseLog, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". One of the components of the path is not a directory.") success = false; } else if (errno == ELOOP) { - LOG_WARN(DeviceManager_impl, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". A loop exists in the symbolic links in the path.") + RH_WARN(this->_baseLog, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". A loop exists in the symbolic links in the path.") success = false; } else if (errno == EMLINK) { - LOG_WARN(DeviceManager_impl, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". The link count of the parent directory exceeds LINK_MAX.") + RH_WARN(this->_baseLog, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". The link count of the parent directory exceeds LINK_MAX.") success = false; } else if (errno == ENAMETOOLONG) { - LOG_WARN(DeviceManager_impl, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". The path name is too long.") + RH_WARN(this->_baseLog, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". The path name is too long.") success = false; } else if (errno == EROFS) { - LOG_WARN(DeviceManager_impl, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". 
This is a read-only file system.") + RH_WARN(this->_baseLog, "Failed to create directory (from " << workingFileName << ") " << initialDir <<". This is a read-only file system.") success = false; } else { - LOG_WARN(DeviceManager_impl, "Attempt to create directory (from " << workingFileName << ") " << initialDir <<" failed with the following error number: " << errno) + RH_WARN(this->_baseLog, "Attempt to create directory (from " << workingFileName << ") " << initialDir <<" failed with the following error number: " << errno) success = false; } } else { - LOG_TRACE(DeviceManager_impl, "Creating directory (from " << workingFileName << ") " << initialDir) + RH_TRACE(this->_baseLog, "Creating directory (from " << workingFileName << ") " << initialDir) } begin_pos = pos + 1; } } bool retval = checkWriteAccess(path); if (not retval) { - LOG_ERROR(DeviceManager_impl, "The Device Manager (or one of its children) does not have write permission to one or more files in the cache.") + RH_ERROR(this->_baseLog, "The Device Manager (or one of its children) does not have write permission to one or more files in the cache.") return false; } return success; @@ -2116,19 +2202,19 @@ bool DeviceManager_impl::checkWriteAccess(std::string &path) dp = opendir(path.c_str()); if (dp == NULL) { if (errno == ENOENT) { - LOG_WARN(DeviceManager_impl, "Failed to create directory " << path <<".") + RH_WARN(this->_baseLog, "Failed to create directory " << path <<".") } else if (errno == EACCES) { - LOG_WARN(DeviceManager_impl, "Failed to create directory " << path <<". Please check your write permissions.") + RH_WARN(this->_baseLog, "Failed to create directory " << path <<". Please check your write permissions.") } else if (errno == ENOTDIR) { - LOG_WARN(DeviceManager_impl, "Failed to create directory " << path <<". One of the components of the path is not a directory.") + RH_WARN(this->_baseLog, "Failed to create directory " << path <<". 
One of the components of the path is not a directory.") } else if (errno == EMFILE) { - LOG_WARN(DeviceManager_impl, "Failed to create directory " << path <<". Too many file descriptors open by the process.") + RH_WARN(this->_baseLog, "Failed to create directory " << path <<". Too many file descriptors open by the process.") } else if (errno == ENFILE) { - LOG_WARN(DeviceManager_impl, "Failed to create directory " << path <<". Too many file descriptors open by the system.") + RH_WARN(this->_baseLog, "Failed to create directory " << path <<". Too many file descriptors open by the system.") } else if (errno == ENOMEM) { - LOG_WARN(DeviceManager_impl, "Failed to create directory " << path <<". Insufficient memory to complete the operation.") + RH_WARN(this->_baseLog, "Failed to create directory " << path <<". Insufficient memory to complete the operation.") } else { - LOG_WARN(DeviceManager_impl, "Attempt to create directory " << path <<" failed with the following error number: " << errno) + RH_WARN(this->_baseLog, "Attempt to create directory " << path <<" failed with the following error number: " << errno) } return false; } @@ -2137,7 +2223,7 @@ bool DeviceManager_impl::checkWriteAccess(std::string &path) if ((name == ".") or (name == "..")) continue; std::string full_name = path + "/" + name; if (access(full_name.c_str(), W_OK) == -1) { - LOG_WARN(DeviceManager_impl, "The file '" << full_name << "' cannot be overwritten by the Device Manager process (or one of its children).") + RH_WARN(this->_baseLog, "The file '" << full_name << "' cannot be overwritten by the Device Manager process (or one of its children).") (void) closedir(dp); return false; } @@ -2198,17 +2284,6 @@ bool DeviceManager_impl::decrement_registeredServices(CORBA::Object_ptr register void DeviceManager_impl::local_unregisterService(CORBA::Object_ptr service, const std::string& name) { - // Unbind service from the naming service - - // Per the specification, service usagenames are not optional and 
*MUST* be - // unique per each service type. Therefore, a domain cannot have two - // services of the same usagename. - CosNaming::Name_var tmpServiceName = ossie::corba::stringToName(name); - try { - rootContext->unbind(tmpServiceName); - } catch ( ... ){ - } - // Ddon't unregisterService from the domain manager if we are SHUTTING_DOWN if (_adminState == DEVMGR_REGISTERED){ try { @@ -2260,7 +2335,7 @@ void DeviceManager_impl::local_unregisterDevice(CF::Device_ptr device, const std // Unbind device from the naming service CosNaming::Name_var tmpDeviceName = ossie::corba::stringToName(label); devMgrContext->unbind(tmpDeviceName); - } CATCH_LOG_ERROR(DeviceManager_impl, "Unable to unbind device: " << label ) + } CATCH_RH_ERROR(this->_baseLog, "Unable to unbind device: " << label ) // Per SR:490, don't unregisterDevice from the domain manager if we are SHUTTING_DOWN if (_adminState == DEVMGR_REGISTERED) { @@ -2272,6 +2347,78 @@ void DeviceManager_impl::local_unregisterDevice(CF::Device_ptr device, const std } +bool DeviceManager_impl::verifyAllRegistered() { + if (_pendingDevices.empty() and _pendingServices.empty()) + return true; + return false; +} + +void DeviceManager_impl::startOrder() +{ + const std::vector& componentPlacements = node_dcd.getComponentPlacements(); + for(std::vector::const_iterator cP = componentPlacements.begin(); cP!=componentPlacements.end(); cP++) { + if (cP->getInstantiations()[0].startOrder.isSet()) { + int cP_order = *(cP->getInstantiations()[0].startOrder.get()); + std::string cP_id(cP->getInstantiations()[0].getID()); + std::vector >::iterator _o=start_order.begin(); + for ( ; _o!=start_order.end(); _o++) { + if (_o->second >= cP_order) { + start_order.insert(_o, std::make_pair(cP_id, cP_order)); + break; + } + } + if (_o == start_order.end()) + start_order.push_back(std::make_pair(cP_id, cP_order)); + } + } + for (std::vector >::iterator item=start_order.begin(); item!=start_order.end();item++) { + bool started = false; + for 
(DeviceList::iterator dev=_registeredDevices.begin(); dev!=_registeredDevices.end(); dev++) { + if ((*dev)->identifier == item->first) { + RH_TRACE(this->_baseLog, "Starting device " << (*dev)->label); + try { + (*dev)->device->start(); + } catch (const CF::Resource::StartError& exc) { + RH_ERROR(this->_baseLog, "Device " << (*dev)->label << " failed to start: " << exc.msg); + } catch (const CORBA::SystemException& exc) { + RH_ERROR(this->_baseLog, "Device " << (*dev)->label << " failed to start: " + << ossie::corba::describeException(exc)); + } + started = true; + break; + } + } + if (started) + continue; + for (ServiceList::iterator svc=_registeredServices.begin(); svc!=_registeredServices.end(); svc++) { + if ((*svc)->identifier == item->first) { + const std::string& identifier = (*svc)->identifier; + RH_TRACE(this->_baseLog, "Starting service " << identifier); + CORBA::Object_ptr obj = (*svc)->service; + if (!(obj->_is_a(CF::Resource::_PD_repoId))) { + RH_WARN(this->_baseLog, "Service " << identifier + << " has a startorder value but does not inherit from Resource"); + break; + } + CF::Resource_var res = ossie::corba::_narrowSafe(obj); + if (CORBA::is_nil(res)) { + RH_ERROR(this->_baseLog, "Service " << identifier << " cannot be narrowed to Resource"); + break; + } + try { + res->start(); + } catch (const CF::Resource::StartError& exc) { + RH_ERROR(this->_baseLog, "Service " << identifier << " failed to start: " << exc.msg); + } catch (const CORBA::SystemException& exc) { + RH_ERROR(this->_baseLog, "Service " << identifier << " failed to start: " + << ossie::corba::describeException(exc)); + } + break; + } + } + } +} + /* * increment the registered services sequences along with the id and table tables */ @@ -2282,7 +2429,7 @@ void DeviceManager_impl::increment_registeredServices(CORBA::Object_ptr register ServiceNode* serviceNode = 0; for (ServiceList::iterator serviceIter = _pendingServices.begin(); serviceIter != _pendingServices.end(); ++serviceIter) { - if 
(strcmp((*serviceIter)->label.c_str(), name) == 0){ + if ((*serviceIter)->label == name) { serviceNode = *serviceIter; _pendingServices.erase(serviceIter); break; @@ -2292,7 +2439,7 @@ void DeviceManager_impl::increment_registeredServices(CORBA::Object_ptr register if (!serviceNode){ // A service is registering that was not launched by this DeviceManager. Create a node // to manage it, but mark the PID as 0, as there is no process to monitor. - LOG_WARN(DeviceManager_impl, "Registering service " << name << " was not launched by this DeviceManager"); + RH_WARN(this->_baseLog, "Registering service " << name << " was not launched by this DeviceManager"); serviceNode = new ServiceNode; serviceNode->identifier = name; serviceNode->pid = 0; @@ -2332,7 +2479,7 @@ void DeviceManager_impl::increment_registeredDevices(CF::Device_ptr registeringD if (!deviceNode) { // A device is registering that was not launched by this DeviceManager. Create a node // to manage it, but mark the PID as 0, as there is no process to monitor. 
- LOG_WARN(DeviceManager_impl, "Registering device " << identifier << " was not launched by this DeviceManager"); + RH_WARN(this->_baseLog, "Registering device " << identifier << " was not launched by this DeviceManager"); deviceNode = new DeviceNode; deviceNode->identifier = identifier; deviceNode->pid = 0; @@ -2380,7 +2527,7 @@ CF::DeviceSequence* DeviceManager_impl::registeredDevices () throw (CORBA::Syste return result._retn(); } -std::string DeviceManager_impl::getIORfromID(const char* instanceid) +std::string DeviceManager_impl::getIORfromID(const std::string& instanceid) { boost::recursive_mutex::scoped_lock lock(registeredDevicesmutex); @@ -2506,7 +2653,7 @@ void DeviceManager_impl::clean_registeredDevices() // should update the registered devices list; it is possible that the // device node will be deleted before the lock is re-acquired, so local // copies of any objects must be used - LOG_INFO(DeviceManager_impl, "Releasing device " << label); + RH_INFO(this->_baseLog, "Releasing device " << label); lock.unlock(); try { // 3 seconds or use cfg option @@ -2530,7 +2677,7 @@ void DeviceManager_impl::clean_registeredDevices() } } - LOG_DEBUG(DeviceManager_impl, "Sending SIGNAL TREE to to device process " ); + RH_DEBUG(this->_baseLog, "Sending SIGNAL TREE to to device process " ); // Clean up device processes, starting with an orderly shutdown and // escalating as needed // NOTE: If the DeviceManager was terminated with a ^C, sending SIGINT may @@ -2611,7 +2758,7 @@ void DeviceManager_impl::childExited (pid_t pid, int status) // The pid should always be found; if it is not, it must be a logic error. 
if (!deviceNode && !serviceNode) { - LOG_ERROR(DeviceManager_impl, "Process " << pid << " is not associated with a registered device"); + RH_ERROR(this->_baseLog, "Process " << pid << " is not associated with a registered device"); return; } @@ -2624,12 +2771,12 @@ void DeviceManager_impl::childExited (pid_t pid, int status) if (WIFSIGNALED(status)) { if (deviceNode) { - LOG_WARN(DeviceManager_impl, "Child process " << label << " (pid " << pid << ") has terminated with signal " << WTERMSIG(status)); + RH_WARN(this->_baseLog, "Child process " << label << " (pid " << pid << ") has terminated with signal " << WTERMSIG(status)); } else { // it's a service, so no termination through signal is the correct behavior - LOG_INFO(DeviceManager_impl, "Child process " << label << " (pid " << pid << ") has terminated with signal " << WTERMSIG(status)); + RH_INFO(this->_baseLog, "Child process " << label << " (pid " << pid << ") has terminated with signal " << WTERMSIG(status)); } } else { - LOG_INFO(DeviceManager_impl, "Child process " << label << " (pid " << pid << ") has exited with status " << WEXITSTATUS(status)); + RH_INFO(this->_baseLog, "Child process " << label << " (pid " << pid << ") has exited with status " << WEXITSTATUS(status)); } if (deviceNode) { @@ -2651,3 +2798,168 @@ bool DeviceManager_impl::allChildrenExited () return false; } + + +void DeviceManager_impl::tryResourceStartup( CORBA::Object_ptr registeringService, + const std::string &svc_name ) +{ + try { + + local_spd::ProgramProfile *spdinfo = findProfile(svc_name, svc_name); + + if ( !spdinfo ) { + std::ostringstream eout; + eout << "Unable to find componentplacement information for for Service:" << svc_name; + RH_WARN(this->_baseLog, eout.str()); + throw(CF::InvalidObjectReference(eout.str().c_str())); + } + + // + // Try standard Redhawk resource startup... 
+ // initializeProperties, initialized, configure + // + CF::LifeCycle_var svc_lc = ossie::corba::_narrowSafe (registeringService); + CF::PropertySet_var svc_ps = ossie::corba::_narrowSafe (registeringService); + CF::PropertyEmitter_var svc_em = ossie::corba::_narrowSafe (registeringService); + std::ostringstream eout; + std::string emsg; + try { + RH_DEBUG(this->_baseLog, "Initialize properties for spd/service: " << spdinfo->getName() << "/" << svc_name); + const CF::Properties cprops = spdinfo->getNonNilConstructProperties(); + for (unsigned int j = 0; j < cprops.length (); j++) { + RH_DEBUG(this->_baseLog, "initializeProperties prop id " << cprops[j].id ); + } + + if ( !CORBA::is_nil(svc_em)) { + // Try to set the initial values for the resource + RH_DEBUG(this->_baseLog, "Calling Service: " << svc_name << " initializeProperties props: " << cprops.length()); + svc_em->initializeProperties(cprops); + } + else { + if ( cprops.length() > 0 ) { + RH_WARN(this->_baseLog,"Service: " << svc_name << " has configuration properties but does not implement PropertEmitter interface."); + } + } + + }catch(CF::PropertySet::InvalidConfiguration& e) { + eout << "Invalid Configuration exception occurred, service '" << svc_name <<"."; + } catch(CF::PropertySet::PartialConfiguration& e) { + eout << "Partial configuration exception for Service '" << svc_name << "."; + } catch ( std::exception& ex ) { + eout << "Standard exception occurred: "< 0 ) { + RH_WARN(this->_baseLog, eout.str() << " Continuing with normal service registration."); + return; + } + + RH_DEBUG(this->_baseLog, "Initializing Service " << svc_name << " on DeviceManager: " << _label); + eout.clear(); eout.str(""); + try { + if ( !CORBA::is_nil(svc_lc)) { + RH_DEBUG(this->_baseLog, "Calling Service " << svc_name << " initialize method."); + svc_lc->initialize(); + } + else { + RH_DEBUG(this->_baseLog, "Service does not implement LifeCycle interface."); + } + + } catch (CF::LifeCycle::InitializeError& ex) { + eout << 
"Service: "<< svc_name << " threw a CF::LifeCycle::InitializeError exception."; + } catch ( std::exception& ex ) { + eout << "The following standard exception occurred: "< 0 ) { + RH_WARN(this->_baseLog, eout.str() << " Continuing with normal service registration."); + return; + } + + eout.clear(); eout.str(""); + //configure properties + try { + RH_DEBUG(this->_baseLog, "Configuring service " << svc_name << " on Device Manager " << _label); + const CF::Properties cprops = spdinfo->getNonNilConfigureProperties(); + RH_TRACE(this->_baseLog, "Listing configuration properties"); + for (unsigned int j=0; j_baseLog, "Prop id " << cprops[j].id ); + } + if (cprops.length() != 0) { + if ( !CORBA::is_nil(svc_ps) ) { + RH_DEBUG(this->_baseLog, "Calling Service's configure method with properties: " << cprops.length()); + svc_ps->configure (cprops); + } + else { + eout << "Service has configuration properties but does not implement PropertSet interface. Continuing with normal service registration."; + } + } + + } catch (CF::PropertySet::PartialConfiguration& ex) { + eout << "Partial configuration exception for Service '" << svc_name << "."; + } catch (CF::PropertySet::InvalidConfiguration& ex) { + eout << "Invalid Configuration exception occurred, service '" << svc_name <<"."; + } catch ( std::exception& ex ) { + eout << "Standard exception occurred: "< 0 ) { + RH_WARN(this->_baseLog, eout.str() << " Continuing with normal service registration."); + return; + } + + } + catch(...){ + RH_WARN(this->_baseLog, "Error processing SoftwareProfile for Service: " << svc_name << ", continue with normal registration."); + return; + } + + +} + +DeviceManager_impl::ServiceNode* DeviceManager_impl::_getPendingService(const std::string& name) +{ + boost::recursive_mutex::scoped_lock lock(registeredDevicesmutex); + for (ServiceList::iterator svc = _pendingServices.begin(); svc != _pendingServices.end(); ++svc) { + if ((*svc)->label == name) { + return *svc; + } + } + return 0; +} + +void 
DeviceManager_impl::_terminateProcess(pid_t pid) +{ + kill(pid, SIGTERM); + + boost::system_time end = boost::get_system_time() + boost::posix_time::milliseconds(500); + while (boost::get_system_time() < end) { + if (kill(pid, 0) != 0) { + return; + } + boost::this_thread::sleep(boost::posix_time::milliseconds(1)); + } + + kill(pid, SIGKILL); +} + +void DeviceManager_impl::_terminateProcessThreaded(pid_t pid) +{ + // Send termination signals and wait in a daemon thread to avoid blocking + // the calling thread + boost::thread thread(&DeviceManager_impl::_terminateProcess, this, pid); +} diff --git a/redhawk/src/control/sdr/devmgr/DeviceManager_impl.h b/redhawk/src/control/sdr/devmgr/DeviceManager_impl.h index 16b57428d..45b1c7af4 100644 --- a/redhawk/src/control/sdr/devmgr/DeviceManager_impl.h +++ b/redhawk/src/control/sdr/devmgr/DeviceManager_impl.h @@ -31,27 +31,37 @@ #include #include #include +#include #include #include #include +#include +#include #include #include #include +#include "spdSupport.h" #include #include "spdSupport.h" #include "process_utils.h" #include +class DomainCheckThread; + class DeviceManager_impl: public virtual POA_CF::DeviceManager, + public Logging_impl, public PropertySet_impl, public PortSet_impl { ENABLE_LOGGING + + friend class DomainCheckThread; public: - DeviceManager_impl (const char*, const char*, const char*, const char*, const struct utsname &uname, bool, const char *, bool *); + DeviceManager_impl (const char*, const char*, const char*, const char*, const struct utsname &uname, bool, + const char *, bool *, const std::string&, int); ~DeviceManager_impl (); char* deviceConfigurationProfile () @@ -78,6 +88,9 @@ class DeviceManager_impl: // Run this after the constructor void postConstructor( const char*) throw (CORBA::SystemException, std::runtime_error); + // Re-start all devices and services, and re-associate with the Domain + void reset(); + void registerDevice (CF::Device_ptr registeringDevice) throw 
(CF::InvalidObjectReference, CORBA::SystemException); @@ -108,13 +121,25 @@ class DeviceManager_impl: uint32_t getClientWaitTime( ) { return CLIENT_WAIT_TIME; } + // set the log level for one of the loggers on a component on the waveform + void setLogLevel( const char *logger_id, const CF::LogLevel newLevel ) throw (CF::UnknownIdentifier); + + // get the log level from one of the loggers on a component on the waveform + CF::LogLevel getLogLevel( const char *logger_id ) throw (CF::UnknownIdentifier); + + // retrieves the list of named loggers from all the components associated with the waveform + CF::StringSequence* getNamedLoggers(); + + // reset the loggers on all components on the waveform + void resetLog(); + private: DeviceManager_impl (); // No default constructor DeviceManager_impl(DeviceManager_impl&); // No copying typedef boost::shared_ptr FileSystemPtr; - typedef std::vector< ossie::ComponentPlacement > ComponentPlacements; - typedef std::pair< ossie::ComponentPlacement, local_spd::ProgramProfile* > Deployment; + typedef std::vector< ossie::DevicePlacement > DevicePlacements; + typedef std::pair< ossie::DevicePlacement, local_spd::ProgramProfile* > Deployment; typedef std::vector< Deployment > DeploymentList; typedef std::list > ExecparamList; typedef std::map PackageMods; @@ -135,6 +160,12 @@ class DeviceManager_impl: CORBA::Object_var service; pid_t pid; }; + + DomainCheckThread *DomainWatchThread; + void domainRefreshChanged(float oldValue, float newValue); + int checkDomain(); + struct timeval startDomainWarn; + bool domain_persistence; typedef std::vector DeviceList; typedef std::vector ServiceList; @@ -154,7 +185,8 @@ class DeviceManager_impl: std::string HOSTNAME; float DEVICE_FORCE_QUIT_TIME; CORBA::ULong CLIENT_WAIT_TIME; - + float DOMAIN_REFRESH; + // read only attributes struct utsname _uname; std::string processor_name; @@ -162,6 +194,7 @@ class DeviceManager_impl: std::string _identifier; std::string _label; std::string 
_deviceConfigurationProfile; + std::string _spdFile; std::string _fsroot; std::string _cacheroot; std::string _local_sdrroot; @@ -176,7 +209,15 @@ class DeviceManager_impl: CF::FileSystem_var _local_dom_filesys; CF::FileSystem_var _fileSys; CF::DeviceManager_var myObj; + ossie::DeviceManagerConfiguration DCDParser; + bool checkWriteAccess(std::string &path); + + // + // tryResourceStartup - try the following interfaces initializeproperties, initialize, configure + // + void tryResourceStartup( CORBA::Object_ptr registeringService, + const std::string &svc_name ); enum DevMgrAdmnType { DEVMGR_REGISTERED, @@ -211,9 +252,9 @@ class DeviceManager_impl: CF::DeviceManager_var& my_object_var); void getCompositeDeviceIOR( - std::string& compositeDeviceIOR, - const std::vector& componentPlacements, - const ossie::ComponentPlacement& componentPlacementInst); + std::string& compositeDeviceIOR, + const std::vector& componentPlacements, + const ossie::DevicePlacement& componentPlacementInst); bool addDeviceImplProperties ( local_spd::ProgramProfile *compProfile, @@ -229,7 +270,7 @@ class DeviceManager_impl: int resolveDebugLevel( const std::string &level_in ); void resolveLoggingConfiguration( const std::string & usageName, - std::vector< std::string >& new_argv, + std::vector< std::string >& new_argv, const ossie::ComponentInstantiation& instantiation, const std::string &logcfg_path ); DeviceNode* getDeviceNode(const pid_t pid); @@ -250,12 +291,14 @@ class DeviceManager_impl: void createDeviceCacheLocation( std::string& devcache, + std::string& devcwd, std::string& usageName, + local_spd::ProgramProfile *compProfile, const ossie::ComponentInstantiation& instantiation); void createDeviceExecStatement( std::vector< std::string >& new_argv, - const ossie::ComponentPlacement& componentPlacement, + const ossie::DevicePlacement& componentPlacement, local_spd::ProgramProfile *compProfile, const std::string& componentType, const std::string& codeFilePath, @@ -264,7 +307,7 @@ class 
DeviceManager_impl: const std::string& compositeDeviceIOR ); void createDeviceThreadAndHandleExceptions( - const ossie::ComponentPlacement& componentPlacement, + const ossie::DevicePlacement& componentPlacement, local_spd::ProgramProfile *compProfile, const std::string& componentType, const std::string& codeFilePath, @@ -272,17 +315,18 @@ class DeviceManager_impl: const std::string& compositeDeviceIOR ); void createDeviceThread( - const ossie::ComponentPlacement& componentPlacement, + const ossie::DevicePlacement& componentPlacement, local_spd::ProgramProfile *compProfile, const std::string& componentType, const std::string& codeFilePath, const ossie::ComponentInstantiation& instantiation, const std::string& devcache, + const std::string& devcwd, const std::string& usageName, const std::string& compositeDeviceIOR ); ExecparamList createDeviceExecparams( - const ossie::ComponentPlacement& componentPlacement, + const ossie::DevicePlacement& componentPlacement, local_spd::ProgramProfile *compProfile, const std::string& componentType, const std::string& codeFilePath, @@ -301,6 +345,7 @@ class DeviceManager_impl: const std::string &impl_id ); local_spd::ProgramProfile *findProfile( const std::string &instantiationId ); + local_spd::ProgramProfile *findProfile( const std::string &usageName, const std::string &instantiationId ); bool deviceIsRegistered (CF::Device_ptr); bool serviceIsRegistered (const char*); void getDomainManagerReference(const std::string&); @@ -318,6 +363,11 @@ class DeviceManager_impl: void clean_registeredDevices(); void clean_registeredServices(); void clean_externalServices(); + bool verifyAllRegistered(); + + std::vector > start_order; + void startOrder(); + void stopOrder(); void local_unregisterService(CORBA::Object_ptr service, const std::string& name); void local_unregisterDevice(CF::Device_ptr device, const std::string& name); @@ -332,7 +382,12 @@ class DeviceManager_impl: void deleteFileSystems(); bool makeDirectory(std::string path); - 
std::string getIORfromID(const char* instanceid); + std::string getIORfromID(const std::string& instanceid); + + ServiceNode* _getPendingService(const std::string& name); + void _terminateProcess(pid_t pid); + void _terminateProcessThreaded(pid_t pid); + std::string deviceMgrIOR; std::string fileSysIOR; bool *_internalShutdown; @@ -354,8 +409,105 @@ class DeviceManager_impl: // Registration record for Domain's IDM_Channel ossie::events::EventChannelReg_var idm_registration; std::string IDM_IOR; + int _initialDebugLevel; + +}; + +class DomainCheckThread { + +public: + + enum { + NOOP = 0, + FINISH = -1, + }; + +private: + boost::thread* _thread; + volatile bool _running; + DeviceManager_impl * _target; + struct timespec _delay; + +public: + boost::thread*& _mythread; + +public: + DomainCheckThread( DeviceManager_impl *target, float delay=0.5) : + _thread(0), + _running(false), + _target(target), + _mythread(_thread) + { + updateDelay(delay); + } + + void start() { + if (!_thread) { + _running = true; + _thread = new boost::thread(&DomainCheckThread::run, this); + } + } + + void run() + { + while (_running) { + int state = _target->checkDomain(); + if (state == FINISH) { + return; + } else if (state == NOOP) { + nanosleep(&_delay, NULL); + } + else { + boost::this_thread::yield(); + } + } + } + + bool release(unsigned long secs=0, unsigned long usecs=0) { + + _running = false; + if (_thread) { + if ((secs == 0) && (usecs == 0)){ + _thread->join(); + } else { + boost::system_time waitime = boost::get_system_time() + boost::posix_time::seconds(secs) + boost::posix_time::microseconds(usecs); + if (!_thread->timed_join(waitime)) { + return false; + } + } + delete _thread; + _thread = 0; + } + + return true; + } + + void stop() { + _running = false; + if ( _thread ) _thread->interrupt(); + } + + ~DomainCheckThread() + { + if (_thread) { + release(0); + _thread = 0; + } + } + + void updateDelay(float delay) + { + _delay.tv_sec = (time_t)delay; + _delay.tv_nsec = 
(delay-_delay.tv_sec)*1e9; + } + + bool threadRunning() + { + return _running; + } }; + #endif /* __DEVICEMANAGER_IMPL__ */ diff --git a/redhawk/src/control/sdr/devmgr/Makefile.am b/redhawk/src/control/sdr/devmgr/Makefile.am index b459d95bc..e3b0f1a53 100644 --- a/redhawk/src/control/sdr/devmgr/Makefile.am +++ b/redhawk/src/control/sdr/devmgr/Makefile.am @@ -25,6 +25,6 @@ devmgr_PROGRAMS = DeviceManager DeviceManager_SOURCES = main.cpp spdSupport.cpp process_utils.cpp DeviceManager_DeployerSupport.cpp DeviceManager_impl.cpp DeviceManager_CPPFLAGS = -I../../include -I../../parser -I$(top_srcdir)/base/include -I$(top_srcdir)/base/framework/logging $(BOOST_CPPFLAGS) $(OMNIORB_CFLAGS) $(LOG4CXX_FLAGS) DeviceManager_CXXFLAGS = -Wall -DeviceManager_LDADD = ../../framework/libossiedomain.la ../../parser/libossieparser.la $(top_builddir)/base/framework/libossiecf.la $(top_builddir)/base/framework/idl/libossieidl.la $(OMNIORB_LIBS) $(BOOST_LDFLAGS) $(BOOST_FILESYSTEM_LIB) $(BOOST_SYSTEM_LIB) $(LOG4CXX_LIBS) -ldl +DeviceManager_LDADD = ../../framework/libossiedomain.la ../../parser/libossieparser.la $(top_builddir)/base/framework/libossiecf.la $(top_builddir)/base/framework/idl/libossieidl.la $(OMNIORB_LIBS) $(BOOST_LDFLAGS) $(BOOST_FILESYSTEM_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(LOG4CXX_LIBS) -ldl DeviceManager_LDFLAGS = -static diff --git a/redhawk/src/control/sdr/devmgr/main.cpp b/redhawk/src/control/sdr/devmgr/main.cpp index 8ef1d51d3..03db0462e 100644 --- a/redhawk/src/control/sdr/devmgr/main.cpp +++ b/redhawk/src/control/sdr/devmgr/main.cpp @@ -78,17 +78,6 @@ static void shutdown (void) } -// System Signal Interrupt Handler will allow proper ORB shutdown -void signal_catcher( int sig ) -{ - // IMPORTANT Don't call exit(...) 
in this function - // issue all CORBA calls that you need for cleanup here before calling ORB shutdown - if ((( sig == SIGINT ) || (sig == SIGQUIT) || (sig == SIGTERM))) { - shutdown(); - } -} - - static void child_exit (int sig) { pid_t pid; @@ -139,10 +128,11 @@ int sigprocessor(void ) { struct timeval tv = {0, 50}; int retval=SimpleThread::NOOP; + std::string logname("DeviceManagerLoader"); + if ( sig_fd > -1 ) { // don't care about writefds and exceptfds: - //LOG_TRACE(DeviceManager, "Checking for signals from SIGNALFD......" ); select(sig_fd+1, &readfds, NULL, NULL, &tv); if (FD_ISSET(sig_fd, &readfds)) { @@ -150,12 +140,12 @@ int sigprocessor(void ) { struct signalfd_siginfo si; ssize_t s = read(sig_fd, &si, sizeof(struct signalfd_siginfo)); if (s != sizeof(struct signalfd_siginfo)){ - LOG_ERROR(DeviceManager, "SIGCHLD handling error ..."); + RH_NL_ERROR(logname, "SIGCHLD handling error ..."); } // check for SIGCHLD if ( si.ssi_signo == SIGCHLD) { - LOG_TRACE(DeviceManager, "SignalChild is active....pid:." << si.ssi_pid); + RH_NL_TRACE(logname, "SignalChild is active....pid:." << si.ssi_pid); // Only concerned with children that exited; the status will be reported by // the DeviceManager's child handler switch (si.ssi_code) { @@ -170,7 +160,7 @@ int sigprocessor(void ) { // check if we need to exit... if ( si.ssi_signo == SIGINT ||si.ssi_signo == SIGQUIT || si.ssi_signo == SIGTERM ) { - LOG_INFO(DeviceManager, "DeviceManager received signal (INT or QUIT or TERM) for proces: " << si.ssi_pid); + RH_NL_INFO(logname, "DeviceManager received signal (INT or QUIT or TERM) for proces: " << si.ssi_pid); shutdown(); } } @@ -180,7 +170,7 @@ int sigprocessor(void ) { if (internalShutdown_devMgr && DeviceManager_servant->allChildrenExited() && DeviceManager_servant->isShutdown() ) { - LOG_DEBUG(DeviceManager, "Release the ORB, control back to main.cpp" ); + RH_NL_DEBUG(logname, "Release the ORB, control back to main.cpp" ); // devmgr is done with using orb.... 
release orb so control goes back to main ossie::corba::OrbShutdown(false); retval=SimpleThread::FINISH; // stop us too @@ -198,10 +188,12 @@ int main(int argc, char* argv[]) // parse command line options std::string dcdFile; std::string sdrRoot; + std::string spdFile; std::string sdrCache; std::string logfile_uri; std::string domainName; - int debugLevel = 3; + int debugLevel = -1; + int initialDebugLevel = -1; std::string dpath(""); std::string cpuBlackList(""); std::string node_name("DEVICE_MANAGER"); @@ -212,6 +204,8 @@ int main(int argc, char* argv[]) raise_limit(RLIMIT_NPROC, "process"); raise_limit(RLIMIT_NOFILE, "file descriptor"); + std::string logname("DeviceManagerLoader"); + for (int ii = 1; ii < argc; ++ii) { std::string param = argv[ii]; std::string pupper = boost::algorithm::to_upper_copy(param); @@ -228,6 +222,8 @@ int main(int argc, char* argv[]) dcdFile = argv[ii]; } else if (param == "SDRROOT") { sdrRoot = argv[ii]; + } else if (param == "SPD") { + spdFile = argv[ii]; } else if (param == "SDRCACHE") { sdrCache = argv[ii]; } else if (param == "DOMAIN_NAME") { @@ -244,6 +240,7 @@ int main(int argc, char* argv[]) std::cout<<"Logging level "< 0 ) { execparams[param] = argv[ii]; } @@ -278,12 +275,24 @@ int main(int argc, char* argv[]) } - pid_t pid = getpid(); - std::ostringstream os; - os << boost::asio::ip::host_name() << ":" << node_name << "_" << pid; - node_name = os.str(); + fs::path dcdPath = devRootPath / dcdFile; + std::ifstream dcdStream(dcdPath.string().c_str()); + if (!dcdStream) { + std::cerr << "Could not read DCD file " << dcdFile << std::endl; + exit(EXIT_FAILURE); + } + ossie::DeviceManagerConfiguration dcd; + try { + dcd.load(dcdStream); + } catch (const ossie::parser_error& ex) { + std::cerr << "Failed to parse DCD file " << dcdFile << ". 
The XML parser returned the following error: " << ex.what() << std::endl; + exit(EXIT_FAILURE); + } + dcdStream.close(); + + node_name = dcd.getName(); - os.str(""); + std::ostringstream os; os << domainName << "/" << node_name; dpath= os.str(); @@ -335,10 +344,12 @@ int main(int argc, char* argv[]) } } + // // apply logging settings to the library // ossie::logging::Configure(logcfg_uri, debugLevel, ctx); + execparams["LOGGING_CONFIG_URI"] = const_cast(logfile_uri.c_str()); /////////////////////////////////////////////////////////////////////////// // NO LOG_ STATEMENTS ABOVE THIS POINT @@ -352,24 +363,24 @@ int main(int argc, char* argv[]) err=sigemptyset(&sigset); err = sigaddset(&sigset, SIGINT); if ( err ) { - LOG_ERROR(DeviceManager, "sigaction(SIGINT): " << strerror(errno)); + RH_NL_ERROR(logname, "sigaction(SIGINT): " << strerror(errno)); exit(EXIT_FAILURE); } err = sigaddset(&sigset, SIGQUIT); if ( err ) { - LOG_ERROR(DeviceManager, "sigaction(SIGQUIT): " << strerror(errno)); + RH_NL_ERROR(logname, "sigaction(SIGQUIT): " << strerror(errno)); exit(EXIT_FAILURE); } err = sigaddset(&sigset, SIGTERM); if ( err ) { - LOG_ERROR(DeviceManager, "sigaction(SIGTERM): " << strerror(errno)); + RH_NL_ERROR(logname, "sigaction(SIGTERM): " << strerror(errno)); exit(EXIT_FAILURE); } err = sigaddset(&sigset, SIGCHLD); if ( err ) { - LOG_ERROR(DeviceManager, "sigaction(SIGCHLD): " << strerror(errno)); + RH_NL_ERROR(logname, "sigaction(SIGCHLD): " << strerror(errno)); exit(EXIT_FAILURE); } @@ -378,7 +389,7 @@ int main(int argc, char* argv[]) // Create the signalfd sig_fd = signalfd(-1, &sigset, SFD_NONBLOCK | SFD_CLOEXEC); if ( sig_fd == -1 ) { - LOG_ERROR(DeviceManager, "signalfd failed: " << strerror(errno)); + RH_NL_ERROR(logname, "signalfd failed: " << strerror(errno)); exit(EXIT_FAILURE); } @@ -403,21 +414,21 @@ int main(int argc, char* argv[]) // Map i686 to SCA x86 struct utsname un; if (uname(&un) != 0) { - LOG_ERROR(DeviceManager, "Unable to determine system 
information: " << strerror(errno)); + RH_NL_ERROR(logname, "Unable to determine system information: " << strerror(errno)); exit (0); } if (strcmp("i686", un.machine) == 0) { strcpy(un.machine, "x86"); } - LOG_DEBUG(DeviceManager, "Machine " << un.machine); - LOG_DEBUG(DeviceManager, "Version " << un.release); - LOG_DEBUG(DeviceManager, "OS " << un.sysname); + RH_NL_DEBUG(logname, "Machine " << un.machine); + RH_NL_DEBUG(logname, "Version " << un.release); + RH_NL_DEBUG(logname, "OS " << un.sysname); struct rlimit limit; if (getrlimit(RLIMIT_NPROC, &limit) == 0) { - LOG_DEBUG(DeviceManager, "Process limit " << limit.rlim_cur); + RH_NL_DEBUG(logname, "Process limit " << limit.rlim_cur); } if (getrlimit(RLIMIT_NOFILE, &limit) == 0) { - LOG_DEBUG(DeviceManager, "File descriptor limit " << limit.rlim_cur); + RH_NL_DEBUG(logname, "File descriptor limit " << limit.rlim_cur); } // Locate the physical location for the Device Manager's cache. @@ -443,10 +454,10 @@ int main(int argc, char* argv[]) devMgrCache = devRootPath.string(); } - LOG_INFO(DeviceManager, "Starting Device Manager with " << dcdFile); - LOG_DEBUG(DeviceManager, "Root of DeviceManager FileSystem set to " << devRootPath); - LOG_DEBUG(DeviceManager, "DevMgr cache set to " << devMgrCache); - LOG_DEBUG(DeviceManager, "Domain Name set to " << domainName); + RH_NL_INFO(logname, "Starting Device Manager with " << dcdFile); + RH_NL_DEBUG(logname, "Root of DeviceManager FileSystem set to " << devRootPath); + RH_NL_DEBUG(logname, "DevMgr cache set to " << devMgrCache); + RH_NL_DEBUG(logname, "Domain Name set to " << domainName); SimpleThread sigthread( sigprocessor ); int pstage=-1; @@ -459,8 +470,12 @@ int main(int argc, char* argv[]) un, useLogCfgResolver, cpuBlackList.c_str(), - &internalShutdown_devMgr + &internalShutdown_devMgr, + spdFile, + initialDebugLevel ); + DeviceManager_servant->saveLoggingContext(logfile_uri, initialDebugLevel, ctx); + DeviceManager_servant->setExecparamProperties(execparams); pstage=0; 
@@ -471,37 +486,39 @@ int main(int argc, char* argv[]) // for its deletion. PortableServer::POA_var devmgr_poa = root_poa->find_POA("DeviceManager", 1); PortableServer::ObjectId_var oid = devmgr_poa->activate_object(DeviceManager_servant); - + + + // finish initializing the Device Manager try { pstage++; DeviceManager_servant->postConstructor(domainName.c_str()); } catch (const CORBA::Exception& ex) { - LOG_FATAL(DeviceManager, "Startup failed with CORBA::" << ex._name() << " exception"); + RH_NL_FATAL(logname, "Startup failed with CORBA::" << ex._name() << " exception"); shutdown(); throw; } catch (const std::runtime_error& e) { - LOG_FATAL(DeviceManager, "Startup failed: " << e.what() ); + RH_NL_FATAL(logname, "Startup failed: " << e.what() ); shutdown(); throw; } catch (...) { - LOG_FATAL(DeviceManager, "Startup failed; unknown exception"); + RH_NL_FATAL(logname, "Startup failed; unknown exception"); shutdown(); throw; } pstage++; - LOG_INFO(DeviceManager, "Starting ORB!"); + RH_NL_INFO(logname, "Starting ORB!"); orb->run(); pstage++; - LOG_INFO(DeviceManager, "Goodbye!"); + RH_NL_INFO(logname, "Goodbye!"); } catch (const CORBA::Exception& ex) { - LOG_ERROR(DeviceManager, "Terminated with CORBA::" << ex._name() << " exception"); + RH_NL_ERROR(logname, "Terminated with CORBA::" << ex._name() << " exception"); throw; } catch (const std::exception& ex) { - LOG_ERROR(DeviceManager, "Terminated with exception: " << ex.what()); + RH_NL_ERROR(logname, "Terminated with exception: " << ex.what()); throw; } }catch(...) 
{ @@ -530,7 +547,7 @@ int main(int argc, char* argv[]) DeviceManager_servant = 0; } - LOG_DEBUG(DeviceManager, "Farewell!") + RH_NL_DEBUG(logname, "Farewell!") ossie::corba::OrbShutdown(true); ossie::logging::Terminate(); diff --git a/redhawk/src/control/sdr/devmgr/spdSupport.cpp b/redhawk/src/control/sdr/devmgr/spdSupport.cpp index 61422c55d..645836e80 100644 --- a/redhawk/src/control/sdr/devmgr/spdSupport.cpp +++ b/redhawk/src/control/sdr/devmgr/spdSupport.cpp @@ -52,7 +52,7 @@ static void addProperty(const CF::DataType& dt, CF::Properties& prop) prop[index] = dt; } - +rh_logger::LoggerPtr ossie::SpdSupport::spdSupportLog; //////////////////////////////////////////////////// /* @@ -71,7 +71,7 @@ ImplementationInfo::ImplementationInfo(const SPD::Implementation& spdImpl) : osDeps(spdImpl.getOsDeps()), dependencyProperties() { - setLocalFileName(spdImpl.getCodeFile()); + setLocalFileName(spdImpl.getCodeFile().c_str()); setEntryPoint(spdImpl.getEntryPoint()); setCodeType(spdImpl.getCodeType()); setStackSize(spdImpl.code.stacksize.get()); @@ -79,11 +79,11 @@ ImplementationInfo::ImplementationInfo(const SPD::Implementation& spdImpl) : setPropertyFile(spdImpl.getPRFFile()); // Handle allocation property dependencies - LOG_TRACE(ImplementationInfo, "Loading component implementation property dependencies") - const std::vector& dependencies = spdImpl.getDependencies(); - std::vector::const_iterator ii; + RH_TRACE(ossie::SpdSupport::spdSupportLog, "Loading component implementation property dependencies") + const std::vector& dependencies = spdImpl.getDependencies(); + std::vector::const_iterator ii; for (ii = dependencies.begin(); ii != dependencies.end(); ++ii) { - LOG_TRACE(ImplementationInfo, "Loading component implementation property dependency '" << *ii); + RH_TRACE(ossie::SpdSupport::spdSupportLog, "Loading component implementation property dependency '" << *ii); addDependencyProperty(*ii); } } @@ -103,11 +103,11 @@ ImplementationInfo 
*ImplementationInfo::BuildImplementationInfo(CF::FileSystem_p std::auto_ptr impl(new ImplementationInfo(spdImpl)); // Handle allocation property dependencies - LOG_TRACE(ImplementationInfo, "Loading component implementation softpkg dependencies") + RH_TRACE(ossie::SpdSupport::spdSupportLog, "Loading component implementation softpkg dependencies") const std::vector& softpkgDependencies = spdImpl.getSoftPkgDependencies(); std::vector::const_iterator jj; for (jj = softpkgDependencies.begin(); jj != softpkgDependencies.end(); ++jj) { - LOG_TRACE(ImplementationInfo, "Loading component implementation softpkg dependency '" << *jj); + RH_TRACE(ossie::SpdSupport::spdSupportLog, "Loading component implementation softpkg dependency '" << *jj); std::auto_ptr softpkg(SoftpkgInfo::BuildSoftpkgInfo(depFileSys, jj->localfile.c_str(),depFileSys)); impl->addSoftPkgDependency(softpkg.release()); } @@ -187,25 +187,28 @@ const bool ImplementationInfo::hasPriority() const return _hasPriority; } -const std::vector& ImplementationInfo::getDependencyProperties() const +const std::vector& ImplementationInfo::getDependencyProperties() const { return dependencyProperties; } -void ImplementationInfo::setCodeType(const char* _type) +void ImplementationInfo::setCodeType(const SPD::Code::CodeType _type) { - std::string type(_type); - _codeType = type; - if (type == "KernelModule") { + switch (_type) { + case SPD::Code::KERNEL_MODULE: codeType = CF::LoadableDevice::KERNEL_MODULE; - } else if (type == "SharedLibrary") { + break; + case SPD::Code::SHARED_LIBRARY: codeType = CF::LoadableDevice::SHARED_LIBRARY; - } else if (type == "Executable") { + break; + case SPD::Code::EXECUTABLE: codeType = CF::LoadableDevice::EXECUTABLE; - } else if (type == "Driver") { + break; + case SPD::Code::DRIVER: codeType = CF::LoadableDevice::DRIVER; - } else { - LOG_WARN(ImplementationInfo, "Bad code type " << type); + break; + default: + RH_WARN(spdSupportLog, "Bad code type " << _type); } } @@ -248,7 +251,7 @@ void 
ImplementationInfo::setPriority(const unsigned long long* _priority) } } -void ImplementationInfo::addDependencyProperty(const SPD::PropertyRef& property) +void ImplementationInfo::addDependencyProperty(const PropertyRef& property) { dependencyProperties.push_back(property); } @@ -264,10 +267,10 @@ bool ImplementationInfo::checkProcessorAndOs(const Properties& _prf) const bool matchOs = checkOs(osDeps, _prf.getAllocationProperties()); if (!matchProcessor) { - LOG_DEBUG(ImplementationInfo, "Failed to match component processor to device allocation properties"); + RH_DEBUG(spdSupportLog, "Failed to match component processor to device allocation properties"); } if (!matchOs) { - LOG_DEBUG(ImplementationInfo, "Failed to match component os to device allocation properties"); + RH_DEBUG(spdSupportLog, "Failed to match component os to device allocation properties"); } return matchProcessor && matchOs; } @@ -299,7 +302,6 @@ SoftpkgInfo::~SoftpkgInfo() for (ImplementationInfo::List::iterator ii = _implementations.begin(); ii != _implementations.end(); ++ii) { delete *ii; } - } @@ -310,7 +312,7 @@ const char* SoftpkgInfo::getSpdFileName() const const char* SoftpkgInfo::getName() const { - return _name.c_str(); + return spd.getName().c_str(); } const char* SoftpkgInfo::getID() const @@ -322,7 +324,7 @@ SoftpkgInfo *SoftpkgInfo::BuildSoftpkgInfo(CF::FileSystem_ptr fileSys, const cha CF::FileSystem_ptr depFileSys ) { - LOG_TRACE(SoftpkgInfo, "Building soft package info from file " << spdFileName); + RH_TRACE(spdSupportLog, "Building soft package info from file " << spdFileName); std::auto_ptr softpkg(new SoftpkgInfo(spdFileName)); @@ -336,7 +338,7 @@ SoftpkgInfo *SoftpkgInfo::BuildSoftpkgInfo(CF::FileSystem_ptr fileSys, const cha bool SoftpkgInfo::parseProfile(CF::FileSystem_ptr fileSys, CF::FileSystem_ptr depFileSys ) { try { - LOG_TRACE(SoftpkgInfo, "Parsing SPD file: " << _spdFileName ); + RH_TRACE(spdSupportLog, "Parsing SPD file: " << _spdFileName ); File_stream 
spd_file(fileSys, _spdFileName.c_str()); spd.load(spd_file, _spdFileName.c_str()); spd_file.close(); @@ -360,16 +362,15 @@ bool SoftpkgInfo::parseProfile(CF::FileSystem_ptr fileSys, CF::FileSystem_ptr de } // Set name from the SPD - _name = spd.getSoftPkgName(); _identifier = spd.getSoftPkgID(); - LOG_DEBUG(SoftpkgInfo, "name/id " << _name << "/" << _identifier); + RH_DEBUG(spdSupportLog, "name/id " << spd.getName() << "/" << _identifier); // Extract implementation data from SPD file const std::vector & spd_i = spd.getImplementations(); for (unsigned int implCount = 0; implCount < spd_i.size(); implCount++) { const SPD::Implementation& spdImpl = spd_i[implCount]; - LOG_TRACE(SoftpkgInfo, "Adding implementation " << spdImpl.getID()); + RH_TRACE(spdSupportLog, "Adding implementation " << spdImpl.getID()); ImplementationInfo* newImpl = ImplementationInfo::BuildImplementationInfo(fileSys, spdImpl, depFileSys); addImplementation(newImpl); } @@ -421,7 +422,6 @@ ImplementationInfo *SoftpkgInfo::selectedImplementation() const PREPARE_CF_LOGGING(ProgramProfile); - std::auto_ptr ProgramProfile::LoadProgramProfile(CF::FileSystem_ptr fileSys, const char* spdFileName, CF::FileSystem_ptr depFileSys ) { @@ -433,85 +433,87 @@ ProgramProfile *ProgramProfile::LoadProfile(CF::FileSystem_ptr fileSys, const char* spdFileName, CF::FileSystem_ptr depFileSys ) { - LOG_TRACE(ProgramProfile, "Building component info from file " << spdFileName); + RH_TRACE(spdSupportLog, "Building component info from file " << spdFileName); std::auto_ptr newComponent(new ProgramProfile(spdFileName)); - if ( !newComponent->parseProfile(fileSys, depFileSys) ) { - return 0; - } + newComponent->load(fileSys, depFileSys ); + + return newComponent.release(); +} + +void ProgramProfile::load(CF::FileSystem_ptr fileSys, + CF::FileSystem_ptr depFileSys ) { + + RH_TRACE(spdSupportLog, "Building component info from file " << _spdFileName); + + parseProfile(fileSys, depFileSys); - if (newComponent->spd.getSCDFile() != 0) 
{ + if (spd.getSCDFile() != 0) { try { - File_stream _scd(fileSys, newComponent->spd.getSCDFile()); - newComponent->scd.load(_scd); + File_stream _scd(fileSys, spd.getSCDFile()); + scd.load(_scd); _scd.close(); } catch (ossie::parser_error& e) { std::string parser_error_line = ossie::retrieveParserErrorLineNumber(e.what()); std::ostringstream eout; - eout << "Building component info problem; error parsing SCD: " << newComponent->spd.getSCDFile() << ". " << parser_error_line << " The XML parser returned the following error: " << e.what(); - LOG_TRACE(ProgramProfile, eout.str()); + eout << "Building component info problem; error parsing SCD: " << spd.getSCDFile() << ". " << parser_error_line << " The XML parser returned the following error: " << e.what(); + RH_TRACE(spdSupportLog, eout.str()); throw std::runtime_error(eout.str().c_str()); } catch( ... ) { std::ostringstream eout; - eout << "Building component info problem; unknown error parsing SCD: " << newComponent->spd.getSCDFile(); - LOG_TRACE(ProgramProfile, eout.str()); + eout << "Building component info problem; unknown error parsing SCD: " << spd.getSCDFile(); + RH_TRACE(spdSupportLog, eout.str()); throw std::runtime_error(eout.str().c_str()); } } - if (newComponent->spd.getPRFFile() != 0) { - LOG_DEBUG(ProgramProfile, "Loading component properties from " << newComponent->spd.getPRFFile()); + if (spd.getPRFFile() != 0) { + RH_DEBUG(spdSupportLog, "Loading component properties from " << spd.getPRFFile()); try { - File_stream _prf(fileSys, newComponent->spd.getPRFFile()); - LOG_DEBUG(ProgramProfile, "Parsing component properties"); - newComponent->prf.load(_prf); - LOG_TRACE(ProgramProfile, "Closing PRF file") + File_stream _prf(fileSys, spd.getPRFFile()); + RH_DEBUG(spdSupportLog, "Parsing component properties"); + prf.load(_prf); + RH_TRACE(spdSupportLog, "Closing PRF file") _prf.close(); } catch (ossie::parser_error& e) { std::string parser_error_line = ossie::retrieveParserErrorLineNumber(e.what()); 
std::ostringstream eout; - eout << "Building component info problem; error parsing PRF: " << newComponent->spd.getPRFFile() << ". " << parser_error_line << " The XML parser returned the following error: " << e.what(); - LOG_TRACE(ProgramProfile, eout.str()); + eout << "Building component info problem; error parsing PRF: " << spd.getPRFFile() << ". " << parser_error_line << " The XML parser returned the following error: " << e.what(); + RH_TRACE(spdSupportLog, eout.str()); throw std::runtime_error(eout.str().c_str()); } catch( ... ) { std::ostringstream eout; - eout << "Building component info problem; unknown error parsing PRF: " << newComponent->spd.getPRFFile(); - LOG_TRACE(ProgramProfile, eout.str()); + eout << "Building component info problem; unknown error parsing PRF: " << spd.getPRFFile(); + RH_TRACE(spdSupportLog, eout.str()); throw std::runtime_error(eout.str().c_str()); } } - if (newComponent->spd.isScaNonCompliant()) { - newComponent->setIsScaCompliant(false); - } else { - newComponent->setIsScaCompliant(true); - } - // Extract Properties from the implementation-agnostic PRF file // once we match the component to a device we can grab the implementation // specific PRF file - if (newComponent->spd.getPRFFile() != 0) { + if (spd.getPRFFile() != 0) { // Handle component properties - LOG_TRACE(ProgramProfile, "Adding factory params") - const std::vector& fprop = newComponent->prf.getFactoryParamProperties(); + RH_TRACE(spdSupportLog, "Adding factory params") + const std::vector& fprop = prf.getFactoryParamProperties(); for (unsigned int i = 0; i < fprop.size(); i++) { - newComponent->addFactoryParameter(convertPropertyToDataType(fprop[i])); + addFactoryParameter(convertPropertyToDataType(fprop[i])); } - LOG_TRACE(ProgramProfile, "Adding exec params") - const std::vector& eprop = newComponent->prf.getExecParamProperties(); + RH_TRACE(spdSupportLog, "Adding exec params") + const std::vector& eprop = prf.getExecParamProperties(); for (unsigned int i = 0; i < 
eprop.size(); i++) { - if (std::string(eprop[i]->getMode()) != "readonly") { - LOG_TRACE(ProgramProfile, "Adding exec param " << eprop[i]->getID() << " " << eprop[i]->getName()); - newComponent->addExecParameter(convertPropertyToDataType(eprop[i])); + if (!eprop[i]->isReadOnly()) { + RH_TRACE(spdSupportLog, "Adding exec param " << eprop[i]->getID() << " " << eprop[i]->getName()); + addExecParameter(convertPropertyToDataType(eprop[i])); } else { if ( eprop[i]->isProperty() ) { - LOG_TRACE(ProgramProfile, "Adding exec param (readonly property) " << eprop[i]->getID() << " " << eprop[i]->getName()); - newComponent->addExecParameter(convertPropertyToDataType(eprop[i])); + RH_TRACE(spdSupportLog, "Adding exec param (readonly property) " << eprop[i]->getID() << " " << eprop[i]->getName()); + addExecParameter(convertPropertyToDataType(eprop[i])); } else { - LOG_TRACE(ProgramProfile, "Ignoring readonly exec param " << eprop[i]->getID() << " " << eprop[i]->getName()); + RH_TRACE(spdSupportLog, "Ignoring readonly exec param " << eprop[i]->getID() << " " << eprop[i]->getName()); } } } @@ -523,39 +525,38 @@ ProgramProfile *ProgramProfile::LoadProfile(CF::FileSystem_ptr fileSys, // element // prop = prf->getMatchingProperties(); //for (unsigned int i=0; i < prop->size(); i++) { - // newComponent->addAllocationCapacity((*prop)[i]->getDataType()); + // addAllocationCapacity((*prop)[i]->getDataType()); //} - const std::vector& prop = newComponent->prf.getConfigureProperties(); + const std::vector& prop = prf.getConfigureProperties(); for (unsigned int i = 0; i < prop.size(); i++) { if (!prop[i]->isReadOnly()) { - LOG_TRACE(ProgramProfile, "Adding configure prop " << prop[i]->getID() << " " << prop[i]->getName() << " " << prop[i]->isReadOnly()) - newComponent->addConfigureProperty(convertPropertyToDataType(prop[i])); + RH_TRACE(spdSupportLog, "Adding configure prop " << prop[i]->getID() << " " << prop[i]->getName() << " " << prop[i]->isReadOnly()) + 
addConfigureProperty(convertPropertyToDataType(prop[i])); } } - const std::vector& cprop = newComponent->prf.getConstructProperties(); + const std::vector& cprop = prf.getConstructProperties(); for (unsigned int i = 0; i < cprop.size(); i++) { - LOG_TRACE(ProgramProfile, "Adding construct prop " << cprop[i]->getID() << " " << cprop[i]->getName() << " " << cprop[i]->isReadOnly()); + RH_TRACE(spdSupportLog, "Adding construct prop " << cprop[i]->getID() << " " << cprop[i]->getName() << " " << cprop[i]->isReadOnly()); if (cprop[i]->isCommandLine()) { - LOG_TRACE(ProgramProfile, "Adding (cmdline) construct prop " << cprop[i]->getID() << " " << cprop[i]->getName() << " " << cprop[i]->isReadOnly()); - newComponent->addExecParameter(convertPropertyToDataType(cprop[i])); + RH_TRACE(spdSupportLog, "Adding (cmdline) construct prop " << cprop[i]->getID() << " " << cprop[i]->getName() << " " << cprop[i]->isReadOnly()); + addExecParameter(convertPropertyToDataType(cprop[i])); } else { - newComponent->addConstructProperty(convertPropertyToDataType(cprop[i])); + addConstructProperty(convertPropertyToDataType(cprop[i])); } } } - newComponent->fillAllSeqForStructProperty(); - LOG_TRACE(ProgramProfile, "Done building component info from file " << spdFileName); - return newComponent.release(); + fillAllSeqForStructProperty(); + RH_TRACE(spdSupportLog, "Done building component info from file " << _spdFileName); } + ProgramProfile::ProgramProfile(const std::string& spdFileName) : SoftpkgInfo(spdFileName), - _isAssemblyController(false), - _isScaCompliant(true) + _isAssemblyController(false) { nicAssignment=""; resolved_softpkg_dependencies.resize(0); @@ -583,7 +584,7 @@ std::vector ProgramProfile::getResolvedSoftPkgDependencies() { return this->resolved_softpkg_dependencies; } -void ProgramProfile::setIdentifier(const char* _identifier, std::string instance_id) +void ProgramProfile::setIdentifier(const std::string & _identifier, const std::string &instance_id) { identifier = 
_identifier; // Per the SCA spec, the identifier is the instantiation ID:waveform_name @@ -596,18 +597,14 @@ void ProgramProfile::setNamingService(const bool _isNamingService) isNamingService = _isNamingService; } -void ProgramProfile::setNamingServiceName(const char* _namingServiceName) +void ProgramProfile::setNamingServiceName(const std::string &_namingServiceName) { - if ( _namingServiceName != 0 ) { - namingServiceName = _namingServiceName; - } + namingServiceName = _namingServiceName; } -void ProgramProfile::setUsageName(const char* _usageName) +void ProgramProfile::setUsageName(const std::string & _usageName) { - if (_usageName != 0) { - usageName = _usageName; - } + usageName = _usageName; } void ProgramProfile::setIsAssemblyController(bool _isAssemblyController) @@ -615,12 +612,13 @@ void ProgramProfile::setIsAssemblyController(bool _isAssemblyController) this->_isAssemblyController = _isAssemblyController; } + void ProgramProfile::setIsScaCompliant(bool _isScaCompliant) { this->_isScaCompliant = _isScaCompliant; } -void ProgramProfile::setNicAssignment(std::string nic) { +void ProgramProfile::setNicAssignment(const std::string &nic) { nicAssignment = nic; }; @@ -678,12 +676,12 @@ void ProgramProfile::overrideProperty(const ossie::ComponentProperty& propref) { void ProgramProfile::overrideProperty(const ossie::ComponentProperty* propref) { std::string propId = propref->getID(); - LOG_TRACE(ProgramProfile, "Instantiation property id = " << propId) + RH_TRACE(spdSupportLog, "Instantiation property id = " << propId) const Property* prop = prf.getProperty(propId); // Without a prop, we don't know how to convert the strings to the property any type if (prop == NULL) { if ( propId != "LOGGING_CONFIG_URI" and propId != "LOG_LEVEL" ) { - LOG_WARN(ProgramProfile, "Ignoring attempt to override property " << propId << " Reason: Property ID not exist in component") + RH_WARN(spdSupportLog, "Ignoring attempt to override property " << propId << " Reason: Property ID not 
exist in component") return; } @@ -691,7 +689,7 @@ void ProgramProfile::overrideProperty(const ossie::ComponentProperty* propref) { // allow intrinstic properties to be command line if ( propId == "LOGGING_CONFIG_URI" or propId == "LOG_LEVEL" ) { - LOG_DEBUG(ProgramProfile, "Allowing LOGGING_CONFIG_URI and LOG_LEVEL to be passed to override"); + RH_DEBUG(spdSupportLog, "Allowing LOGGING_CONFIG_URI and LOG_LEVEL to be passed to override"); //if (propId == "LOG_LEVEL") propId = "DEBUG_LEVEL"; CF::DataType prop; prop.id = propId.c_str(); @@ -711,7 +709,7 @@ void ProgramProfile::overrideSimpleProperty(const char* id, const std::string &v const Property* prop = prf.getProperty(id); // Without a prop, we don't know how to convert the strings to the property any type if (prop == NULL) { - LOG_WARN(ProgramProfile, "Ignoring attempt to override property " << id << " Reason: Property ID does not exist in component"); + RH_WARN(spdSupportLog, "Ignoring attempt to override property " << id << " Reason: Property ID does not exist in component"); return; } @@ -721,7 +719,7 @@ void ProgramProfile::overrideSimpleProperty(const char* id, const std::string &v CORBA::Any val = ossie::string_to_any(value, type); overrideProperty(id, val); } else { - LOG_WARN(ProgramProfile, "attempt to override non-simple property with string value"); + RH_WARN(spdSupportLog, "attempt to override non-simple property with string value"); } } @@ -732,12 +730,12 @@ void ProgramProfile::overrideProperty(const char* id, const CORBA::Any& value) if (prop != NULL) { if (prop->isReadOnly()) { if ( !prop->isProperty()) { - LOG_WARN(ProgramProfile, "Ignoring attempt to override readonly property " << id); + RH_WARN(spdSupportLog, "Ignoring attempt to override readonly property " << id); } else { // allow read-only exec param properties if ( prop->isCommandLine()) { - LOG_TRACE(ProgramProfile, "overrideProperty (read-only command line ) id " << id << + RH_TRACE(spdSupportLog, "overrideProperty (read-only command 
line ) id " << id << " with value " << ossie::any_to_string(value)); process_overrides(&execParameters, id, value); } @@ -758,10 +756,10 @@ void ProgramProfile::overrideProperty(const char* id, const CORBA::Any& value) void ProgramProfile::process_overrides(CF::Properties* props, const char* id, const CORBA::Any &value) { - LOG_DEBUG(ProgramProfile, "Attempting to override property " << id); + RH_DEBUG(spdSupportLog, "Attempting to override property " << id); for (unsigned int i = 0; i < (*props).length(); ++i ) { if (strcmp(id, (*props)[i].id) == 0) { - LOG_DEBUG(ProgramProfile, "Overriding property " << id << " with value " << ossie::any_to_string(value)); + RH_DEBUG(spdSupportLog, "Overriding property " << id << " with value " << ossie::any_to_string(value)); (*props)[i].value = value; } } @@ -818,7 +816,7 @@ const bool ProgramProfile::isAssemblyController() const bool ProgramProfile::isScaCompliant() { - return _isScaCompliant; + return spd.isScaCompliant(); } @@ -850,20 +848,20 @@ void ProgramProfile::fillSeqForStructProperty(CF::Properties &props) { for (redhawk::PropertyMap::iterator structIter = structProps.begin(); structIter != structProps.end(); ++structIter) { if (structProps[ossie::corba::returnString(structIter->id)].isNil()) { // is the nil value for a sequence? - for (std::vector::const_iterator internal_iter = tmp_struct->getValue().begin(); internal_iter != tmp_struct->getValue().end(); ++internal_iter) { - std::string _inner_id((*internal_iter)->getID()); + for (ossie::PropertyList::const_iterator internal_iter = tmp_struct->getValue().begin(); internal_iter != tmp_struct->getValue().end(); ++internal_iter) { + std::string _inner_id(internal_iter->getID()); if (_inner_id == ossie::corba::returnString(structIter->id)) { - if (dynamic_cast(*internal_iter)) { + if (dynamic_cast(&(*internal_iter)) != NULL) { nilSeq = true; } } } } else { // is the non-nil value for a simple? 
- for (std::vector::const_iterator internal_iter = tmp_struct->getValue().begin(); internal_iter != tmp_struct->getValue().end(); ++internal_iter) { - std::string _inner_id((*internal_iter)->getID()); + for (ossie::PropertyList::const_iterator internal_iter = tmp_struct->getValue().begin(); internal_iter != tmp_struct->getValue().end(); ++internal_iter) { + std::string _inner_id(internal_iter->getID()); if (_inner_id == ossie::corba::returnString(structIter->id)) { - if (dynamic_cast(*internal_iter)) { + if (dynamic_cast(&(*internal_iter)) != NULL) { nonNilVal = true; } } @@ -875,12 +873,13 @@ void ProgramProfile::fillSeqForStructProperty(CF::Properties &props) { for (redhawk::PropertyMap::iterator structIter = structProps.begin(); structIter != structProps.end(); ++structIter) { if (structProps[ossie::corba::returnString(structIter->id)].isNil()) { // is the nil value for a sequence? - for (std::vector::const_iterator internal_iter = tmp_struct->getValue().begin(); internal_iter != tmp_struct->getValue().end(); ++internal_iter) { - std::string _inner_id((*internal_iter)->getID()); + for (ossie::PropertyList::const_iterator internal_iter = tmp_struct->getValue().begin(); internal_iter != tmp_struct->getValue().end(); ++internal_iter) { + std::string _inner_id(internal_iter->getID()); if (_inner_id == ossie::corba::returnString(structIter->id)) { - const ossie::SimpleSequenceProperty* _type = dynamic_cast(*internal_iter); + const ossie::SimpleSequenceProperty* _type = dynamic_cast(&(*internal_iter)); std::vector empty_string_vector; - structProps[ossie::corba::returnString(structIter->id)] = ossie::strings_to_any(empty_string_vector, ossie::getTypeKind(_type->getType())); + CORBA::TypeCode_ptr _typecode = ossie::getTypeCode(static_cast(_type->getType())); + structProps[ossie::corba::returnString(structIter->id)] = ossie::strings_to_any(empty_string_vector, ossie::getTypeKind(_type->getType()), _typecode); } } } @@ -905,10 +904,10 @@ bool 
ProgramProfile::checkStruct(CF::Properties &props) for (std::vector::const_iterator prf_iter = prf.getProperties().begin(); prf_iter != prf.getProperties().end(); ++prf_iter) { const ossie::StructProperty* tmp_struct = dynamic_cast(*prf_iter); if (tmp_struct) { - for (std::vector::const_iterator internal_iter = tmp_struct->getValue().begin(); internal_iter != tmp_struct->getValue().end(); ++internal_iter) { - std::string _id((*internal_iter)->getID()); + for (ossie::PropertyList::const_iterator internal_iter = tmp_struct->getValue().begin(); internal_iter != tmp_struct->getValue().end(); ++internal_iter) { + std::string _id(internal_iter->getID()); if (_id == ossie::corba::returnString(tmpP->id)) { - if (dynamic_cast(*internal_iter)) { + if (dynamic_cast(&(*internal_iter))) { foundSeq = true; break; } diff --git a/redhawk/src/control/sdr/devmgr/spdSupport.h b/redhawk/src/control/sdr/devmgr/spdSupport.h index 27177281c..f7d1ecd51 100644 --- a/redhawk/src/control/sdr/devmgr/spdSupport.h +++ b/redhawk/src/control/sdr/devmgr/spdSupport.h @@ -48,6 +48,8 @@ namespace ossie namespace SpdSupport { + extern rh_logger::LoggerPtr spdSupportLog; + class ResourceInfo; class SoftpkgInfo; class ImplementationInfo; @@ -98,7 +100,7 @@ namespace ossie const CORBA::ULong getPriority() const; const bool hasStackSize() const; const bool hasPriority() const; - const std::vector& getDependencyProperties() const; + const std::vector& getDependencyProperties() const; const SoftpkgInfoList & getSoftPkgDependencies() const; bool checkProcessorAndOs(const ossie::Properties& prf) const; @@ -113,11 +115,11 @@ namespace ossie ImplementationInfo ( const ImplementationInfo&); void setLocalFileName(const char* fileName); void setEntryPoint(const char* fileName); - void setCodeType(const char* _type); + void setCodeType(ossie::SPD::Code::CodeType _type); void setPropertyFile(const char* filename); void setStackSize(const unsigned long long *_stackSize); void setPriority(const unsigned long long 
*_priority); - void addDependencyProperty(const ossie::SPD::PropertyRef& property); + void addDependencyProperty(const ossie::PropertyRef& property); void addSoftPkgDependency(SoftpkgInfo *softpkg); std::string id; @@ -132,7 +134,7 @@ namespace ossie bool _hasPriority; std::vector processorDeps; std::vector osDeps; - std::vector dependencyProperties; + std::vector dependencyProperties; SoftpkgInfoList softPkgDependencies; }; @@ -171,7 +173,6 @@ namespace ossie bool parseProfile (CF::FileSystem_ptr fileSys, CF::FileSystem_ptr depFileSys ); const std::string _spdFileName; - std::string _name; // name from SPD File std::string _identifier; // identifier from SPD File ImplementationInfo::List _implementations; @@ -193,13 +194,13 @@ namespace ossie ProgramProfile (const std::string& spdFileName); ~ProgramProfile (); - void setIdentifier(const char* identifier, std::string instance_id); + void setIdentifier(const std::string &identifier, const std::string &instance_id); void setNamingService(const bool isNamingService); - void setNamingServiceName(const char* NamingServiceName); - void setUsageName(const char* usageName); + void setNamingServiceName(const std::string &NamingServiceName); + void setUsageName(const std::string &usageName); void setIsAssemblyController(bool isAssemblyController); void setIsScaCompliant(bool isScaCompliant); - void setNicAssignment(std::string nic); + void setNicAssignment(const std::string &nic); void setAffinity( const AffinityProperties &affinity ); void mergeAffinityOptions( const CF::Properties &new_affinity ); void setLoggingConfig( const LoggingConfig &logcfg ); @@ -247,6 +248,9 @@ namespace ossie CF::Properties getExecParameters(); CF::Properties getPopulatedExecParameters(); + void load(CF::FileSystem_ptr fileSystem, + CF::FileSystem_ptr depFileSys ); + ComponentDescriptor scd; ossie::Properties prf; diff --git a/redhawk/src/control/sdr/dommgr/AllocationManager_impl.cpp b/redhawk/src/control/sdr/dommgr/AllocationManager_impl.cpp 
index 7a37586e7..dd1059d63 100644 --- a/redhawk/src/control/sdr/dommgr/AllocationManager_impl.cpp +++ b/redhawk/src/control/sdr/dommgr/AllocationManager_impl.cpp @@ -23,12 +23,16 @@ #include #include +#include +#include +#include +#include +#include +#include + +#include "DomainManager_impl.h" #include "AllocationManager_impl.h" -#include "ossie/debug.h" -#include "ossie/CorbaUtils.h" -#include "ossie/ossieSupport.h" -#include "ossie/CorbaIterator.h" typedef ossie::corba::Iterator_domainManager->updateRemoteAllocations(this->_remoteAllocations); } - TRACE_EXIT(AllocationManager_impl) return result._retn(); } /* Allocates a set of dependencies only inside the local Domain */ CF::AllocationManager::AllocationResponseSequence* AllocationManager_impl::allocateLocal(const CF::AllocationManager::AllocationRequestSequence &requests, const char* domainName) throw (CF::AllocationManager::AllocationError) { - TRACE_ENTER(AllocationManager_impl); - CF::AllocationManager::AllocationResponseSequence* results; if (requests.length() > 0) { ossie::DeviceList registeredDevices = this->_domainManager->getRegisteredDevices(); @@ -162,23 +162,22 @@ CF::AllocationManager::AllocationResponseSequence* AllocationManager_impl::alloc results = new CF::AllocationManager::AllocationResponseSequence(); } - TRACE_EXIT(AllocationManager_impl); return results; } CF::AllocationManager::AllocationResponseSequence* AllocationManager_impl::allocateDevices(const CF::AllocationManager::AllocationRequestSequence &requests, ossie::DeviceList& devices, const std::string& domainName) { - LOG_TRACE(AllocationManager_impl, "Servicing " << requests.length() << " allocation request(s)"); + RH_TRACE(_allocMgrLog, "Servicing " << requests.length() << " allocation request(s)"); CF::AllocationManager::AllocationResponseSequence_var response = new CF::AllocationManager::AllocationResponseSequence(); typedef std::list LocalAllocationList; LocalAllocationList local_allocations; - + for (unsigned int request_idx=0; 
request_idx result = allocateRequest(requestID, request.allocationProperties, requestedDevices, sourceID, std::vector(), std::vector(), domainName); + std::pair result = allocateRequest(requestID, + request.allocationProperties, + requestedDevices, + sourceID, std::vector(), + std::vector(), + domainName); if (result.first) { local_allocations.push_back(result.first); ossie::AllocationType* allocation(result.first); @@ -226,12 +230,18 @@ CF::AllocationManager::AllocationResponseSequence* AllocationManager_impl::alloc return response._retn(); } -std::pair AllocationManager_impl::allocateRequest(const std::string& requestID, const CF::Properties& dependencyProperties, ossie::DeviceList& devices, const std::string& sourceID, const std::vector& processorDeps, const std::vector& osDeps, const std::string& domainName) +std::pair AllocationManager_impl::allocateRequest(const std::string& requestID, + const CF::Properties& dependencyProperties, + ossie::DeviceList& devices, const std::string& sourceID, + const std::vector& processorDeps, + const std::vector& osDeps, + const std::string& domainName, + const CF::Properties &deviceRequires ) { for (ossie::DeviceList::iterator iter = devices.begin(); iter != devices.end(); ++iter) { boost::shared_ptr node = *iter; CF::Properties allocatedProperties; - if (allocateDevice(dependencyProperties, *node, allocatedProperties, processorDeps, osDeps)) { + if (allocateDevice(dependencyProperties, *node, allocatedProperties, processorDeps, osDeps, deviceRequires)) { ossie::AllocationType* allocation = new ossie::AllocationType(); allocation->allocationID = ossie::generateUUID(); allocation->sourceID = sourceID; @@ -245,10 +255,16 @@ std::pair AllocationManager_ return std::make_pair((ossie::AllocationType*)0, devices.end()); } -ossie::AllocationResult AllocationManager_impl::allocateDeployment(const std::string& requestID, const CF::Properties& allocationProperties, ossie::DeviceList& devices, const std::string& sourceID, const 
std::vector& processorDeps, const std::vector& osDeps) +ossie::AllocationResult AllocationManager_impl::allocateDeployment(const std::string& requestID, + const CF::Properties& allocationProperties, + ossie::DeviceList& devices, + const std::string& sourceID, + const std::vector& processorDeps, + const std::vector& osDeps, + const CF::Properties& deviceRequires ) { const std::string domainName = this->_domainManager->getDomainManagerName(); - std::pair result = allocateRequest(requestID, allocationProperties, devices, sourceID, processorDeps, osDeps, domainName); + std::pair result = allocateRequest(requestID, allocationProperties, devices, sourceID, processorDeps, osDeps, domainName, deviceRequires); if (result.first) { // Update the allocation table, including the persistence store const std::string allocationID = result.first->allocationID; @@ -277,10 +293,15 @@ bool AllocationManager_impl::hasListenerAllocation(const CF::Properties& request return false; } -bool AllocationManager_impl::allocateDevice(const CF::Properties& requestedProperties, ossie::DeviceNode& node, CF::Properties& allocatedProperties, const std::vector& processorDeps, const std::vector& osDeps) +bool AllocationManager_impl::allocateDevice(const CF::Properties& requestedProperties, + ossie::DeviceNode& node, + CF::Properties& allocatedProperties, + const std::vector& processorDeps, + const std::vector& osDeps, + const CF::Properties& devicerequires) { if (!ossie::corba::objectExists(node.device)) { - LOG_WARN(AllocationManager_impl, "Not using device for uses_device allocation " << node.identifier << " because it no longer exists"); + RH_WARN(_allocMgrLog, "Not using device for uses_device allocation " << node.identifier << " because it no longer exists"); return false; } try { @@ -289,23 +310,30 @@ bool AllocationManager_impl::allocateDevice(const CF::Properties& requestedPrope } } catch ( ... 
) { // bad device reference or device in an unusable state - LOG_WARN(AllocationManager_impl, "Unable to verify state of device " << node.identifier); + RH_WARN(_allocMgrLog, "Unable to verify state of device " << node.identifier); return false; } - LOG_TRACE(AllocationManager_impl, "Allocating against device " << node.identifier); + RH_TRACE(_allocMgrLog, "Allocating against device " << node.identifier); // Determine whether or not the device in question has the required matching properties CF::Properties allocProps; if (!checkDeviceMatching(node.prf, allocProps, requestedProperties, processorDeps, osDeps)) { - LOG_TRACE(AllocationManager_impl, "Matching failed"); + RH_TRACE(_allocMgrLog, "Matching failed"); + return false; + } + + RH_DEBUG(_allocMgrLog, "allocateDevice::PartitionMatching " << node.requiresProps ); + const redhawk::PropertyMap &devReqs = redhawk::PropertyMap::cast(devicerequires); + if ( !checkPartitionMatching( node, devReqs )) { + RH_TRACE(_allocMgrLog, "Partition Matching failed"); return false; } // If there are no external properties to allocate, the allocation is // already successful if (allocProps.length() == 0) { - LOG_TRACE(AllocationManager_impl, "Allocation requires no capacity from device"); + RH_TRACE(_allocMgrLog, "Allocation requires no capacity from device"); return true; } @@ -313,24 +341,24 @@ bool AllocationManager_impl::allocateDevice(const CF::Properties& requestedPrope std::vector allocations; partitionProperties(allocProps, allocations); - LOG_TRACE(AllocationManager_impl, "Allocating " << allocProps.length() << " properties (" + RH_TRACE(_allocMgrLog, "Allocating " << allocProps.length() << " properties (" << allocations.size() << " calls)"); try { if (!this->completeAllocations(node.device, allocations)) { - LOG_TRACE(AllocationManager_impl, "Device lacks sufficient capacity"); + RH_TRACE(_allocMgrLog, "Device lacks sufficient capacity"); return false; } } catch (const CF::Device::InvalidCapacity& e) { - 
LOG_TRACE(AllocationManager_impl, "Device reported invalid capacity"); + RH_TRACE(_allocMgrLog, "Device reported invalid capacity"); return false; } catch (const CF::Device::InsufficientCapacity& e) { - LOG_TRACE(AllocationManager_impl, "Device reported insufficient capacity"); + RH_TRACE(_allocMgrLog, "Device reported insufficient capacity"); return false; } // Transfer ownership of the allocated properties to the caller ossie::corba::move(allocatedProperties, allocProps); - LOG_TRACE(AllocationManager_impl, "Allocation successful"); + RH_TRACE(_allocMgrLog, "Allocation successful"); return true; } @@ -364,7 +392,7 @@ bool AllocationManager_impl::completeAllocations(CF::Device_ptr device, const st // Allocation succeeded, try next continue; } - } CATCH_LOG_WARN(AllocationManager_impl, "Device allocation raised an exception"); + } CATCH_RH_WARN(_allocMgrLog, "Device allocation raised an exception"); // An allocation failed; backtrack and deallocate any prior successes bool warned = false; @@ -376,7 +404,7 @@ bool AllocationManager_impl::completeAllocations(CF::Device_ptr device, const st if (!warned) { // If a symmetric deallocateCapacity failes, the device is // probably in a bad state; only warn about it once - LOG_WARN(AllocationManager_impl, "Device deallocation on cleanup raised an exception"); + RH_WARN(_allocMgrLog, "Device deallocation on cleanup raised an exception"); warned = true; } } @@ -391,7 +419,7 @@ bool AllocationManager_impl::checkMatchingProperty(const ossie::Property* proper // Only attempt matching for simple properties const ossie::SimpleProperty* simpleProp = dynamic_cast(property); if (!simpleProp) { - LOG_ERROR(AllocationManager_impl, "Invalid action '" << property->getAction() + RH_ERROR(_allocMgrLog, "Invalid action '" << property->getAction() << "' for non-simple property " << property->getID()); return false; } @@ -403,7 +431,7 @@ bool AllocationManager_impl::checkMatchingProperty(const ossie::Property* proper const CORBA::Any depValue 
= ossie::convertAnyToPropertyType(dependency.value, simpleProp); std::string action = simpleProp->getAction(); - LOG_TRACE(AllocationManager_impl, "Matching " << simpleProp->getID() << " '" << simpleProp->getValue() + RH_TRACE(_allocMgrLog, "Matching " << simpleProp->getID() << " '" << simpleProp->getValue() << "' " << action << " '" << ossie::any_to_string(dependency.value) << "'"); // Per section D.4.1.1.7 the allocation property is on the left side of the action @@ -416,20 +444,20 @@ bool AllocationManager_impl::checkDeviceMatching(ossie::Properties& prf, CF::Pro // Check for a matching processor, which only happens in deployment if (!processorDeps.empty()) { if (!ossie::checkProcessor(processorDeps, prf.getAllocationProperties())) { - LOG_TRACE(AllocationManager_impl, "Device did not match requested processor"); + RH_TRACE(_allocMgrLog, "Device did not match requested processor"); return false; } else { - LOG_TRACE(AllocationManager_impl, "Matched processor name"); + RH_TRACE(_allocMgrLog, "Matched processor name"); } } // Likewise, check for OS name/version if (!osDeps.empty()) { if (!ossie::checkOs(osDeps, prf.getAllocationProperties())) { - LOG_TRACE(AllocationManager_impl, "Device did not match requested OS name/version"); + RH_TRACE(_allocMgrLog, "Device did not match requested OS name/version"); return false; } else { - LOG_TRACE(AllocationManager_impl, "Matched OS name/version"); + RH_TRACE(_allocMgrLog, "Matched OS name/version"); } } @@ -441,12 +469,12 @@ bool AllocationManager_impl::checkDeviceMatching(ossie::Properties& prf, CF::Pro const ossie::Property* property = prf.getAllocationProperty(propId); if (!property) { - LOG_TRACE(AllocationManager_impl, "Device has no property " << propId); + RH_TRACE(_allocMgrLog, "Device has no property " << propId); return false; } else if (property->isExternal()) { // Collect properties with an action of "external" for a later // allocateCapacity() call - LOG_TRACE(AllocationManager_impl, "Adding external property 
" << propId); + RH_TRACE(_allocMgrLog, "Adding external property " << propId); ossie::corba::push_back(externalProperties, ossie::convertDataTypeToPropertyType(dependency, property)); } else { // Evaluate matching properties right now @@ -458,24 +486,78 @@ bool AllocationManager_impl::checkDeviceMatching(ossie::Properties& prf, CF::Pro } } - LOG_TRACE(AllocationManager_impl, "Matched " << matches << " properties"); + RH_TRACE(_allocMgrLog, "Matched " << matches << " properties"); return true; } + +bool AllocationManager_impl::checkPartitionMatching( ossie::DeviceNode& node, + const redhawk::PropertyMap& devicerequires ) +{ + // + // perform matching of a device's deployrequires property set against a componentplacment's devicerequires list + // + + // Check if the device has a required property set for deployment + if ( node.requiresProps.size() == 0 and devicerequires.size() == 0 ) { + RH_TRACE(_allocMgrLog, "Device: " << node.label << " has no required properties to filter deployments against."); + return true; + } + + // Check if the device has a required property set for deployment + if ( devicerequires.size() == 0 and node.requiresProps.size() > 0 ) { + RH_TRACE(_allocMgrLog, "Device: " << node.label << " has required properties for deployment, component does not provide any properties."); + return false; + } + + // Check if the component provides a property set for deployment + if ( devicerequires.size() > 0 and node.requiresProps.size() == 0 ) { + RH_TRACE(_allocMgrLog, "Device: " << node.label << " has no required properties for deployment, component's contains deviicerequires properties."); + return false; + } + + if ( node.requiresProps.size() != devicerequires.length()) { + RH_TRACE(_allocMgrLog, "Device: " << node.label << " has required properties for deployment, number of properties does not match."); + return false; + } + + + const redhawk::PropertyMap &provided_props = redhawk::PropertyMap::cast( devicerequires ); + redhawk::PropertyMap::iterator 
iter = node.requiresProps.begin(); + for ( ; iter != node.requiresProps.end(); ++iter) { + std::string pid(iter->getId()); + RH_TRACE(_allocMgrLog, "checkPartitionMatching source device requires: " << pid ); + redhawk::PropertyMap::const_iterator provided_prop = provided_props.find( pid ); + if ( provided_prop == provided_props.end() ) { + RH_INFO(_allocMgrLog, "Device: " << node.label << ", Missing REQUIRES property: " << pid << " from component for deployment"); + return false; + } + + // Convert the input Any to the property's data type via string; if it came + // from the ApplicationFactory, it's already a string, but a remote request + // could be of any type + std::string action("eq"); + if ( !ossie::compare_anys(iter->getValue(), provided_prop->getValue(), action) ) { + return false; + } + } + + RH_TRACE(_allocMgrLog, "checkPartitionMatch PASSED for device: " << node.label ); + return true; +} + + /* Deallocates a set of allocations */ void AllocationManager_impl::deallocate(const CF::AllocationManager::allocationIDSequence &allocationIDs) throw (CF::AllocationManager::InvalidAllocationId) { - TRACE_ENTER(AllocationManager_impl); if (allocationIDs.length() > 0) { deallocate(allocationIDs.get_buffer(), allocationIDs.get_buffer() + allocationIDs.length()); } - TRACE_EXIT(AllocationManager_impl); } /* Returns all current allocations on all Domains */ CF::AllocationManager::AllocationStatusSequence* AllocationManager_impl::allocations(const CF::AllocationManager::allocationIDSequence &allocationIDs) throw (CF::AllocationManager::InvalidAllocationId) { - TRACE_ENTER(AllocationManager_impl) boost::recursive_mutex::scoped_lock lock(allocationAccess); CF::AllocationManager::AllocationStatusSequence_var result = new CF::AllocationManager::AllocationStatusSequence(); @@ -522,15 +604,13 @@ CF::AllocationManager::AllocationStatusSequence* AllocationManager_impl::allocat throw CF::AllocationManager::InvalidAllocationId(invalid_ids); } } - - 
TRACE_EXIT(AllocationManager_impl) + return result._retn(); } /* Returns all current allocations that were made through the Allocation Manager that have not been deallocated */ CF::AllocationManager::AllocationStatusSequence* AllocationManager_impl::localAllocations(const CF::AllocationManager::allocationIDSequence &allocationIDs) throw (CF::AllocationManager::InvalidAllocationId) { - TRACE_ENTER(AllocationManager_impl) boost::recursive_mutex::scoped_lock lock(allocationAccess); CF::AllocationManager::AllocationStatusSequence_var result = new CF::AllocationManager::AllocationStatusSequence(); @@ -562,8 +642,7 @@ CF::AllocationManager::AllocationStatusSequence* AllocationManager_impl::localAl throw CF::AllocationManager::InvalidAllocationId(invalid_ids); } } - - TRACE_EXIT(AllocationManager_impl) + return result._retn(); } @@ -603,7 +682,6 @@ void AllocationManager_impl::listAllocations(CF::AllocationManager::AllocationSc /* Returns all devices in all Domains that can be seen by any Allocation Manager seen by the local Allocation Manager */ CF::AllocationManager::DeviceLocationSequence* AllocationManager_impl::allDevices() { - TRACE_ENTER(AllocationManager_impl) boost::recursive_mutex::scoped_lock lock(allocationAccess); // Start with local devices @@ -615,33 +693,29 @@ CF::AllocationManager::DeviceLocationSequence* AllocationManager_impl::allDevice for (ossie::DomainManagerList::const_iterator start = remoteDomains.begin(); start != end; ++start) { CF::AllocationManager_var allocationMgr = start->domainManager->allocationMgr(); CF::AllocationManager::DeviceLocationSequence_var remoteDevices = allocationMgr->localDevices(); - LOG_TRACE(AllocationManager_impl, "Adding " << remoteDevices->length() << " device(s) from domain '" + RH_TRACE(_allocMgrLog, "Adding " << remoteDevices->length() << " device(s) from domain '" << ossie::corba::returnString(start->domainManager->name()) << "' to list"); ossie::corba::extend(result, remoteDevices); } - 
LOG_TRACE(AllocationManager_impl, result->length() << " total device(s)"); - - TRACE_EXIT(AllocationManager_impl) + RH_TRACE(_allocMgrLog, result->length() << " total device(s)"); + return result._retn(); } /* Returns all devices after policy is applied by any Allocation Manager seen by the local Allocation Manager */ CF::AllocationManager::DeviceLocationSequence* AllocationManager_impl::authorizedDevices() { - TRACE_ENTER(AllocationManager_impl) boost::recursive_mutex::scoped_lock lock(allocationAccess); // Default implementation has no policy engine; return all local devices CF::AllocationManager::DeviceLocationSequence_var result = localDevices(); - - TRACE_EXIT(AllocationManager_impl) + return result._retn(); } /* Returns all devices that are located within the local Domain */ CF::AllocationManager::DeviceLocationSequence* AllocationManager_impl::localDevices() { - TRACE_ENTER(AllocationManager_impl) boost::recursive_mutex::scoped_lock lock(allocationAccess); // Get a point-in-time copy of the domain's devices @@ -659,18 +733,14 @@ CF::AllocationManager::DeviceLocationSequence* AllocationManager_impl::localDevi result[ii].devMgr = CF::DeviceManager::_duplicate((*start)->devMgr.deviceManager); result[ii].dev = CF::Device::_duplicate((*start)->device); } - LOG_TRACE(AllocationManager_impl, result->length() << " local device(s)"); + RH_TRACE(_allocMgrLog, result->length() << " local device(s)"); - TRACE_EXIT(AllocationManager_impl) return result._retn(); } /* Returns a link to the local Domain */ CF::DomainManager_ptr AllocationManager_impl::domainMgr() { - TRACE_ENTER(AllocationManager_impl); - - TRACE_EXIT(AllocationManager_impl); return _domainManager->_this(); } @@ -696,7 +766,7 @@ void AllocationManager_impl::restoreAllocations (ossie::AllocationTable &ref_all for (; start != end; ++start) { // Contact the remote AllocationManager to get the full state for each // allocation - LOG_TRACE(AllocationManager_impl, "Restoring allocation '" << start->first << "'"); 
+ RH_TRACE(_allocMgrLog, "Restoring allocation '" << start->first << "'"); CF::AllocationManager::allocationIDSequence alloc_ids; alloc_ids.length(1); alloc_ids[0] = start->first.c_str(); @@ -704,7 +774,7 @@ void AllocationManager_impl::restoreAllocations (ossie::AllocationTable &ref_all try { result = start->second->localAllocations(alloc_ids); } catch (const CORBA::Exception& ex) { - LOG_ERROR(AllocationManager_impl, "Unable to restore allocation '" << start->first << "': CORBA::" + RH_ERROR(_allocMgrLog, "Unable to restore allocation '" << start->first << "': CORBA::" << ex._name()); continue; } @@ -746,10 +816,10 @@ bool AllocationManager_impl::deallocateLocal(const std::string& allocationID) const ossie::AllocationType& localAlloc = alloc->second; std::vector allocations; partitionProperties(localAlloc.allocationProperties, allocations); - LOG_TRACE(AllocationManager_impl, "Deallocating " << localAlloc.allocationProperties.length() + RH_TRACE(_allocMgrLog, "Deallocating " << localAlloc.allocationProperties.length() << " properties (" << allocations.size() << " calls) for local allocation " << allocationID); if (!ossie::corba::objectExists(localAlloc.allocatedDevice)) { - LOG_WARN(AllocationManager_impl, "Not deallocating capacity a device because it no longer exists"); + RH_WARN(_allocMgrLog, "Not deallocating capacity a device because it no longer exists"); } else { bool warned = false; for (size_t index = 0; index < allocations.size(); ++index) { @@ -760,7 +830,7 @@ bool AllocationManager_impl::deallocateLocal(const std::string& allocationID) if (!warned) { // If a symmetric deallocateCapacity failes, the device is // probably in a bad state; only warn about it once - LOG_WARN(AllocationManager_impl, "Deallocation raised an exception"); + RH_WARN(_allocMgrLog, "Deallocation raised an exception"); warned = true; } } @@ -777,7 +847,7 @@ bool AllocationManager_impl::deallocateRemote(const std::string& allocationID) return false; } - 
LOG_TRACE(AllocationManager_impl, "Deallocating remote allocation " << allocationID); + RH_TRACE(_allocMgrLog, "Deallocating remote allocation " << allocationID); CF::AllocationManager::allocationIDSequence allocations; allocations.length(1); allocations[0] = allocationID.c_str(); @@ -789,7 +859,7 @@ bool AllocationManager_impl::deallocateRemote(const std::string& allocationID) } catch (...) { // Some other failure occurred; remove the allocation from the table // and continue - LOG_WARN(AllocationManager_impl, "Remote deallocation " << allocationID << " failed"); + RH_WARN(_allocMgrLog, "Remote deallocation " << allocationID << " failed"); } this->_remoteAllocations.erase(alloc); return true; diff --git a/redhawk/src/control/sdr/dommgr/AllocationManager_impl.h b/redhawk/src/control/sdr/dommgr/AllocationManager_impl.h index 269a8c50a..bb59602ae 100644 --- a/redhawk/src/control/sdr/dommgr/AllocationManager_impl.h +++ b/redhawk/src/control/sdr/dommgr/AllocationManager_impl.h @@ -28,10 +28,8 @@ #include #include -#include -#include -#include -#include "DomainManager_impl.h" + +class DomainManager_impl; class AllocationManager_impl: public virtual POA_CF::AllocationManager { @@ -74,7 +72,13 @@ class AllocationManager_impl: public virtual POA_CF::AllocationManager CF::DomainManager_ptr domainMgr(); /* Allocates a set of dependencies for deployment; not part of the CORBA API */ - ossie::AllocationResult allocateDeployment(const std::string& requestID, const CF::Properties& allocationProperties, ossie::DeviceList& devices, const std::string& sourceID, const std::vector& processorDeps, const std::vector& osDeps); + ossie::AllocationResult allocateDeployment(const std::string& requestID, + const CF::Properties& allocationProperties, + ossie::DeviceList& devices, + const std::string& sourceID, + const std::vector& processorDeps, + const std::vector& osDeps, + const CF::Properties& deviceRequires); /* Deallocates a set of allocations */ template @@ -107,16 +111,37 @@ class 
AllocationManager_impl: public virtual POA_CF::AllocationManager void restoreAllocations(ossie::AllocationTable& ref_allocations, std::map &ref_remoteAllocations); + void setLogger(rh_logger::LoggerPtr logptr) { + _allocMgrLog = logptr; + }; + private: CF::AllocationManager::AllocationResponseSequence* allocateDevices(const CF::AllocationManager::AllocationRequestSequence &requests, ossie::DeviceList& devices, const std::string& domainName); - std::pair allocateRequest(const std::string& requestID, const CF::Properties& allocationProperties, ossie::DeviceList& devices, const std::string& sourceID, const std::vector& processorDeps, const std::vector& osDeps, const std::string& domainName); + std::pair allocateRequest(const std::string& requestID, + const CF::Properties& allocationProperties, + ossie::DeviceList& devices, + const std::string& sourceID, + const std::vector& processorDeps, + const std::vector& osDeps, + const std::string& domainName, + const CF::Properties& deviceRequires = CF::Properties() ); bool checkDeviceMatching(ossie::Properties& _prf, CF::Properties& externalProps, const CF::Properties& dependencyPropertiesFromComponent, const std::vector& processorDeps, const std::vector& osDeps); bool checkMatchingProperty(const ossie::Property* property, const CF::DataType& dependency); + bool checkPartitionMatching( ossie::DeviceNode& node, + const redhawk::PropertyMap& devicerequires ); + + redhawk::PropertyMap getDeviceRequiredProperties( ossie::DeviceNode& node ); - bool allocateDevice(const CF::Properties& requestedProperties, ossie::DeviceNode& device, CF::Properties& allocatedProperties, const std::vector& processorDeps, const std::vector& osDeps); + + bool allocateDevice(const CF::Properties& requestedProperties, + ossie::DeviceNode& device, + CF::Properties& allocatedProperties, + const std::vector& processorDeps, + const std::vector& osDeps, + const CF::Properties& deviceRequires = CF::Properties() ); void partitionProperties(const CF::Properties& 
properties, std::vector& outProps); bool completeAllocations(CF::Device_ptr device, const std::vector& duplicates); @@ -129,7 +154,8 @@ class AllocationManager_impl: public virtual POA_CF::AllocationManager ossie::AllocationTable _allocations; ossie::RemoteAllocationTable _remoteAllocations; void unfilledRequests(CF::AllocationManager::AllocationRequestSequence &requests, const CF::AllocationManager::AllocationResponseSequence &result); - + rh_logger::LoggerPtr _allocMgrLog; + protected: boost::recursive_mutex allocationAccess; diff --git a/redhawk/src/control/sdr/dommgr/ApplicationComponent.cpp b/redhawk/src/control/sdr/dommgr/ApplicationComponent.cpp new file mode 100644 index 000000000..aaf1e4d4a --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/ApplicationComponent.cpp @@ -0,0 +1,313 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include + +#include + +#include "ApplicationComponent.h" +#include "Application_impl.h" + +using redhawk::ApplicationComponent; + +// TODO: Should probably use its own logger; using Application_impl's for +// consistency with old code +PREPARE_ALT_LOGGING(ApplicationComponent, Application_impl); + +ApplicationComponent::ApplicationComponent(const std::string& identifier) : + _identifier(identifier), + _name(identifier), + _isVisible(true), + _processId(0), + _componentHost(0) +{ +} + +const std::string& ApplicationComponent::getIdentifier() const +{ + return _identifier; +} + +const std::string& ApplicationComponent::getName() const +{ + return _name; +} + +void ApplicationComponent::setName(const std::string& name) +{ + _name = name; +} + +const std::string& ApplicationComponent::getSoftwareProfile() const +{ + return _softwareProfile; +} + +void ApplicationComponent::setSoftwareProfile(const std::string& softwareProfile) +{ + _softwareProfile = softwareProfile; +} + +bool ApplicationComponent::hasNamingContext() const +{ + return !_namingContext.empty(); +} + +const std::string& ApplicationComponent::getNamingContext() const +{ + return _namingContext; +} + +void ApplicationComponent::setNamingContext(const std::string& namingContext) +{ + _namingContext = namingContext; +} + +const std::string& ApplicationComponent::getImplementationId() const +{ + return _implementationId; +} + +void ApplicationComponent::setImplementationId(const std::string& implementationId) +{ + _implementationId = implementationId; +} + +bool ApplicationComponent::isVisible() const +{ + return _isVisible; +} + +void ApplicationComponent::setVisible(bool visible) +{ + _isVisible = visible; +} + +ApplicationComponent* ApplicationComponent::getComponentHost() +{ + return _componentHost; +} + +void ApplicationComponent::setComponentHost(ApplicationComponent* componentHost) +{ + _componentHost = componentHost; + if (_componentHost) { + _componentHost->_children.push_back(this); + } +} 
+ +const std::vector& ApplicationComponent::getChildren() const +{ + return _children; +} + +unsigned long ApplicationComponent::getProcessId() const +{ + if (_componentHost) { + return _componentHost->getProcessId(); + } + return _processId; +} + +void ApplicationComponent::setProcessId(unsigned long processId) +{ + _processId = processId; +} + +bool ApplicationComponent::isResource() const +{ + return !CORBA::is_nil(_resource); +} + +bool ApplicationComponent::isTerminated() const +{ + return (getProcessId() == 0); +} + +bool ApplicationComponent::isRegistered() const +{ + return !CORBA::is_nil(_componentObject); +} + +const std::vector& ApplicationComponent::getLoadedFiles() const +{ + return _loadedFiles; +} + +void ApplicationComponent::addLoadedFile(const std::string& fileName) +{ + _loadedFiles.push_back(fileName); +} + +CORBA::Object_ptr ApplicationComponent::getComponentObject() const +{ + return CORBA::Object::_duplicate(_componentObject); +} + +void ApplicationComponent::setComponentObject(CORBA::Object_ptr object) +{ + _componentObject = CORBA::Object::_duplicate(object); + _resource = ossie::corba::_narrowSafe(object); +} + +CF::Resource_ptr ApplicationComponent::getResourcePtr() const +{ + return CF::Resource::_duplicate(_resource); +} + +const boost::shared_ptr& ApplicationComponent::getAssignedDevice() const +{ + return _assignedDevice; +} + +void ApplicationComponent::setAssignedDevice(const boost::shared_ptr& assignedDevice) +{ + _assignedDevice = assignedDevice; + _assignedDeviceId = assignedDevice->identifier; +} + +const std::string & ApplicationComponent::getAssignedDeviceId() const +{ + if ( _assignedDevice ) { + return _assignedDevice->identifier; + } + return _assignedDeviceId; +} + +void ApplicationComponent::setAssignedDeviceId(const std::string &devId ) +{ + _assignedDeviceId = devId; +} + +void ApplicationComponent::start() +{ + omniORB::setClientCallTimeout(_resource, 0); + try { + _resource->start(); + } catch (const 
CF::Resource::StartError& exc) { + std::ostringstream message; + message << "Component '" << _name << "' failed to start: "; + message << exc.msg; + throw CF::Resource::StartError(exc.errorNumber, message.str().c_str()); + } catch (const CORBA::SystemException& exc) { + std::ostringstream message; + message << "Component '" << _name << "' failed to start: "; + message << ossie::corba::describeException(exc); + throw CF::Resource::StartError(CF::CF_EIO, message.str().c_str()); + } +} + +bool ApplicationComponent::stop(float timeout) +{ + if (timeout < 0) + timeout = 0; + omniORB::setClientCallTimeout(_resource, timeout * 1000); + try { + _resource->stop(); + return true; + } catch (const CF::Resource::StopError& error) { + RH_ERROR(_appComponentLog, "Failed to stop " << _identifier << "; CF::Resource::StopError '" << error.msg << "'"); + } catch (const CORBA::SystemException& exc) { + if (!isTerminated()) { + RH_ERROR(_appComponentLog, "Failed to stop component '" << _identifier << "'; " + << ossie::corba::describeException(exc)); + } else { + RH_DEBUG(_appComponentLog, "Ignoring CORBA exception stopping terminated component '" + << _identifier << "'"); + } + } catch (...) 
{ + RH_ERROR(_appComponentLog, "Failed to stop " << _identifier); + } + return false; +} + +void ApplicationComponent::releaseObject() +{ + if (!isResource()) { + return; + } + + RH_DEBUG(_appComponentLog, "Releasing component '" << _identifier << "'"); + try { + unsigned long timeout = 3; // seconds; + omniORB::setClientCallTimeout(_resource, timeout * 1000); + _resource->releaseObject(); + } catch (const CORBA::SystemException& exc) { + if (!isTerminated()) { + RH_ERROR(_appComponentLog, "Failed to release component '" << _identifier << "'; " + << ossie::corba::describeException(exc)); + } else { + RH_DEBUG(_appComponentLog, "Ignoring CORBA exception releasing terminated component '" + << _identifier << "'"); + } + } CATCH_RH_WARN(_appComponentLog, "releaseObject failed for component '" << _identifier << "'"); +} + +void ApplicationComponent::terminate() +{ + // If the process already terminated, or the component is running inside of + // a ComponentHost instance, skip termination + if (isTerminated() || _componentHost) { + return; + } + + if (!_assignedDevice || !_assignedDevice->isExecutable()) { + RH_WARN(_appComponentLog, "Cannot find device to terminate component " << _identifier); + return; + } + + RH_DEBUG(_appComponentLog, "Terminating component '" << _identifier + << "' on device '" << _assignedDevice->label + << "' (" << _assignedDevice->identifier << ")"); + try { + _assignedDevice->executableDevice->terminate(_processId); + } catch (const CF::ExecutableDevice::InvalidProcess& ip) { + RH_ERROR(_appComponentLog, "Failed to terminate process for component '" << _identifier + << "': invalid process"); + } catch (const CF::Device::InvalidState& state) { + RH_ERROR(_appComponentLog, "Failed to terminate process for component '" << _identifier + << "': device '" << _assignedDevice->label << "' is in an invalid state"); + } catch (const CORBA::SystemException& exc) { + RH_ERROR(_appComponentLog, "Failed to terminate process for component '" << _identifier + 
<< "': " << ossie::corba::describeException(exc)); + } +} + +void ApplicationComponent::unloadFiles() +{ + if (_loadedFiles.empty()) { + return; + } + + RH_DEBUG(_appComponentLog, "Unloading " << _loadedFiles.size() << " file(s) for component '" + << _identifier << "'"); + + if (!_assignedDevice || !_assignedDevice->isLoadable()) { + RH_WARN(_appComponentLog, "Cannot find device to unload files for component " << _identifier); + return; + } + + BOOST_FOREACH(const std::string& file, _loadedFiles) { + RH_TRACE(_appComponentLog, "Unloading file " << file); + try { + _assignedDevice->loadableDevice->unload(file.c_str()); + } CATCH_RH_WARN(_appComponentLog, "Unable to unload file " << file); + } +} diff --git a/redhawk/src/control/sdr/dommgr/ApplicationComponent.h b/redhawk/src/control/sdr/dommgr/ApplicationComponent.h new file mode 100644 index 000000000..705a0d0fe --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/ApplicationComponent.h @@ -0,0 +1,117 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef APPLICATIONCOMPONENT_H +#define APPLICATIONCOMPONENT_H + +#include +#include + +#include +#include + +#include "PersistenceStore.h" + +#define DEFAULT_STOP_TIMEOUT 3 + +namespace redhawk { + class ApplicationComponent { + + ENABLE_LOGGING; + + public: + ApplicationComponent(const std::string& identifier); + + const std::string& getIdentifier() const; + + const std::string& getName() const; + void setName(const std::string& name); + + const std::string& getSoftwareProfile() const; + void setSoftwareProfile(const std::string& softwareProfile); + + bool hasNamingContext() const; + const std::string& getNamingContext() const; + void setNamingContext(const std::string& namingContext); + + const std::string& getImplementationId() const; + void setImplementationId(const std::string& implementationId); + + bool isVisible() const; + void setVisible(bool visible); + + ApplicationComponent* getComponentHost(); + void setComponentHost(ApplicationComponent* componentHost); + + unsigned long getProcessId() const; + void setProcessId(unsigned long processId); + + bool isResource() const; + bool isTerminated() const; + bool isRegistered() const; + + const std::vector& getLoadedFiles() const; + void addLoadedFile(const std::string& fileName); + + CORBA::Object_ptr getComponentObject() const; + void setComponentObject(CORBA::Object_ptr object); + + CF::Resource_ptr getResourcePtr() const; + + const boost::shared_ptr& getAssignedDevice() const; + void setAssignedDevice(const boost::shared_ptr& assignedDevice); + + const std::string &getAssignedDeviceId() const; + void setAssignedDeviceId(const std::string & devId); + + const std::vector& getChildren() const; + + void start(); + bool stop(float timeout); + + void releaseObject(); + void terminate(); + void unloadFiles(); + + void setLogger(rh_logger::LoggerPtr log) { + _appComponentLog = log; + }; + + private: + std::string _identifier; + std::string _name; + std::string _softwareProfile; + std::string _namingContext; 
+ std::string _implementationId; + bool _isVisible; + std::vector _loadedFiles; + unsigned long _processId; + CORBA::Object_var _componentObject; + CF::Resource_var _resource; + boost::shared_ptr _assignedDevice; + std::string _assignedDeviceId; + + ApplicationComponent* _componentHost; + std::vector _children; + rh_logger::LoggerPtr _appComponentLog; + }; +} + +#endif // APPLICATIONCOMPONENT_H diff --git a/redhawk/src/control/sdr/dommgr/ApplicationDeployment.cpp b/redhawk/src/control/sdr/dommgr/ApplicationDeployment.cpp new file mode 100644 index 000000000..c9d18f806 --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/ApplicationDeployment.cpp @@ -0,0 +1,325 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include + +#include +#include +#include + +#include "PersistenceStore.h" +#include "ApplicationDeployment.h" +#include "ProfileCache.h" + +using namespace redhawk; +using namespace ossie; + +PREPARE_CF_LOGGING(ApplicationDeployment); + +ContainerDeployment::ContainerDeployment(const ossie::SoftPkg* softpkg, + ossie::ComponentInstantiation* instantiation, + const std::string& identifier) : + ComponentDeployment(softpkg, instantiation, identifier), + instance(instantiation) +{ +} + +ApplicationDeployment::ApplicationDeployment(const SoftwareAssembly& sad, + const std::string& instanceName, + const CF::Properties& initConfiguration) : + sad(sad), + // Give the application a unique identifier of the form + // "softwareassemblyid:ApplicationName", where the application name + // includes the serial number generated for the naming context + // (e.g. "Application_1"). + identifier(sad.getID() + ":" + instanceName), + instanceName(instanceName), + initConfiguration(initConfiguration), + ac(0) +{ +} + +ApplicationDeployment::~ApplicationDeployment() +{ + ac=NULL; + BOOST_FOREACH(ComponentDeployment* component, components) { + delete component; + } + BOOST_FOREACH(ContainerDeployment* container, containers) { + delete container; + } +} + + +const std::string& ApplicationDeployment::getIdentifier() const +{ + return identifier; +} + +ComponentDeployment* ApplicationDeployment::getAssemblyController() +{ + BOOST_FOREACH(ComponentDeployment* deployment, components) { + if (deployment->isAssemblyController()) { + return deployment; + } + } + return 0; +} + +redhawk::PropertyMap ApplicationDeployment::getAllocationContext() const +{ + redhawk::PropertyMap properties; + BOOST_FOREACH(ComponentDeployment* deployment, components) { + if (deployment->isAssemblyController()) { + properties = deployment->getAllocationContext(); + } + } + return properties; +} + +ComponentDeployment* ApplicationDeployment::createComponentDeployment(const SoftPkg* softpkg, + const 
ComponentInstantiation* instantiation) +{ + // Create a unique identifier for this component instance by appending the + // application instance's unique name + std::string component_id = instantiation->getID() + ":" + instanceName; + + if (softpkg->isScaCompliant() && !instantiation->isNamingService()) { + RH_WARN(_appDeploymentLog, "Component instantiation " + << instantiation->getID() << " does not provide a 'findcomponent' name but " + << softpkg->getName() << " is SCA-compliant"); + } + + if ( (instantiation->getID() == sad.getAssemblyControllerRefId() ) && ac ) { + RH_TRACE(_appDeploymentLog, " Requesting AssemblyController " << instantiation->getID() ); + return ac; + } + + ComponentDeployment* deployment = new ComponentDeployment(softpkg, instantiation, component_id); + components.push_back(deployment); + + // Override properties from initial configuration + if (instantiation->getID() == sad.getAssemblyControllerRefId()) { + ac = deployment; + deployment->setIsAssemblyController(true); + overrideAssemblyControllerProperties(deployment); + } + overrideExternalProperties(deployment); + + overrideImpliedProperties(deployment); + + return deployment; +} + +ContainerDeployment* ApplicationDeployment::createContainer(redhawk::ProfileCache& cache, + const boost::shared_ptr& device) +{ + ContainerDeployment* container = getContainer(device->identifier); + if (container) { + RH_DEBUG(_appDeploymentLog, "Using existing container " << container->getIdentifier()); + return container; + } + + const ossie::SoftPkg* softpkg = cache.loadSoftPkg("/mgr/rh/ComponentHost/ComponentHost.spd.xml"); + + // Create an instantiation with the ID and naming service name based on the + // device label; the deployment will own this object + ossie::ComponentInstantiation* instantiation = new ossie::ComponentInstantiation; + instantiation->instantiationId = "ComponentHost_" + device->label; + instantiation->namingservicename = instantiation->instantiationId; + + // Use the same pattern as 
components to generate the unique runtime ID + RH_DEBUG(_appDeploymentLog, "Creating component host " << instantiation->getID()); + std::string container_id = instantiation->getID() + ":" + instanceName; + + container = new ContainerDeployment(softpkg, instantiation, container_id); + containers.push_back(container); + return container; +} + +const ApplicationDeployment::ComponentList& ApplicationDeployment::getComponentDeployments() +{ + return components; +} + +const ApplicationDeployment::ContainerList& ApplicationDeployment::getContainerDeployments() +{ + return containers; +} + +ComponentDeployment* ApplicationDeployment::getComponentDeployment(const std::string& instantiationId) +{ + for (ComponentList::iterator comp = components.begin(); comp != components.end(); ++comp) { + if (instantiationId == (*comp)->getInstantiation()->getID()) { + return *comp; + } + } + + return 0; +} + +ComponentDeployment* ApplicationDeployment::getComponentDeploymentByUniqueId(const std::string& identifier) +{ + BOOST_FOREACH(ComponentDeployment* deployment, components) { + if (identifier == deployment->getIdentifier()) { + return deployment; + } + } + + return 0; +} + +void ApplicationDeployment::applyCpuReservations(const CpuReservations& reservations) +{ + BOOST_FOREACH(ComponentDeployment* deployment, components) { + CpuReservations::const_iterator reserved = reservations.find(deployment->getIdentifier()); + if (reserved == reservations.end()) { + // NB: Check for the usage name for consistency with 2.0, although + // the instantiation ID makes more sense. 
If the usage name does not apply, + // use the instantiation ID + reserved = reservations.find(deployment->getInstantiation()->getUsageName()); + if (reserved == reservations.end()) { + reserved = reservations.find(deployment->getInstantiation()->getID()); + } + } + if (reserved != reservations.end()) { + deployment->setCpuReservation(reserved->second); + } + } +} + +void ApplicationDeployment::overrideAssemblyControllerProperties(ComponentDeployment* deployment) +{ + BOOST_FOREACH(const redhawk::PropertyType& override, initConfiguration) { + const std::string propid = override.getId(); + if (propid == "LOGGING_CONFIG_URI") { + if (deployment->getLoggingConfiguration().empty()) { + RH_TRACE(_appDeploymentLog, "Adding LOGGING_CONFIG_URI as a command line parameter with value " + << override.getValue().toString()); + deployment->overrideProperty(propid, override.getValue()); + } + } else { + RH_TRACE(_appDeploymentLog, "Overriding property " << propid + << " with " << override.getValue().toString()); + deployment->overrideProperty(propid, override.getValue()); + } + } +} + +void ApplicationDeployment::overrideExternalProperties(ComponentDeployment* deployment) +{ + const std::string& instantiation_id = deployment->getInstantiation()->getID(); + BOOST_FOREACH(const SoftwareAssembly::Property& property, sad.getExternalProperties()) { + if (property.comprefid == instantiation_id) { + std::string property_id = property.externalpropid; + if (property_id.empty()) { + property_id = property.propid; + } + redhawk::PropertyMap::iterator override = initConfiguration.find(property_id); + if (override != initConfiguration.end()) { + RH_TRACE(_appDeploymentLog, "Overriding external property " << property_id + << " (" << property.propid << ") = " << override->getValue().toString()); + deployment->overrideProperty(property.propid, override->getValue()); + } + } + } +} + +void ApplicationDeployment::overrideImpliedProperties(ComponentDeployment* deployment) { + BOOST_FOREACH(const 
redhawk::PropertyType& override, initConfiguration) { + const std::string propid = override.getId(); + if (propid == "LOGGING_CONFIG_URI") { + deployment->overrideProperty(propid, override.getValue()); + } + } +} + +ContainerDeployment* ApplicationDeployment::getContainer(const std::string& deviceId) +{ + BOOST_FOREACH(ContainerDeployment* container, containers) { + if (container->getAssignedDevice() && container->getAssignedDevice()->identifier == deviceId) { + return container; + } + } + return 0; +} + +CF::Resource_ptr ApplicationDeployment::lookupComponentByInstantiationId(const std::string& identifier) +{ + ComponentDeployment* deployment = getComponentDeployment(identifier); + if (deployment) { + return deployment->getResourcePtr(); + } + return CF::Resource::_nil(); +} + +CF::Device_ptr ApplicationDeployment::lookupDeviceThatLoadedComponentInstantiationId(const std::string& componentId) +{ + RH_TRACE(_appDeploymentLog, "[DeviceLookup] Lookup device that loaded component " << componentId); + + ComponentDeployment* deployment = getComponentDeployment(componentId); + if (!deployment) { + throw ossie::LookupError("component '" + componentId + "' not found"); + } + + boost::shared_ptr device = deployment->getAssignedDevice(); + if (!device) { + throw ossie::LookupError("component '" + componentId + "' is not assigned to a device"); + } + + RH_TRACE(_appDeploymentLog, "[DeviceLookup] Assigned device id " << device->identifier); + return CF::Device::_duplicate(device->device); +} + +CF::Device_ptr ApplicationDeployment::lookupDeviceUsedByComponentInstantiationId(const std::string& componentId, + const std::string& usesId) +{ + RH_TRACE(_appDeploymentLog, "[DeviceLookup] Lookup device used by component " << componentId); + + ComponentDeployment* deployment = getComponentDeployment(componentId); + if (!deployment) { + throw ossie::LookupError("component '" + componentId + "' not found"); + } + + UsesDeviceAssignment* uses = 
deployment->getUsesDeviceAssignment(usesId); + if (!uses) { + throw ossie::LookupError("component '" + componentId + "' has no usesdevice '" + usesId + "'"); + } + + CF::Device_var device = uses->getAssignedDevice(); + RH_TRACE(_appDeploymentLog, "[DeviceLookup] Assigned device id " + << ossie::corba::returnString(device->identifier())); + return device._retn(); +} + +CF::Device_ptr ApplicationDeployment::lookupDeviceUsedByApplication(const std::string& usesRefId) +{ + RH_TRACE(_appDeploymentLog, "[DeviceLookup] Lookup device used by application, Uses Id: " << usesRefId); + + UsesDeviceAssignment* uses = getUsesDeviceAssignment(usesRefId); + if (!uses) { + throw ossie::LookupError("application has no usesdevice '" + usesRefId + "'"); + } + + CF::Device_var device = uses->getAssignedDevice(); + RH_TRACE(_appDeploymentLog, "[DeviceLookup] Assigned device id " + << ossie::corba::returnString(device->identifier())); + return device._retn(); +} diff --git a/redhawk/src/control/sdr/dommgr/ApplicationDeployment.h b/redhawk/src/control/sdr/dommgr/ApplicationDeployment.h new file mode 100644 index 000000000..403bb1fff --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/ApplicationDeployment.h @@ -0,0 +1,122 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef APPLICATIONDEPLOYMENT_H +#define APPLICATIONDEPLOYMENT_H + +#include +#include + +#include + +#include +#include +#include + +#include "connectionSupport.h" +#include "Deployment.h" + +namespace redhawk { + + class ProfileCache; + + class ContainerDeployment : public ComponentDeployment + { + public: + ContainerDeployment(const ossie::SoftPkg* softpkg, + ossie::ComponentInstantiation* instantiation, + const std::string& identifier); + + protected: + // The instantiation does not appear in a SAD; take ownership here + boost::scoped_ptr instance; + }; + + class ApplicationDeployment : public ossie::ComponentLookup, public ossie::DeviceLookup, public UsesDeviceDeployment + { + ENABLE_LOGGING; + + public: + typedef std::vector ComponentList; + typedef std::vector ContainerList; + typedef std::map CpuReservations; + + ApplicationDeployment(const ossie::SoftwareAssembly& sad, + const std::string& instanceName, + const CF::Properties& initConfiguration); + ~ApplicationDeployment(); + + const std::string& getIdentifier() const; + + /** + * Returns the properties used for evaluating math statements in + * allocation + */ + redhawk::PropertyMap getAllocationContext() const; + + ComponentDeployment* getAssemblyController(); + + ComponentDeployment* createComponentDeployment(const ossie::SoftPkg* softpkg, + const ossie::ComponentInstantiation* instantiation); + + const ComponentList& getComponentDeployments(); + ComponentDeployment* getComponentDeployment(const std::string& instantiationId); + ComponentDeployment* getComponentDeploymentByUniqueId(const std::string& identifier); + + void applyCpuReservations(const CpuReservations& reservations); + + const ContainerList& getContainerDeployments(); + ContainerDeployment* createContainer(redhawk::ProfileCache& cache, + const boost::shared_ptr& device); + + // Adapt 
interfaces for component and device search to support + // ConnectionManager + // ComponentLookup interface + virtual CF::Resource_ptr lookupComponentByInstantiationId(const std::string& identifier); + + // DeviceLookup interface + CF::Device_ptr lookupDeviceThatLoadedComponentInstantiationId(const std::string& componentId); + CF::Device_ptr lookupDeviceUsedByComponentInstantiationId(const std::string& componentId, + const std::string& usesId); + CF::Device_ptr lookupDeviceUsedByApplication(const std::string& usesRefId); + + void setLogger(rh_logger::LoggerPtr log) { + _appDeploymentLog = log; + }; + + protected: + void overrideAssemblyControllerProperties(ComponentDeployment* deployment); + void overrideExternalProperties(ComponentDeployment* deployment); + void overrideImpliedProperties(ComponentDeployment* deployment); + + ContainerDeployment* getContainer(const std::string& deviceId); + + const ossie::SoftwareAssembly& sad; + const std::string identifier; + const std::string instanceName; + redhawk::PropertyMap initConfiguration; + ComponentList components; + ContainerList containers; + ComponentDeployment *ac; + rh_logger::LoggerPtr _appDeploymentLog; + }; +} + +#endif // APPLICATIONDEPLOYMENT_H diff --git a/redhawk/src/control/sdr/dommgr/ApplicationFactory_impl.cpp b/redhawk/src/control/sdr/dommgr/ApplicationFactory_impl.cpp index 85a561282..a19c6b0f7 100644 --- a/redhawk/src/control/sdr/dommgr/ApplicationFactory_impl.cpp +++ b/redhawk/src/control/sdr/dommgr/ApplicationFactory_impl.cpp @@ -1,3 +1,4 @@ + /* * This file is protected by Copyright. Please refer to the COPYRIGHT file * distributed with this source distribution. 
@@ -29,7 +30,7 @@ #include #include - +#include #include #include @@ -37,12 +38,16 @@ #include #include #include +#include #include "Application_impl.h" #include "ApplicationFactory_impl.h" +#include "createHelper.h" #include "DomainManager_impl.h" #include "AllocationManager_impl.h" #include "RH_NamingContext.h" +#include "ApplicationValidator.h" +#include "DeploymentExceptions.h" namespace fs = boost::filesystem; using namespace ossie; @@ -82,7 +87,6 @@ void ScopedAllocations::transfer(ScopedAllocations& dest) void ScopedAllocations::deallocate() { if (!_allocations.empty()) { - LOG_TRACE(ApplicationFactory_impl, "Deallocating " << _allocations.size() << " allocations"); _allocator.deallocate(_allocations.begin(), _allocations.end()); } } @@ -104,367 +108,39 @@ static void rotateDeviceList(DeviceList& devices, const std::string& identifier) } } -static std::vector mergeProcessorDeps(const ossie::ImplementationInfo::List& implementations) -{ - // this function merges the overlap in processors between the different components that have been selected - std::vector processorDeps; - for (ossie::ImplementationInfo::List::const_iterator impl = implementations.begin(); impl != implementations.end(); ++impl) { - const std::vector& implDeps = (*impl)->getProcessorDeps(); - if (!implDeps.empty()) { - if (processorDeps.empty()) { - // No prior processor dependencies, so overwrite - processorDeps = implDeps; - } else { - std::vector toremove; - toremove.resize(0); - for (std::vector::iterator proc = processorDeps.begin(); proc != processorDeps.end(); ++proc) { - if (std::find(implDeps.begin(), implDeps.end(), *proc) == implDeps.end()) { - toremove.push_back(*proc); - } - } - for (std::vector::iterator _rem = toremove.begin(); _rem != toremove.end(); ++_rem) { - std::vector::iterator proc = std::find(processorDeps.begin(), processorDeps.end(), *_rem); - if (proc != processorDeps.end()) { - processorDeps.erase(proc); - } - } - } - } - } - return processorDeps; -} - -static 
std::vector mergeOsDeps(const ossie::ImplementationInfo::List& implementations) -{ - // this function merges the overlap in operating systems between the different components that have been selected - std::vector osDeps; - for (ossie::ImplementationInfo::List::const_iterator impl = implementations.begin(); impl != implementations.end(); ++impl) { - const std::vector& implDeps = (*impl)->getOsDeps(); - if (!implDeps.empty()) { - if (osDeps.empty()) { - // No prior OS dependencies, so overwrite - osDeps = implDeps; - } else { - std::vector toremove; - toremove.resize(0); - for (std::vector::iterator pair = osDeps.begin(); pair != osDeps.end(); ++pair) { - if (std::find(implDeps.begin(), implDeps.end(), *pair) == implDeps.end()) { - toremove.push_back(*pair); - } - } - for (std::vector::iterator _rem = toremove.begin(); _rem != toremove.end(); ++_rem) { - std::vector::iterator pair = std::find(osDeps.begin(), osDeps.end(), *_rem); - if (pair != osDeps.end()) { - osDeps.erase(pair); - } +namespace { + template + inline bool mergeDependencies(std::vector& first, const std::vector& second) + { + if (second.empty()) { + return true; + } else if (first.empty()) { + first = second; + return true; + } else { + for (typename std::vector::iterator iter = first.begin(); iter != first.end(); ) { + if (std::find(second.begin(), second.end(), *iter) == second.end()) { + iter = first.erase(iter); + } else { + ++iter; } } + return !first.empty(); } } - return osDeps; -} - -PREPARE_CF_LOGGING(ApplicationFactory_impl); - -void -ApplicationFactory_impl::ValidateFileLocation( CF::FileManager_ptr fileMgr, const std::string &profile_file) -{ - TRACE_ENTER(ApplicationFactory_impl) - - if (profile_file == "") { - TRACE_EXIT(ApplicationFactory_impl) - return; - } - - // Verify file within the provided FileMgr - LOG_TRACE(ApplicationFactory_impl, "Validating that profile " << profile_file << " exists"); - if (!fileMgr->exists (profile_file.c_str())) { - string msg = "File "; - msg += 
profile_file; - msg += " does not exist."; - throw CF::FileException (CF::CF_ENOENT, msg.c_str()); - } -} - -bool ApplicationFactory_impl::ValidateImplementationCodeFile( CF::FileManager_ptr fileMgr, - DomainManager_impl *domMgr, - const std::string &spd_path, - const std::string &sfw_profile, - const std::string &codeFile, - const bool allow_missing_impl ) { - - bool hasImpl = false; - try { - boost::filesystem::path implPath = boost::filesystem::path( spd_path ) / codeFile; - LOG_TRACE(ApplicationFactory_impl, "Validating Implmentation existance: " << implPath.string() ); - ValidateFileLocation( fileMgr, implPath.string().c_str() ); - hasImpl=true; - } catch (CF::InvalidFileName ex) { - if ( allow_missing_impl) { - LOG_WARN( ApplicationFactory_impl, "Invalid localfile for PROFILE: " << sfw_profile << " CODE: " << codeFile); - } - else { - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } - } catch (CF::FileException ex) { - if ( allow_missing_impl) { - LOG_WARN(ApplicationFactory_impl, "Invalid or missing localfile for PROFILE: " << sfw_profile << " CODE: " << codeFile); - } - else { - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } - } catch ( ... 
) { - if ( !allow_missing_impl ) { - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ""); - } - } - - return hasImpl; -} - - -void ApplicationFactory_impl::ValidateSoftPkgDep (CF::FileManager_ptr fileMgr, DomainManager_impl *domMgr, const std::string& sfw_profile, const bool allow_missing_impl ) { - SoftPkg pkg; - ValidateSPD(fileMgr, domMgr, pkg, sfw_profile, false, false, allow_missing_impl, true ); -} - -std::string ApplicationFactory_impl::xmlParsingVersionMismatch(DomainManager_impl *domMgr, std::string &component_version) -{ - std::string added_message; - if (!component_version.empty()) { - try { - static std::string version = domMgr->getRedhawkVersion(); - if (redhawk::compareVersions(component_version, version) < 0) { - added_message = "Attempting to run a component from version "; - added_message += component_version; - added_message += " on REDHAWK version "; - added_message += version; - added_message += ". "; - } - } catch ( ... ) {} - } - return added_message; -} - -void ApplicationFactory_impl::ValidateSPD(CF::FileManager_ptr fileMgr, - DomainManager_impl *domMgr, - SoftPkg &spdParser, - const std::string& sfw_profile, - const bool require_prf, - const bool require_scd, - const bool allow_missing_impl, - const bool is_dep ){ - - TRACE_ENTER(ApplicationFactory_impl) - - if ( sfw_profile == "" ) { - LOG_WARN( ApplicationFactory_impl, "No Software Profile Provided."); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, "No software profile provided"); - TRACE_EXIT(ApplicationFactory_impl); - } - - try { - LOG_TRACE(ApplicationFactory_impl, "Validating SPD " << sfw_profile); - ValidateFileLocation(fileMgr, sfw_profile); - - // check the filename ends with the extension given in the spec - if ((strstr (sfw_profile.c_str(), ".spd.xml")) == NULL) - { LOG_ERROR(ApplicationFactory_impl, "File " << sfw_profile << " should end with .spd.xml"); } - LOG_TRACE(ApplicationFactory_impl, "validating " << sfw_profile); - - try { 
- File_stream _spd(fileMgr, sfw_profile.c_str()); - spdParser.load( _spd, sfw_profile.c_str() ); - _spd.close(); - } catch (ossie::parser_error& ex) { - File_stream _spd(fileMgr, sfw_profile.c_str()); - std::string line; - std::string component_version; - while (std::getline(_spd, line)) { - size_t type_idx = line.find("type"); - if (type_idx != std::string::npos) { - size_t first_quote = line.find('"', type_idx); - if (first_quote == std::string::npos) - continue; - size_t second_quote = line.find('"', first_quote + 1); - if (second_quote == std::string::npos) - continue; - component_version = line.substr(first_quote + 1, second_quote-(first_quote+1)); - break; - } - } - ostringstream eout; - eout << xmlParsingVersionMismatch(domMgr, component_version); - std::string parser_error_line = ossie::retrieveParserErrorLineNumber(ex.what()); - eout << "Failed to parse SPD: " << sfw_profile << ". " << parser_error_line << " The XML parser returned the following error: " << ex.what(); - LOG_ERROR(ApplicationFactory_impl, eout.str() ); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, eout.str().c_str()); - } catch (CF::InvalidFileName ex) { - LOG_ERROR(ApplicationFactory_impl, "Failed to validate SPD: " << sfw_profile << ". Invalid file name exception: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } catch (CF::FileException ex) { - LOG_ERROR(ApplicationFactory_impl, "Failed to validate SPD: " << sfw_profile << ". File exception: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } catch ( ... 
) { - LOG_ERROR(ApplicationFactory_impl, "Unexpected error validating SPD: " << sfw_profile ); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ""); - } - - // - // validate each implementation - // - const ossie::SPD::Implementations& impls = spdParser.getImplementations(); - ossie::SPD::Implementations::const_iterator impl = impls.begin(); - int impl_cnt=0; - for( ; impl != impls.end(); impl++ ) { - - std::string code_file = impl->getCodeFile(); - LOG_TRACE(ApplicationFactory_impl, "Validating Implementation: " << impl->getID() << " File: " << code_file << " impl_cnt " << impl_cnt ); - bool hasImpl=ValidateImplementationCodeFile( fileMgr, domMgr, spdParser.getSPDPath(), - sfw_profile, code_file, allow_missing_impl ); - if ( hasImpl ) { - impl_cnt++; - - const ossie::SPD::SoftPkgDependencies& deps = impl->getSoftPkgDependencies(); - ossie::SPD::SoftPkgDependencies::const_iterator dep = deps.begin(); - for(; dep != deps.end(); dep++ ) { - std::string localfile = dep->localfile; - - try { - LOG_TRACE(ApplicationFactory_impl, "Validating Dependency: " << localfile); - ValidateSoftPkgDep(fileMgr, domMgr, localfile, allow_missing_impl ); - } catch (CF::InvalidFileName ex) { - if ( allow_missing_impl) { - LOG_WARN( ApplicationFactory_impl, "Invalid Code File (dependency), PROFILE: " << sfw_profile << " CODE: " << code_file); - } - else { - LOG_ERROR(ApplicationFactory_impl, "Invalid Code File (dependency), PROFILE: " << sfw_profile << " CODE: " << code_file); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } - } catch (CF::FileException ex) { - if ( allow_missing_impl) { - LOG_WARN(ApplicationFactory_impl, "Invalid Code File (dependency), PROFILE: " << sfw_profile << " CODE: " << code_file); - } - else { - LOG_ERROR(ApplicationFactory_impl, "Invalid Code File (dependency), PROFILE: " << sfw_profile << " CODE: " << code_file); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } - } - 
} - } - LOG_TRACE(ApplicationFactory_impl, "After Validating File: " << code_file << " impl_cnt " << impl_cnt ); - } - - if ( 0 == impl_cnt ) { - if ( !is_dep or !allow_missing_impl ) { - ostringstream os; - os << "No valid implementations found, PROFILE:" << sfw_profile; - LOG_ERROR(ApplicationFactory_impl, os.str() ); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, os.str().c_str() ); - } - } - - // query SPD for PRF - if (spdParser.getPRFFile() != 0) { - LOG_TRACE(ApplicationFactory_impl, "validating " << spdParser.getPRFFile()); - try { - ValidateFileLocation ( fileMgr, spdParser.getPRFFile ()); - // check the file name ends with the extension given in the spec - if (spdParser.getPRFFile() && (strstr (spdParser.getPRFFile (), ".prf.xml")) == NULL) { - LOG_ERROR(ApplicationFactory_impl, "File " << spdParser.getPRFFile() << " should end in .prf.xml."); - } - - LOG_TRACE(ApplicationFactory_impl, "Creating file stream") - File_stream prfStream(fileMgr, spdParser.getPRFFile()); - LOG_TRACE(ApplicationFactory_impl, "Loading parser") - Properties prfParser(prfStream); - LOG_TRACE(ApplicationFactory_impl, "Closing stream") - prfStream.close(); - } catch (ossie::parser_error& ex ) { - ostringstream eout; - std::string component_version(spdParser.getSoftPkgType()); - eout << xmlParsingVersionMismatch(domMgr, component_version); - std::string parser_error_line = ossie::retrieveParserErrorLineNumber(ex.what()); - eout << "Failed to parse PRF: " << spdParser.getPRFFile() << ". 
" << parser_error_line << " The XML parser returned the following error: " << ex.what(); - LOG_ERROR(ApplicationFactory_impl, eout.str() ); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, eout.str().c_str()); - } catch (CF::InvalidFileName ex) { - if ( require_prf ) { - LOG_ERROR(ApplicationFactory_impl, "Failed to validate PRF: " << spdParser.getPRFFile() << " Invalid file name exception: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } - } catch (CF::FileException ex) { - if ( require_prf ) { - LOG_ERROR(ApplicationFactory_impl, "Failed to validate PRF: " << spdParser.getPRFFile() << " File exception: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } - } catch ( ... ) { - LOG_ERROR(ApplicationFactory_impl, "Unexpected error validating PRF: " << spdParser.getPRFFile()); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ""); - } - } else { - LOG_TRACE(ApplicationFactory_impl, "No PRF file to validate") - } - - if (spdParser.getSCDFile() != 0) { - try { - // query SPD for SCD - LOG_TRACE(ApplicationFactory_impl, "validating " << spdParser.getSCDFile()); - ValidateFileLocation ( fileMgr, spdParser.getSCDFile ()); - - // Check the filename ends with the extension given in the spec - if ((strstr (spdParser.getSCDFile (), ".scd.xml")) == NULL) - { LOG_ERROR(ApplicationFactory_impl, "File " << spdParser.getSCDFile() << " should end with .scd.xml."); } - - File_stream _scd(fileMgr, spdParser.getSCDFile()); - ComponentDescriptor scdParser (_scd); - _scd.close(); - } catch (ossie::parser_error& ex) { - ostringstream eout; - std::string component_version(spdParser.getSoftPkgType()); - eout << xmlParsingVersionMismatch(domMgr, component_version); - std::string parser_error_line = ossie::retrieveParserErrorLineNumber(ex.what()); - eout << "Failed to parse SCD: " << spdParser.getSCDFile() << ". 
" << parser_error_line << " The XML parser returned the following error: " << ex.what(); - LOG_ERROR(ApplicationFactory_impl, eout.str() ); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, eout.str().c_str()); - } catch (CF::InvalidFileName ex) { - if ( require_scd ){ - LOG_ERROR(ApplicationFactory_impl, "Failed to validate SCD: " << spdParser.getSCDFile() << " Invalid file name exception: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } - } catch (CF::FileException ex) { - if ( require_scd ) { - LOG_ERROR(ApplicationFactory_impl, "Failed to validate SCD: " << spdParser.getSCDFile() << " File exception: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } - } catch ( ... ) { - LOG_ERROR(ApplicationFactory_impl, "Unexpected error validating SCD: " << spdParser.getSCDFile()); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ""); - } - } else if (spdParser.isScaCompliant() and require_scd ) { - LOG_ERROR(ApplicationFactory_impl, "SCA compliant component is missing SCD file reference"); - throw CF::DomainManager::ApplicationInstallationError(CF::CF_EBADF, "SCA compliant components require SCD file"); + static std::string getVersionMismatchMessage(const SoftPkg* softpkg) + { + const std::string& softpkg_version = softpkg->getSoftPkgType(); + if (redhawk::compareVersions(VERSION, softpkg_version) > 0) { + return " (attempting to run a component from version " + softpkg_version + " on REDHAWK version " VERSION ")"; } else { - LOG_TRACE(ApplicationFactory_impl, "No SCD file to validate") + return std::string(); } - - } catch (CF::InvalidFileName& ex) { - LOG_ERROR(ApplicationFactory_impl, "Failed to validate SPD: " << sfw_profile << ", exception: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } catch (CF::FileException& ex) { - LOG_ERROR(ApplicationFactory_impl, "Failed to validate SPD: " 
<< sfw_profile << ", exception: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } catch (CF::DomainManager::ApplicationInstallationError& ex) { - throw; - } catch ( ... ) { - LOG_ERROR(ApplicationFactory_impl, "Unexpected error validating SPD: " << sfw_profile); - throw CF::DomainManager::ApplicationInstallationError (); } - - } +PREPARE_CF_LOGGING(ApplicationFactory_impl); ApplicationFactory_impl::ApplicationFactory_impl (const std::string& softwareProfile, const std::string& domainName, @@ -477,205 +153,82 @@ ApplicationFactory_impl::ApplicationFactory_impl (const std::string& softwarePro // Get the naming context from the domain _domainContext = RH_NamingContext::GetNamingContext( _domainName, !_domainManager->bindToDomain() ); if (CORBA::is_nil(_domainContext)) { - LOG_ERROR(ApplicationFactory_impl, "CosNaming::NamingContext::_narrow threw Unknown Exception"); + RH_ERROR(_appFactoryLog, "CosNaming::NamingContext::_narrow threw Unknown Exception"); throw; } - _dmnMgr = domainManager->_this(); - try { - _fileMgr = _dmnMgr->fileMgr(); + _fileMgr = _domainManager->fileMgr(); } catch ( std::exception& ex ) { ostringstream eout; eout << "The following standard exception occurred: "<_fileMgr failed with Unknown Exception"); + RH_ERROR(_appFactoryLog, "domainManager->_fileMgr failed with Unknown Exception"); throw CF::DomainManager::ApplicationInstallationError(CF::CF_EBADF, "Could not get File Manager from Domain Manager"); } - bool strict_spd_validation = _domainManager->strictSPDValidation(); - + RH_INFO(_appFactoryLog, "Installing application " << _softwareProfile); try { + if (!_fileMgr->exists(_softwareProfile.c_str())) { + std::string msg = "File "; + msg += _softwareProfile; + msg += " does not exist."; + throw CF::FileException (CF::CF_ENOENT, msg.c_str()); + } - LOG_INFO(ApplicationFactory_impl, "Installing application " << _softwareProfile.c_str()); - ValidateFileLocation ( _fileMgr, _softwareProfile ); - - 
File_stream _sad(_fileMgr, _softwareProfile.c_str()); - _sadParser.load(_sad); - _sad.close(); + File_stream _sad(_fileMgr, _softwareProfile.c_str()); + _sadParser.load(_sad); + _sad.close(); } catch (const ossie::parser_error& ex) { ostringstream eout; std::string parser_error_line = ossie::retrieveParserErrorLineNumber(ex.what()); eout << "Failed to parse SAD file: " << _softwareProfile << ". " << parser_error_line << " The XML parser returned the following error: " << ex.what(); - LOG_ERROR(ApplicationFactory_impl, eout.str()); + RH_ERROR(_appFactoryLog, eout.str()); throw CF::DomainManager::ApplicationInstallationError(CF::CF_EBADF, eout.str().c_str()); } catch ( std::exception& ex ) { ostringstream eout; eout << "The following standard exception occurred: "<& ports = _sadParser.getExternalPorts(); - std::vector extPorts; - for (std::vector::const_iterator port = ports.begin(); port != ports.end(); ++port) { - // Gets name to use - std::string extName; - if (port->externalname != "") { - extName = port->externalname; - } else { - extName = port->identifier; - } - // Check for duplicate - if (std::find(extPorts.begin(), extPorts.end(), extName) == extPorts.end()) { - extPorts.push_back(extName); - } else { - ostringstream eout; - eout << "Duplicate External Port name: " << extName; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::DomainManager::ApplicationInstallationError(CF::CF_NOTSET, eout.str().c_str()); - } - } - - // Gets the assembly controller software profile by looping through each - // component instantiation to find a matching ID to the AC's - std::string assemblyControllerId = _sadParser.getAssemblyControllerRefId(); - SoftPkg ac_spd; - CORBA::String_var ac_profile = ""; - bool ac_found = false; - std::vector components = _sadParser.getAllComponents(); - for (std::vector::const_iterator comp = components.begin(); - comp != components.end(); ++comp) { - SoftPkg comp_pkg; - std::string p_name; - try { - if ( 
_sadParser.getSPDById(comp->getFileRefId())) { - p_name = _sadParser.getSPDById(comp->getFileRefId()); - LOG_DEBUG(ApplicationFactory_impl, "Validating... COMP profile: " << p_name); - ValidateSPD(_fileMgr, _domainManager, comp_pkg, p_name, !strict_spd_validation ) ; - } - else { - LOG_ERROR(ApplicationFactory_impl, "installApplication: invalid componentfileref: " << comp->getFileRefId() ); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, "installApplication: invalid componentfileref"); - } - } catch (CF::FileException& ex) { - LOG_ERROR(ApplicationFactory_impl, "installApplication: While validating the SAD profile: " << ex.msg); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); - } catch( CF::InvalidFileName& ex ) { - std::ostringstream eout; - eout << "Invalid file name: " << p_name; - LOG_ERROR(ApplicationFactory_impl, "installApplication: Invalid file name: " << p_name); - throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, eout.str().c_str()); - } catch (CF::DomainManager::ApplicationInstallationError& e) { - LOG_TRACE(ApplicationFactory_impl, "rethrowing ApplicationInstallationError" << e.msg); - throw; - } catch ( std::exception& ex ) { - std::ostringstream eout; - eout << "The following standard exception occurred: "< compInstantiations = comp->instantiations; - for (std::vector::const_iterator compInst = compInstantiations.begin(); - compInst != compInstantiations.end(); ++compInst){ - if (assemblyControllerId == compInst->instantiationId) { - ac_spd = comp_pkg; - ac_profile = _sadParser.getSPDById(comp->getFileRefId()); - ac_found = true; - break; - } - } - } - } - - // Gets the assembly controllers properties - Properties prf; - if (ac_found) { - if ( ac_spd.getPRFFile() ) { - std::string prf_file(ac_spd.getPRFFile()); - try { - File_stream _prf(_fileMgr, prf_file.c_str()); - prf.load(_prf); - _prf.close(); - } catch(ossie::parser_error& ex ) { - std::ostringstream os; - std::string 
parser_error_line = ossie::retrieveParserErrorLineNumber(ex.what()); - os << "Invalid PRF file: " << prf_file << ". " << parser_error_line << " The XML parser returned the following error: " << ex.what(); - LOG_ERROR(ApplicationFactory_impl, os.str() ); - throw CF::DomainManager::ApplicationInstallationError(CF::CF_NOTSET, os.str().c_str()); - } catch( ... ) { - // Errors are reported at create time - } - } - } - - // Makes sure all external property names are unique - const std::vector& properties = _sadParser.getExternalProperties(); - std::vector extProps; - for (std::vector::const_iterator prop = properties.begin(); prop != properties.end(); ++prop) { - // Gets name to use - std::string extName; - if (prop->externalpropid != "") { - extName = prop->externalpropid; - } else { - extName = prop->propid; - } - // Check for duplicate - if (std::find(extProps.begin(), extProps.end(), extName) == extProps.end()) { - extProps.push_back(extName); - } else { - ostringstream eout; - eout << "Duplicate External Property name: " << extName; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::DomainManager::ApplicationInstallationError(CF::CF_NOTSET, eout.str().c_str()); - } - } - - // Make sure AC prop ID's aren't in conflict with external ones - const std::vector& acProps = prf.getProperties(); - for (unsigned int i = 0; i < acProps.size(); ++i) { - // Check for duplicate - if (std::find(extProps.begin(), extProps.end(), acProps[i]->getID()) == extProps.end()) { - extProps.push_back(acProps[i]->getID()); - } else { - ostringstream eout; - eout << "Assembly controller property in use as External Property: " << acProps[i]->getID(); - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::DomainManager::ApplicationInstallationError(CF::CF_NOTSET, eout.str().c_str()); - } + // Validate the application using the current domain state; however, we + // cannot assume that the component SPDs will not change between now and a + // subsequent create call, so the parsed 
profiles are not saved + redhawk::ApplicationValidator validator(_fileMgr, _appFactoryLog); + try { + validator.validate(_sadParser); + } catch (const std::runtime_error& exc) { + RH_ERROR(_appFactoryLog, "SAD " << softwareProfile + << " failed validation: " << exc.what()); + throw CF::DomainManager::ApplicationInstallationError(CF::CF_EBADF, exc.what()); } _name = _sadParser.getName(); @@ -696,76 +249,82 @@ ApplicationFactory_impl::~ApplicationFactory_impl () } -/* - * Check to make sure assemblyController was initialized if it was SCA compliant - */ -void createHelper::_checkAssemblyController( - CF::Resource_ptr assemblyController, - ossie::ComponentInfo* assemblyControllerComponent) const +const std::string& ApplicationFactory_impl::getIdentifier() const { - if (CORBA::is_nil(assemblyController)) { - if ((assemblyControllerComponent==NULL) || - (assemblyControllerComponent->isScaCompliant()) - ) { - LOG_DEBUG(ApplicationFactory_impl, "assembly controller is not Sca Compliant or has not been assigned"); - throw (CF::ApplicationFactory::CreateApplicationError( - CF::CF_NOTSET, - "assembly controller is not Sca Compliant or has not been assigned")); - } - } + return _identifier; } -void createHelper::_connectComponents(std::vector& connections){ - try{ - connectComponents(connections, _baseNamingContext); - } catch (CF::ApplicationFactory::CreateApplicationError& ex) { - throw; - } CATCH_THROW_LOG_TRACE( - ApplicationFactory_impl, - "Connecting components failed (unclear where this occurred)", - CF::ApplicationFactory::CreateApplicationError( - CF::CF_EINVAL, - "Connecting components failed (unclear where this occurred)")); +const std::string& ApplicationFactory_impl::getName() const +{ + return _name; } -void createHelper::_configureComponents() +const std::string& ApplicationFactory_impl::getSoftwareProfile() const { - try{ - configureComponents(); - } catch (CF::ApplicationFactory::CreateApplicationError& ex) { - throw; - } CATCH_THROW_LOG_TRACE( - 
ApplicationFactory_impl, - "Configure on component failed (unclear where in the process this occurred)", - CF::ApplicationFactory::CreateApplicationError(CF::CF_EINVAL, "Configure of component failed (unclear where in the process this occurred)")) + return _softwareProfile; } -void createHelper::assignRemainingComponentsToDevices(const std::string &appIdentifier) +void createHelper::assignPlacementsToDevices(redhawk::ApplicationDeployment& appDeployment, + const DeviceAssignmentMap& devices, + const std::map& specialized_reservations) { - PlacementList::iterator componentIter; - for (componentIter = _requiredComponents.begin(); - componentIter != _requiredComponents.end(); - componentIter++) - { - if (!(*componentIter)->isAssignedToDevice()) { - allocateComponent(*componentIter, std::string(), _appUsedDevs, appIdentifier); + // Try to place all of the collocations first, since they naturally have + // more restrictive placement constraints + BOOST_FOREACH(const SoftwareAssembly::HostCollocation& collocation, _appFact._sadParser.getHostCollocations()) { + _placeHostCollocation(appDeployment, collocation, devices, specialized_reservations); + } + + // Place the remaining components one-by-one + BOOST_FOREACH(const ComponentPlacement& placement, _appFact._sadParser.getComponentPlacements()) { + const SoftPkg* softpkg = _profileCache.loadProfile(placement.filename); + BOOST_FOREACH(const ComponentInstantiation& instantiation, placement.getInstantiations()) { + // Even though the XML supports more than one instantiation per + // component placement, the tooling doesn't support that, so this + // loop may be strictly academic + std::string assigned_device; + DeviceAssignmentMap::const_iterator device = devices.find(instantiation.getID()); + if (device != devices.end()) { + assigned_device = device->second; + RH_TRACE(_createHelperLog, "Component " << instantiation.getID() + << " is assigned to device " << assigned_device); + } + redhawk::ComponentDeployment* deployment = 
appDeployment.createComponentDeployment(softpkg, &instantiation); + allocateComponent(appDeployment, deployment, assigned_device, specialized_reservations); + + // For components that run as shared libraries, create or reuse a + // matching container deployment + if (deployment->getImplementation()->getCodeType() == SPD::Code::SHARED_LIBRARY) { + RH_DEBUG(_createHelperLog, "Component " << deployment->getInstantiation()->getID() + << "' implementation " << deployment->getImplementation()->getID() + << " is a shared library"); + redhawk::ContainerDeployment* container = appDeployment.createContainer(_profileCache, deployment->getAssignedDevice()); + if (!container->getAssignedDevice()) { + + const redhawk::PropertyMap& devReqs = deployment->getDeviceRequires(); + if ( devReqs.size() ) container->setDeviceRequires(devReqs); + // Use whether the device is assigned as a sentinel to check + // whether the container was already created, and if not, + // allocate it to the device + allocateComponent(appDeployment, container, deployment->getAssignedDevice()->identifier, specialized_reservations); + } + deployment->setContainer(container); + } + } } } -void createHelper::_assignComponentsUsingDAS(const DeviceAssignmentMap& deviceAssignments, const std::string &appIdentifier) +void createHelper::_validateDAS(redhawk::ApplicationDeployment& appDeployment, + const DeviceAssignmentMap& deviceAssignments) { - LOG_TRACE(ApplicationFactory_impl, "Assigning " << deviceAssignments.size() - << " component(s) based on DeviceAssignmentSequence"); - + RH_TRACE(_createHelperLog, "Validating device assignment sequence (length " + << deviceAssignments.size() << ")"); for (DeviceAssignmentMap::const_iterator ii = deviceAssignments.begin(); ii != deviceAssignments.end(); ++ii) { const std::string& componentId = ii->first; const std::string& assignedDeviceId = ii->second; - LOG_TRACE(ApplicationFactory_impl, "Component " << componentId << " is assigned to device " << assignedDeviceId); - 
ossie::ComponentInfo* component = findComponentByInstantiationId(componentId); - if (!component) { - LOG_ERROR(ApplicationFactory_impl, "Failed to create application; " + if (!_appFact._sadParser.getComponentInstantiation(componentId)) { + RH_ERROR(_createHelperLog, "Failed to create application; " << "unknown component " << componentId << " in user assignment (DAS)"); CF::DeviceAssignmentSequence badDAS; @@ -774,502 +333,536 @@ void createHelper::_assignComponentsUsingDAS(const DeviceAssignmentMap& deviceAs badDAS[0].assignedDeviceId = assignedDeviceId.c_str(); throw CF::ApplicationFactory::CreateApplicationRequestError(badDAS); } - allocateComponent(component, assignedDeviceId, _appUsedDevs, appIdentifier); } } -void createHelper::_resolveImplementations(PlacementList::iterator comp, PlacementList& compList, std::vector &res_vec) +bool createHelper::placeHostCollocation(redhawk::ApplicationDeployment& appDeployment, + const DeploymentList& components, + DeploymentList::const_iterator current, + ossie::DeviceList& deploymentDevices, + const redhawk::PropertyMap& deviceRequires, + const ReservationList& reservations, + const ProcessorList& processorDeps, + const OSList& osDeps) + { - if (comp == compList.end()) { - return; - } - ossie::ImplementationInfo::List comp_imps; - std::vector tmp_res_vec = res_vec; - (*comp)->getImplementations(comp_imps); - unsigned int old_res_vec_size = res_vec.size(); - if (old_res_vec_size == 0) { - res_vec.resize(comp_imps.size()); - for (unsigned int ii=0; iigetSoftPkg()->getImplementations(); + RH_TRACE(_createHelperLog, "Finding collocation-compatible implementations for component " + << deployment->getInstantiation()->getID()); + ++current; + for (SPD::Implementations::const_iterator impl = comp_impls.begin(); impl != comp_impls.end(); ++impl) { + const ossie::SPD::Implementation* implementation = &(*impl); + RH_TRACE(_createHelperLog, "Checking implementation " << implementation->getID()); + + // Check that the processor 
dependencies are compatible, filtering out + // anything not compatible with the current component + std::vector proc_list = processorDeps;; + if (!mergeDependencies(proc_list, implementation->getProcessors())) { + RH_TRACE(_createHelperLog, "Skipping implementation " << implementation->getID() + << ": no processor match"); + continue; } - } else { - res_vec.resize(old_res_vec_size * comp_imps.size()); - for (unsigned int i=0; i os_list = osDeps; + if (!mergeDependencies(os_list, implementation->getOsDeps())) { + RH_TRACE(_createHelperLog, "Skipping implementation " << implementation->getID() + << ": no OS match"); + continue; + } + + // Set this implementation for deployment and recurse one more level + deployment->setImplementation(implementation); + if (placeHostCollocation(appDeployment, components, current, deploymentDevices, deviceRequires, reservations, proc_list, os_list)) { + return true; } } - this->_resolveImplementations(++comp, compList, res_vec); - return; + + return false; } -void createHelper::_removeUnmatchedImplementations(std::vector &res_vec) +bool createHelper::allocateHostCollocation(redhawk::ApplicationDeployment& appDeployment, + const DeploymentList& components, + ossie::DeviceList& deploymentDevices, + const ProcessorList& processorDeps, + const OSList& osDeps, + const redhawk::PropertyMap& deviceRequires, + const ReservationList& reservations ) { - std::vector::iterator impl_list = res_vec.begin(); - while (impl_list != res_vec.end()) { - std::vector::iterator old_impl_list = impl_list; - ossie::ImplementationInfo::List::iterator impl = (*impl_list).begin(); - std::vector reference_pair = (*impl)->getOsDeps(); - std::vector reference_procs = (*impl)->getProcessorDeps(); - bool os_init_to_zero = (reference_pair.size()==0); - bool proc_init_to_zero = (reference_procs.size()==0); - impl++; - bool match = true; - while (impl != (*impl_list).end()) { - std::vector pair = (*impl)->getOsDeps(); - std::vector procs = (*impl)->getProcessorDeps(); 
- bool os_must_match = false; - bool proc_must_match = false; - if (os_init_to_zero) - os_must_match = false; - if (proc_init_to_zero) - proc_must_match = false; - if ((reference_pair.size() != 0) and (pair.size() != 0)) {os_must_match = true;} - if ((reference_procs.size() != 0) and (procs.size() != 0)) {proc_must_match = true;} - // if os must match (because both lists are non-zero length), check that at least one of the sets matches - if (os_must_match) { - bool at_least_one_match = false; - for (std::vector::iterator ref=reference_pair.begin(); ref::iterator cur=pair.begin(); cur::iterator ref=reference_procs.begin(); ref::iterator cur=procs.begin(); curpair.size()) { - for (std::vector::iterator ref=reference_pair.begin(); ref::iterator cur=pair.begin(); curprocs.size()) { - for (std::vector::iterator ref=reference_procs.begin(); ref::iterator cur=procs.begin(); cur::iterator cur=pair.begin(); cur::iterator cur=procs.begin(); cur nicAllocations; + redhawk::PropertyMap allocationProperties = _consolidateAllocations(components, nicAllocations); + if (reservations.size() != 0) { + redhawk::PropertyMap _struct; + std::vector _kinds, _values; + for (ReservationList::const_iterator it=reservations.begin(); it!=reservations.end(); it++) { + _kinds.push_back(it->kind); + _values.push_back(it->value); } - if (not match) { - (*impl_list).erase(impl); - } - impl_list++; + _struct["redhawk::reservation_request::kinds"].setValue(_kinds); + _struct["redhawk::reservation_request::values"].setValue(_values); + _struct["redhawk::reservation_request::obj_id"].setValue(appDeployment.getIdentifier()); + allocationProperties["redhawk::reservation_request"].setValue(_struct); } - return; -} -void createHelper::_consolidateAllocations(const PlacementList &placingComponents, const ossie::ImplementationInfo::List& impls, CF::Properties& allocs) -{ - allocs.length(0); - for (ossie::ImplementationInfo::List::const_iterator impl= impls.begin(); impl != impls.end(); ++impl) { - 
ComponentInfo* component = NULL; - for (PlacementList::const_iterator _comp=placingComponents.begin(); _comp!=placingComponents.end(); _comp++) { - ossie::ImplementationInfo::List _tmp_impls; - (*_comp)->getImplementations(_tmp_impls); - for (ossie::ImplementationInfo::List::iterator _tmp_impl=_tmp_impls.begin(); _tmp_impl != _tmp_impls.end(); _tmp_impl++) { - if ((*_tmp_impl) == (*impl)) { - component = (*_comp); - break; - } + RH_TRACE(_createHelperLog, "Allocating deployment for " << components.size() + << " collocated components"); + for (DeploymentList::const_iterator depl = components.begin(); depl != components.end(); ++depl) { + RH_TRACE(_createHelperLog, "Component " << (*depl)->getInstantiation()->getID() + << " implementation " << (*depl)->getImplementation()->getID()); + } + + if ( !deviceRequires.empty() ) { + RH_TRACE(_createHelperLog, "Collocation has devicerequires: " << deviceRequires ); + } + + const std::string requestid = ossie::generateUUID(); + ossie::AllocationResult response = _allocationMgr->allocateDeployment(requestid, allocationProperties, deploymentDevices, appDeployment.getIdentifier(), processorDeps, osDeps, deviceRequires); + if (!response.first.empty()) { + // Ensure that all capacities get cleaned up, keeping ownership local + // to this scope until it's clear that the device can support all of + // the collocated components' dependencies + ScopedAllocations local_allocations(*_allocationMgr); + local_allocations.push_back(response.first); + + // Convert from response back into a device node + boost::shared_ptr& node = response.second; + const std::string& deviceId = node->identifier; + + for (DeploymentList::const_iterator depl = components.begin(); depl != components.end(); ++depl) { + // Reset any dependencies that may have been resolved in a prior attempt + (*depl)->clearDependencies(); + if (!resolveSoftpkgDependencies(appDeployment, *depl, *node)) { + RH_TRACE(_createHelperLog, "Unable to resolve softpackage dependencies for 
component " + << (*depl)->getIdentifier() + << " implementation " << (*depl)->getImplementation()->getID()); + return false; } - } - CF::Properties configureProperties = component->getConfigureProperties(); - const CF::Properties &construct_props = component->getConstructProperties(); - unsigned int configlen = configureProperties.length(); - configureProperties.length(configureProperties.length()+construct_props.length()); - for (unsigned int i=0; i& deps = (*impl)->getDependencyProperties(); - - for (std::vector::const_iterator dep = deps.begin(); dep != deps.end(); ++dep) { - ossie::ComponentProperty *prop = dep->property.get(); - CF::Properties _tmp_allocs; - _tmp_allocs.length(1); - if (dynamic_cast( prop ) != NULL) { - const SimplePropertyRef* dependency = dynamic_cast(prop); - _tmp_allocs[0] = convertPropertyToDataType(dependency); - } else if (dynamic_cast(prop) != NULL) { - const SimpleSequencePropertyRef* dependency = dynamic_cast(prop); - _tmp_allocs[0] = convertPropertyToDataType(dependency); - } else if (dynamic_cast(prop) != NULL) { - const ossie::StructPropertyRef* dependency = dynamic_cast(prop); - _tmp_allocs[0] = convertPropertyToDataType(dependency); - } else if (dynamic_cast(prop) != NULL) { - const ossie::StructSequencePropertyRef* dependency = dynamic_cast(prop); - _tmp_allocs[0] = convertPropertyToDataType(dependency); + (*depl)->setAssignedDevice(node); + + const std::string component_id = (*depl)->getIdentifier(); + if (nicAllocations.count(component_id)) { + const std::string& alloc_id = nicAllocations[component_id]; + _applyNicAllocation((*depl), alloc_id, node->device); } - this->_evaluateMATHinRequest(_tmp_allocs, configureProperties); - ossie::corba::push_back(allocs, _tmp_allocs[0]); } + + // Once all the dependencies have been resolved, take ownership of the + // allocations + local_allocations.transfer(_allocations); + + // Move the device to the front of the list + rotateDeviceList(_executableDevices, deviceId); + + 
RH_TRACE(_createHelperLog, "Successful collocation allocation"); + return true; } -} + RH_TRACE(_createHelperLog, "Failed collocation allocation"); + return false; + } -void createHelper::_handleHostCollocation(const std::string &appIdentifier) +redhawk::PropertyMap createHelper::_consolidateAllocations(const DeploymentList& deployments, std::map& nicAllocs) { - const std::vector& hostCollocations = - _appFact._sadParser.getHostCollocations(); - LOG_TRACE(ApplicationFactory_impl, - "Assigning " << hostCollocations.size() - << " collocated groups of components"); + redhawk::PropertyMap allocs; + for (DeploymentList::const_iterator depl = deployments.begin(); depl != deployments.end(); ++depl) { + redhawk::PropertyMap allocationProperties = _getComponentAllocations(*depl); - for (unsigned int ii = 0; ii < hostCollocations.size(); ++ii) { - _placeHostCollocation(hostCollocations[ii], appIdentifier); + std::string nic_alloc_id = _getNicAllocationId(allocationProperties); + if (!nic_alloc_id.empty()) { + nicAllocs[(*depl)->getIdentifier()] = nic_alloc_id; + } + + ossie::corba::extend(allocs, allocationProperties); } + return allocs; } -void createHelper::_placeHostCollocation(const SoftwareAssembly::HostCollocation& collocation, const std::string &appIdentifier) +redhawk::PropertyMap createHelper::_getComponentAllocations(const redhawk::ComponentDeployment* deployment) { - LOG_TRACE(ApplicationFactory_impl, - "-- Begin placment for Collocation " << - collocation.getName() << " " << - collocation.getID()); + const ossie::SPD::Implementation* implementation = deployment->getImplementation(); + const std::vector& prop_refs = implementation->getDependencies(); + redhawk::PropertyMap allocationProperties; + this->_castRequestProperties(allocationProperties, prop_refs); - PlacementList placingComponents; - std::vector res_vec; + // Get the combined set of properties that are available at start time for + // the component (i.e., those that are passed to initializeProperties() 
or + // the first configure() call) to use as context for MATH statements + redhawk::PropertyMap alloc_context = deployment->getAllocationContext(); + this->_evaluateMATHinRequest(allocationProperties, alloc_context); + + return allocationProperties; +} + +std::string createHelper::_getNicAllocationId(redhawk::PropertyMap& allocationProperties) +{ + redhawk::PropertyMap::iterator nic_alloc = allocationProperties.find("nic_allocation"); + if (nic_alloc != allocationProperties.end()) { + redhawk::PropertyMap& substr = nic_alloc->getValue().asProperties(); + std::string alloc_id = substr["nic_allocation::identifier"].toString(); + if (alloc_id.empty()) { + alloc_id = ossie::generateUUID(); + substr["nic_allocation::identifier"] = alloc_id; + } + return alloc_id; + } + return std::string(); +} - // Some components may have been placed by a user DAS; keep a - // list of those that still need to be assigned to a device. - //PlacementList placingComponents; +void createHelper::_placeHostCollocation(redhawk::ApplicationDeployment& appDeployment, + const ossie::SoftwareAssembly::HostCollocation& collocation, + const DeviceAssignmentMap& devices, + const std::map& specialized_reservations) +{ + RH_TRACE(_createHelperLog, "Placing host collocation " << collocation.getID() + << " " << collocation.getName()); + std::pair < std::string, redhawk::PropertyMap > devReq(std::string(""), redhawk::PropertyMap()); // Keep track of devices to which some of the components have // been assigned. 
DeviceIDList assignedDevices; + DeploymentList deployments; + BOOST_FOREACH(const ComponentPlacement& placement, collocation.getComponents()) { + const SoftPkg* softpkg = _profileCache.loadProfile(placement.filename); + BOOST_FOREACH(const ComponentInstantiation& instantiation, placement.getInstantiations()) { + // Even though the XML supports more than one instantiation per + // component placement, the tooling doesn't support that, so this + // loop may be strictly academic + redhawk::ComponentDeployment* deployment = appDeployment.createComponentDeployment(softpkg, &instantiation); + deployments.push_back(deployment); + + DeviceAssignmentMap::const_iterator device = devices.find(instantiation.getID()); + if (device != devices.end()) { + assignedDevices.push_back(device->second); + } - const std::vector& collocatedComponents = - collocation.getComponents(); - - _getComponentsToPlace(collocatedComponents, - assignedDevices, - placingComponents); + // check if collocation contains a devicerequires set + if ( !deployment->getDeviceRequires().empty() ) { + devReq.first = deployment->getIdentifier(); + devReq.second = deployment->getDeviceRequires(); + RH_DEBUG(_createHelperLog, "Collocation contains devicerequires instance: " << devReq.first << " props :" << devReq.second); + } + } + } - // create every combination of implementations for the components in the set - // for each combination: - // consolidate allocations - // attempt allocation - // if the allocation succeeds, break the loop - this->_resolveImplementations(placingComponents.begin(), placingComponents, res_vec); - this->_removeUnmatchedImplementations(res_vec); + // + // if there are any collocated device requires, get the actual resolved usesdevice + // + std::vector< CF::Device_var > req_usesDevices; + BOOST_FOREACH(const UsesDeviceRef& devref, collocation.getUsesDeviceRefs()) { + std::string refid= devref.getID(); + CF::Device_var dev=appDeployment.lookupDeviceUsedByApplication(refid); + 
RH_DEBUG(_createHelperLog, "UsesDevice for collocation: " << dev->label() ); + if ( !CORBA::is_nil(dev) ) { + req_usesDevices.push_back(dev); + } + } // Get the executable devices for the domain; if there were any devices // assigned, filter out all other devices ossie::DeviceList deploymentDevices = _executableDevices; if (!assignedDevices.empty()) { - for (ossie::DeviceList::iterator node = deploymentDevices.begin(); node != deploymentDevices.end(); ++node) { + for (ossie::DeviceList::iterator node = deploymentDevices.begin(); node != deploymentDevices.end(); ) { if (std::find(assignedDevices.begin(), assignedDevices.end(), (*node)->identifier) == assignedDevices.end()) { node = deploymentDevices.erase(node); } + else { + node++; + } } } - for (size_t index = 0; index < res_vec.size(); ++index) { - // Merge processor and OS dependencies from all implementations - std::vector processorDeps = mergeProcessorDeps(res_vec[index]); - std::vector osDeps = mergeOsDeps(res_vec[index]); - - // Consolidate the allocation properties into a single list - CF::Properties allocationProperties; - this->_consolidateAllocations(placingComponents, res_vec[index], allocationProperties); + // if the collocation contains a deviceRequires then filter down the deployment list + if ( !devReq.first.empty() ) { + for (ossie::DeviceList::iterator node = deploymentDevices.begin(); node != deploymentDevices.end(); ) { + if ( (*node)->requiresProps != devReq.second ) { + node = deploymentDevices.erase(node); + } + node++; + } + } - const std::string requestid = ossie::generateUUID(); - ossie::AllocationResult response = this->_allocationMgr->allocateDeployment(requestid, allocationProperties, deploymentDevices, appIdentifier, processorDeps, osDeps); - if (!response.first.empty()) { - // Ensure that all capacities get cleaned up - this->_allocations.push_back(response.first); - - // Convert from response back into a device node - boost::shared_ptr& node = response.second; - const std::string& 
deviceId = node->identifier; - - PlacementList::iterator comp = placingComponents.begin(); - ossie::ImplementationInfo::List::iterator impl = res_vec[index].end()-1; - DeviceAssignmentList collocAssignedDevs; - collocAssignedDevs.resize(placingComponents.size()); - std::string emsg; - for (unsigned int i=0; idevice); - collocAssignedDevs[i].deviceAssignment.assignedDeviceId = CORBA::string_dup(deviceId.c_str()); - (*comp)->setSelectedImplementation(*impl); - if (!resolveSoftpkgDependencies(*impl, *node)) { - LOG_TRACE(ApplicationFactory_impl, "Unable to resolve softpackage dependencies for component " - << (*comp)->getIdentifier() << " implementation " << (*impl)->getId()); - continue; + // no deployment devices available so we can stop + if ( deploymentDevices.size() == 0 ) { + ostringstream os; + os << "No ExecutableDevices available to satisfy collocation."; + throw redhawk::PlacementFailure(collocation, os.str() ); + } + + // if there is a usesdevice in the collocation that filter out the GPPs that we can use. 
+ if ( req_usesDevices.size() > 0 ) { + // from the remaining list of deploymentDevices filter out those that not on the same host + for ( std::vector< CF::Device_var >::iterator dev = req_usesDevices.begin(); dev != req_usesDevices.end(); ++dev) { + RH_TRACE(_createHelperLog, "Find GPP for device collocation, device: " << (*dev)->label() << " Number available GPPs:" << deploymentDevices.size() ); + for (ossie::DeviceList::iterator node = deploymentDevices.begin(); node != deploymentDevices.end(); ) { + bool retval = ossie::sameHost( *dev, (*node)->device ); + RH_TRACE(_createHelperLog, "Check Collocation, Device: " << (*dev)->label() << " Executable Device: " << (*node)->device->label() << " --> RESULTS:" << retval ); + if ( retval == false ) { + node = deploymentDevices.erase(node); } - LOG_TRACE(ApplicationFactory_impl, "(collocation) Validate compponent's deployment files, component: " << - (*comp)->getIdentifier() << " implementation " << (*impl)->getId()); - if ( !validateImplementationCodeFile( *comp, *impl, emsg, false, true ) ) { - continue; + else { + node++; } - (*comp)->setAssignedDevice(node); - collocAssignedDevs[i].deviceAssignment.componentId = CORBA::string_dup((*comp)->getIdentifier()); } - - // Move the device to the front of the list - rotateDeviceList(_executableDevices, deviceId); + } - _appUsedDevs.insert(_appUsedDevs.end(), - collocAssignedDevs.begin(), - collocAssignedDevs.end()); - LOG_TRACE(ApplicationFactory_impl, "-- Completed placement for Collocation ID:" << collocation.id << " Components Placed: " << collocatedComponents.size()); - return; + if ( deploymentDevices.size() == 0 ) { + ostringstream os; + os << "No Collocated ExecutableDevices for Device: "; + for ( std::vector< CF::Device_var >::iterator dev = req_usesDevices.begin(); dev != req_usesDevices.end(); ++dev) { + os << (*dev)->label(); + if (dev+1 != req_usesDevices.end()) os << ", "; + } + RH_DEBUG(_createHelperLog, os.str() ); + throw redhawk::PlacementFailure(collocation, 
os.str() ); } + } - std::ostringstream eout; - eout << "Could not collocate components for collocation NAME: " << collocation.getName() << " ID:" << collocation.id; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationRequestError(); -} + // load any collocation-based reservations + std::vector _overloadedReservations = overloadReservations(collocation, specialized_reservations); -void createHelper::_getComponentsToPlace( - const std::vector& collocatedComponents, - DeviceIDList& assignedDevices, - PlacementList& placingComponents) -{ - std::vector::const_iterator placement = - collocatedComponents.begin(); - - for (; placement != collocatedComponents.end(); ++placement) { - ComponentInstantiation instantiation = - (placement->getInstantiations()).at(0); - ossie::ComponentInfo* component = - findComponentByInstantiationId(instantiation.getID()); - - if (!component) { - ostringstream eout; - eout << "Failed to create application; unable to recover component Id (error parsing the SAD file: "<<_appFact._softwareProfile<<")"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError( - CF::CF_EAGAIN, - eout.str().c_str()); - } - LOG_TRACE(ApplicationFactory_impl, - "Collocated component " << - component->getInstantiationIdentifier()); - - if (component->isAssignedToDevice()) { - // This component is already assigned to a device; for collocating - // other components, the pre-assigned devices are used in the order - // they are encountered. - LOG_TRACE(ApplicationFactory_impl, - "Already assigned to device " << - component->getAssignedDeviceId()); - assignedDevices.push_back( component->getAssignedDeviceId() ); - } else { - // This component needs to be assigned to a device. 
- placingComponents.push_back(component); + RH_TRACE(_createHelperLog, "Placing " << deployments.size() << " components"); + if (!placeHostCollocation(appDeployment, deployments, deployments.begin(), deploymentDevices, devReq.second, _overloadedReservations)) { + if (_allDevicesBusy(deploymentDevices)) { + throw redhawk::PlacementFailure(collocation, "all executable devices (GPPs) in the Domain are busy"); + } + throw redhawk::PlacementFailure(collocation, "failed to satisfy device dependencies"); + } + + for (DeploymentList::iterator deployment = deployments.begin(); deployment != deployments.end(); deployment++) { + if ((*deployment)->getImplementation()->getCodeType() == SPD::Code::SHARED_LIBRARY) { + RH_DEBUG(_createHelperLog, "Component " << (*deployment)->getInstantiation()->getID() + << "' implementation " << (*deployment)->getImplementation()->getID() + << " is a shared library"); + redhawk::ContainerDeployment* container = appDeployment.createContainer(_profileCache, (*deployment)->getAssignedDevice()); + if (!container->getAssignedDevice()) { + const redhawk::PropertyMap& devReqs = (*deployment)->getDeviceRequires(); + if ( devReqs.size() ) container->setDeviceRequires(devReqs); + // Use whether the device is assigned as a sentinel to check + // whether the container was already created, and if not, + // allocate it to the device + allocateComponent(appDeployment, container, (*deployment)->getAssignedDevice()->identifier, specialized_reservations); + } + (*deployment)->setContainer(container); } } + + RH_TRACE(_createHelperLog, "-- Completed placement for Collocation ID:" + << collocation.getID() << " Components Placed: " << deployments.size()); } -void createHelper::_handleUsesDevices(const std::string& appName) +std::vector createHelper::overloadReservations(const ossie::SoftwareAssembly::HostCollocation& collocation, + const std::map& specialized_reservations) { - // Gets all uses device info from the SAD file - const UsesDeviceInfo::List& usesDevices 
= _appInfo.getUsesDevices(); - LOG_TRACE(ApplicationFactory_impl, "Application has " << usesDevices.size() << " usesdevice dependencies"); - const CF::Properties& appProperties = _appInfo.getACProperties(); - // The device assignments for SAD-level usesdevices are never stored - DeviceAssignmentList assignedDevices; - if (!allocateUsesDevices(appName, usesDevices, appProperties, assignedDevices, this->_allocations)) { - // There were unsatisfied usesdevices for the application - ostringstream eout; - eout << "Failed to satisfy 'usesdevice' dependencies "; - bool first = true; - for (UsesDeviceInfo::List::const_iterator uses = usesDevices.begin(); uses != usesDevices.end(); ++uses) { - if ((*uses)->getAssignedDeviceId().empty()) { - if (!first) { - eout << ", "; - } else { - first = false; + const std::vector& reservations = collocation.getReservations(); + std::vector retval = reservations; + if ((reservations.size() == 0) and (specialized_reservations.size() == 0)) { + return retval; + } + + int number_collocations = _appFact._sadParser.getHostCollocations().size(); + if (number_collocations == 0) { + return retval; + } + int number_blank_specialization = 0; + for (std::map::const_iterator _it=specialized_reservations.begin();_it!=specialized_reservations.end();_it++) { + if (_it->first.empty()) { + number_blank_specialization++; + } + } + if (number_blank_specialization > 1) { + throw std::logic_error("Ambiguous specialized CPU usage; cannot have more than one blank specialization"); + } + if ((number_blank_specialization == 1) and (number_collocations > 1)) { + throw std::logic_error("Ambiguous specialized CPU usage; more than one host collocation cannot be matched to a blank specialization"); + } + if ((number_blank_specialization == 1) and (number_collocations == 1)) { + if (reservations.size() != 0) { + for (std::vector::iterator _it=retval.begin();_it!=retval.end();_it++) { + if (_it->getKind() == "cpucores") { + std::string value_str; + std::ostringstream 
ss; + ss<second; + value_str = ss.str(); + _it->overloadValue(value_str); + } + } + } + return retval; + } + bool found_overload = false; + bool has_value = false; + for (std::map::const_iterator _it_spec=specialized_reservations.begin();_it_spec!=specialized_reservations.end();_it_spec++) { + if (_it_spec->first == collocation.getID()) { + has_value = true; + for (std::vector::iterator _it=retval.begin();_it!=retval.end();_it++) { + if (_it->getKind() == "cpucores") { + found_overload = true; + std::string value_str; + std::ostringstream ss; + ss<first)->second; + value_str = ss.str(); + _it->overloadValue(value_str); } - eout << (*uses)->getId(); } } - eout << "for application '" << appName << "'"; - LOG_DEBUG(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_ENOSPC, eout.str().c_str()); } - for (DeviceAssignmentList::iterator dev=assignedDevices.begin(); dev!=assignedDevices.end(); dev++) { - dev->deviceAssignment.componentId = getAssemblyController()->getIdentifier(); + if ((not found_overload) and has_value) { + Reservation res; + res.kind = "cpucores"; + std::string value_str; + std::ostringstream ss; + ss<second; + res.value = ss.str(); + retval.push_back(res); + } + return retval; +} + +void createHelper::_handleUsesDevices(redhawk::ApplicationDeployment& appDeployment, + const std::string& appName) +{ + // Gets all uses device info from the SAD file + const std::vector& usesDevices = _appFact._sadParser.getUsesDevices(); + RH_TRACE(_createHelperLog, "Application has " << usesDevices.size() << " usesdevice dependencies"); + + // Get the assembly controller's configure properties for context in the + // allocations + CF::Properties appProperties = appDeployment.getAllocationContext(); + + // The device assignments for SAD-level usesdevices are never stored + redhawk::UsesDeviceDeployment assignedDevices; + if (!allocateUsesDevices(usesDevices, appProperties, assignedDevices, this->_allocations)) { + // There 
were unsatisfied usesdevices for the application + std::vector failed_ids = _getFailedUsesDevices(usesDevices, assignedDevices); + throw redhawk::UsesDeviceFailure(appDeployment, failed_ids); + } + + assignedDevices.transferUsesDeviceAssignments(appDeployment); +} + +std::vector createHelper::_getFailedUsesDevices(const std::vector& usesDevices, + redhawk::UsesDeviceDeployment& assignedDevices) +{ + std::vector failed_ids; + BOOST_FOREACH(const ossie::UsesDevice& uses, usesDevices) { + if (!assignedDevices.getUsesDeviceAssignment(uses.getID())) { + failed_ids.push_back(uses.getID()); + } + } + return failed_ids; +} + +void createHelper::checkOptions() +{ + RH_TRACE(_createHelperLog, + "Number of optionss: " << _appFact._sadParser.getOptions().size()); + + BOOST_FOREACH(const SoftwareAssembly::Option& option, _appFact._sadParser.getOptions()) { + if (option.name == "AWARE_APPLICATION") { + if ((option.value == "true") || (option.value == "True") || (option.value == "TRUE") || (option.value == "1")) { + this->_aware = true; + } else if ((option.value == "false") || (option.value == "False") || (option.value == "FALSE") || (option.value == "0")) { + this->_aware = false; + } + } else if (option.name == "STOP_TIMEOUT") { + this->_stopTimeout = strtof(option.value.c_str(), NULL); + } } - _appUsedDevs.insert(_appUsedDevs.end(), assignedDevices.begin(), assignedDevices.end()); } -void createHelper::setUpExternalPorts(Application_impl* application) +void createHelper::setUpExternalPorts(redhawk::ApplicationDeployment& appDeployment, + Application_impl* application) { - const std::vector& ports = - _appInfo.getExternalPorts(); - LOG_TRACE(ApplicationFactory_impl, - "Mapping " << ports.size() << " external port(s)"); - std::vector::const_iterator port; + RH_TRACE(_createHelperLog, + "Mapping " << _appFact._sadParser.getExternalPorts().size() << " external port(s)"); - for (port = ports.begin(); port != ports.end(); ++port) { - LOG_TRACE(ApplicationFactory_impl, - "Port 
component: " << port->componentrefid - << " Port identifier: " << port->identifier); + BOOST_FOREACH(const SoftwareAssembly::Port& port, _appFact._sadParser.getExternalPorts()) { + RH_TRACE(_createHelperLog, "External port '" << port.getExternalName() + << "' from component '" << port.componentrefid + << "' identifier '" << port.identifier << "'"); // Get the component from the instantiation identifier. - CORBA::Object_var obj = - lookupComponentByInstantiationId(port->componentrefid); - if (CORBA::is_nil(obj)) { - LOG_ERROR(ApplicationFactory_impl, - "Invalid componentinstantiationref (" - <componentrefid - <<") given for an external port "); - throw(CF::ApplicationFactory::CreateApplicationError( - CF::CF_NOTSET, - "Invalid componentinstantiationref given for external port")); - } - - if (port->type == SoftwareAssembly::Port::SUPPORTEDIDENTIFIER) { - ossie::corba::overrideBlockingCall(obj); - if (!obj->_is_a(port->identifier.c_str())) { - LOG_ERROR( - ApplicationFactory_impl, - "Component does not support requested interface: " - << port->identifier); - throw(CF::ApplicationFactory::CreateApplicationError( - CF::CF_NOTSET, - "Component does not support requested interface")); + redhawk::ComponentDeployment* deployment = appDeployment.getComponentDeployment(port.componentrefid); + if (!deployment) { + // The SAD parser should have rejected invalid component references + throw std::logic_error("component not found for external port '" + port.getExternalName() + "'"); + } + + CF::Resource_var resource = deployment->getResourcePtr(); + CORBA::Object_var obj; + + if (port.type == SoftwareAssembly::Port::SUPPORTEDIDENTIFIER) { + ossie::corba::overrideBlockingCall(resource); + if (!resource->_is_a(port.identifier.c_str())) { + throw redhawk::BadExternalPort(port, "component does not support interface " + port.identifier); } + obj = CORBA::Object::_duplicate(resource); } else { // Must be either "usesidentifier" or "providesidentifier", // which are equivalent unless you 
want to be extra // pedantic and check how the port is described in the // component's SCD. - - CF::PortSupplier_var portSupplier = - ossie::corba::_narrowSafe (obj); - // Try to look up the port. try { - obj = portSupplier->getPort(port->identifier.c_str()); - } CATCH_THROW_LOG_ERROR( - ApplicationFactory_impl, - "Invalid port id", - CF::ApplicationFactory::CreateApplicationError( - CF::CF_NOTSET, - "Invalid port identifier")) + obj = resource->getPort(port.identifier.c_str()); + } catch (const CF::PortSupplier::UnknownPort& exc) { + throw redhawk::BadExternalPort(port, "component has no port '" + port.identifier + "'"); + } catch (const CORBA::SystemException& exc) { + throw redhawk::BadExternalPort(port, ossie::corba::describeException(exc)); + } catch (...) { + // Should never happen, but turn anything else into a + // BadExternalPort just in case + throw redhawk::BadExternalPort(port, "unexpected error"); + } } // Add it to the list of external ports on the application object. - if (port->externalname == ""){ - application->addExternalPort(port->identifier, obj); - } else { - application->addExternalPort(port->externalname, obj); - } + application->addExternalPort(port.getExternalName(), obj); } } -void createHelper::setUpExternalProperties(Application_impl* application) +void createHelper::setUpExternalProperties(redhawk::ApplicationDeployment& appDeployment, + Application_impl* application) { - const std::vector& props = _appInfo.getExternalProperties(); - LOG_TRACE(ApplicationFactory_impl, "Mapping " << props.size() << " external property(ies)"); + const std::vector& props = _appFact._sadParser.getExternalProperties(); + RH_TRACE(_createHelperLog, "Mapping " << props.size() << " external property(ies)"); for (std::vector::const_iterator prop = props.begin(); prop != props.end(); ++prop) { - LOG_TRACE(ApplicationFactory_impl, "Property component: " << prop->comprefid << " Property identifier: " << prop->propid); - - // Verify internal property - 
ComponentInfo *tmp = findComponentByInstantiationId(prop->comprefid); - if (tmp == 0) { - LOG_ERROR(ApplicationFactory_impl, "Unable to find component for comprefid " << prop->comprefid); - throw(CF::ApplicationFactory::CreateApplicationError(CF::CF_NOTSET, "Unable to find component for given comprefid")); - } - const std::vector& props = tmp->prf.getProperties(); - bool foundProp = false; - for (unsigned int i = 0; i < props.size(); ++i) { - if (props[i]->getID() == prop->propid){ - foundProp = true; - } - } - if (!foundProp){ - LOG_ERROR(ApplicationFactory_impl, "Attempting to promote property: '" << - prop->propid << "' that does not exist in component: '" << prop->comprefid << "'"); - throw (CF::ApplicationFactory::CreateApplicationError(CF::CF_NOTSET, - "Attempting to promote property that does not exist in component")); - } + RH_TRACE(_createHelperLog, "Property component: " << prop->comprefid << " Property identifier: " << prop->propid); // Get the component from the compref identifier. 
- CF::Resource_var comp = lookupComponentByInstantiationId(prop->comprefid); - if (CORBA::is_nil(comp)) { - LOG_ERROR(ApplicationFactory_impl, "Invalid comprefid (" << prop->comprefid << ") given for an external property"); - throw(CF::ApplicationFactory::CreateApplicationError(CF::CF_NOTSET, "Invalid comprefid given for external property")); + redhawk::ComponentDeployment* deployment = appDeployment.getComponentDeployment(prop->comprefid); + if (!deployment) { + // The SAD parser should have rejected invalid component references + throw std::logic_error("component not found for external property '" + prop->getExternalID() + "'"); } - - const ossie::Properties* _comp_props = lookupComponentPropertiesByInstantiationId(prop->comprefid); - std::string _access = "readwrite"; - for (std::vector::const_iterator _it=_comp_props->getProperties().begin(); _it!=_comp_props->getProperties().end(); _it++) { - std::string _id((*_it)->getID()); - if (_id != prop->propid) - continue; - if ((*_it)->getMode()) { - _access = (*_it)->getMode(); - } + const Property* property = deployment->getSoftPkg()->getProperties()->getProperty(prop->propid); + if (!property) { + throw redhawk::DeploymentError("Attempting to promote property '" + prop->propid + "' that does not exist in component '" + prop->comprefid + "'"); } - if (prop->externalpropid == "") { - application->addExternalProperty(prop->propid, - prop->propid, - _access, - comp); + std::ostringstream _access; + if (property->getMode()) { + _access << property->getMode(); } else { - application->addExternalProperty(prop->propid, - prop->externalpropid, - _access, - comp); + _access << "readwrite"; } + + CF::Resource_var comp = deployment->getResourcePtr(); + application->addExternalProperty(prop->propid, prop->getExternalID(), _access.str(), comp); } } @@ -1293,8 +886,7 @@ throw (CORBA::SystemException, CF::ApplicationFactory::CreateApplicationError, CF::ApplicationFactory::CreateApplicationInsufficientCapacityError, 
CF::ApplicationFactory::InvalidInitConfiguration) { - TRACE_ENTER(ApplicationFactory_impl); - LOG_TRACE(ApplicationFactory_impl, "Creating application " << name); + RH_TRACE(_appFactoryLog, "Creating application " << name); // must declare these here, so we can pass to the createHelper instance string _waveform_context_name; @@ -1303,7 +895,7 @@ throw (CORBA::SystemException, CF::ApplicationFactory::CreateApplicationError, /////////////////////////////////////////////////// // Establish new naming context for waveform - LOG_TRACE(ApplicationFactory_impl, "Establishing waveform naming context"); + RH_TRACE(_appFactoryLog, "Establishing waveform naming context"); try { // VERY IMPORTANT: we must first lock the operations in this try block // in order to prevent a naming context collision due to multiple create calls @@ -1320,7 +912,7 @@ throw (CORBA::SystemException, CF::ApplicationFactory::CreateApplicationError, WaveformContextName.length(1); WaveformContextName[0].id = _waveform_context_name.c_str(); - LOG_TRACE(ApplicationFactory_impl, "Binding new context " << _waveform_context_name.c_str()); + RH_TRACE(_appFactoryLog, "Binding new context " << _waveform_context_name.c_str()); try { _waveformContext = _domainContext->bind_new_context(WaveformContextName); } catch( ... ) { @@ -1330,7 +922,7 @@ throw (CORBA::SystemException, CF::ApplicationFactory::CreateApplicationError, _domainContext->unbind(WaveformContextName); } catch ( ... 
) { } - LOG_ERROR(ApplicationFactory_impl, "bind_new_context threw Unknown Exception"); + RH_ERROR(_appFactoryLog, "bind_new_context threw Unknown Exception"); throw; } @@ -1348,14 +940,41 @@ throw (CORBA::SystemException, CF::ApplicationFactory::CreateApplicationError, // now use the createHelper class to actually run 'create' // - createHelper is needed to allow concurrent calls to 'create' without // each instance stomping on the others - LOG_TRACE(ApplicationFactory_impl, "Creating new createHelper class."); + RH_TRACE(_appFactoryLog, "Creating new createHelper class."); createHelper new_createhelper(*this, _waveform_context_name, base_naming_context, _waveformContext, _domainContext); // now actually perform the create operation - LOG_TRACE(ApplicationFactory_impl, "Performing 'create' function."); - CF::Application_ptr new_app = new_createhelper.create(name, initConfiguration, deviceAssignmentMap); + RH_TRACE(_appFactoryLog, "Performing 'create' function."); + CF::Application_ptr new_app; + try { + new_app = new_createhelper.create(name, initConfiguration, deviceAssignmentMap); + } catch (const redhawk::DeploymentError& exc) { + // Convert from internal error to CORBA exception and report the error + const std::string message = exc.message(); + RH_ERROR(_appFactoryLog, "Failed to create application '" << name << "': " << message); + throw CF::ApplicationFactory::CreateApplicationError(exc.errorNumber(), message.c_str()); + } catch (CF::ApplicationFactory::CreateApplicationError& ex) { + RH_ERROR(_appFactoryLog, "Error in application creation; " << ex.msg); + throw; + } catch (CF::ApplicationFactory::CreateApplicationRequestError& ex) { + RH_ERROR(_appFactoryLog, "Error in application creation") + throw; + } catch (const std::exception& ex) { + std::ostringstream eout; + eout << "The following standard exception occurred: "<>= aware_application; - modifiedInitConfiguration.length(initConfiguration.length()-1); - for (unsigned int rem_idx=0; rem_idx>= 
reservations) { - for (unsigned int rem_idx=0; rem_idxlength(); rem_idx++) { - double value = 0; - std::string component_id((*reservations)[rem_idx].id); - if ((*reservations)[rem_idx].value >>= value) { - specialized_reservations[component_id] = value; - } + checkOptions(); + + /////////////////////////////////////////////////////////////////// + // Check to see if this is an aware application and + // check to see if a different GPP reservation setting is defined + const std::string aware_app_deprecated_property_id("AWARE_APPLICATION"); + const std::string specialized_reservation_id("SPECIALIZED_CPU_RESERVATION"); + + std::map specialized_reservations; + for (unsigned int initCount = 0; initCount < initConfiguration.length(); initCount++) { + const std::string stringId(initConfiguration[initCount].id); + const redhawk::Value& value = redhawk::Value::cast(initConfiguration[initCount].value); + if ((stringId == ExtendedCF::WKP::AWARE_APPLICATION) or (stringId == aware_app_deprecated_property_id)) { + _aware = value.toBoolean(); + } else if (stringId == ExtendedCF::WKP::STOP_TIMEOUT) { + _stopTimeout = value.toFloat(); + } else if (stringId == specialized_reservation_id) { + if (value.getType() == redhawk::Value::TYPE_PROPERTIES) { + const redhawk::PropertyMap& reservations = value.asProperties(); + for (unsigned int idx=0; idxgetRegisteredDevices(); - _executableDevices.clear(); - for (DeviceList::iterator iter = _registeredDevices.begin(); iter != _registeredDevices.end(); ++iter) { - if ((*iter)->isExecutable) { - _executableDevices.push_back(*iter); - } + // Get a list of all device currently in the domain + _registeredDevices = _appFact._domainManager->getRegisteredDevices(); + _executableDevices.clear(); + for (DeviceList::iterator iter = _registeredDevices.begin(); iter != _registeredDevices.end(); ++iter) { + if ((*iter)->isExecutable()) { + _executableDevices.push_back(*iter); } + } - // Fail immediately if there are no available devices to execute 
components - if (_executableDevices.empty()) { - const char* message = "Domain has no executable devices (GPPs) to run components"; - LOG_WARN(ApplicationFactory_impl, message); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_ENODEV, message); - } + // Fail immediately if there are no available devices to execute components + if (_executableDevices.empty()) { + throw redhawk::NoExecutableDevices(); + } - const std::string lastExecutableDevice = _appFact._domainManager->getLastDeviceUsedForDeployment(); - if (!lastExecutableDevice.empty()) { - LOG_TRACE(ApplicationFactory_impl, "Placing device " << lastExecutableDevice - << " first in deployment list"); - rotateDeviceList(_executableDevices, lastExecutableDevice); - } + const std::string lastExecutableDevice = _appFact._domainManager->getLastDeviceUsedForDeployment(); + if (!lastExecutableDevice.empty()) { + RH_TRACE(_createHelperLog, "Placing device " << lastExecutableDevice + << " first in deployment list"); + rotateDeviceList(_executableDevices, lastExecutableDevice); + } - ////////////////////////////////////////////////// - // Load the components to instantiate from the SAD - getRequiredComponents(); + ////////////////////////////////////////////////// + // Load the components to instantiate from the SAD + redhawk::ApplicationDeployment app_deployment(_appFact._sadParser, _waveformContextName, modifiedInitConfiguration); + app_deployment.setLogger(_createHelperLog); - ossie::ComponentInfo* assemblyControllerComponent = getAssemblyController(); - if (assemblyControllerComponent) { - overrideProperties(modifiedInitConfiguration, assemblyControllerComponent); - } + //////////////////////////////////////////////// + // Assign components to devices + //////////////////////////////////////////////// - ////////////////////////////////////////////////// - // Store information about this application - _appInfo.populateApplicationInfo(_appFact._sadParser); - for (unsigned int i = 0; i < 
_requiredComponents.size(); ++i) { - ComponentInfo *comp = _requiredComponents[i]; - if (comp->isAssemblyController()) { - _appInfo.setACProperties(comp->getConfigureProperties()); - } - _appInfo.addComponent(comp); - } - - overrideExternalProperties(modifiedInitConfiguration); - - //////////////////////////////////////////////// - // Assign components to devices - //////////////////////////////////////////////// - - /* - * _appUsedDevs and appCapacityTable represent all the allocations - * and assigned made during applicaiton deployment. It provides the - * "context" for the deployment. This context pattern will be - * applied again when collocation requests are fullfilled. There 2 - * container are used to deploy the waveform, and also to "cleanup" - * if deployment fails - */ - - // reset list of devices that were used during component - // allocation/placement process for an application - _appUsedDevs.resize(0); - - // Start with a empty set of allocation properties, used to keep - // track of device capacity allocations. If this is not cleared - // each time, deallocation may start occuring multiple times, - // resulting in incorrect capacities. - //_appCapacityTable.clear(); - - // Allocate any usesdevice capacities specified in the SAD file - _handleUsesDevices(name); - - // Give the application a unique identifier of the form - // "softwareassemblyid:ApplicationName", where the application - // name includes the serial number generated for the naming context - // (e.g. "Application_1"). - std::string appIdentifier = - _appFact._identifier + ":" + _waveformContextName; - - // First, assign components to devices based on the caller supplied - // DAS. - _assignComponentsUsingDAS(deviceAssignments, appIdentifier); - - // Second, attempt to honor host collocation. 
- _handleHostCollocation(appIdentifier); - - assignRemainingComponentsToDevices(appIdentifier); - - //////////////////////////////////////////////// - // Create the Application servant - - // Manage the Application servant with an auto_ptr in case - // something throws an exception. - _application = new Application_impl(appIdentifier, - name, - _appFact._softwareProfile, - _appFact._domainManager, - _waveformContextName, - _waveformContext, - aware_application, - _domainContext); - - // Activate the new Application servant - PortableServer::ObjectId_var oid = Application_impl::Activate(_application); - - std::vector connections; - std::vector allocationIDs; - - CF::ApplicationRegistrar_var app_reg = _application->appReg(); - loadAndExecuteComponents(app_reg); - waitForComponentRegistration(); - initializeComponents(); - - // Check that the assembly controller is valid - CF::Resource_var assemblyController; - if (assemblyControllerComponent) { - assemblyController = assemblyControllerComponent->getResourcePtr(); - } - _checkAssemblyController(assemblyController, assemblyControllerComponent); - - _connectComponents(connections); - _configureComponents(); - - setUpExternalPorts(_application); - setUpExternalProperties(_application); - - //////////////////////////////////////////////// - // Create the application - // - // We are assuming that all components and their resources are - // collocated. This means that we assume the SAD - // element contains the element. NB: Ownership - // of the ConnectionManager is passed to the application. 
- _allocations.transfer(allocationIDs); - - _application->populateApplication( - assemblyController, - _appUsedDevs, - _startSeq, - connections, - allocationIDs); - - // Add a reference to the new application to the - // ApplicationSequence in DomainManager - CF::Application_var appObj = _application->_this(); - try { - _appFact._domainManager->addApplication(_application); - } catch (CF::DomainManager::ApplicationInstallationError& ex) { - // something bad happened - clean up - LOG_ERROR(ApplicationFactory_impl, ex.msg); - throw CF::ApplicationFactory::CreateApplicationError(ex.errorNumber, ex.msg); - } + // Catch invalid device assignments + _validateDAS(app_deployment, deviceAssignments); - // After all components have been deployed, we know that the first - // executable device in the list was used for the last deployment, - // so update the domain manager - _appFact._domainManager->setLastDeviceUsedForDeployment(_executableDevices.front()->identifier); + // resolve assembly controller to assist with usesdevices that + // require matching properties + _resolveAssemblyController(app_deployment); - if ( _appFact._domainManager ) { - _appFact._domainManager->sendAddEvent( _appFact._identifier.c_str(), - appIdentifier.c_str(), - name, - appObj, - StandardEvent::APPLICATION); - } + // check to make sure that there's no collision between sad-based and command-line reservations + if (specialized_reservations.size() != 0) { + verifyNoCpuSpecializationCollisions(_appFact._sadParser, specialized_reservations); + } - LOG_INFO(ApplicationFactory_impl, "Done creating application " << appIdentifier << " " << name); - _isComplete = true; - return appObj._retn(); - } catch (CF::ApplicationFactory::CreateApplicationError& ex) { - LOG_ERROR(ApplicationFactory_impl, "Error in application creation; " << ex.msg); - throw; - } catch (CF::ApplicationFactory::CreateApplicationRequestError& ex) { - LOG_ERROR(ApplicationFactory_impl, "Error in application creation") - throw; - } catch ( 
std::exception& ex ) { - ostringstream eout; - eout << "The following standard exception occurred: "<_aware, + this->_stopTimeout, + _domainContext); + _appFact._domainManager->addPendingApplication(_application); + + // Activate the new Application servant + PortableServer::ObjectId_var oid = Application_impl::Activate(_application); + + CF::ApplicationRegistrar_var app_reg = _application->appReg(); + loadAndExecuteContainers(app_deployment.getContainerDeployments(), app_reg); + waitForContainerRegistration(app_deployment); + + loadAndExecuteComponents(app_deployment.getComponentDeployments(), app_reg); + waitForComponentRegistration(app_deployment); + + // Check that the assembly controller is valid + RH_TRACE(_createHelperLog, "Checking assembly controller"); + redhawk::ComponentDeployment* ac_deployment = app_deployment.getAssemblyController(); + if (!ac_deployment) { + // This condition should have been prevented by parser validation + throw std::logic_error("Assembly controller has not been assigned"); } + CF::Resource_var assemblyController = ac_deployment->getResourcePtr(); + if (CORBA::is_nil(assemblyController) && ac_deployment->getSoftPkg()->isScaCompliant()) { + // Likewise, component registration should have already thrown an + // exception if an SCA-compliant component did not register + throw std::logic_error("Assembly controller has not registered with the application"); + } + _application->setAssemblyController(ac_deployment->getIdentifier()); -} + initializeComponents(app_deployment.getComponentDeployments()); -ossie::ComponentInfo* createHelper::getAssemblyController() -{ - for (PlacementList::iterator ii = _requiredComponents.begin(); ii != _requiredComponents.end(); ++ii) { - if ((*ii)->isAssemblyController()) { - return *ii; + std::vector connections; + connectComponents(app_deployment, connections, _baseNamingContext); + configureComponents(app_deployment.getComponentDeployments()); + + setUpExternalPorts(app_deployment, _application); + 
setUpExternalProperties(app_deployment, _application); + + //////////////////////////////////////////////// + // Create the application + // + // We are assuming that all components and their resources are + // collocated. This means that we assume the SAD + // element contains the element. NB: Ownership + // of the ConnectionManager is passed to the application. + std::vector allocationIDs; + _allocations.transfer(allocationIDs); + + // Fill in the uses devices for the application + CF::DeviceAssignmentSequence app_devices; + typedef std::vector UsesList; + const UsesList& app_uses = app_deployment.getUsesDeviceAssignments(); + for (UsesList::const_iterator uses = app_uses.begin(); uses != app_uses.end(); ++uses) { + CF::DeviceAssignmentType assignment; + assignment.componentId = CORBA::string_dup(name); + std::string deviceId; + try { + deviceId = ossie::corba::returnString((*uses)->getAssignedDevice()->identifier()); + } catch (...) { } + assignment.assignedDeviceId = deviceId.c_str(); + ossie::corba::push_back(app_devices, assignment); } - return 0; -} -void createHelper::overrideExternalProperties(const CF::Properties& initConfiguration) -{ - const std::vector& props = _appInfo.getExternalProperties(); - - for (unsigned int i = 0; i < initConfiguration.length(); ++i) { - for (std::vector::const_iterator prop = props.begin(); prop != props.end(); ++prop) { - std::string id; - if (prop->externalpropid == "") { - id = prop->propid; - } else { - id = prop->externalpropid; - } + const DeploymentList& deployments = app_deployment.getComponentDeployments(); + for (DeploymentList::const_iterator dep = deployments.begin(); dep != deployments.end(); ++dep) { + CF::DeviceAssignmentType comp_assignment; + comp_assignment.componentId = (*dep)->getIdentifier().c_str(); + comp_assignment.assignedDeviceId = (*dep)->getAssignedDevice()->identifier.c_str(); + ossie::corba::push_back(app_devices, comp_assignment); - if (id == static_cast(initConfiguration[i].id)) { - 
ComponentInfo *comp = findComponentByInstantiationId(prop->comprefid); - if (comp != 0) { - comp->overrideProperty(prop->propid.c_str(), initConfiguration[i].value); - } + const UsesList& dep_uses = (*dep)->getUsesDeviceAssignments(); + for (UsesList::const_iterator uses = dep_uses.begin(); uses != dep_uses.end(); ++uses) { + CF::DeviceAssignmentType assignment; + assignment.componentId = (*dep)->getIdentifier().c_str(); + std::string deviceId; + try { + deviceId = ossie::corba::returnString((*uses)->getAssignedDevice()->identifier()); + } catch (...) { } + assignment.assignedDeviceId = deviceId.c_str(); + ossie::corba::push_back(app_devices, assignment); } } -} -void createHelper::overrideProperties(const CF::Properties& initConfiguration, - ossie::ComponentInfo* component) { - // Override properties - for (unsigned int initCount = 0; initCount < initConfiguration.length(); initCount++) { - const std::string init_id(initConfiguration[initCount].id); - if (init_id == "LOGGING_CONFIG_URI"){ - // See if the LOGGING_CONFIG_URI has already been set - // via or initParams - bool alreadyHasLoggingConfigURI = false; - CF::Properties execParameters = component->getExecParameters(); - for (unsigned int i = 0; i < execParameters.length(); ++i) { - const std::string propid(execParameters[i].id); - if (propid == "LOGGING_CONFIG_URI") { - alreadyHasLoggingConfigURI = true; + std::vector start_order = getStartOrder(app_deployment.getComponentDeployments()); + _application->setStartOrder(start_order); + + _application->populateApplication(app_devices, + connections, + allocationIDs); + + // Add a reference to the new application to the + // ApplicationSequence in DomainManager + CF::Application_var appObj = _application->_this(); + try { + _appFact._domainManager->completePendingApplication(_application); + } catch (CF::DomainManager::ApplicationInstallationError& ex) { + // something bad happened - clean up + RH_ERROR(_createHelperLog, ex.msg); + throw 
CF::ApplicationFactory::CreateApplicationError(ex.errorNumber, ex.msg); + } + + // After all components have been deployed, we know that the first + // executable device in the list was used for the last deployment, + // so update the domain manager + _appFact._domainManager->setLastDeviceUsedForDeployment(_executableDevices.front()->identifier); + + _appFact._domainManager->sendAddEvent(_appFact._identifier, + app_deployment.getIdentifier(), + name, + appObj, + StandardEvent::APPLICATION); + + RH_INFO(_createHelperLog, "Done creating application " << app_deployment.getIdentifier() << " " << name); + _isComplete = true; + return appObj._retn(); +} + +void createHelper::verifyNoCpuSpecializationCollisions(const ossie::SoftwareAssembly& sad, std::map specialized_reservations) { + std::vector host_collocation_names = this->getHostCollocationsIds(); + int number_empty = 0; + bool found_host_collocation = false; + BOOST_FOREACH(const SoftwareAssembly::HostCollocation& collocation, _appFact._sadParser.getHostCollocations()) { + if (collocation.getReservations().size() > 0) { + found_host_collocation = true; + break; + } + } + bool found_component = false; + bool name_is_nothing = false; + std::string bad_name; + for (std::map::iterator _reservation=specialized_reservations.begin();_reservation!=specialized_reservations.end();_reservation++) { + if (not _reservation->first.empty()) { + bool host_collocation = false; + for (std::vector::iterator _name=host_collocation_names.begin();_name!=host_collocation_names.end();_name++) { + if (*_name == _reservation->first) { + host_collocation = true; + found_host_collocation = true; break; } } - // If LOGGING_CONFIG_URI isn't already an exec param, add it - // Otherwise, don't override component exec param value - if (!alreadyHasLoggingConfigURI) { - // Add LOGGING_CONFIG_URI as an exec param now so that it can be set to the overridden value - CF::DataType lcuri = initConfiguration[initCount]; - component->addExecParameter(lcuri); 
- LOG_TRACE(ApplicationFactory_impl, "Adding LOGGING_CONFIG_URI as exec param with value " - << ossie::any_to_string(lcuri.value)); + if (not host_collocation) { + BOOST_FOREACH(const ComponentPlacement& placement, sad.getComponentPlacements()) { + if (placement.getInstantiations()[0].getID() == _reservation->first) { + found_component = true; + break; + } + } + BOOST_FOREACH(const SoftwareAssembly::HostCollocation& _hostcollocation, sad.getHostCollocations()) { + BOOST_FOREACH(const ComponentPlacement& placement, _hostcollocation.getComponents()) { + if (placement.getInstantiations()[0].getID() == _reservation->first) { + found_component = true; + break; + } + } + } + if (found_component) + break; + name_is_nothing = true; + bad_name = _reservation->first; + break; } } else { - LOG_TRACE(ApplicationFactory_impl, "Overriding property " << init_id - << " with " << ossie::any_to_string(initConfiguration[initCount].value)); - component->overrideProperty(init_id.c_str(), initConfiguration[initCount].value); + number_empty++; + } + } + if (name_is_nothing) { + throw std::logic_error("'SPECIALIZED_CPU_RESERVATION must include a hostcollocation id, a component id, or (when not ambiguous), a blank, bad id is: "+bad_name+"'"); + } + if (number_empty > 1) { + throw std::logic_error("'SPECIALIZED_CPU_RESERVATION cannot have more than 1 hostcollocation without an id'"); + } + if (number_empty > 0) + found_host_collocation = true; + if (found_host_collocation and found_component) { + throw std::logic_error("'SPECIALIZED_CPU_RESERVATION cannot mix hostcollocation and component reservations'"); + } +} + +std::vector createHelper::getComponentUsageNames(redhawk::ApplicationDeployment& appDeployment) { + std::vector retval; + BOOST_FOREACH(const redhawk::ComponentDeployment* compdep, appDeployment.getComponentDeployments()) { + retval.push_back(compdep->getInstantiation()->usageName); + } + return retval; +} + +std::vector createHelper::getHostCollocationsIds() { + std::vector 
retval; + BOOST_FOREACH(const SoftwareAssembly::HostCollocation& collocation, _appFact._sadParser.getHostCollocations()) { + std::string _name; + if (not collocation.id.empty()) { + _name = collocation.id; + } + retval.push_back(_name); + } + return retval; +} + +void createHelper::_resolveAssemblyController( redhawk::ApplicationDeployment& appDeployment ) { + + // Place the remaining components one-by-one + std::string asm_refid = _appFact._sadParser.getAssemblyControllerRefId(); + const ComponentPlacement *asm_placement = _appFact._sadParser.getAssemblyControllerPlacement(); + if ( asm_placement && asm_refid != "" and asm_refid.size() > 0 ) { + const SoftPkg* softpkg = _profileCache.loadProfile(asm_placement->filename); + const ComponentInstantiation *asm_inst = asm_placement->getInstantiation(asm_refid); + if ( asm_inst ) { + std::string inst_id = asm_inst->getID(); + RH_DEBUG(_createHelperLog, "Resolved ASSEMBLY CONTROLLER: " << asm_refid ); + redhawk::ComponentDeployment *cp __attribute__((unused)); + cp = appDeployment.createComponentDeployment(softpkg, asm_inst); + return; } } } -CF::AllocationManager::AllocationResponseSequence* createHelper::allocateUsesDeviceProperties(const UsesDeviceInfo::List& usesDevices, const CF::Properties& configureProperties) +CF::AllocationManager::AllocationResponseSequence* createHelper::allocateUsesDeviceProperties(const std::vector& usesDevices, const CF::Properties& configureProperties) { CF::AllocationManager::AllocationRequestSequence request; request.length(usesDevices.size()); for (unsigned int usesdev_idx=0; usesdev_idx< usesDevices.size(); usesdev_idx++) { - const std::string requestid = usesDevices[usesdev_idx]->getId(); + const std::string requestid = usesDevices[usesdev_idx].getID(); request[usesdev_idx].requestID = requestid.c_str(); - // Get the usesdevice dependency properties, first from the SPD... 
+ // Get the usesdevice dependency properties CF::Properties& allocationProperties = request[usesdev_idx].allocationProperties; - const std::vector&prop_refs = usesDevices[usesdev_idx]->getProperties(); + const std::vector&prop_refs = usesDevices[usesdev_idx].getDependencies(); this->_castRequestProperties(allocationProperties, prop_refs); - // ...then from the SAD; in practice, these are mutually exclusive, but - // there is no harm in doing both, as one set will always be empty - const std::vector& sad_refs = usesDevices[usesdev_idx]->getSadDeps(); - this->_castRequestProperties(allocationProperties, sad_refs, allocationProperties.length()); - this->_evaluateMATHinRequest(allocationProperties, configureProperties); } @@ -1708,79 +1322,56 @@ CF::AllocationManager::AllocationResponseSequence* createHelper::allocateUsesDev * - Allocate capacity on usesdevice(s) * - Find and implementation that has it's implementation-specific usesdevice dependencies satisfied * - Allocate the component to a particular device - - Current implementation takes advantage of single failure then clean up everything..... To support collocation - allocation failover for mulitple devices, then we need to clean up only the allocations that we made during a failed - collocation request. This requires that we know and cleanup only those allocations that we made.. - appCapacityTable holds all the applications that were made during the entire application deployment process. - - I think for each try of a collocation request... 
we need to swap out the current appCapacityTable for a - temporary table, to assist with the allocation and clean up - */ -void createHelper::allocateComponent(ossie::ComponentInfo* component, +void createHelper::allocateComponent(redhawk::ApplicationDeployment& appDeployment, + redhawk::ComponentDeployment* deployment, const std::string& assignedDeviceId, - DeviceAssignmentList &appAssignedDevs, - const std::string& appIdentifier) + const std::map& specialized_reservations) { - // get the implementations from the component - ossie::ImplementationInfo::List implementations; - component->getImplementations(implementations); - - CF::Properties configureProperties = component->getConfigureProperties(); - const CF::Properties &construct_props = component->getConstructProperties(); - unsigned int configlen = configureProperties.length(); - configureProperties.length(configureProperties.length()+construct_props.length()); - for (unsigned int i=0; igetAllocationContext(); // Find the devices that allocate the SPD's minimum required usesdevices properties - const UsesDeviceInfo::List &usesDevVec = component->getUsesDevices(); - if (!allocateUsesDevices(component->getIdentifier(), usesDevVec, configureProperties, appAssignedDevs, this->_allocations)) { + const std::vector& usesDevices = deployment->getSoftPkg()->getUsesDevices(); + redhawk::UsesDeviceDeployment assignedDevices; + if (!allocateUsesDevices(usesDevices, alloc_context, assignedDevices, this->_allocations)) { // There were unsatisfied usesdevices for the component - ostringstream eout; - eout << "Failed to satisfy 'usesdevice' dependencies "; - bool first = true; - for (UsesDeviceInfo::List::const_iterator uses = usesDevVec.begin(); uses != usesDevVec.end(); ++uses) { - if ((*uses)->getAssignedDeviceId().empty()) { - if (!first) { - eout << ", "; - } else { - first = false; - } - eout << (*uses)->getId(); - } - } - eout << "for component '" << component->getIdentifier() << "'"; - 
LOG_DEBUG(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_ENOSPC, eout.str().c_str()); + std::vector failed_ids = _getFailedUsesDevices(usesDevices, assignedDevices); + throw redhawk::UsesDeviceFailure(deployment, failed_ids); } - - std::string emsg; + // now attempt to find an implementation that can have it's allocation requirements met + const SPD::Implementations& implementations = deployment->getSoftPkg()->getImplementations(); for (size_t implCount = 0; implCount < implementations.size(); implCount++) { - ossie::ImplementationInfo* impl = implementations[implCount]; + const ossie::SPD::Implementation* implementation = &implementations[implCount]; + + // TODO: Validate code file and dependency files exist // Handle 'usesdevice' dependencies for the particular implementation - DeviceAssignmentList implAllocatedDevices; + redhawk::UsesDeviceDeployment implAssignedDevices; ScopedAllocations implAllocations(*this->_allocationMgr); - const UsesDeviceInfo::List &implUsesDevVec = impl->getUsesDevices(); + const std::vector& implUsesDevVec = implementation->getUsesDevices(); - if (!allocateUsesDevices(component->getIdentifier(), implUsesDevVec, configureProperties, implAllocatedDevices, implAllocations)) { - LOG_DEBUG(ApplicationFactory_impl, "Unable to satisfy 'usesdevice' dependencies for component " - << component->getIdentifier() << " implementation " << impl->getId()); + if (!allocateUsesDevices(implUsesDevVec, alloc_context, implAssignedDevices, implAllocations)) { + RH_DEBUG(_createHelperLog, "Unable to satisfy 'usesdevice' dependencies for component " + << deployment->getIdentifier() << " implementation " << implementation->getID()); continue; } + + deployment->setImplementation(implementation); + + // Transfer ownership of the uses device assigments to the deployment + assignedDevices.transferUsesDeviceAssignments(*deployment); // Found an implementation which has its 'usesdevice' dependencies // satisfied, 
now perform assignment/allocation of component to device - LOG_DEBUG(ApplicationFactory_impl, "Trying to find the device"); - ossie::AllocationResult response = allocateComponentToDevice(component, impl, assignedDeviceId, appIdentifier); + RH_DEBUG(_createHelperLog, "Trying to find the device"); + ossie::AllocationResult response = allocateComponentToDevice(deployment, assignedDeviceId, + appDeployment.getIdentifier(), + specialized_reservations); if (response.first.empty()) { - LOG_DEBUG(ApplicationFactory_impl, "Unable to allocate device for component " - << component->getIdentifier() << " implementation " << impl->getId()); + RH_DEBUG(_createHelperLog, "Unable to allocate device for component " + << deployment->getIdentifier() << " implementation " << implementation->getID()); continue; } @@ -1788,100 +1379,73 @@ void createHelper::allocateComponent(ossie::ComponentInfo* component, implAllocations.push_back(response.first); // Convert from response back into a device node + deployment->setAssignedDevice(response.second); DeviceNode& node = *(response.second); const std::string& deviceId = node.identifier; - if (!resolveSoftpkgDependencies(impl, node)) { - component->clearSelectedImplementation(); - LOG_DEBUG(ApplicationFactory_impl, "Unable to resolve softpackage dependencies for component " - << component->getIdentifier() << " implementation " << impl->getId()); - continue; - } - // - // validate necessary code files and dependency files exists - // - LOG_TRACE(ApplicationFactory_impl, "(allocate_component) Validate compponent's deployment files, component: " << - component->getIdentifier() << " implementation " << impl->getId()); - if ( !validateImplementationCodeFile( component, impl, emsg ) ) { + + if (!resolveSoftpkgDependencies(appDeployment, deployment, node)) { + RH_DEBUG(_createHelperLog, "Unable to resolve softpackage dependencies for component " + << deployment->getIdentifier() << " implementation " << implementation->getID()); continue; } - + // 
Allocation to a device succeeded - LOG_DEBUG(ApplicationFactory_impl, "Assigned component " << component->getInstantiationIdentifier() - << " implementation " << impl->getId() << " to device " << deviceId); - component->setAssignedDevice(response.second); + RH_DEBUG(_createHelperLog, "Assigned component " << deployment->getInstantiation()->getID() + << " implementation " << implementation->getID() << " to device " << deviceId); // Move the device to the front of the list rotateDeviceList(_executableDevices, deviceId); - ossie::DeviceAssignmentInfo dai; - dai.deviceAssignment.componentId = CORBA::string_dup(component->getIdentifier()); - dai.deviceAssignment.assignedDeviceId = deviceId.c_str(); - dai.device = CF::Device::_duplicate(node.device); - appAssignedDevs.push_back(dai); - // Store the implementation-specific usesdevice allocations and // device assignments implAllocations.transfer(this->_allocations); - std::copy(implAllocatedDevices.begin(), implAllocatedDevices.end(), std::back_inserter(appAssignedDevs)); + + implAssignedDevices.transferUsesDeviceAssignments(*deployment); - component->setSelectedImplementation(impl); return; } - ossie::DeviceList::iterator device; - ossie::DeviceList devices = _registeredDevices; - bool allBusy = true; - unsigned int num_exec_devices = 0; - for (device = devices.begin(); device != devices.end(); ++device) { - if ((*device)->isExecutable) { - num_exec_devices++; - if ((*device)->device->usageState() != CF::Device::BUSY) { - allBusy = false; - } - } - } - if (num_exec_devices == 0) { - // Report failure - std::ostringstream eout; - eout << "Unable to launch component '"<getName()<<"'. No executable devices (i.e.: GPP) are available in the Domain"; - LOG_DEBUG(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_ENOSPC, eout.str().c_str()); - } - if (allBusy) { - // Report failure - std::ostringstream eout; - eout << "Unable to launch component '"<getName()<<"'. 
All executable devices (i.e.: GPP) in the Domain are busy"; - LOG_DEBUG(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_ENOSPC, eout.str().c_str()); + // Report failure, checking if the problem was that all executable devices + // were busy + if (_allDevicesBusy(_executableDevices)) { + throw redhawk::PlacementFailure(deployment->getInstantiation(), "all executable devices (GPPs) in the Domain are busy"); } + throw redhawk::PlacementFailure(deployment->getInstantiation(), "failed to satisfy device dependencies"); +} - if ( emsg.size() != 0 ) { - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EBADF, emsg.c_str()); +bool createHelper::_allDevicesBusy(ossie::DeviceList& devices) +{ + // While this can yield false negatives (or positives) since it's not + // atomic with component allocation, it should provide a little extra + // insight in most cases + for (ossie::DeviceList::iterator dev = devices.begin(); dev != devices.end(); ++dev) { + CF::Device::UsageType state; + try { + state = (*dev)->device->usageState(); + } catch (...) 
{ + RH_WARN(_createHelperLog, "Device " << (*dev)->identifier << " is not reachable"); + continue; + } + if (state != CF::Device::BUSY) { + return false; + } } - - // Report failure - std::ostringstream eout; - eout << "Failed to satisfy device dependencies for component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << "'"; - LOG_DEBUG(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_ENOSPC, eout.str().c_str()); + return true; } -bool createHelper::allocateUsesDevices(const std::string& componentIdentifier, - const ossie::UsesDeviceInfo::List& usesDevices, +bool createHelper::allocateUsesDevices(const std::vector& usesDevices, const CF::Properties& configureProperties, - DeviceAssignmentList& deviceAssignments, + redhawk::UsesDeviceDeployment& deviceAssignments, ScopedAllocations& allocations) { // Create a temporary lookup table for reconciling allocation requests with // usesdevice identifiers - typedef std::map UsesDeviceMap; + typedef std::map UsesDeviceMap; UsesDeviceMap usesDeviceMap; - for (UsesDeviceInfo::List::const_iterator iter = usesDevices.begin(); iter != usesDevices.end(); ++iter) { + for (std::vector::const_iterator iter = usesDevices.begin(); iter != usesDevices.end(); ++iter) { // Ensure that no devices are assigned to start; the caller can check // for unassigned devices to report which usesdevices failed - (*iter)->clearAssignedDeviceId(); - usesDeviceMap[(*iter)->getId()] = *iter; + usesDeviceMap[iter->getID()] = &(*iter); } // Track allocations made internally, either to clean up on failure or to @@ -1892,7 +1456,7 @@ bool createHelper::allocateUsesDevices(const std::string& componentIdentifier, for (unsigned int resp = 0; resp < response->length(); resp++) { // Ensure that this allocation is recorded so that it can be cleaned up const std::string allocationId(response[resp].allocationID); - LOG_TRACE(ApplicationFactory_impl, "Allocated " << 
allocationId); + RH_TRACE(_createHelperLog, "Allocated " << allocationId); localAllocations.push_back(allocationId); // Find the usesdevice that matches the request and update it, removing @@ -1901,19 +1465,16 @@ bool createHelper::allocateUsesDevices(const std::string& componentIdentifier, UsesDeviceMap::iterator uses = usesDeviceMap.find(requestID); if (uses == usesDeviceMap.end()) { // This condition should never occur - LOG_WARN(ApplicationFactory_impl, "Allocation request " << requestID + RH_WARN(_createHelperLog, "Allocation request " << requestID << " does not match any usesdevice"); continue; } const std::string deviceId = ossie::corba::returnString(response[resp].allocatedDevice->identifier()); - uses->second->setAssignedDeviceId(deviceId); usesDeviceMap.erase(uses); - DeviceAssignmentInfo assignment; - assignment.deviceAssignment.componentId = componentIdentifier.c_str(); - assignment.deviceAssignment.assignedDeviceId = deviceId.c_str(); - assignment.device = CF::Device::_duplicate(response[resp].allocatedDevice); - deviceAssignments.push_back(assignment); + redhawk::UsesDeviceAssignment* assignment = new redhawk::UsesDeviceAssignment(uses->second); + assignment->setAssignedDevice(response[resp].allocatedDevice); + deviceAssignments.addUsesDeviceAssignment(assignment); } if (usesDeviceMap.empty()) { @@ -1947,7 +1508,7 @@ void createHelper::_evaluateMATHinRequest(CF::Properties &request, const CF::Pro mathStatement.erase(mathStatement.end() - 1, mathStatement.end()); std::vector args; while ((mathStatement.length() > 0) && (mathStatement.find(',') != std::string::npos)) { - LOG_TRACE(ApplicationFactory_impl, "__MATH__ ARG: " << mathStatement.substr(0, mathStatement.find(',')) ); + RH_TRACE(_createHelperLog, "__MATH__ ARG: " << mathStatement.substr(0, mathStatement.find(',')) ); args.push_back(mathStatement.substr(0, mathStatement.find(','))); mathStatement.erase(0, mathStatement.find(',') + 1); } @@ -2015,7 +1576,7 @@ void 
createHelper::_evaluateMATHinRequest(CF::Properties &request, const CF::Pro CORBA::TypeCode_var matchingCompPropType = matchingCompProp->value.type(); request[math_prop].value = ossie::calculateDynamicProp(operand, compValue, math, matchingCompPropType->kind()); std::string retval = ossie::any_to_string(request[math_prop].value); - LOG_DEBUG(ApplicationFactory_impl, "__MATH__ RESULT: " << retval << " op1: " << operand << " op2:" << ossie::any_to_string(compValue) ); + RH_DEBUG(_createHelperLog, "__MATH__ RESULT: " << retval << " op1: " << operand << " op2:" << ossie::any_to_string(compValue) ); } else { std::ostringstream eout; eout << " invalid __MATH__ statement; '" << mathStatement << "'"; @@ -2026,21 +1587,45 @@ void createHelper::_evaluateMATHinRequest(CF::Properties &request, const CF::Pro } /* Perform allocation/assignment of a particular component to the device. - * - First do allocation/assignment based on user provided DAS + * - Check if deployment has required device properties.. 
+ * - next, do allocation/assignment based on user provided DAS * - If not specified in DAS, then iterate through devices looking for a device that satisfies * the allocation properties */ -ossie::AllocationResult createHelper::allocateComponentToDevice( ossie::ComponentInfo* component, - ossie::ImplementationInfo* implementation, - const std::string& assignedDeviceId, - const std::string& appIdentifier) +ossie::AllocationResult createHelper::allocateComponentToDevice(redhawk::ComponentDeployment* deployment, + const std::string& assignedDeviceId, + const std::string& appIdentifier, + const std::map& specialized_reservations) { + const ossie::SPD::Implementation* implementation = deployment->getImplementation(); ossie::DeviceList devices = _registeredDevices; + const CF::Properties& deviceRequires = deployment->getDeviceRequires(); + + if ( deviceRequires.length() > 0 ) { + RH_TRACE(_createHelperLog, "Compnent: '" << deployment->getSoftPkg()->getName() << "' has device requires"); + // filter out devices that only match devicerequires property set + ossie::DeviceList::iterator device; + for (device = devices.begin(); device != devices.end(); ) { + boost::shared_ptr devnode = *device; + RH_DEBUG(_createHelperLog, "allocateDevice::PartitionMatching required props: " << devnode->requiresProps ); + if ( !checkPartitionMatching( *devnode, deviceRequires )) { + RH_TRACE(_createHelperLog, "Partition Matching failed"); + device=devices.erase(device); + continue; + } + device++; + } + + if ( devices.size() == 0 ) { + throw redhawk::PlacementFailure(deployment->getInstantiation(), "failed to satisfy devicerequires specification."); + } + } + // First check to see if the component was assigned in the user provided DAS // See if a device was assigned in the DAS if (!assignedDeviceId.empty()) { - LOG_TRACE(ApplicationFactory_impl, "User-provided DAS: Component: '" << component->getName() << + RH_TRACE(_createHelperLog, "User-provided DAS: Component: '" << 
deployment->getSoftPkg()->getName() << "' Assigned device: '" << assignedDeviceId << "'"); ossie::DeviceList::iterator device; for (device = devices.begin(); device != devices.end(); ++device) { @@ -2050,11 +1635,11 @@ ossie::AllocationResult createHelper::allocateComponentToDevice( ossie::Componen } if (device == devices.end()) { - LOG_DEBUG(ApplicationFactory_impl, "DAS specified unknown device " << assignedDeviceId << - " for component " << component->getIdentifier()); + RH_DEBUG(_createHelperLog, "DAS specified unknown device " << assignedDeviceId << + " for component " << deployment->getIdentifier()); CF::DeviceAssignmentSequence badDAS; badDAS.length(1); - badDAS[0].componentId = CORBA::string_dup(component->getIdentifier()); + badDAS[0].componentId = deployment->getIdentifier().c_str(); badDAS[0].assignedDeviceId = assignedDeviceId.c_str(); throw CF::ApplicationFactory::CreateApplicationRequestError(badDAS); } @@ -2065,400 +1650,173 @@ ossie::AllocationResult createHelper::allocateComponentToDevice( ossie::Componen } const std::string requestid = ossie::generateUUID(); - std::vector prop_refs = implementation->getDependencyProperties(); - redhawk::PropertyMap allocationProperties; - this->_castRequestProperties(allocationProperties, prop_refs); - CF::Properties configure_props = component->getConfigureProperties(); - CF::Properties construct_props = component->getConstructProperties(); - unsigned int initial_length = configure_props.length(); - configure_props.length(configure_props.length()+construct_props.length()); - for (unsigned int i=0; i_evaluateMATHinRequest(allocationProperties, configure_props); + redhawk::PropertyMap allocationProperties = _getComponentAllocations(deployment); - LOG_TRACE(ApplicationFactory_impl, "alloc prop size " << allocationProperties.size() ); + RH_TRACE(_createHelperLog, "alloc prop size " << allocationProperties.size() ); redhawk::PropertyMap::iterator iter=allocationProperties.begin(); for( ; iter != 
allocationProperties.end(); iter++){ - LOG_TRACE(ApplicationFactory_impl, "alloc prop: " << iter->id <<" value:" << ossie::any_to_string(iter->value) ); - } - - redhawk::PropertyMap::iterator nic_alloc = allocationProperties.find("nic_allocation"); - std::string alloc_id; - if (nic_alloc != allocationProperties.end()) { - redhawk::PropertyMap& substr = nic_alloc->getValue().asProperties(); - alloc_id = substr["nic_allocation::identifier"].toString(); - if (alloc_id.empty()) { - alloc_id = ossie::generateUUID(); - substr["nic_allocation::identifier"] = alloc_id; - } + RH_TRACE(_createHelperLog, "alloc prop: " << iter->id <<" value:" << ossie::any_to_string(iter->value) ); } - ossie::AllocationResult response = this->_allocationMgr->allocateDeployment(requestid, allocationProperties, devices, appIdentifier, implementation->getProcessorDeps(), implementation->getOsDeps()); - if (allocationProperties.contains("nic_allocation")) { + std::string nic_alloc_id = _getNicAllocationId(allocationProperties); + + if ( specialized_reservations.size() > 0 ) { + redhawk::PropertyMap _struct; + std::string instantiationId = deployment->getInstantiation()->instantiationId; + std::vector _kinds, _values; + _kinds.push_back("cpucores"); + if (specialized_reservations.find(instantiationId) == specialized_reservations.end()) { + _values.push_back("-1"); + } else { + std::ostringstream ss; + ss<second; + _values.push_back(ss.str()); + } + _struct["redhawk::reservation_request::kinds"].setValue(_kinds); + _struct["redhawk::reservation_request::values"].setValue(_values); + _struct["redhawk::reservation_request::obj_id"].setValue(deployment->getIdentifier()); + allocationProperties["redhawk::reservation_request"].setValue(_struct); + } + + ossie::AllocationResult response = this->_allocationMgr->allocateDeployment(requestid, + allocationProperties, + devices, + appIdentifier, + implementation->getProcessors(), + implementation->getOsDeps(), + deviceRequires ); + if (!nic_alloc_id.empty()) 
{ if (!response.first.empty()) { - redhawk::PropertyMap query_props; - query_props["nic_allocation_status"] = redhawk::Value(); - response.second->device->query(query_props); - redhawk::ValueSequence& retstruct = query_props["nic_allocation_status"].asSequence(); - for (redhawk::ValueSequence::iterator it = retstruct.begin(); it!=retstruct.end(); it++) { - redhawk::PropertyMap& struct_prop = it->asProperties(); - std::string identifier = struct_prop["nic_allocation_status::identifier"].toString(); - if (identifier == alloc_id) { - const std::string interface = struct_prop["nic_allocation_status::interface"].toString(); - LOG_DEBUG(ApplicationFactory_impl, "Allocation NIC assignment: " << interface ); - component->setNicAssignment(interface); - redhawk::PropertyType nic_execparam; - nic_execparam.id = "NIC"; - nic_execparam.setValue(interface); - component->addExecParameter(nic_execparam); - - // RESOLVE - need SAD file directive to control this behavior.. i.e if promote_nic_to_affinity==true... 
- // for now add nic assignment as application affinity to all components deployed by this device - _app_affinity = component->getAffinityOptionsWithAssignment(); - } - } + _applyNicAllocation(deployment, nic_alloc_id, response.second->device); } } - TRACE_EXIT(ApplicationFactory_impl); + return response; } -void createHelper::_castRequestProperties(CF::Properties& allocationProperties, const std::vector &prop_refs, unsigned int offset) +void createHelper::_applyNicAllocation(redhawk::ComponentDeployment* deployment, + const std::string& allocId, + CF::Device_ptr device) { - allocationProperties.length(offset+prop_refs.size()); - for (unsigned int i=0; iquery(query_props); -void createHelper::_castRequestProperties(CF::Properties& allocationProperties, const std::vector &prop_refs, unsigned int offset) -{ - allocationProperties.length(offset+prop_refs.size()); - for (unsigned int i=0; iasProperties(); + std::string identifier = struct_prop["nic_allocation_status::identifier"].toString(); + if (identifier == allocId) { + const std::string interface = struct_prop["nic_allocation_status::interface"].toString(); + RH_DEBUG(_createHelperLog, "Assigning NIC '" << interface << "' to component '" + << deployment->getIdentifier() << "'"); + deployment->setNicAssignment(interface); + } } } -CF::DataType createHelper::castProperty(const ossie::ComponentProperty* property) + +bool createHelper::checkPartitionMatching( ossie::DeviceNode& devnode, + const CF::Properties& devicerequires ) { - if (dynamic_cast(property) != NULL) { - const SimplePropertyRef* dependency = dynamic_cast(property); - return convertPropertyToDataType(&(*dependency)); - } else if (dynamic_cast(property) != NULL) { - const SimpleSequencePropertyRef* dependency = dynamic_cast(property); - return convertPropertyToDataType(dependency); - } else if (dynamic_cast(property) != NULL) { - const ossie::StructPropertyRef* dependency = dynamic_cast(property); - return convertPropertyToDataType(dependency); - } else 
if (dynamic_cast(property) != NULL) { - const ossie::StructSequencePropertyRef* dependency = dynamic_cast(property); - return convertPropertyToDataType(dependency); - } - CF::DataType dataType; - dataType.id = CORBA::string_dup(property->_id.c_str()); - return dataType; -} + // + // perform matching of a device's deployrequires property set against a componentplacment's devicerequires list + // + if ( devnode.requiresProps.size() != devicerequires.length()) { + RH_TRACE(_createHelperLog, "Number of devicerequired properties for deployment does not match, Device: " << devnode.label ); + return false; + } -bool createHelper::validateImplementationCodeFile(ossie::ComponentInfo* component, - ossie::ImplementationInfo* impl, - std::string &emsg, - const bool clear_on_fail, - const bool suppress_log ) { + // Check if the device has a required property set for deployment + if ( devicerequires.length() == 0 ) { + RH_TRACE(_createHelperLog, "Component and Device have no devicerequires/deployerrequires property sets."); + return true; + } - bool all_pass = false; - std::string impl_id = impl->getId(); - try { - LOG_TRACE(ApplicationFactory_impl, "Validate compponent's deployment files, component: " << - component->getIdentifier() << " implementation " << impl->getId()); - _appFact.ValidateImplementationCodeFile( _appFact._fileMgr, - _appFact._domainManager, - component->spd.getSPDPath(), - component->getName(), - impl->getLocalFileName(), - false ); - - // for each dependency validate code file... - validateSoftpkgDependencies( impl ); - all_pass = true; - - } catch (CF::InvalidFileName ex) { - ostringstream os; - os << "Failed to validate SPD and dependencies for component: " << component->getIdentifier() << " implementation: " << impl_id << ". 
Invalid file name exception: " << ex.msg; - if ( !suppress_log ) { - LOG_WARN(ApplicationFactory_impl, os.str() ); - } - if ( clear_on_fail ) { - component->clearSelectedImplementation(); - } - emsg = os.str(); - } catch (CF::FileException ex) { - ostringstream os; - os << "Failed to validate SPD and dependencies for component: " << component->getIdentifier() << " implementation: " << impl_id << ". File exception: " << ex.msg; - if ( !suppress_log ) { - LOG_WARN(ApplicationFactory_impl, os.str() ); - } - if ( clear_on_fail ) { - component->clearSelectedImplementation(); - } - emsg = os.str(); - } catch ( CF::DomainManager::ApplicationInstallationError &ex ) { - ostringstream os; - os << "Failed to validate SPD and dependencies for component: " << component->getIdentifier() << " implementation: " << impl_id << " Exception:: " << ex.msg; - if ( !suppress_log ) { - LOG_WARN(ApplicationFactory_impl, os.str() ); - } - if ( clear_on_fail ) { - component->clearSelectedImplementation(); - } - emsg = os.str(); - } catch ( ... 
) { - ostringstream os; - os << "Failed to validate SPD and dependencies for component: " << component->getIdentifier() << " implementation: " << impl_id; - if ( !suppress_log ) { - LOG_WARN(ApplicationFactory_impl, os.str() ); + const redhawk::PropertyMap &devReqs = redhawk::PropertyMap::cast( devicerequires ); + for ( redhawk::PropertyMap::const_iterator iter=devReqs.begin(); iter != devReqs.end(); ++iter) { + std::string pid(iter->getId()); + RH_TRACE(_createHelperLog, "checkPartitionMatching source devicerequires id: " << pid ); + redhawk::PropertyMap::const_iterator dev_prop = devnode.requiresProps.find( pid ); + if ( dev_prop == devnode.requiresProps.end() ) { + RH_DEBUG(_createHelperLog, "Missing devicerequires property: " << pid << " for deployment from Device: " << devnode.label ); + return false; } - if ( clear_on_fail ) { - component->clearSelectedImplementation(); + + // Convert the input Any to the property's data type via string; if it came + // from the ApplicationFactory, it's already a string, but a remote request + // could be of any type + std::string action("eq"); + if ( !ossie::compare_anys(iter->getValue(), dev_prop->getValue(), action) ) { + return false; } - emsg = os.str(); } - return all_pass; - + RH_TRACE(_createHelperLog, "checkPartitionMatch PASSED, found match with device: " << devnode.label ); + return true; } -void createHelper::validateSoftpkgDependencies(const ossie::ImplementationInfo* implementation ) -{ - const SoftpkgInfoList & tmpSoftpkg = implementation->getSoftPkgDependencies(); - SoftpkgInfoList::const_iterator iterSoftpkg; - - for (iterSoftpkg = tmpSoftpkg.begin(); iterSoftpkg != tmpSoftpkg.end(); ++iterSoftpkg) { - SoftpkgInfoPtr pkg = *iterSoftpkg; - const ossie::ImplementationInfo* impl = pkg->getSelectedImplementation(); - if ( impl ) { - LOG_DEBUG(ApplicationFactory_impl, "Validate softpkgdep's deployment files, pkg: " << - pkg->getName() << " implementation " << impl->getId()); - 
_appFact.ValidateImplementationCodeFile(_appFact._fileMgr, - _appFact._domainManager, - pkg->spd.getSPDPath(), - pkg->getName(), - impl->getLocalFileName(), - false ); - validateSoftpkgDependencies( impl ); - } - - } - -} -bool createHelper::resolveSoftpkgDependencies(ossie::ImplementationInfo* implementation, ossie::DeviceNode& device) +void createHelper::_castRequestProperties(CF::Properties& allocationProperties, const std::vector &prop_refs, unsigned int offset) { - const SoftpkgInfoList & tmpSoftpkg = implementation->getSoftPkgDependencies(); - SoftpkgInfoList::const_iterator iterSoftpkg; - - for (iterSoftpkg = tmpSoftpkg.begin(); iterSoftpkg != tmpSoftpkg.end(); ++iterSoftpkg) { - // Find an implementation whose dependencies match - ossie::ImplementationInfo* spdImplInfo = resolveDependencyImplementation(*iterSoftpkg, device); - if (spdImplInfo) { - (*iterSoftpkg)->setSelectedImplementation(spdImplInfo); - LOG_DEBUG(ApplicationFactory_impl, "resolveSoftpkgDependencies: selected: " << (*iterSoftpkg)->getName()); - } else { - LOG_DEBUG(ApplicationFactory_impl, "resolveSoftpkgDependencies: implementation match not found between soft package dependency and device"); - implementation->clearSelectedDependencyImplementations(); - return false; - } + allocationProperties.length(offset+prop_refs.size()); + for (unsigned int i=0; igetImplementations(spd_list); + const ossie::SPD::Implementation* implementation = deployment->getImplementation(); + const SPD::SoftPkgDependencies& deps = implementation->getSoftPkgDependencies(); + SPD::SoftPkgDependencies::const_iterator iterSoftpkg; - for (size_t implCount = 0; implCount < spd_list.size(); implCount++) { - ossie::ImplementationInfo* implementation = spd_list[implCount]; - // Check that this implementation can run on the device - if (!implementation->checkProcessorAndOs(device.prf)) { - continue; - } - - // Recursively check any softpkg dependencies - if (resolveSoftpkgDependencies(implementation, device)) { - return 
implementation; - } - } - - return 0; -} - -/* Create a vector of all the components for the SAD associated with this App Factory - * - Get component information from the SAD and store in _requiredComponents vector - */ -void createHelper::getRequiredComponents() - throw (CF::ApplicationFactory::CreateApplicationError) -{ - TRACE_ENTER(ApplicationFactory_impl); - - std::vector componentsFromSAD = _appFact._sadParser.getAllComponents(); - - const std::string assemblyControllerRefId = _appFact._sadParser.getAssemblyControllerRefId(); - - // Bin the start orders based on the values in the SAD. Using a map of - // vectors, keyed on the start order value, accounts for duplicate keys and - // allows assigning the effective order easily by iterating through all - // the values. - std::map > startOrders; - - for (unsigned int i = 0; i < componentsFromSAD.size(); i++) { - const ComponentPlacement& component = componentsFromSAD[i]; - - // Create a list of pairs of start orders and instantiation IDs - for (unsigned int ii = 0; ii < component.getInstantiations().size(); ii++) { - // Only add a pair if a start order was provided, and the component is not the assembly controller - if (strcmp(component.getInstantiations()[ii].getStartOrder(), "") != 0 && - component.getInstantiations()[ii].getID() != assemblyControllerRefId) { - // Get the start order of the component - int startOrder = atoi(component.getInstantiations()[ii].getStartOrder()); - std::string instId = component.getInstantiations()[ii].getID(); - startOrders[startOrder].push_back(instId); - } - } - - // Extract required data from SPD file - ossie::ComponentInfo* newComponent = 0; - LOG_TRACE(ApplicationFactory_impl, "Getting the SPD Filename") - const char *spdFileName = _appFact._sadParser.getSPDById(component.getFileRefId()); - if (spdFileName == NULL) { - ostringstream eout; - eout << "The SPD file reference for componentfile "<& instantiations = component.getInstantiations(); - - const ComponentInstantiation& 
instance = instantiations[0]; - - ostringstream identifier; - identifier << instance.getID(); - // Violate SR:172, we use the uniquified name rather than the passed in name - identifier << ":" << _waveformContextName; - assert(newComponent != 0); - newComponent->setIdentifier(identifier.str().c_str(), instance.getID()); - - if (newComponent->getInstantiationIdentifier() == assemblyControllerRefId) { - newComponent->setIsAssemblyController(true); - } - - newComponent->setNamingService(instance.isNamingService()); - - if (newComponent->getNamingService()) { - ostringstream nameBinding; - nameBinding << instance.getFindByNamingServiceName(); -#if UNIQUIFY_NAME_BINDING -// DON'T USE THIS YET AS IT WILL BREAK OTHER PARTS OF REDHAWK - nameBinding << "_" << i; // Add a _UniqueIdentifier, per SR:169 -#endif - newComponent->setNamingServiceName(nameBinding.str().c_str()); // SR:169 + for (iterSoftpkg = deps.begin(); iterSoftpkg != deps.end(); ++iterSoftpkg) { + // Find an implementation whose dependencies match + redhawk::SoftPkgDeployment* dependency = resolveDependencyImplementation(appDeployment, *iterSoftpkg, device); + if (dependency) { + deployment->addDependency(dependency); } else { - if (newComponent->isScaCompliant()) { - LOG_WARN(ApplicationFactory_impl, "component instantiation is sca compliant but does not provide a 'findcomponent' name...this is probably an error") - } - } - - newComponent->setUsageName(instance.getUsageName()); - newComponent->setAffinity( instance.getAffinity() ); - newComponent->setLoggingConfig( instance.getLoggingConfig() ); - - const ossie::ComponentPropertyList & ins_prop = instance.getProperties(); - - int docker_image_idx = -1; - for (unsigned int i = 0; i < ins_prop.size(); ++i) { - if (ins_prop[i]._id == "__DOCKER_IMAGE__") { - docker_image_idx = i; - continue; - } - newComponent->overrideProperty(&ins_prop[i]); - } - - if (docker_image_idx > -1) { - CF::Properties tmp; - redhawk::PropertyMap& tmpProp = 
redhawk::PropertyMap::cast(tmp); - tmpProp["__DOCKER_IMAGE__"].setValue(dynamic_cast(ins_prop[docker_image_idx]).getValue()); - newComponent->addExecParameter(tmpProp[0]); + RH_DEBUG(_createHelperLog, "resolveSoftpkgDependencies: implementation match not found between soft package dependency and device"); + return false; } - - _requiredComponents.push_back(newComponent); - } - - // Build the start order instantiation ID vector in the right order - _startOrderIds.clear(); - for (std::map >::iterator ii = startOrders.begin(); ii != startOrders.end(); ++ii) { - _startOrderIds.insert(_startOrderIds.end(), ii->second.begin(), ii->second.end()); } - TRACE_EXIT(ApplicationFactory_impl); + return true; } -/* Given a device id, returns a CORBA pointer to the device - * - Gets a CORBA pointer for a device from a given id - */ -CF::Device_ptr createHelper::find_device_from_id(const char* device_id) +redhawk::SoftPkgDeployment* +createHelper::resolveDependencyImplementation(redhawk::ApplicationDeployment& appDeployment, + const ossie::SPD::SoftPkgRef& ref, + ossie::DeviceNode& device) { - try { - return CF::Device::_duplicate(find_device_node_from_id(device_id).device); - } catch ( ... 
){ - } + RH_TRACE(_createHelperLog, "Resolving dependency " << ref); + const SoftPkg* softpkg = _profileCache.loadSoftPkg(ref.localfile); + const SPD::Implementations& spd_list = softpkg->getImplementations(); - for (DeviceAssignmentList::iterator iter = _appUsedDevs.begin(); iter != _appUsedDevs.end(); ++iter) { - if (strcmp(device_id, iter->deviceAssignment.assignedDeviceId) == 0) { - return CF::Device::_duplicate(iter->device); + for (size_t implCount = 0; implCount < spd_list.size(); implCount++) { + const ossie::SPD::Implementation& implementation = spd_list[implCount]; + if (ref.implref.isSet() && (implementation.getID() != *ref.implref)) { + continue; } - } - TRACE_EXIT(ApplicationFactory_impl); - return CF::Device::_nil(); -} - -const ossie::DeviceNode& createHelper::find_device_node_from_id(const char* device_id) throw(std::exception) -{ - for (DeviceList::iterator dn = _registeredDevices.begin(); dn != _registeredDevices.end(); ++dn) { - if ((*dn)->identifier == device_id) { - return **dn; + // Check that this implementation can run on the device + if (!checkProcessor(implementation.getProcessors(), device.prf.getAllocationProperties())) { + continue; + } else if (!checkOs(implementation.getOsDeps(), device.prf.getAllocationProperties())) { + continue; } - } - TRACE_EXIT(ApplicationFactory_impl); - throw(std::exception()); -} - -/* Given a component instantiation id, returns the associated ossie::ComponentInfo object - * - Gets the ComponentInfo class instance for a particular component instantiation id - */ -ossie::ComponentInfo* createHelper::findComponentByInstantiationId(const std::string& identifier) -{ - for (size_t ii = 0; ii < _requiredComponents.size(); ++ii) { - if (identifier == _requiredComponents[ii]->getInstantiationIdentifier()) { - return _requiredComponents[ii]; + redhawk::SoftPkgDeployment* dependency = new redhawk::SoftPkgDeployment(softpkg, &implementation); + // Recursively check any softpkg dependencies + if 
(resolveSoftpkgDependencies(appDeployment, dependency, device)) { + return dependency; } + delete dependency; } return 0; @@ -2523,245 +1881,130 @@ string ApplicationFactory_impl::getBaseWaveformContext(string waveform_context) return base_naming_context; } -void createHelper::loadDependencies(ossie::ComponentInfo& component, - CF::LoadableDevice_ptr device, - const SoftpkgInfoList & dependencies) +void createHelper::loadAndExecuteContainers(const ContainerList& containers, + CF::ApplicationRegistrar_ptr _appReg) { - for ( SoftpkgInfoList::const_iterator dep = dependencies.begin(); dep != dependencies.end(); ++dep) { - const ossie::ImplementationInfo* implementation = (*dep)->getSelectedImplementation(); - if (!implementation) { - LOG_ERROR(ApplicationFactory_impl, "No implementation selected for dependency " << (*dep)->getName()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EINVAL, "Missing implementation"); + RH_TRACE(_createHelperLog, "Loading and Executing " << containers.size() << " containers"); + // TODO: Promote contained component affinity values + + BOOST_FOREACH(redhawk::ContainerDeployment* container, containers) { + boost::shared_ptr device = container->getAssignedDevice(); + if (!device) { + std::ostringstream message; + message << "component " << container->getIdentifier() << " was not assigned to a device"; + throw std::logic_error(message.str()); } - // Recursively load dependencies - LOG_TRACE(ApplicationFactory_impl, "Loading dependencies for soft package " << (*dep)->getName()); - loadDependencies(component, device, implementation->getSoftPkgDependencies()); + // Let the application know to expect the given component + redhawk::ApplicationComponent* app_container = _application->addContainer(container); + const ossie::ComponentInstantiation* instantiation = container->getInstantiation(); + if (instantiation->isNamingService()) { + app_container->setNamingContext(_baseNamingContext + "/" + 
instantiation->getFindByNamingServiceName()); + } + container->setApplicationComponent(app_container); + + // get the code.localfile + RH_TRACE(_createHelperLog, "Host is " << device->label << " Local file name is " + << container->getLocalFile()); - // Determine absolute path of dependency's local file - CF::LoadableDevice::LoadType codeType = implementation->getCodeType(); - fs::path codeLocalFile = fs::path(implementation->getLocalFileName()); - if (!codeLocalFile.has_root_directory()) { - // Path is relative to SPD file location - fs::path base_dir = fs::path((*dep)->getSpdFileName()).parent_path(); - codeLocalFile = base_dir / codeLocalFile; + // Get file name, load if it is not empty + std::string codeLocalFile = container->getLocalFile(); + if (codeLocalFile.empty()) { + // This should be caught by validation, but just in case + throw redhawk::ComponentError(container, "empty localfile"); } - codeLocalFile = codeLocalFile.normalize(); - if (codeLocalFile.has_leaf() && codeLocalFile.leaf() == ".") { - codeLocalFile = codeLocalFile.branch_path(); + + // Check for LoadableDevice interface + if (!device->isLoadable()) { + std::ostringstream message; + message << "container " << container->getIdentifier() << " was assigned to non-loadable device " + << device->identifier; + RH_ERROR(_createHelperLog, message); + throw std::logic_error(message.str()); } - const std::string fileName = codeLocalFile.string(); - LOG_DEBUG(ApplicationFactory_impl, "Loading dependency local file " << fileName); + RH_TRACE(_createHelperLog, "Loading " << codeLocalFile << " and dependencies on device " + << device->label); try { - device->load(_appFact._fileMgr, fileName.c_str(), codeType); - } catch (...) 
{ - LOG_ERROR(ApplicationFactory_impl, "Failure loading file " << fileName); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EINVAL, "Failed to load file"); + container->load(_appFact._fileMgr, device->loadableDevice); + } catch (const std::exception& exc) { + throw redhawk::ComponentError(container, exc.what()); } - component.addResolvedSoftPkgDependency(fileName); - _application->addComponentLoadedFile(component.getIdentifier(), fileName); + + attemptComponentExecution(_appReg, container); } } /* Perform 'load' and 'execute' operations to launch component on the assigned device * - Actually loads and executes the component on the given device */ -void createHelper::loadAndExecuteComponents(CF::ApplicationRegistrar_ptr _appReg) +void createHelper::loadAndExecuteComponents(const DeploymentList& deployments, + CF::ApplicationRegistrar_ptr _appReg) { - LOG_TRACE(ApplicationFactory_impl, "Loading and Executing " << _requiredComponents.size() << " components"); + RH_TRACE(_createHelperLog, "Loading and Executing " << deployments.size() << " components"); // apply application affinity options to required components - applyApplicationAffinityOptions(); + applyApplicationAffinityOptions(deployments); - for (unsigned int rc_idx = 0; rc_idx < _requiredComponents.size (); rc_idx++) { - ossie::ComponentInfo* component = _requiredComponents[rc_idx]; - const ossie::ImplementationInfo* implementation = component->getSelectedImplementation(); + BOOST_FOREACH(redhawk::ComponentDeployment* deployment, deployments) { + const std::string& component_id = deployment->getIdentifier(); + RH_TRACE(_createHelperLog, "Loading and executing component '" << component_id << "'"); - boost::shared_ptr device = component->getAssignedDevice(); + boost::shared_ptr device = deployment->getAssignedDevice(); if (!device) { std::ostringstream message; - message << "component " << component->getIdentifier() << " was not assigned to a device"; + message << "component " << component_id << " 
was not assigned to a device"; throw std::logic_error(message.str()); } - LOG_TRACE(ApplicationFactory_impl, "Component - " << component->getName() - << " Assigned device - " << device->identifier); - LOG_INFO(ApplicationFactory_impl, "APPLICATION: " << _waveformContextName << " COMPONENT ID: " - << component->getIdentifier() << " ASSIGNED TO DEVICE ID/LABEL: " << device->identifier << "/" << device->label); + RH_INFO(_createHelperLog, "Application '" << _waveformContextName << "' component '" + << component_id << "' assigned to device '" << device->label + << "' (" << device->identifier << ")"); // Let the application know to expect the given component - _application->addComponent(component->getIdentifier(), component->getSpdFileName()); - _application->setComponentImplementation(component->getIdentifier(), implementation->getId()); - if (component->getNamingService()) { - std::string lookupName = _appFact._domainName + "/" + _waveformContextName + "/" + component->getNamingServiceName() ; - _application->setComponentNamingContext(component->getIdentifier(), lookupName); + redhawk::ApplicationComponent* app_component = _application->addComponent(deployment); + const ossie::ComponentInstantiation* instantiation = deployment->getInstantiation(); + if (instantiation->isNamingService()) { + app_component->setNamingContext(_baseNamingContext + "/" + instantiation->getFindByNamingServiceName()); + } + if (deployment->getContainer()) { + app_component->setComponentHost(deployment->getContainer()->getApplicationComponent()); } - _application->setComponentDevice(component->getIdentifier(), device->device); + deployment->setApplicationComponent(app_component); // get the code.localfile - fs::path codeLocalFile = fs::path(implementation->getLocalFileName()); - LOG_TRACE(ApplicationFactory_impl, "Host is " << device->label << " Local file name is " - << codeLocalFile); - if (!codeLocalFile.has_root_directory()) { - codeLocalFile = fs::path(component->spd.getSPDPath()) / 
codeLocalFile; - } - codeLocalFile = codeLocalFile.normalize(); - if (codeLocalFile.has_leaf() && codeLocalFile.leaf() == ".") { - codeLocalFile = codeLocalFile.branch_path(); - } + RH_TRACE(_createHelperLog, "Host is " << device->label << " Local file name is " + << deployment->getLocalFile()); // Get file name, load if it is not empty - if (codeLocalFile.string().size() <= 0) { - ostringstream eout; - eout << "code.localfile is empty for component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - eout << " with implementation id: '" << implementation->getId() << "'"; - eout << " on device id: '" << device->identifier << "'"; - eout << " in waveform '" << _waveformContextName<<"'"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_TRACE(ApplicationFactory_impl, eout.str()) - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EBADF, eout.str().c_str()); - } - - // narrow to LoadableDevice interface - CF::LoadableDevice_var loadabledev = ossie::corba::_narrowSafe(device->device); - if (CORBA::is_nil(loadabledev)) { + std::string codeLocalFile = deployment->getLocalFile(); + if (codeLocalFile.empty()) { + // This should be caught by validation, but just in case + throw redhawk::ComponentError(deployment, "empty localfile"); + } + + // Check for LoadableDevice interface + if (!device->isLoadable()) { std::ostringstream message; - message << "component " << component->getIdentifier() << " was assigned to non-loadable device " + message << "component " << component_id << " was assigned to non-loadable device " << device->identifier; + RH_ERROR(_createHelperLog, message); throw std::logic_error(message.str()); } - loadDependencies(*component, loadabledev, implementation->getSoftPkgDependencies()); - - // load the file(s) - ostringstream load_eout; // used for any error messages dealing with load + RH_TRACE(_createHelperLog, "Loading " << codeLocalFile << " and 
dependencies on device " + << device->label); try { - try { - LOG_TRACE(ApplicationFactory_impl, "loading " << codeLocalFile << " on device " << ossie::corba::returnString(loadabledev->label())); - loadabledev->load(_appFact._fileMgr, codeLocalFile.string().c_str(), implementation->getCodeType()); - } catch( const CF::LoadableDevice::LoadFail &ex ) { - load_eout << "'load' failed for component: '"; - load_eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - load_eout << " with implementation id: '" << implementation->getId() << "';"; - load_eout << " on device id: '" << device->identifier << "'"; - load_eout << " in waveform '" << _waveformContextName<<"'"; - load_eout << "\nREASON: '" << ex.msg << "'\nError occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - throw; - } catch( const CF::InvalidFileName &ex ) { - load_eout << "'load' failed for component: '"; - load_eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - load_eout << " with implementation id: '" << implementation->getId() << "';"; - load_eout << " on device id: '" << device->identifier << "'"; - load_eout << " in waveform '" << _waveformContextName<<"'"; - load_eout << "\nREASON: '" << ex.msg << "'\nError occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - throw; - } catch( ... 
) { - load_eout << "'load' failed for component: '"; - load_eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - load_eout << " with implementation id: '" << implementation->getId() << "';"; - load_eout << " on device id: '" << device->identifier << "'"; - load_eout << " in waveform '" << _waveformContextName<<"'"; - load_eout << "\nError occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - throw; - } - } catch( CF::InvalidFileName& _ex ) { - load_eout << " with error: <" << _ex.msg << ">;"; - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, load_eout.str().c_str()); - } catch( CF::Device::InvalidState& _ex ) { - load_eout << " with error: <" << _ex.msg << ">;"; - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, load_eout.str().c_str()); - } CATCH_THROW_LOG_TRACE(ApplicationFactory_impl, "", CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, load_eout.str().c_str())); - - // Mark the file as loaded - _application->addComponentLoadedFile(component->getIdentifier(), codeLocalFile.string()); + deployment->load(_appFact._fileMgr, device->loadableDevice); + } catch (const std::exception& exc) { + throw redhawk::ComponentError(deployment, exc.what()); + } - // OSSIE extends section D.2.1.6.3 to support loading a directory - // and execute a file in that directory using a entrypoint - // 1. Executable means to use CF LoadableDevice::load and CF ExecutableDevice::execute operations. This is a "main" process. - // - A Executable that references a directory instead of a file means to recursively load the contents of the directory - // and then execute the program specified via entrypoint - // 2. Driver and Kernel Module means load only. - // 3. SharedLibrary means dynamic linking. - // 4. A (SharedLibrary) Without a code entrypoint element means load only. - // 5. A (SharedLibrary) With a code entrypoint element means load and CF Device::execute. 
- if (((implementation->getCodeType() == CF::LoadableDevice::EXECUTABLE) || - (implementation->getCodeType() == CF::LoadableDevice::SHARED_LIBRARY)) && (implementation->getEntryPoint().size() != 0)) { - - // get executable device reference - CF::ExecutableDevice_var execdev = ossie::corba::_narrowSafe(loadabledev); - if (CORBA::is_nil(execdev)){ - std::ostringstream message; - message << "component " << component->getIdentifier() << " was assigned to non-executable device " - << device->identifier; - throw std::logic_error(message.str()); - } - - // Add the required parameters specified in SR:163 - // Naming Context IOR, Name Binding, and component identifier - CF::DataType ci; - ci.id = "COMPONENT_IDENTIFIER"; - ci.value <<= component->getIdentifier(); - component->addExecParameter(ci); - - CF::DataType nb; - nb.id = "NAME_BINDING"; - nb.value <<= component->getNamingServiceName(); - component->addExecParameter(nb); - - CF::DataType dp; - dp.id = "DOM_PATH"; - dp.value <<= _baseNamingContext; - component->addExecParameter(dp); - - CF::DataType pn; - pn.id = "PROFILE_NAME"; - pn.value <<= component->getSpdFileName(); - component->addExecParameter(pn); - - // resolve LOGGING_CONFIG_URI and DEBUG_LEVEL for component's execparam - resolveLoggingConfiguration( component ); - - // Add the Naming Context IOR to make it easier to parse the command line - CF::DataType ncior; - ncior.id = "NAMING_CONTEXT_IOR"; - ncior.value <<= ossie::corba::objectToString(_appReg); - component->addExecParameter(ncior); - - std::string sr_key; - if (this->specialized_reservations.find(std::string(component->getIdentifier())) != this->specialized_reservations.end()) { - sr_key = std::string(component->getIdentifier()); - } else if (this->specialized_reservations.find(std::string(component->getUsageName())) != this->specialized_reservations.end()) { - sr_key = std::string(component->getUsageName()); - } - if (not sr_key.empty()) { - CF::DataType spec_res; - spec_res.id = 
"RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"; - //std::stringstream ss; - //ss << this->specialized_reservations[sr_key]; - spec_res.value <<= this->specialized_reservations[sr_key]; - component->addExecParameter(spec_res); - } - - fs::path executeName; - if ((implementation->getCodeType() == CF::LoadableDevice::EXECUTABLE) && (implementation->getEntryPoint().size() == 0)) { - LOG_WARN(ApplicationFactory_impl, "executing using code file as entry point; this is non-SCA compliant behavior; entrypoint must be set") - executeName = codeLocalFile; - } else { - executeName = fs::path(implementation->getEntryPoint()); - LOG_TRACE(ApplicationFactory_impl, "Using provided entry point " << executeName) - if (!executeName.has_root_directory()) { - executeName = fs::path(component->spd.getSPDPath()) / executeName; - } - executeName = executeName.normalize(); - } - - attemptComponentExecution(executeName, execdev, component, implementation); + if (deployment->isExecutable()) { + attemptComponentExecution(_appReg, deployment); } } } - int createHelper::resolveDebugLevel( const std::string &level_in ) { int debug_level=-1; std::string dlevel = boost::to_upper_copy(level_in); @@ -2769,7 +2012,7 @@ int createHelper::resolveDebugLevel( const std::string &level_in ) { debug_level = ossie::logging::ConvertRHLevelToDebug( rhlevel ); if ( dlevel.at(0) != 'I' and debug_level == 3 ) debug_level=-1; - // test if number was provided. + // test if number was provided. 
if ( debug_level == -1 ){ char *p=NULL; int dl=strtol(dlevel.c_str(), &p, 10 ); @@ -2779,82 +2022,74 @@ int createHelper::resolveDebugLevel( const std::string &level_in ) { debug_level = ossie::logging::ConvertRHLevelToDebug( rhlevel ); } } - - return debug_level; + + return debug_level; } -void createHelper::resolveLoggingConfiguration( ossie::ComponentInfo *component ) { - +void createHelper::resolveLoggingConfiguration(redhawk::ComponentDeployment* deployment, redhawk::PropertyMap &execParams ) +{ std::string logging_uri(""); int debug_level=-1; - redhawk::PropertyMap execParams = redhawk::PropertyMap::cast(component->getExecParameters()); - if ( execParams.contains("LOGGING_CONFIG_URI") ) { - logging_uri = execParams["LOGGING_CONFIG_URI"].toString(); - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingContext: exec parameter provided, logging cfg uri:" << logging_uri); - } - - if ( execParams.contains("DEBUG_LEVEL") ) { - debug_level = resolveDebugLevel( execParams["DEBUG_LEVEL"].toString() ); - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingConfig: exec parameter provided debug_level:" << debug_level); - } + bool resolved_loggingconfig = false; - if ( execParams.contains("LOG_LEVEL") ) { - debug_level = resolveDebugLevel( execParams["LOG_LEVEL"].toString() ); - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingConfig: exec parameter provided debug_level:" << debug_level); + // check if logging configuration is part of component placement (loggingconfig in the sad file) + redhawk::PropertyMap log_config=deployment->getLoggingConfiguration(); + if ( log_config.contains("LOGGING_CONFIG_URI") ) { + logging_uri = log_config["LOGGING_CONFIG_URI"].toString(); + RH_TRACE(_createHelperLog, "resolveLoggingConfig: loggingconfig log config: " << logging_uri); } - - // resolve with older style where - redhawk::PropertyMap cfgProps = redhawk::PropertyMap::cast(component->getConfigureProperties()); - if ( cfgProps.contains("LOGGING_CONFIG_URI") ) { - logging_uri = 
cfgProps["LOGGING_CONFIG_URI"].toString(); - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingConfig: configure parameer provided, logcfg:" << logging_uri); + if ( log_config.contains("LOG_LEVEL") ) { + debug_level = resolveDebugLevel(log_config["LOG_LEVEL"].toString()); + resolved_loggingconfig = true; + RH_TRACE(_createHelperLog, "resolveLoggingConfig: loggingconfig debug_level: " << debug_level); } - if ( cfgProps.contains("LOG_LEVEL") ) { - debug_level = resolveDebugLevel( cfgProps["DEBUG_LEVEL"].toString() ); - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingConfig: configure parameer provided, debug level:" << debug_level); + // Use the log config resolver (if enabled) + const ossie::ComponentInstantiation* instantiation = deployment->getInstantiation(); + if (_appFact._domainManager->getUseLogConfigResolver()) { + ossie::logging::LogConfigUriResolverPtr logcfg_resolver = ossie::logging::GetLogConfigUriResolver(); + if ( logcfg_resolver ) { + std::string logcfg_path = ossie::logging::GetComponentPath(_appFact._domainName, _waveformContextName, + instantiation->getFindByNamingServiceName()); + std::string uri = logcfg_resolver->get_uri(logcfg_path); + RH_TRACE(_createHelperLog, "Using LogConfigResolver plugin: path " << logcfg_path << " logcfg: " << uri ); + if ( !uri.empty() ) logging_uri = uri; + } } - // check if logging configuration is part of component placement - ComponentInfo::LoggingConfig log_config=component->getLoggingConfig(); - if ( !log_config.first.empty()) { - logging_uri = log_config.first; - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingConfig: loggingconfig log config:" << logging_uri); + // check for runtime overloads + if ( execParams.contains("LOGGING_CONFIG_URI") ) { + logging_uri = execParams["LOGGING_CONFIG_URI"].toString(); + RH_TRACE(_createHelperLog, "resolveLoggingContext: exec parameter provided, logging cfg uri: " << logging_uri); + if (debug_level != -1) { + debug_level = -1; + resolved_loggingconfig = false; + 
RH_TRACE(_createHelperLog, "exec parameter provided, logging cfg uri, remove the debug_level set from loggingconfig"); + } } - // check if debug value provided - if ( !log_config.second.empty() ) { - debug_level = resolveDebugLevel( log_config.second ); - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingConfig: loggingconfig debug_level:" << debug_level); + if ( execParams.contains("DEBUG_LEVEL") ) { + debug_level = resolveDebugLevel( execParams["DEBUG_LEVEL"].toString() ); + resolved_loggingconfig = true; + RH_TRACE(_createHelperLog, "resolveLoggingConfig: exec parameter provided debug_level: " << debug_level); } - if ( _appFact._domainManager->getUseLogConfigResolver() ) { - std::string logcfg_path = ossie::logging::GetComponentPath( _appFact._domainName, _waveformContextName, component->getNamingServiceName() ); - ossie::logging::LogConfigUriResolverPtr logcfg_resolver = ossie::logging::GetLogConfigUriResolver(); - if ( logcfg_resolver ) { - std::string t_uri = logcfg_resolver->get_uri( logcfg_path ); - LOG_DEBUG(ApplicationFactory_impl, "Using LogConfigResolver plugin: path " << logcfg_path << " logcfg:" << t_uri ); - if ( !t_uri.empty() ) logging_uri = t_uri; - } + if ( execParams.contains("LOG_LEVEL") ) { + debug_level = resolveDebugLevel( execParams["LOG_LEVEL"].toString() ); + resolved_loggingconfig = true; + RH_TRACE(_createHelperLog, "resolveLoggingConfig: exec parameter provided log_level: " << debug_level); } // nothing is provided, use DomainManger's context if ( logging_uri.empty() ) { // Query the DomainManager for the logging configuration - LOG_DEBUG(ApplicationFactory_impl, "Checking DomainManager for LOGGING_CONFIG_URI"); + RH_DEBUG(_createHelperLog, "Checking DomainManager for LOGGING_CONFIG_URI"); PropertyInterface *log_prop = _appFact._domainManager->getPropertyFromId("LOGGING_CONFIG_URI"); StringProperty *logProperty = (StringProperty *)log_prop; if (!logProperty->isNil()) { logging_uri = logProperty->getValue(); } else { - 
LOG_DEBUG(ApplicationFactory_impl, "DomainManager LOGGING_CONFIG_URI is not set"); - } - - rh_logger::LoggerPtr dom_logger = _appFact._domainManager->getLogger(); - if ( dom_logger && debug_level == -1 ) { - rh_logger::LevelPtr dlevel = dom_logger->getLevel(); - if ( !dlevel ) dlevel = rh_logger::Logger::getRootLogger()->getLevel(); - debug_level = ossie::logging::ConvertRHLevelToDebug( dlevel ); + RH_TRACE(_createHelperLog, "DomainManager LOGGING_CONFIG_URI is not set"); } } @@ -2863,238 +2098,304 @@ void createHelper::resolveLoggingConfiguration( ossie::ComponentInfo *component if (logging_uri.substr(0, 4) == "sca:") { string fileSysIOR = ossie::corba::objectToString(_appFact._domainManager->_fileMgr); logging_uri += ("?fs=" + fileSysIOR); - LOG_DEBUG(ApplicationFactory_impl, "Adding DomainManager's FileSystem IOR " << logging_uri); + RH_TRACE(_createHelperLog, "Adding DomainManager's FileSystem IOR " << logging_uri); } - CF::DataType prop; - prop.id = "LOGGING_CONFIG_URI"; - prop.value <<= logging_uri.c_str(); - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingConfiguration: LOGGING_CONFIG_URI: " << logging_uri); - component->addExecParameter(prop); + execParams["LOGGING_CONFIG_URI"] = logging_uri; + RH_DEBUG(_createHelperLog, "resolveLoggingConfiguration: COMP: " << deployment->getIdentifier() << " LOGGING_CONFIG_URI: " << logging_uri); } // if debug level is resolved, then add as execparam - if ( debug_level != -1 ) { - CF::DataType prop; - prop.id = "DEBUG_LEVEL"; - prop.value <<= static_cast(debug_level); - LOG_DEBUG(ApplicationFactory_impl, "resolveLoggingConfiguration: DEBUG_LEVEL: " << debug_level ); - component->addExecParameter(prop); + if (resolved_loggingconfig) { // check to see if loggingconfig is set on the SAD file + execParams["DEBUG_LEVEL"] = static_cast(debug_level); + RH_DEBUG(_createHelperLog, "resolveLoggingConfiguration: COMP: " << deployment->getIdentifier() << " LOG_LEVEL: " << _appFact._domainManager->getInitialLogLevel() ); + } else if 
( _appFact._domainManager->getInitialLogLevel() != -1 ) { // check to see if a command-line debug level was used in nodeBooter + execParams["DEBUG_LEVEL"] = static_cast(_appFact._domainManager->getInitialLogLevel()); + RH_DEBUG(_createHelperLog, "resolveLoggingConfiguration: COMP: " << deployment->getIdentifier() << " LOG_LEVEL: " << _appFact._domainManager->getInitialLogLevel() ); } - } - -std::string createHelper::createVersionMismatchMessage(std::string &component_version) +void createHelper::attemptComponentExecution (CF::ApplicationRegistrar_ptr registrar, + redhawk::ComponentDeployment* deployment) { - std::string version = this->_appFact._domainManager->getRedhawkVersion(); - std::string added_message; - try { - if (redhawk::compareVersions(component_version, version) < 0) { - added_message = "Attempting to run a component from version "; - added_message += component_version; - added_message += " on REDHAWK version "; - added_message += version; - added_message += ". "; - } - } catch ( ... 
) {} - return added_message; -} + // Get executable device reference + boost::shared_ptr device = deployment->getAssignedDevice(); + if (!device->isExecutable()){ + std::ostringstream message; + message << "component " << deployment->getIdentifier() << " was assigned to non-executable device " + << device->identifier; + throw std::logic_error(message.str()); + } -void createHelper::attemptComponentExecution ( - const fs::path& executeName, - CF::ExecutableDevice_ptr execdev, - ossie::ComponentInfo* component, - const ossie::ImplementationInfo* implementation) { + // Build up the list of command line parameters + redhawk::PropertyMap execParameters = deployment->getCommandLineParameters(); + const std::string& nic = deployment->getNicAssignment(); + if (!nic.empty()) { + execParameters["NIC"] = nic; + } - CF::Properties execParameters; - - // get entrypoint - CF::ExecutableDevice::ProcessID_Type tempPid = -1; + // Add specialized CPU reservation if given + if (deployment->hasCpuReservation()) { + execParameters["RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"] = deployment->getCpuReservation(); + } - std::string component_version(component->spd.getSoftPkgType()); - // attempt to execute the component - try { - LOG_TRACE(ApplicationFactory_impl, "executing " << executeName << " on device " << ossie::corba::returnString(execdev->label())); - execParameters = component->getExecParameters(); - for (unsigned int i = 0; i < execParameters.length(); ++i) { - LOG_TRACE(ApplicationFactory_impl, " exec param " << execParameters[i].id << " " << ossie::any_to_string(execParameters[i].value)) - } - // call 'execute' on the ExecutableDevice to execute the component - CF::StringSequence dep_seq; - std::vector resolved_softpkg_deps = component->getResolvedSoftPkgDependencies(); - dep_seq.length(resolved_softpkg_deps.size()); - for (unsigned int p=0;p!=dep_seq.length();p++) { - dep_seq[p]=CORBA::string_dup(resolved_softpkg_deps[p].c_str()); - } + // Add the required parameters specified in 
SR:163 + // Naming Context IOR, Name Binding, and component identifier + execParameters["COMPONENT_IDENTIFIER"] = deployment->getIdentifier(); + if (deployment->getInstantiation()->isNamingService()) { + execParameters["NAME_BINDING"] = deployment->getInstantiation()->getFindByNamingServiceName(); + } + execParameters["PROFILE_NAME"] = deployment->getSoftPkg()->getSPDFile(); - // get Options list - CF::Properties cop = component->getOptions(); - for (unsigned int i = 0; i < cop.length(); ++i) { - LOG_TRACE(ApplicationFactory_impl, " RESOURCE OPTION: " << cop[i].id << " " << ossie::any_to_string(cop[i].value)) - } + execParameters["DOM_PATH"] = _baseNamingContext; + resolveLoggingConfiguration(deployment, execParameters); - tempPid = execdev->executeLinked(executeName.string().c_str(), cop, component->getPopulatedExecParameters(), dep_seq); - } catch( CF::InvalidFileName& _ex ) { - std::string added_message = this->createVersionMismatchMessage(component_version); - ostringstream eout; - eout << "InvalidFileName when calling 'execute' on device with device id: '" << component->getAssignedDeviceId() << "' for component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - eout << " with implementation id: '" << implementation->getId() << "'"; - eout << " in waveform '" << _waveformContextName<<"'"; - eout << " with error: <" << _ex.msg << ">;"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_TRACE(ApplicationFactory_impl, eout.str()) - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } catch( CF::Device::InvalidState& _ex ) { - std::string added_message = this->createVersionMismatchMessage(component_version); - ostringstream eout; - eout << "InvalidState when calling 'execute' on device with device id: '" << component->getAssignedDeviceId() << "' for component: '"; - eout << component->getName() << "' with component id: '" << 
component->getIdentifier() << "' "; - eout << " with implementation id: '" << implementation->getId() << "'"; - eout << " in waveform '" << _waveformContextName<<"'"; - eout << " with error: <" << _ex.msg << ">;"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_TRACE(ApplicationFactory_impl, eout.str()) - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } catch( CF::ExecutableDevice::InvalidParameters& _ex ) { - std::string added_message = this->createVersionMismatchMessage(component_version); - ostringstream eout; - eout << "InvalidParameters when calling 'execute' on device with device id: '" << component->getAssignedDeviceId() << "' for component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - eout << " with implementation id: '" << implementation->getId() << "'"; - eout << " in waveform '" << _waveformContextName<<"'"; - eout << " with invalid params: <"; - for (unsigned int propIdx = 0; propIdx < _ex.invalidParms.length(); propIdx++){ - eout << "(" << _ex.invalidParms[propIdx].id << "," << ossie::any_to_string(_ex.invalidParms[propIdx].value) << ")"; - } - eout << " > error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_TRACE(ApplicationFactory_impl, eout.str()) - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } catch( CF::ExecutableDevice::InvalidOptions& _ex ) { - std::string component_version(component->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - ostringstream eout; - eout << "InvalidOptions when calling 'execute' on device with device id: '" << component->getAssignedDeviceId() << "' for component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - eout << " with implementation id: '" << implementation->getId() << "'"; - eout << " in 
waveform '" << _waveformContextName<<"'"; - eout << " with invalid options: <"; - for (unsigned int propIdx = 0; propIdx < _ex.invalidOpts.length(); propIdx++){ - eout << "(" << _ex.invalidOpts[propIdx].id << "," << ossie::any_to_string(_ex.invalidOpts[propIdx].value) << ")"; - } - eout << " > error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_TRACE(ApplicationFactory_impl, eout.str()) - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } catch (CF::ExecutableDevice::ExecuteFail& ex) { - std::string added_message = this->createVersionMismatchMessage(component_version); - ostringstream eout; - eout << added_message; - eout << "ExecuteFail when calling 'execute' on device with device id: '" << component->getAssignedDeviceId() << "' for component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - eout << " with implementation id: '" << implementation->getId() << "'"; - eout << " in waveform '" << _waveformContextName<<"'"; - eout << " with message: '" << ex.msg << "'"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_TRACE(ApplicationFactory_impl, eout.str()) - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } CATCH_THROW_LOG_ERROR( - ApplicationFactory_impl, this->createVersionMismatchMessage(component_version)<<"Caught an unexpected error when calling 'execute' on device with device id: '" - << component->getAssignedDeviceId() << "' for component: '" << component->getName() - << "' with component id: '" << component->getIdentifier() << "' " - << " with implementation id: '" << implementation->getId() << "'" - << " in waveform '" << _waveformContextName<<"'" - << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__, - CF::ApplicationFactory::CreateApplicationError(CF::CF_EINVAL, "Caught an unexpected error when calling 'execute' on device")); + // Add the 
Naming Context IOR last to make it easier to parse the command line + execParameters["NAMING_CONTEXT_IOR"] = ossie::corba::objectToString(registrar); + + // Get entry point + std::string entryPoint = deployment->getEntryPoint(); + if (entryPoint.empty()) { + RH_WARN(_createHelperLog, "executing using code file as entry point; this is non-SCA compliant behavior; entrypoint must be set"); + entryPoint = deployment->getLocalFile(); + } + + // Get the complete list of dependencies to include in executeLinked + std::vector resolved_softpkg_deps = deployment->getDependencyLocalFiles(); + CF::StringSequence dep_seq; + dep_seq.length(resolved_softpkg_deps.size()); + for (unsigned int p=0;p!=dep_seq.length();p++) { + dep_seq[p]=CORBA::string_dup(resolved_softpkg_deps[p].c_str()); + } + + // Attempt to execute the component + CF::ExecutableDevice_var execdev; + if (deployment->getContainer()) { + RH_TRACE(_createHelperLog, "Executing " << entryPoint << " via container on device " << device->label); + redhawk::ComponentDeployment* container = deployment->getContainer(); + CF::Resource_var resource = container->getResourcePtr(); + execdev = CF::ExecutableDevice::_narrow(resource); + } else { + RH_TRACE(_createHelperLog, "Executing " << entryPoint << " on device " << device->label); + execdev = CF::ExecutableDevice::_duplicate(device->executableDevice); + } + for (redhawk::PropertyMap::iterator prop = execParameters.begin(); prop != execParameters.end(); ++prop) { + RH_TRACE(_createHelperLog, " exec param " << prop->getId() << " " << prop->getValue().toString()); + } + + // Get options list + redhawk::PropertyMap options = deployment->getOptions(); + for (redhawk::PropertyMap::iterator opt = options.begin(); opt != options.end(); ++opt) { + RH_TRACE(_createHelperLog, " RESOURCE OPTION: " << opt->getId() + << " " << opt->getValue().toString()); + } + + CF::ExecutableDevice::ProcessID_Type pid = -1; + try { + // call 'execute' on the ExecutableDevice to execute the component + 
pid = execdev->executeLinked(entryPoint.c_str(), options, execParameters, dep_seq); + } catch (const CF::InvalidFileName& exc) { + throw redhawk::ExecuteError(deployment, "invalid filename " + std::string(exc.msg)); + } catch (const CF::Device::InvalidState& exc) { + std::string message = "invalid device state " + std::string(exc.msg); + throw redhawk::ExecuteError(deployment, message); + } catch (const CF::ExecutableDevice::InvalidParameters& exc) { + std::string message = "invalid parameters " + redhawk::PropertyMap::cast(exc.invalidParms).toString(); + throw redhawk::ExecuteError(deployment, message); + } catch (const CF::ExecutableDevice::InvalidOptions& exc) { + std::string message = "invalid options " + redhawk::PropertyMap::cast(exc.invalidOpts).toString(); + throw redhawk::ExecuteError(deployment, message); + } catch (const CF::ExecutableDevice::ExecuteFail& exc) { + std::string message = "execute failure " + std::string(exc.msg); + throw redhawk::ExecuteError(deployment, message); + } catch (const CORBA::SystemException& exc) { + throw redhawk::ExecuteError(deployment, ossie::corba::describeException(exc)); + } catch (...) 
{ + // Should never happen, but turn anything else into an ExecuteError + // just in case + throw redhawk::ExecuteError(deployment, "unexpected error"); + } // handle pid output - if (tempPid < 0) { - std::string added_message = this->createVersionMismatchMessage(component_version); - ostringstream eout; - eout << added_message; - eout << "Failed to 'execute' component for component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << "' "; - eout << " with implementation id: '" << implementation->getId() << "'"; - eout << " in waveform '" << _waveformContextName<<"'"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_TRACE(ApplicationFactory_impl, eout.str()) - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EAGAIN, eout.str().c_str()); + if (pid < 0) { + throw redhawk::ExecuteError(deployment, "execute returned invalid process ID"); } else { - _application->setComponentPid(component->getIdentifier(), tempPid); + redhawk::ApplicationComponent* app_component = deployment->getApplicationComponent(); + app_component->setProcessId(pid); } } -void createHelper::applyApplicationAffinityOptions() { - - if ( _app_affinity.length() > 0 ) { - // log deployments with application affinity - for ( uint32_t i=0; i < _app_affinity.length(); i++ ) { - CF::DataType dt = _app_affinity[i]; - LOG_INFO(ApplicationFactory_impl, " Applying Application Affinity: directive id:" << dt.id << "/" << ossie::any_to_string( dt.value )) ; +void createHelper::applyApplicationAffinityOptions(const DeploymentList& deployments) +{ + // RESOLVE - need SAD file directive to control this behavior.. i.e if promote_nic_to_affinity==true... 
+ // for now add nic assignment as application affinity to all components deployed by this device + redhawk::PropertyMap app_affinity; + for (DeploymentList::const_iterator dep = deployments.begin(); dep != deployments.end(); ++dep) { + if ((*dep)->hasNicAssignment()) { + app_affinity = (*dep)->getAffinityOptionsWithAssignment(); + } } + + if (!app_affinity.empty()) { + // log deployments with application affinity + for ( uint32_t i=0; i < app_affinity.length(); i++ ) { + CF::DataType dt = app_affinity[i]; + RH_INFO(_createHelperLog, " Applying Application Affinity: directive id:" << dt.id << "/" << ossie::any_to_string( dt.value )) ; + } - // - // Promote NIC affinity for all components deployed on the same device - // - boost::shared_ptr deploy_on_device; - for (unsigned int rc_idx = 0; rc_idx < _requiredComponents.size (); rc_idx++) { - ossie::ComponentInfo * comp = _requiredComponents[rc_idx]; - if ( comp->getNicAssignment() != "" ) { - deploy_on_device = comp->getAssignedDevice(); + // + // Promote NIC affinity for all components deployed on the same device + // + boost::shared_ptr deploy_on_device; + for (unsigned int rc_idx = 0; rc_idx < deployments.size(); rc_idx++) { + redhawk::ComponentDeployment* deployment = deployments[rc_idx]; + if (!(deployment->getNicAssignment().empty())) { + deploy_on_device = deployment->getAssignedDevice(); + } } - } - if ( deploy_on_device ) { - for (unsigned int rc_idx = 0; rc_idx < _requiredComponents.size (); rc_idx++) { - ossie::ComponentInfo* component = _requiredComponents[rc_idx]; - boost::shared_ptr dev= component->getAssignedDevice(); - // for matching device deployments then apply nic affinity settings - if ( dev->identifier == deploy_on_device->identifier ) { - component->mergeAffinityOptions( _app_affinity ); - } + if (deploy_on_device) { + for (unsigned int rc_idx = 0; rc_idx < deployments.size (); rc_idx++) { + redhawk::ComponentDeployment* deployment = deployments[rc_idx]; + boost::shared_ptr dev = 
deployment->getAssignedDevice(); + // for matching device deployments then apply nic affinity settings + if (dev->identifier == deploy_on_device->identifier) { + deployment->mergeAffinityOptions(app_affinity); + } + } } } +} - } +void createHelper::waitForContainerRegistration(redhawk::ApplicationDeployment& appDeployment) +{ + // Wait for any containers to be registered before continuing + int timeout = _appFact._domainManager->getComponentBindingTimeout(); + RH_TRACE(_createHelperLog, "Waiting " << timeout << "s for containers to register"); + std::set expected_components; + BOOST_FOREACH(redhawk::ContainerDeployment* container, appDeployment.getContainerDeployments()) { + expected_components.insert(container->getIdentifier()); + } + + // Record current time, to measure elapsed time in the event of a failure + time_t start = time(NULL); + + // Wait for all required components to register, adding additional context + // to any termination exceptions that may be raised + bool complete = _application->waitForComponents(expected_components, timeout); + // TODO: convert into ExecuteError + + // For reference, determine much time has really elapsed. 
+ time_t elapsed = time(NULL)-start; + if (!complete) { + RH_ERROR(_createHelperLog, "Timed out waiting for container to register (" << elapsed << "s elapsed)"); + } else { + RH_DEBUG(_createHelperLog, "Container registration completed in " << elapsed << "s"); + } + + // Fetch the objects, finding any components that did not register + BOOST_FOREACH(redhawk::ContainerDeployment* container, appDeployment.getContainerDeployments()) { + // Check that the component host registered with the application; it + // should have a valid CORBA reference + CORBA::Object_var objref = container->getApplicationComponent()->getComponentObject(); + if (CORBA::is_nil(objref)) { + throw redhawk::ExecuteError(container, "container did not register with application"); + } + + CF::Resource_var resource = ossie::corba::_narrowSafe(objref); + if (CORBA::is_nil(resource)) { + throw redhawk::ComponentError(container, "component object is not a CF::Resource"); + } + + container->setResourcePtr(resource); + } } -void createHelper::waitForComponentRegistration() +void createHelper::waitForComponentRegistration(redhawk::ApplicationDeployment& appDeployment) { // Wait for all components to be registered before continuing int componentBindingTimeout = _appFact._domainManager->getComponentBindingTimeout(); - LOG_TRACE(ApplicationFactory_impl, "Waiting " << componentBindingTimeout << "s for all components register"); + RH_TRACE(_createHelperLog, "Waiting " << componentBindingTimeout << "s for all components to register"); // Track only SCA-compliant components; non-compliant components will never // register with the application, nor do they need to be initialized std::set expected_components; - for (PlacementList::iterator ii = _requiredComponents.begin(); ii != _requiredComponents.end(); ++ii) { - if ((*ii)->isScaCompliant()) { - expected_components.insert((*ii)->getIdentifier()); + const DeploymentList& deployments = appDeployment.getComponentDeployments(); + for (DeploymentList::const_iterator 
dep = deployments.begin(); dep != deployments.end(); ++dep) { + if ((*dep)->getSoftPkg()->isScaCompliant()) { + expected_components.insert((*dep)->getIdentifier()); } } // Record current time, to measure elapsed time in the event of a failure time_t start = time(NULL); - if (!_application->waitForComponents(expected_components, componentBindingTimeout)) { - // For reference, determine much time has really elapsed. - time_t elapsed = time(NULL)-start; - LOG_ERROR(ApplicationFactory_impl, "Timed out waiting for component to bind to naming context (" << elapsed << "s elapsed)"); - ostringstream eout; - for (unsigned int req_idx = 0; req_idx < _requiredComponents.size(); req_idx++) { - if (expected_components.count(_requiredComponents[req_idx]->getIdentifier())) { - std::string component_version(_requiredComponents[req_idx]->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - eout << added_message; - eout << "Timed out waiting for component to register: '" << _requiredComponents[req_idx]->getName() << "' with component id: '" << _requiredComponents[req_idx]->getIdentifier()<< " assigned to device: '"<<_requiredComponents[req_idx]->getAssignedDeviceId()<<"'"; - break; + // Wait for all required components to register, adding additional context + // to any termination exceptions that may be raised + bool complete; + try { + complete = _application->waitForComponents(expected_components, componentBindingTimeout); + } catch (const redhawk::ComponentTerminated& exc) { + redhawk::ComponentDeployment* deployment = appDeployment.getComponentDeploymentByUniqueId(exc.identifier()); + if (!deployment) { + // The deployment should always be found, but in the event that it + // isn't, rethrow the original exception just in case; the outer + // create() exception handler will turn it into a CF exception + throw; + } + std::string message = "component terminated before registering with application"; + message += 
::getVersionMismatchMessage(deployment->getSoftPkg()); + throw redhawk::ExecuteError(deployment, message); + } + + // For reference, determine much time has really elapsed. + time_t elapsed = time(NULL)-start; + if (!complete) { + RH_ERROR(_createHelperLog, "Timed out waiting for components to register (" << elapsed << "s elapsed)"); + } else { + RH_DEBUG(_createHelperLog, "Component registration completed in " << elapsed << "s"); + } + + // Fetch the objects, finding any components that did not register + BOOST_FOREACH(redhawk::ComponentDeployment* deployment, deployments) { + const SoftPkg* softpkg = deployment->getSoftPkg(); + if (softpkg->isScaCompliant()) { + // Check that the component registered with the application; it + // should have a valid CORBA reference + CORBA::Object_var objref = deployment->getApplicationComponent()->getComponentObject(); + if (CORBA::is_nil(objref)) { + std::string message = "component did not register with application"; + message += ::getVersionMismatchMessage(softpkg); + throw redhawk::ExecuteError(deployment, message); + } + + // Occasionally, omniORB may have a cached connection where the + // other end has terminated (this is particularly a problem with + // Java, because the Sun ORB never closes connections on shutdown). + // If the new component just happens to have the same TCP/IP + // address and port, the first time we try to reach the component, + // it will get a CORBA.COMM_FAILURE exception even though the + // reference is valid. In this case, a call to _non_existent() + // should cause omniORB to clean up the stale socket, and any + // subsequent calls behave normally. + try { + objref->_non_existent(); + } catch (...) 
{ + RH_DEBUG(_createHelperLog, "Component object did not respond to initial ping"); + } + + // Convert to a CF::Resource object + if (deployment->isResource()) { + CF::Resource_var resource = ossie::corba::_narrowSafe(objref); + if (CORBA::is_nil(resource)) { + throw redhawk::ComponentError(deployment, "component object is not a CF::Resource"); + } + + deployment->setResourcePtr(resource); } } - eout << " in waveform '" << _waveformContextName<<"';"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); } } @@ -3103,419 +2404,117 @@ void createHelper::waitForComponentRegistration() * - Ensure components have started and are bound to Naming Service * - Initialize each component */ -void createHelper::initializeComponents() +void createHelper::initializeComponents(const DeploymentList& deployments) { // Install the different components in the system - LOG_TRACE(ApplicationFactory_impl, "initializing " << _requiredComponents.size() << " waveform components") - - // Resize the _startSeq vector to the right size - _startSeq.resize(_startOrderIds.size()); - - CF::Components_var app_registeredComponents = _application->registeredComponents(); + RH_TRACE(_createHelperLog, "initializing " << deployments.size() << " waveform components"); - for (unsigned int rc_idx = 0; rc_idx < _requiredComponents.size (); rc_idx++) { - ossie::ComponentInfo* component = _requiredComponents[rc_idx]; + for (unsigned int rc_idx = 0; rc_idx < deployments.size (); rc_idx++) { + redhawk::ComponentDeployment* deployment = deployments[rc_idx]; + const ossie::SoftPkg* softpkg = deployment->getSoftPkg(); // If the component is non-SCA compliant then we don't expect anything beyond this - if (!component->isScaCompliant()) { - LOG_TRACE(ApplicationFactory_impl, "Component is non SCA-compliant, continuing to next component"); + if (!softpkg->isScaCompliant()) { + 
RH_TRACE(_createHelperLog, "Component is non SCA-compliant, continuing to next component"); continue; - } - - if (!component->isResource()) { - LOG_TRACE(ApplicationFactory_impl, "Component is not a resource, continuing to next component"); + } else if (!deployment->isResource()) { + RH_TRACE(_createHelperLog, "Component is not a resource, continuing to next component"); continue; } - // Find the component on the Application - const std::string componentId = component->getIdentifier(); - CF::Resource_var resource = CF::Resource::_nil(); - for (unsigned int comp_idx=0; comp_idxlength(); comp_idx++) { - std::string comp_id = std::string(app_registeredComponents[comp_idx].identifier); - if (comp_id == componentId) { - resource = ossie::corba::_narrowSafe(app_registeredComponents[comp_idx].componentObject); - break; - } - } - if (CORBA::is_nil(resource)) { - ostringstream eout; - std::string component_version(component->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - eout << added_message; - eout << "CF::Resource::_narrow failed with Unknown Exception for component: '" << component->getName() << "' with component id: '" << componentId << " assigned to device: '"<getAssignedDeviceId()<<"'"; - eout << " in waveform '" << _waveformContextName<<"';"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } - - component->setResourcePtr(resource); - - int initAttempts=3; - while ( initAttempts > 0 ) { - initAttempts--; - if ( ossie::corba::objectExists(resource) == true ) { initAttempts = 0; continue; } - LOG_DEBUG(ApplicationFactory_impl, "Retrying component ping............ 
comp:" << component->getIdentifier() << " waveform: " << _waveformContextName); - usleep(1000); - } - - - // - // call resource's initializeProperties method to handle any properties required for construction - // - LOG_DEBUG(ApplicationFactory_impl, "Initialize properties for component " << componentId); - if (component->isResource () && component->isConfigurable ()) { - CF::Properties partialStruct = component->containsPartialStructConstruct(); - if (partialStruct.length() != 0) { - ostringstream eout; - std::string component_version(component->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - eout << added_message; - eout << "Failed to 'initializeProperties' component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "This component contains structure"<initializeProperties(component->getNonNilNonExecConstructProperties()); - } catch(CF::PropertySet::InvalidConfiguration& e) { - ostringstream eout; - eout << "Failed to initialize component properties: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "InvalidConfiguration with this info: <"; - eout << e.msg << "> for these invalid properties: "; - for (unsigned int propIdx = 0; propIdx < e.invalidProperties.length(); propIdx++){ - eout << "(" << e.invalidProperties[propIdx].id << ","; - eout << ossie::any_to_string(e.invalidProperties[propIdx].value) << ")"; - } - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::InvalidInitConfiguration(e.invalidProperties); - } catch(CF::PropertySet::PartialConfiguration& 
e) { - ostringstream eout; - eout << "Failed to initialize component properties: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "PartialConfiguration for these invalid properties: "; - for (unsigned int propIdx = 0; propIdx < e.invalidProperties.length(); propIdx++){ - eout << "(" << e.invalidProperties[propIdx].id << ","; - eout << ossie::any_to_string(e.invalidProperties[propIdx].value) << ")"; - } - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::InvalidInitConfiguration(e.invalidProperties); - } catch( ... ) { - ostringstream eout; - std::string component_version(component->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - eout << added_message; - eout << "Failed to initialize component properties: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "'initializeProperties' failed with Unknown Exception"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EINVAL, eout.str().c_str()); - } - } - - LOG_TRACE(ApplicationFactory_impl, "Initializing component " << componentId); - try { - resource->initialize(); - } catch (const CF::LifeCycle::InitializeError& error) { - // Dump the detailed initialization failure to the log - ostringstream logmsg; - std::string component_version(component->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - logmsg << 
added_message; - logmsg << "Initializing component " << componentId << " failed"; - for (CORBA::ULong index = 0; index < error.errorMessages.length(); ++index) { - logmsg << std::endl << error.errorMessages[index]; - } - LOG_ERROR(ApplicationFactory_impl, logmsg.str()); - - ostringstream eout; - eout << added_message; - eout << "Unable to initialize component " << componentId; - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } catch (const CORBA::SystemException& exc) { - ostringstream eout; - std::string component_version(component->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - eout << added_message; - eout << "CORBA " << exc._name() << " exception initializing component " << componentId; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } - - if (!component->isAssemblyController()) { - // Try and find the right location in the vector to add the reference - unsigned int pos = 0; - for (unsigned int i = 0; i < _startOrderIds.size(); i++) { - std::string currID = _startOrderIds[i]; - currID = currID.append(":"); - currID = currID.append(_waveformContextName); - - if (componentId == currID) { - break; - } - pos++; - } - - // Add the reference if it belongs in the list - if (pos < _startOrderIds.size()) { - _startSeq[pos] = CF::Resource::_duplicate(resource); - } - } + deployment->initialize(); } } -void createHelper::configureComponents() +void createHelper::configureComponents(const DeploymentList& deployments) { - for (unsigned int rc_idx = 0; rc_idx < _requiredComponents.size (); rc_idx++) { - ossie::ComponentInfo* component = _requiredComponents[rc_idx]; - - if (component->isAssemblyController ()) { - continue; - } - - // If the component is non-SCA compliant then we don't expect anything beyond this - if (!component->isScaCompliant()) { - 
LOG_TRACE(ApplicationFactory_impl, "Skipping configure; Component is non SCA-compliant, continuing to next component"); - continue; - } - - if (!component->isResource ()) { - LOG_TRACE(ApplicationFactory_impl, "Skipping configure; Component in not resource, continuing to next component"); - continue; - } - - // Assuming 1 instantiation for each componentplacement - if (component->getNamingService ()) { - - CF::Resource_var _rsc = component->getResourcePtr(); - - if (CORBA::is_nil(_rsc)) { - LOG_ERROR(ApplicationFactory_impl, "Could not get component reference"); - ostringstream eout; - std::string component_version(component->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - eout << added_message; - eout << "Could not get component reference for component: '" - << component->getName() << "' with component id: '" - << component->getIdentifier() << " assigned to device: '" - << component->getAssignedDeviceId()<<"'"; - eout << " in waveform '" << _waveformContextName<<"';"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } - - if (component->isResource () && component->isConfigurable ()) { - CF::Properties partialStruct = component->containsPartialStructConfig(); - bool partialWarn = false; - if (partialStruct.length() != 0) { - ostringstream eout; - eout << "Component " << component->getIdentifier() << " contains structure: "<< partialStruct[0].id <<" with a mix of defined and nil values. 
The behavior for the component is undefined"; - LOG_WARN(ApplicationFactory_impl, eout.str()); - partialWarn = true; - } - try { - // try to configure the component - if (component->getNonNilConfigureProperties().length() != 0) - _rsc->configure (component->getNonNilConfigureProperties()); - } catch(CF::PropertySet::InvalidConfiguration& e) { - ostringstream eout; - eout << "Failed to 'configure' component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "InvalidConfiguration with this info: <"; - eout << e.msg << "> for these invalid properties: "; - for (unsigned int propIdx = 0; propIdx < e.invalidProperties.length(); propIdx++){ - eout << "(" << e.invalidProperties[propIdx].id << ","; - eout << ossie::any_to_string(e.invalidProperties[propIdx].value) << ")"; - } - if (partialWarn) { - eout << ". Note that this component contains a property with a mix of defined and nil values."; - } - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::InvalidInitConfiguration(e.invalidProperties); - } catch(CF::PropertySet::PartialConfiguration& e) { - ostringstream eout; - eout << "Failed to instantiate component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "Failed to 'configure' component; PartialConfiguration for these invalid properties: "; - for (unsigned int propIdx = 0; propIdx < e.invalidProperties.length(); propIdx++){ - eout << "(" << e.invalidProperties[propIdx].id << ","; - eout << ossie::any_to_string(e.invalidProperties[propIdx].value) << ")"; - } - if (partialWarn) { - eout << ". 
Note that this component contains a property with a mix of defined and nil values."; - } - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::InvalidInitConfiguration(e.invalidProperties); - } catch( ... ) { - ostringstream eout; - std::string component_version(component->spd.getSoftPkgType()); - std::string added_message = this->createVersionMismatchMessage(component_version); - eout << added_message; - eout << "Failed to instantiate component: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "'configure' failed with Unknown Exception"; - if (partialWarn) { - eout << ". Note that this component contains a property with a mix of defined and nil values."; - } - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EINVAL, eout.str().c_str()); - } - } + redhawk::ComponentDeployment* ac_deployment = 0; + for (DeploymentList::const_iterator depl = deployments.begin(); depl != deployments.end(); ++depl) { + redhawk::ComponentDeployment* deployment = (*depl); + if (deployment->isAssemblyController()) { + ac_deployment = deployment; + } else { + deployment->configure(); } } - - // configure the assembly controller last - for (unsigned int rc_idx = 0; rc_idx < _requiredComponents.size (); rc_idx++) { - ossie::ComponentInfo* component = _requiredComponents[rc_idx]; - - if (!component->isAssemblyController ()) { - continue; - } - - // If the component is non-SCA compliant then we don't expect anything beyond this - if (!component->isScaCompliant()) { - LOG_TRACE(ApplicationFactory_impl, "Skipping configure; Assembly controller is non SCA-compliant"); - 
break; - } - - if (!component->isResource ()) { - LOG_TRACE(ApplicationFactory_impl, "Skipping configure; Assembly controller is not resource"); - break; - } - - // Assuming 1 instantiation for each componentplacement - if (component->getNamingService ()) { - - CF::Resource_var _rsc = component->getResourcePtr(); - - if (CORBA::is_nil(_rsc)) { - LOG_ERROR(ApplicationFactory_impl, "Could not get Assembly Controller reference"); - ostringstream eout; - eout << "Could not get reference for Assembly Controller: '" - << component->getName() << "' with component id: '" - << component->getIdentifier() << " assigned to device: '" - << component->getAssignedDeviceId()<<"'"; - eout << " in waveform '" << _waveformContextName<<"';"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); - } - - if (component->isResource () && component->isConfigurable ()) { - CF::Properties partialStruct = component->containsPartialStructConfig(); - bool partialWarn = false; - if (partialStruct.length() != 0) { - ostringstream eout; - eout << "Component " << component->getIdentifier() << " contains structure"<< partialStruct[0].id <<" with a mix of defined and nil values. 
The behavior for the component is undefined"; - LOG_WARN(ApplicationFactory_impl, eout.str()); - partialWarn = true; - } - try { - // try to configure the component - if (component->getNonNilConfigureProperties().length() != 0) - _rsc->configure (component->getNonNilConfigureProperties()); - } catch(CF::PropertySet::InvalidConfiguration& e) { - ostringstream eout; - eout << "Failed to 'configure' Assembly Controller: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "InvalidConfiguration with this info: <"; - eout << e.msg << "> for these invalid properties: "; - for (unsigned int propIdx = 0; propIdx < e.invalidProperties.length(); propIdx++){ - eout << "(" << e.invalidProperties[propIdx].id << ","; - eout << ossie::any_to_string(e.invalidProperties[propIdx].value) << ")"; - } - if (partialWarn) { - eout << ". Note that this component contains a property with a mix of defined and nil values."; - } - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::InvalidInitConfiguration(e.invalidProperties); - } catch(CF::PropertySet::PartialConfiguration& e) { - ostringstream eout; - eout << "Failed to instantiate Assembly Controller: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "Failed to 'configure' Assembly Controller; PartialConfiguration for these invalid properties: "; - for (unsigned int propIdx = 0; propIdx < e.invalidProperties.length(); propIdx++){ - eout << "(" << e.invalidProperties[propIdx].id << ","; - eout << ossie::any_to_string(e.invalidProperties[propIdx].value) << ")"; - } - if (partialWarn) { - eout << ". 
Note that this component contains a property with a mix of defined and nil values."; - } - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::InvalidInitConfiguration(e.invalidProperties); - } catch( ... ) { - ostringstream eout; - eout << "Failed to instantiate Assembly Controller: '"; - eout << component->getName() << "' with component id: '" << component->getIdentifier() << " assigned to device: '"<getAssignedDeviceId() << "' "; - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << "'configure' failed with Unknown Exception"; - if (partialWarn) { - eout << ". Note that this component contains a property with a mix of defined and nil values."; - } - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(ApplicationFactory_impl, eout.str()); - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EINVAL, eout.str().c_str()); - } - } - } - break; + + // Configure the assembly controller last, if it's configurable + if (ac_deployment) { + ac_deployment->configure(); } } /* Connect the components * - Connect the components */ -void createHelper::connectComponents(std::vector& connections, string base_naming_context) +void createHelper::connectComponents(redhawk::ApplicationDeployment& appDeployment, + std::vector& connections, + string base_naming_context) { const std::vector& _connection = _appFact._sadParser.getConnections (); // Create an AppConnectionManager to resolve and track all connections in the application. - // NB: Use an auto_ptr instead of a bare pointer so that it will automatically be deleted - // in the event of a failure. 
using ossie::AppConnectionManager; - std::auto_ptr connectionManager(new AppConnectionManager(_appFact._domainManager, this, this, base_naming_context)); + AppConnectionManager connectionManager(_appFact._domainManager, &appDeployment, &appDeployment, base_naming_context); // Create all resource connections - LOG_TRACE(ApplicationFactory_impl, "Establishing " << _connection.size() << " waveform connections") + RH_TRACE(_createHelperLog, "Establishing " << _connection.size() << " waveform connections") for (int c_idx = _connection.size () - 1; c_idx >= 0; c_idx--) { const Connection& connection = _connection[c_idx]; - LOG_TRACE(ApplicationFactory_impl, "Processing connection " << connection.getID()); + RH_TRACE(_createHelperLog, "Processing connection " << connection.getID()); // Attempt to resolve the connection; if any connection fails, application creation fails. - if (!connectionManager->resolveConnection(connection)) { - LOG_ERROR(ApplicationFactory_impl, "Unable to make connection " << connection.getID()); - ostringstream eout; - eout << "Unable to make connection " << connection.getID(); - eout << " in waveform '"<< _waveformContextName<<"';"; - eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - throw CF::ApplicationFactory::CreateApplicationError(CF::CF_EIO, eout.str().c_str()); + bool resolved; + try { + resolved = connectionManager.resolveConnection(connection); + } catch (const std::exception& exc) { + throw redhawk::ConnectionError(connection.getID(), exc.what()); + } + if (!resolved) { + throw redhawk::ConnectionError(connection.getID(), "connection failed"); } } // Copy all established connections into the connection array - const std::vector& establishedConnections = connectionManager->getConnections(); + const std::vector& establishedConnections = connectionManager.getConnections(); std::copy(establishedConnections.begin(), establishedConnections.end(), std::back_inserter(connections)); } +std::vector 
createHelper::getStartOrder(const DeploymentList& deployments) +{ + RH_TRACE(_createHelperLog, "Assigning start order"); + + // Now that all of the components are known, bin the start orders based on + // the values in the SAD. Using a multimap, keyed on the start order value, + // accounts for duplicate keys and allows assigning the effective order + // easily by iterating through all entries. + typedef std::multimap StartOrderMap; + StartOrderMap start_map; + for (size_t index = 0; index < deployments.size(); ++index) { + redhawk::ComponentDeployment* deployment = deployments[index]; + const ossie::ComponentInstantiation* instantiation = deployment->getInstantiation(); + if (deployment->isAssemblyController()) { + RH_TRACE(_createHelperLog, "Component " << instantiation->getID() + << " is the assembly controller"); + } else if (instantiation->hasStartOrder()) { + // Only track start order if it was provided, and the component is + // not the assembly controller + start_map.insert(std::make_pair(instantiation->getStartOrder(), deployment)); + } + } + + // Build the start order vector in the right order + std::vector start_order; + int index = 1; + for (StartOrderMap::iterator ii = start_map.begin(); ii != start_map.end(); ++ii, ++index) { + RH_TRACE(_createHelperLog, index << ": " + << ii->second->getInstantiation()->getID()); + start_order.push_back(ii->second->getIdentifier()); + } + return start_order; +} + createHelper::createHelper ( const ApplicationFactory_impl& appFact, string waveformContextName, @@ -3524,15 +2523,19 @@ createHelper::createHelper ( CosNaming::NamingContext_ptr domainContext ): _appFact(appFact), + _createHelperLog(appFact.returnLogger()), _allocationMgr(_appFact._domainManager->_allocationMgr), _allocations(*_allocationMgr), + _waveformContextName(waveformContextName), + _baseNamingContext(baseNamingContext), + _waveformContext(CosNaming::NamingContext::_duplicate(waveformContext)), + _domainContext(domainContext), + 
_profileCache(_appFact._fileMgr, appFact.returnLogger()), _isComplete(false), - _application(0) + _application(0), + _stopTimeout(DEFAULT_STOP_TIMEOUT), + _aware(true) { - this->_waveformContextName = waveformContextName; - this->_baseNamingContext = baseNamingContext; - this->_waveformContext = CosNaming::NamingContext::_duplicate(waveformContext); - this->_domainContext = domainContext; } createHelper::~createHelper() @@ -3543,130 +2546,36 @@ createHelper::~createHelper() if (_application) { _application->_remove_ref(); } - for (PlacementList::iterator comp = _requiredComponents.begin(); comp != _requiredComponents.end(); ++comp) { - delete (*comp); - } - _requiredComponents.clear(); } void createHelper::_cleanupFailedCreate() { if (_application) { + _appFact._domainManager->cancelPendingApplication(_application); _application->releaseComponents(); _application->terminateComponents(); _application->unloadComponents(); _application->_cleanupActivations(); } - LOG_TRACE(ApplicationFactory_impl, "Removing all bindings from naming context"); + RH_TRACE(_createHelperLog, "Removing all bindings from naming context"); try { if ( _appFact._domainManager && !_appFact._domainManager->bindToDomain() ) { ossie::corba::unbindAllFromContext(_waveformContext); } - } CATCH_LOG_WARN(ApplicationFactory_impl, "Could not unbind contents of naming context"); + } CATCH_RH_WARN(_createHelperLog, "Could not unbind contents of naming context"); CosNaming::Name DNContextname; DNContextname.length(1); DNContextname[0].id = _waveformContextName.c_str(); - LOG_TRACE(ApplicationFactory_impl, "Unbinding the naming context") + RH_TRACE(_createHelperLog, "Unbinding the naming context") try { _appFact._domainContext->unbind(DNContextname); } catch ( ... 
) { } - LOG_TRACE(ApplicationFactory_impl, "Destroying naming context"); + RH_TRACE(_createHelperLog, "Destroying naming context"); try { _waveformContext->destroy(); - } CATCH_LOG_WARN(ApplicationFactory_impl, "Could not destroy naming context"); -} - -/* Given a component instantiation id, returns the associated CORBA Resource pointer - * - Gets the Resource pointer for a particular component instantiation id - */ -CF::Resource_ptr createHelper::lookupComponentByInstantiationId(const std::string& identifier) -{ - ossie::ComponentInfo* component = findComponentByInstantiationId(identifier); - if (component) { - return component->getResourcePtr(); - } - - return CF::Resource::_nil(); -} - -/* Given a component instantiation id, returns the component's parsed prf contents in a Properties object - */ -const ossie::Properties* createHelper::lookupComponentPropertiesByInstantiationId(const std::string& identifier) -{ - ossie::ComponentInfo* component = findComponentByInstantiationId(identifier); - if (component) { - return &(component->prf); - } - - return NULL; -} - -/* Given a component instantiation id, returns the associated CORBA Device pointer - * - Gets the Device pointer for a particular component instantiation id - */ -CF::Device_ptr createHelper::lookupDeviceThatLoadedComponentInstantiationId(const std::string& componentId) -{ - LOG_TRACE(ApplicationFactory_impl, "[DeviceLookup] Lookup device that loaded component " << componentId); - - ossie::ComponentInfo* component = findComponentByInstantiationId(componentId); - if (!component) { - LOG_WARN(ApplicationFactory_impl, "[DeviceLookup] Component not found"); - return CF::Device::_nil(); - } - - boost::shared_ptr device = component->getAssignedDevice(); - if (!device) { - LOG_WARN(ApplicationFactory_impl, "[DeviceLookup] Component not assigned to device"); - return CF::Device::_nil(); - } - LOG_TRACE(ApplicationFactory_impl, "[DeviceLookup] Assigned device id " << device->identifier); - return 
CF::Device::_duplicate(device->device); -} - - -/* Given a component instantiation id and uses id, returns the associated CORBA Device pointer - * - Gets the Device pointer for a particular component instantiation id and uses id - */ -CF::Device_ptr createHelper::lookupDeviceUsedByComponentInstantiationId(const std::string& componentId, const std::string& usesId) -{ - LOG_TRACE(ApplicationFactory_impl, "[DeviceLookup] Lookup device used by component " << componentId); - ossie::ComponentInfo* component = findComponentByInstantiationId(componentId.c_str()); - if (!component) { - LOG_WARN(ApplicationFactory_impl, "[DeviceLookup] Component not found"); - return CF::Device::_nil(); - } - - LOG_TRACE(ApplicationFactory_impl, "[DeviceLookup] Uses id " << usesId); - const ossie::UsesDeviceInfo* usesdevice = component->getUsesDeviceById(usesId); - if (!usesdevice) { - LOG_WARN(ApplicationFactory_impl, "[DeviceLookup] UsesDevice not found"); - return CF::Device::_nil(); - } - - std::string deviceId = usesdevice->getAssignedDeviceId(); - LOG_TRACE(ApplicationFactory_impl, "[DeviceLookup] Assigned device id " << deviceId); - - return find_device_from_id(deviceId.c_str()); -} - -CF::Device_ptr createHelper::lookupDeviceUsedByApplication(const std::string& usesRefId) -{ - LOG_TRACE(ApplicationFactory_impl, "[DeviceLookup] Lookup device used by application, Uses Id: " << usesRefId); - - - const ossie::UsesDeviceInfo* usesdevice = _appInfo.getUsesDeviceById(usesRefId); - if (!usesdevice) { - LOG_WARN(ApplicationFactory_impl, "[DeviceLookup] UsesDevice not found"); - return CF::Device::_nil(); - } - - std::string deviceId = usesdevice->getAssignedDeviceId(); - LOG_TRACE(ApplicationFactory_impl, "[DeviceLookup] Assigned device id " << deviceId); - - return find_device_from_id(deviceId.c_str()); + } CATCH_RH_WARN(_createHelperLog, "Could not destroy naming context"); } - diff --git a/redhawk/src/control/sdr/dommgr/ApplicationFactory_impl.h 
b/redhawk/src/control/sdr/dommgr/ApplicationFactory_impl.h index d6537c4eb..1b8961da0 100644 --- a/redhawk/src/control/sdr/dommgr/ApplicationFactory_impl.h +++ b/redhawk/src/control/sdr/dommgr/ApplicationFactory_impl.h @@ -22,25 +22,12 @@ #define APPLICATIONFACTORY_H #include -#include #include -#include #include #include -#include -#include -#include - -#include "PersistenceStore.h" -#include "applicationSupport.h" -#include "connectionSupport.h" class DomainManager_impl; -class Application_impl; -class AllocationManager_impl; - -class createHelper; class ApplicationFactory_impl: public virtual POA_CF::ApplicationFactory { @@ -59,7 +46,6 @@ class ApplicationFactory_impl: public virtual POA_CF::ApplicationFactory ossie::SoftwareAssembly _sadParser; CF::FileManager_var _fileMgr; - CF::DomainManager_var _dmnMgr; boost::mutex _pendingCreateLock; @@ -70,32 +56,8 @@ class ApplicationFactory_impl: public virtual POA_CF::ApplicationFactory // Support for creating a new waveform naming context std::string getWaveformContextName(std::string name); std::string getBaseWaveformContext(std::string waveform_context); + rh_logger::LoggerPtr _appFactoryLog; - static void ValidateFileLocation ( CF::FileManager_ptr fileMgr, const std::string &profile ); - static void ValidateSoftPkgDep( CF::FileManager_ptr fileMgr, - DomainManager_impl *domMgr, - const std::string &profile, - const bool allow_missing=true ); - static void ValidateSPD (CF::FileManager_ptr fileMgr, - DomainManager_impl *domMgr, - ossie::SoftPkg &spd, - const std::string &profile, - const bool require_prf=true, - const bool require_scd=true, - const bool allow_missing_impl=true, - const bool is_dep = false); - - static bool ValidateImplementationCodeFile( CF::FileManager_ptr fileMgr, - DomainManager_impl *domMgr, - const std::string &spd_path, - const std::string &sfw_profile, - const std::string &codeFile, - const bool allow_missing_impl=true ); - - -protected: - static std::string 
xmlParsingVersionMismatch(DomainManager_impl *domMgr, std::string &component_version); - public: ApplicationFactory_impl (const std::string& softwareProfile, const std::string& domainName, @@ -111,6 +73,15 @@ class ApplicationFactory_impl: public virtual POA_CF::ApplicationFactory CF::ApplicationFactory::CreateApplicationError, CORBA::SystemException); + rh_logger::LoggerPtr returnLogger() const { + return _appFactoryLog; + } + + void setLogger(rh_logger::LoggerPtr logptr) + { + _appFactoryLog = logptr; + }; + // getters for attributes char* name () throw (CORBA::SystemException) { return CORBA::string_dup(_name.c_str()); @@ -124,222 +95,12 @@ class ApplicationFactory_impl: public virtual POA_CF::ApplicationFactory return CORBA::string_dup(_softwareProfile.c_str()); } - const std::string & getID () { return _identifier; } - const std::string & getName () { return _name; } - + const std::string& getIdentifier () const; + const std::string& getName () const; + const std::string& getSoftwareProfile() const; + // allow createHelper to have access to ApplicationFactory_impl friend class createHelper; friend class ScopedAllocations; }; #endif - -#ifndef CREATEHELPER_H -#define CREATEHELPER_H - -class ScopedAllocations { -public: - ScopedAllocations(AllocationManager_impl& allocator); - ~ScopedAllocations(); - - void push_back(const std::string& allocationID); - - template - void transfer(T& dest); - - void transfer(ScopedAllocations& dest); - - void deallocate(); - -private: - AllocationManager_impl& _allocator; - std::list _allocations; -}; - -class createHelper: -public ossie::ComponentLookup, -public ossie::DeviceLookup -{ - -public: - struct componentReservation { - std::string id; - float reservation; - }; - std::vector componentReservations; - - typedef std::map DeviceAssignmentMap; - - createHelper (const ApplicationFactory_impl& appFact, - std::string waveformContextName, - std::string base_naming_context, - CosNaming::NamingContext_ptr WaveformContext, - 
CosNaming::NamingContext_ptr DomainContext); - ~createHelper (); - - CF::Application_ptr create (const char* name, - const CF::Properties& initConfiguration, - const DeviceAssignmentMap& deviceAssignments) - throw (CF::ApplicationFactory::InvalidInitConfiguration, - CF::ApplicationFactory::CreateApplicationRequestError, - CF::ApplicationFactory::CreateApplicationError, - CORBA::SystemException); - -private: - - // List of used devices assignments - typedef std::vector< ossie::DeviceAssignmentInfo > DeviceAssignmentList; - - // list of components that are part of a collocation - typedef std::vector PlacementList; - - // Used for storing the current state of the OE & create process - const ApplicationFactory_impl& _appFact; - - // Local pointer to the allocation manager - AllocationManager_impl* _allocationMgr; - - // Tracks allocation IDs made during creation, and automates cleanup on - // failure - ScopedAllocations _allocations; - CF::Properties _app_affinity; - - ossie::DeviceList _registeredDevices; - ossie::DeviceList _executableDevices; - PlacementList _requiredComponents; - std::map specialized_reservations; - - // - // List of used devices allocated during application creation - // - DeviceAssignmentList _appUsedDevs; - std::vector _startSeq; - std::vector _startOrderIds; - - // waveform instance-specific naming context (unique to the instance of the waveform) - std::string _waveformContextName; - - // full (includes full context path) waveform instance-specific naming context - std::string _baseNamingContext; - - // CORBA naming context - CosNaming::NamingContext_var _waveformContext; - CosNaming::NamingContext_ptr _domainContext; - - ossie::ApplicationInfo _appInfo; - - // createHelper helper methods - ossie::ComponentInfo* getAssemblyController(); - void overrideExternalProperties(const CF::Properties& initConfiguration); - void overrideProperties(const CF::Properties& initConfiguration, ossie::ComponentInfo* component); - void 
assignRemainingComponentsToDevices(const std::string &appIdentifier); - void _assignComponentsUsingDAS( - const DeviceAssignmentMap& deviceAssignments, const std::string &appIdentifier); - void _getComponentsToPlace( - const std::vector& collocatedComponents, - ossie::DeviceIDList& assignedDevices, - PlacementList& placingComponents); - void _connectComponents( - std::vector& connections); - void _configureComponents(); - void _checkAssemblyController( - CF::Resource_ptr assemblyController, - ossie::ComponentInfo* assemblyControllerComponent) const; - void setUpExternalPorts(Application_impl* application); - void setUpExternalProperties(Application_impl* application); - void _handleHostCollocation(const std::string &appIdentifier); - void _placeHostCollocation(const ossie::SoftwareAssembly::HostCollocation& collocation, const std::string &appIdentifier); - void _handleUsesDevices(const std::string& appName); - void _resolveImplementations(PlacementList::iterator comp, PlacementList& compList, std::vector &res_vec); - void _removeUnmatchedImplementations(std::vector &res_vec); - void _consolidateAllocations(const PlacementList &placingComponents, const ossie::ImplementationInfo::List& implementations, CF::Properties& allocs); - void _evaluateMATHinRequest(CF::Properties &request, const CF::Properties &configureProperties); - void _castRequestProperties(CF::Properties& allocationProperties, const std::vector &prop_refs, unsigned int offset=0); - void _castRequestProperties(CF::Properties& allocationProperties, const std::vector &prop_refs, - unsigned int offset=0); - CF::DataType castProperty(const ossie::ComponentProperty* property); - - // Populate _requiredComponents vector - void getRequiredComponents() throw (CF::ApplicationFactory::CreateApplicationError); - - int resolveDebugLevel( const std::string &debug_level ); - void resolveLoggingConfiguration( ossie::ComponentInfo* component); - - // Supports allocation - bool allocateUsesDevices(const std::string& 
componentIdentifier, - const ossie::UsesDeviceInfo::List& usesDevices, - const CF::Properties& configureProperties, - DeviceAssignmentList& deviceAssignments, - ScopedAllocations& allocations); - CF::AllocationManager::AllocationResponseSequence* allocateUsesDeviceProperties( - const ossie::UsesDeviceInfo::List& component, - const CF::Properties& configureProperties); - void allocateComponent(ossie::ComponentInfo* component, - const std::string& assignedDeviceId, - DeviceAssignmentList &appAssignedDevices, - const std::string& appIdentifier); - - ossie::AllocationResult allocateComponentToDevice(ossie::ComponentInfo* component, - ossie::ImplementationInfo* implementation, - const std::string& assignedDeviceId, - const std::string& appIdentifier); - - bool validateImplementationCodeFile(ossie::ComponentInfo* component, - ossie::ImplementationInfo* impl, - std::string &emsg, - const bool clear_on_fail=true, - const bool suppress_log = false ); - void validateSoftpkgDependencies( const ossie::ImplementationInfo* implementation ); - bool resolveSoftpkgDependencies(ossie::ImplementationInfo* implementation, ossie::DeviceNode& device); - ossie::ImplementationInfo* resolveDependencyImplementation(const ossie::SoftpkgInfoPtr &softpkg, ossie::DeviceNode& device); - - // Supports loading, executing, initializing, configuring, & connecting - void loadDependencies(ossie::ComponentInfo& component, - CF::LoadableDevice_ptr device, - const ossie::SoftpkgInfoList & dependencies); - - void loadAndExecuteComponents(CF::ApplicationRegistrar_ptr _appReg); - void applyApplicationAffinityOptions(); - - void attemptComponentExecution( - const boost::filesystem::path& executeName, - CF::ExecutableDevice_ptr execdev, - ossie::ComponentInfo* component, - const ossie::ImplementationInfo* implementation); - - void waitForComponentRegistration(); - void initializeComponents(); - - void configureComponents(); - void connectComponents( - std::vector& connections, - std::string 
base_naming_context); - - // Functions for looking up particular components/devices - CF::Device_ptr find_device_from_id(const char*); - const ossie::DeviceNode& find_device_node_from_id(const char*) throw(std::exception); - ossie::ComponentInfo* findComponentByInstantiationId(const std::string& identifier); - - // Cleanup - used when create fails/doesn't succeed for some reason - bool _isComplete; - void _cleanupFailedCreate(); - Application_impl* _application; - - /* Implements the ConnectionManager functions - * - Makes this class compatible with the ConnectionManager - */ - // ComponentLookup interface - CF::Resource_ptr lookupComponentByInstantiationId(const std::string& identifier); - - const ossie::Properties* lookupComponentPropertiesByInstantiationId(const std::string& identifier); - - // DeviceLookup interface - CF::Device_ptr lookupDeviceThatLoadedComponentInstantiationId(const std::string& componentId); - CF::Device_ptr lookupDeviceUsedByComponentInstantiationId( - const std::string& componentId, - const std::string& usesId); - CF::Device_ptr lookupDeviceUsedByApplication(const std::string& usesRefId); - - std::string createVersionMismatchMessage(std::string &component_version); - -}; -#endif diff --git a/redhawk/src/control/sdr/dommgr/ApplicationRegistrar.cpp b/redhawk/src/control/sdr/dommgr/ApplicationRegistrar.cpp index eb9f03bba..df2d16afa 100644 --- a/redhawk/src/control/sdr/dommgr/ApplicationRegistrar.cpp +++ b/redhawk/src/control/sdr/dommgr/ApplicationRegistrar.cpp @@ -44,20 +44,50 @@ CF::DomainManager_ptr ApplicationRegistrar_impl::domMgr() } void ApplicationRegistrar_impl::registerComponent(const char * Name, CF::Resource_ptr obj) throw (CF::InvalidObjectReference, CF::DuplicateName, CORBA::SystemException) { + if ( !CORBA::is_nil(_context) ) { - CosNaming::Name_var cosName = ossie::corba::stringToName(Name); + CosNaming::Name_var cosName; try { - _context->bind( cosName, obj ); + cosName = ossie::corba::stringToName(Name); + _context->bind( 
cosName, obj ); } catch(CosNaming::NamingContext::AlreadyBound&) { try { _context->rebind( cosName, obj ); } catch(...){ + if ( Name != NULL ) { + RH_NL_INFO("ApplicationRegistrar", "Unhandled exception from NamingContext, registering " << Name ); + } + else{ + RH_NL_INFO("ApplicationRegistrar", "Unhandled exception from NamingContext, Name is invalid" ); + } } } + catch(...){ + } if (!CORBA::is_nil(obj)) { - _application->registerComponent(obj); + try { + _application->registerComponent(obj); + } + catch( CF::InvalidObjectReference &ex ) { + throw; + } + catch( CF::DuplicateName &ex ) { + throw; + } + catch( CORBA::SystemException &ex) { + throw; + } + catch(...) { + if ( Name != NULL ) { + RH_NL_INFO("ApplicationRegistrar", "Unhandled exception from application, registering " << Name ); + } + else{ + RH_NL_INFO("ApplicationRegistrar", "Unhandled exception from application, Name is invalid" ); + } + + } } } } diff --git a/redhawk/src/control/sdr/dommgr/ApplicationValidator.cpp b/redhawk/src/control/sdr/dommgr/ApplicationValidator.cpp new file mode 100644 index 000000000..456bb89a0 --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/ApplicationValidator.cpp @@ -0,0 +1,336 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. 
If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include +#include + +#include +#include +#include +#include "ApplicationValidator.h" + +using namespace redhawk; +using namespace ossie; +namespace fs = boost::filesystem; + +class bad_implementation : public redhawk::validation_error +{ +public: + bad_implementation(const SoftPkg* softpkg, const SPD::Implementation& impl, const std::string& message) : + redhawk::validation_error("Soft package " + softpkg->getSPDFile() + " has invalid implementation " + impl.getID() + ": " + message) + { + } +}; + +class no_valid_implemenation : public redhawk::validation_error +{ +public: + no_valid_implemenation(const SoftPkg* softpkg) : + redhawk::validation_error("Soft package " + softpkg->getSPDFile() + " has no valid implementations") + { + } +}; + +PREPARE_CF_LOGGING(ApplicationValidator); + +ApplicationValidator::ApplicationValidator(CF::FileSystem_ptr fileSystem, rh_logger::LoggerPtr log) : + fileSystem(CF::FileSystem::_duplicate(fileSystem)), + cache(fileSystem, log), + _appFactoryLog(log) +{ +} + +void ApplicationValidator::validate(const SoftwareAssembly& sad) +{ + // Check partitioning + BOOST_FOREACH(const SoftwareAssembly::HostCollocation& collocation, sad.getHostCollocations()) { + validateHostCollocation(collocation); + } + BOOST_FOREACH(const ComponentPlacement& placement, sad.getComponentPlacements()) { + validateComponentPlacement(placement); + } + + // Check externally-promoted ports and properties + validateExternalPorts(sad.getExternalPorts()); + + const Properties* ac_props = getAssemblyControllerProperties(sad); + validateExternalProperties(ac_props, sad.getExternalProperties()); +} + +void ApplicationValidator::validateExternalPorts(const std::vector& ports) +{ + // Make sure all external port names are unique + // NB: Should check component references are valid as well + std::set port_names; + BOOST_FOREACH(const SoftwareAssembly::Port& port, ports) { + const std::string& name = 
port.getExternalName(); + if (port_names.count(name) == 0) { + port_names.insert(name); + } else { + throw validation_error("Duplicate external port name " + name); + } + } +} + +void ApplicationValidator::validateExternalProperties(const Properties* acProperties, + const std::vector& properties) +{ + // Makes sure all external property names are unique + // NB: Should check component references are valid as well + std::set property_names; + BOOST_FOREACH(const SoftwareAssembly::Property& property, properties) { + const std::string& name = property.getExternalID(); + if (property_names.count(name) == 0) { + property_names.insert(name); + } else { + throw validation_error("Duplicate external property name " + name); + } + } + + // Make sure AC property IDs aren't in conflict with external ones + if (acProperties) { + BOOST_FOREACH(const Property* property, acProperties->getProperties()) { + const std::string& name = property->getID(); + if (property_names.count(name) == 0) { + property_names.insert(name); + } else { + throw validation_error("Assembly controller property " + name + " in use as external property"); + } + } + } +} + +void ApplicationValidator::validateSoftPkgRef(const SPD::SoftPkgRef& softpkgref) +{ + const std::string& spd_file = softpkgref.localfile; + if (spd_file.empty()) { + throw validation_error("empty softpkgref"); + } + + // Basic checking for valid, existing filename + RH_TRACE(_appFactoryLog, "Validating SPD " << spd_file); + if (!fileExists(spd_file)) { + throw validation_error("softpkgref " + spd_file + " does not exist"); + } + if (!endsWith(spd_file, ".spd.xml")) { + RH_WARN(_appFactoryLog, "SPD file " << spd_file << " should end with .spd.xml"); + } + + // If this fails, it will throw redhawk::invalid_profile + const SoftPkg* softpkg = cache.loadSoftPkg(spd_file); + + // Check implementation(s) + if (softpkgref.implref.isSet()) { + // A specific implementation is given; make sure it exists and is valid + const std::string& impl_id = 
*(softpkgref.implref); + const SPD::Implementation* implementation = softpkg->getImplementation(impl_id); + if (!implementation) { + throw validation_error("softpkgref " + spd_file + " has no implementation " + impl_id); + } + + validateImplementation(softpkg, *implementation, false); + } else { + // Validate all implementations + int valid_implementations = 0; + BOOST_FOREACH(const SPD::Implementation& implementation, softpkg->getImplementations()) { + try { + validateImplementation(softpkg, implementation, false); + valid_implementations++; + } catch (const validation_error& err) { + RH_WARN(_appFactoryLog, err.what()); + } + } + if (valid_implementations == 0) { + throw no_valid_implemenation(softpkg); + } + } +} + +void ApplicationValidator::validateImplementation(const SoftPkg* softpkg, + const SPD::Implementation& implementation, + bool executable) +{ + RH_TRACE(_appFactoryLog, "Validating SPD implementation " << implementation.getID()); + + // Always ensure that the localfile exists + std::string localfile = _relativePath(softpkg, implementation.getCodeFile()); + RH_TRACE(_appFactoryLog, "Validating code localfile " << localfile); + if (!fileExists(localfile)) { + throw bad_implementation(softpkg, implementation, "missing localfile " + localfile); + } + + // If the implementation needs to be executable (i.e., would be used for a + // component instantiation), make sure it has a valid entry point + if (executable) { + if (!implementation.getEntryPoint()) { + throw bad_implementation(softpkg, implementation, "has no entry point"); + } + std::string entry_point = _relativePath(softpkg, implementation.getEntryPoint()); + RH_TRACE(_appFactoryLog, "Validating code entry point " << entry_point); + if (!fileExists(entry_point)) { + throw bad_implementation(softpkg, implementation, "missing entrypoint " + entry_point); + } + } + + // Check all softpkg references + BOOST_FOREACH(const SPD::SoftPkgRef& spdref, implementation.getSoftPkgDependencies()) { + try { + 
validateSoftPkgRef(spdref); + } catch (const validation_error& exc) { + // Turn the exception into a more detailed bad_implementation that + // includes enough context to debug the XML + throw bad_implementation(softpkg, implementation, exc.what()); + } + } +} + +void ApplicationValidator::validateHostCollocation(const SoftwareAssembly::HostCollocation& collocation) +{ + std::pair< std::string, redhawk::PropertyMap > devReq(std::string(""),redhawk::PropertyMap()); + BOOST_FOREACH(const ComponentPlacement& placement, collocation.getComponents()) { + validateComponentPlacement(placement); + + // check if placement has deviceRequires, if so there can only be one or all must match + BOOST_FOREACH(const ComponentInstantiation& instantiation, placement.getInstantiations()) { + redhawk::PropertyMap deviceRequires; + ossie::convertComponentProperties(instantiation.getDeviceRequires(),deviceRequires); + if (!deviceRequires.empty() ) { + if ( !devReq.first.empty() ) { + if ( devReq.first == instantiation.getID() ) { + throw validation_error("hostcollocation contains multiple devicerequires, componentinstantiation: " +instantiation.getID() ); + } + + if ( devReq.second != deviceRequires ) { + throw validation_error("hostcollocation contains multiple devicerequires that are different, componentinstantiation: " + instantiation.getID() ); + } + } + else { + devReq.first = instantiation.getID(); + devReq.second = deviceRequires; + RH_TRACE(_appFactoryLog, "devicerequires collocation: " << collocation.getName() << " instantiation :" << devReq.first << " devicerequires: " << devReq.second); + } + } + } + } +} + +void ApplicationValidator::validateComponentPlacement(const ComponentPlacement& placement) +{ + const std::string& spd_file = placement.filename; + if (spd_file.empty()) { + throw validation_error("componentfile " + placement._componentFileRef + " filename is empty"); + } + + // Basic checking for valid, existing filename + RH_TRACE(_appFactoryLog, "Validating SPD " << 
spd_file); + if (!fileExists(spd_file)) { + throw validation_error("componentfile " + placement._componentFileRef + " points to non-existent file " + spd_file); + } + if (!endsWith(spd_file, ".spd.xml")) { + RH_WARN(_appFactoryLog, "SPD file " << spd_file << " should end with .spd.xml"); + } + + // If this fails, it will throw redhawk::invalid_profile + const SoftPkg* softpkg = cache.loadProfile(spd_file); + + // Check the PRF and SCD filenames + if (softpkg->getPRFFile()) { + std::string prf_file = softpkg->getPRFFile(); + if (!endsWith(prf_file, ".prf.xml")) { + RH_WARN(_appFactoryLog, "PRF file " << prf_file << " should end with .prf.xml"); + } + } + if (softpkg->getSCDFile()) { + std::string scd_file = softpkg->getSCDFile(); + if (!endsWith(scd_file, ".scd.xml")) { + RH_WARN(_appFactoryLog, "SCD file " << scd_file << " should end with .scd.xml"); + } + } + + int valid_implementations = 0; + BOOST_FOREACH(const SPD::Implementation& implementation, softpkg->getImplementations()) { + try { + validateImplementation(softpkg, implementation, true); + valid_implementations++; + } catch (const validation_error& err) { + RH_WARN(_appFactoryLog, err.what()); + } + } + if (valid_implementations == 0) { + throw no_valid_implemenation(softpkg); + } + + // If the softpkg is SCA-compliant, make sure it has a descriptor + if (softpkg->isScaCompliant() && !softpkg->getDescriptor()) { + std::string message = spd_file + " is SCA-compliant but does not have an SCD"; + throw validation_error(message); + } +} + +const Properties* ApplicationValidator::getAssemblyControllerProperties(const SoftwareAssembly& sad) +{ + // Search through all placements for the instantiation that is the assembly + // controller, then get the SoftPkg (which has already been loaded) and + // return its Properties + BOOST_FOREACH(const ComponentPlacement& placement, sad.getAllComponents()) { + BOOST_FOREACH(const ComponentInstantiation& instantiation, placement.getInstantiations()) { + if 
(instantiation.getID() == sad.getAssemblyControllerRefId()) { + const SoftPkg* softpkg = cache.loadProfile(placement.filename); + return softpkg->getProperties(); + } + } + } + + return 0; +} + +bool ApplicationValidator::fileExists(const std::string& filename) +{ + RH_TRACE(_appFactoryLog, "Checking existence of file '" << filename << "'"); + try { + return fileSystem->exists(filename.c_str()); + } catch (...) { + // Turn all exceptions into negative result; in this context, at least, + // CORBA errors mean the same thing--the file is not usable + return false; + } +} + +bool ApplicationValidator::endsWith(const std::string& filename, const std::string& suffix) +{ + if (filename.size() < suffix.size()) { + return false; + } + // Compare the end of the filename to the entirety of the suffix + std::string::size_type start = filename.size() - suffix.size(); + return filename.compare(start, suffix.size(), suffix) == 0; +} + +std::string ApplicationValidator::_relativePath(const SoftPkg* softpkg, const std::string& path) +{ + if (path.find('/') == 0) { + return path; + } else { + fs::path abspath = fs::path(softpkg->getSPDPath()) / path; + return abspath.string(); + } +} diff --git a/redhawk/src/control/sdr/dommgr/ApplicationValidator.h b/redhawk/src/control/sdr/dommgr/ApplicationValidator.h new file mode 100644 index 000000000..3d0cbdb6d --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/ApplicationValidator.h @@ -0,0 +1,93 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef APPLICATIONVALIDATOR_H +#define APPLICATIONVALIDATOR_H + +#include + +#include +#include +#include + +#include "ProfileCache.h" + +namespace redhawk { + + /** + * @brief An exception raised when a SoftwareAssembly is invalid + */ + class validation_error : public std::runtime_error { + public: + validation_error(const std::string& what_arg) : std::runtime_error(what_arg) + {} + }; + + class ApplicationValidator { + + ENABLE_LOGGING; + + public: + ApplicationValidator(CF::FileSystem_ptr fileSystem, rh_logger::LoggerPtr log); + + /** + * @brief Validates a SoftwareAssembly + * @param sad a parsed SoftwareAssembly + * @exception redhawk::validation_error the SAD is invalid + * @exception redhawk::invalid_profile a SoftPkg profile used by this + * SAD is invalid + * + * Performs validation of a SoftwareAssembly, making sure that it is + * semantically valid (at least, enough to attempt deployment). All + * SoftPkgs that could potentially be used to deploy components are + * checked to make sure that they are valid. 
+ */ + void validate(const ossie::SoftwareAssembly& sad); + + private: + void validateExternalPorts(const std::vector& ports); + + void validateExternalProperties(const ossie::Properties* acProperties, + const std::vector& properties); + + void validateHostCollocation(const ossie::SoftwareAssembly::HostCollocation& collocation); + + void validateComponentPlacement(const ossie::ComponentPlacement& placement); + + void validateSoftPkgRef(const ossie::SPD::SoftPkgRef& softpkgref); + + void validateImplementation(const ossie::SoftPkg* softpkg, + const ossie::SPD::Implementation& implementation, + bool executable); + + const ossie::Properties* getAssemblyControllerProperties(const ossie::SoftwareAssembly& sad); + + bool endsWith(const std::string& filename, const std::string& suffix); + bool fileExists(const std::string& filename); + + std::string _relativePath(const ossie::SoftPkg* softpkg, const std::string& path); + + CF::FileSystem_var fileSystem; + redhawk::ProfileCache cache; + rh_logger::LoggerPtr _appFactoryLog; + }; +} + +#endif // APPLICATIONVALIDATOR_H diff --git a/redhawk/src/control/sdr/dommgr/Application_impl.cpp b/redhawk/src/control/sdr/dommgr/Application_impl.cpp index c37173cdd..a868e2d33 100644 --- a/redhawk/src/control/sdr/dommgr/Application_impl.cpp +++ b/redhawk/src/control/sdr/dommgr/Application_impl.cpp @@ -1,3 +1,4 @@ + /* * This file is protected by Copyright. Please refer to the COPYRIGHT file * distributed with this source distribution. 
@@ -22,9 +23,12 @@ #include #include +#include + #include #include #include +#include #include "Application_impl.h" #include "DomainManager_impl.h" @@ -32,53 +36,44 @@ #include "ApplicationRegistrar.h" #include "connectionSupport.h" #include "FakeApplication.h" +#include "DeploymentExceptions.h" PREPARE_CF_LOGGING(Application_impl); using namespace ossie; namespace { - CF::Application::ComponentElementType to_impl_element(const ossie::ApplicationComponent& component) + CF::Application::ComponentElementType to_impl_element(const redhawk::ApplicationComponent& component) { CF::Application::ComponentElementType result; - result.componentId = component.identifier.c_str(); - result.elementId = component.implementationId.c_str(); + result.componentId = component.getIdentifier().c_str(); + result.elementId = component.getImplementationId().c_str(); return result; } - bool has_naming_context(const ossie::ApplicationComponent& component) - { - return !component.namingContext.empty(); - } - - CF::Application::ComponentElementType to_name_element(const ossie::ApplicationComponent& component) + CF::Application::ComponentElementType to_name_element(const redhawk::ApplicationComponent& component) { CF::Application::ComponentElementType result; - result.componentId = component.identifier.c_str(); - result.elementId = component.namingContext.c_str(); + result.componentId = component.getIdentifier().c_str(); + result.elementId = component.getNamingContext().c_str(); return result; } - CF::Application::ComponentProcessIdType to_pid_type(const ossie::ApplicationComponent& component) + CF::Application::ComponentProcessIdType to_pid_type(const redhawk::ApplicationComponent& component) { CF::Application::ComponentProcessIdType result; - result.componentId = component.identifier.c_str(); - result.processId = component.processId; + result.componentId = component.getIdentifier().c_str(); + result.processId = component.getProcessId(); return result; } - bool is_registered(const 
ossie::ApplicationComponent& component) - { - return !CORBA::is_nil(component.componentObject); - } - - CF::ComponentType to_component_type(const ossie::ApplicationComponent& component) + CF::ComponentType to_component_type(const redhawk::ApplicationComponent& component) { CF::ComponentType result; - result.identifier = component.identifier.c_str(); - result.softwareProfile = component.softwareProfile.c_str(); + result.identifier = component.getIdentifier().c_str(); + result.softwareProfile = component.getSoftwareProfile().c_str(); result.type = CF::APPLICATION_COMPONENT; - result.componentObject = CORBA::Object::_duplicate(component.componentObject); + result.componentObject = component.getComponentObject(); return result; } @@ -86,7 +81,9 @@ namespace { void convert_sequence(Sequence& out, Iterator begin, const Iterator end, Function func) { for (; begin != end; ++begin) { - ossie::corba::push_back(out, func(*begin)); + if (begin->isVisible()) { + ossie::corba::push_back(out, func(*begin)); + } } } @@ -100,7 +97,7 @@ namespace { void convert_sequence_if(Sequence& out, Iterator begin, const Iterator end, Function func, Predicate pred) { for (; begin != end; ++begin) { - if (pred(*begin)) { + if (begin->isVisible() && pred(*begin)) { ossie::corba::push_back(out, func(*begin)); } } @@ -115,7 +112,10 @@ namespace { Application_impl::Application_impl (const std::string& id, const std::string& name, const std::string& profile, DomainManager_impl* domainManager, const std::string& waveformContextName, - CosNaming::NamingContext_ptr waveformContext, bool aware, CosNaming::NamingContext_ptr DomainContext) : + CosNaming::NamingContext_ptr waveformContext, bool aware, + float stopTimeout, CosNaming::NamingContext_ptr DomainContext) : + Logging_impl(domainManager->getInstanceLogger("Application")), + _assemblyController(0), _identifier(id), _sadProfile(profile), _appName(name), @@ -124,6 +124,7 @@ Application_impl::Application_impl (const std::string& id, const std::string& 
na _waveformContext(CosNaming::NamingContext::_duplicate(waveformContext)), _started(false), _isAware(aware), + _stopTimeout(stopTimeout), _fakeProxy(0), _domainContext(CosNaming::NamingContext::_duplicate(DomainContext)), _releaseAlreadyCalled(false) @@ -134,34 +135,48 @@ Application_impl::Application_impl (const std::string& id, const std::string& na } }; -void Application_impl::populateApplication(CF::Resource_ptr _controller, - std::vector& _devSeq, - std::vector _startSeq, +void Application_impl::setAssemblyController(const std::string& assemblyControllerRef) +{ + RH_DEBUG(_baseLog, "Assigning the assembly controller") + _assemblyController = findComponent(assemblyControllerRef); + // Assume _controller is NIL implies that the assembly controller component is Non SCA-Compliant + if (!_assemblyController || !_assemblyController->isResource()) { + RH_INFO(_baseLog, "Assembly controller is non SCA-compliant"); + _assemblyController = 0; + } +} + +redhawk::ApplicationComponent* Application_impl::getAssemblyController() +{ + return _assemblyController; +} + +void Application_impl::populateApplication(const CF::DeviceAssignmentSequence& assignedDevices, std::vector& connections, std::vector allocationIDs) { - TRACE_ENTER(Application_impl) _connections = connections; - _componentDevices = _devSeq; - _appStartSeq = _startSeq; + _componentDevices = assignedDevices; - LOG_DEBUG(Application_impl, "Creating allocation sequence"); + RH_DEBUG(_baseLog, "Creating allocation sequence"); this->_allocationIDs = allocationIDs; +} - LOG_DEBUG(Application_impl, "Assigning the assembly controller") - // Assume _controller is NIL implies that the assembly controller component is Non SCA-Compliant - if (CORBA::is_nil(_controller)) { - LOG_INFO(Application_impl, "Assembly controller is non SCA-compliant"); - } else { - assemblyController = CF::Resource::_duplicate(_controller); +void Application_impl::setStartOrder(const std::vector& startOrder) +{ + _startOrder.clear(); + 
BOOST_FOREACH(const std::string& componentId, startOrder) { + redhawk::ApplicationComponent* component = findComponent(componentId); + if (component) { + _startOrder.push_back(component); + } else { + RH_WARN(_baseLog, "Invalid component '" << componentId << "' in start order"); + } } - TRACE_EXIT(Application_impl) } Application_impl::~Application_impl () { - TRACE_ENTER(Application_impl) - TRACE_EXIT(Application_impl) }; PortableServer::ObjectId* Application_impl::Activate(Application_impl* application) @@ -198,6 +213,61 @@ CORBA::Boolean Application_impl::started () throw (CORBA::SystemException) return this->_started; } +void Application_impl::setLogLevel( const char *logger_id, const CF::LogLevel newLevel ) throw (CF::UnknownIdentifier) +{ + BOOST_FOREACH(redhawk::ApplicationComponent component, _components) { + if (not component.isRegistered() or not component.isVisible()) + continue; + CF::Resource_var resource_ref = component.getResourcePtr(); + try { + resource_ref->setLogLevel(logger_id, newLevel); + return; + } catch (const CF::UnknownIdentifier& ex) { + } + } + throw (CF::UnknownIdentifier()); +} + +CF::LogLevel Application_impl::getLogLevel( const char *logger_id ) throw (CF::UnknownIdentifier) +{ + BOOST_FOREACH(redhawk::ApplicationComponent component, _components) { + if (not component.isRegistered() or not component.isVisible()) + continue; + CF::Resource_var resource_ref = component.getResourcePtr(); + try { + CF::LogLevel level = resource_ref->getLogLevel(logger_id); + return level; + } catch (const CF::UnknownIdentifier& ex) { + } + } + throw (CF::UnknownIdentifier()); +} + +CF::StringSequence* Application_impl::getNamedLoggers() +{ + CF::StringSequence_var retval = new CF::StringSequence(); + BOOST_FOREACH(redhawk::ApplicationComponent component, _components) { + if (not component.isRegistered() or not component.isVisible()) + continue; + CF::Resource_var resource_ref = component.getResourcePtr(); + CF::StringSequence_var component_logger_list = 
resource_ref->getNamedLoggers(); + for (unsigned int i=0; ilength(); i++) { + ossie::corba::push_back(retval, CORBA::string_dup(component_logger_list[i])); + } + } + return retval._retn(); +} + +void Application_impl::resetLog() +{ + BOOST_FOREACH(redhawk::ApplicationComponent component, _components) { + if (not component.isRegistered() or not component.isVisible()) + continue; + CF::Resource_var resource_ref = component.getResourcePtr(); + resource_ref->resetLog(); + } +} + void Application_impl::start () throw (CORBA::SystemException, CF::Resource::StartError) { @@ -206,34 +276,31 @@ throw (CORBA::SystemException, CF::Resource::StartError) boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping start call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping start call because releaseObject has been called"); return; } } - if (CORBA::is_nil(assemblyController) and (_appStartSeq.size() == 0)) { - throw(CF::Resource::StartError(CF::CF_ENOTSUP, "No assembly controller and no Components with startorder set")); - return; + if (!_assemblyController && _startOrder.empty()) { + throw CF::Resource::StartError(CF::CF_ENOTSUP, "No assembly controller and no Components with startorder set"); } try { - omniORB::setClientCallTimeout(assemblyController, 0); - LOG_TRACE(Application_impl, "Calling start on assembly controller") - assemblyController->start (); + if (_assemblyController) { + RH_TRACE(_baseLog, "Calling start on assembly controller"); + _assemblyController->start(); + } // Start the rest of the components - for (unsigned int i = 0; i < _appStartSeq.size(); i++){ - std::string msg = "Calling start for "; - msg = msg.append(ossie::corba::returnString(_appStartSeq[i]->identifier())); - LOG_TRACE(Application_impl, msg) - - omniORB::setClientCallTimeout(_appStartSeq[i], 0); - _appStartSeq[i]-> start(); + BOOST_FOREACH(redhawk::ApplicationComponent* component, _startOrder) { + 
RH_TRACE(_baseLog, "Calling start for " << component->getIdentifier()); + component->start(); } - } catch( CF::Resource::StartError& se ) { - LOG_ERROR(Application_impl, "Start failed with CF:Resource::StartError"); + } catch (const CF::Resource::StartError& se) { + RH_ERROR(_baseLog, "Failed to start application '" << _appName << "': " << se.msg); throw; - } CATCH_THROW_LOG_ERROR(Application_impl, "Start failed", CF::Resource::StartError()) + } + if (!this->_started) { this->_started = true; if (_domainManager ) { @@ -245,31 +312,6 @@ throw (CORBA::SystemException, CF::Resource::StartError) } } - -bool Application_impl::stopComponent (CF::Resource_ptr component) -{ - std::string identifier; - try { - identifier = ossie::corba::returnString(component->identifier()); - } catch (const CORBA::SystemException& ex) { - LOG_ERROR(Application_impl, "CORBA::" << ex._name() << " getting component identifier"); - return false; - } catch (...) { - LOG_ERROR(Application_impl, "Unknown exception getting component identifier"); - return false; - } - LOG_TRACE(Application_impl, "Calling stop for " << identifier); - const unsigned long timeout = 3; // seconds - omniORB::setClientCallTimeout(component, timeout * 1000); - try { - component->stop(); - return true; - } catch (const CF::Resource::StopError& error) { - LOG_ERROR(Application_impl, "Failed to stop " << identifier << "; CF::Resource::StopError '" << error.msg << "'"); - } CATCH_LOG_ERROR(Application_impl, "Failed to stop " << identifier); - return false; -} - void Application_impl::stop () throw (CORBA::SystemException, CF::Resource::StopError) { @@ -278,38 +320,41 @@ throw (CORBA::SystemException, CF::Resource::StopError) boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping stop call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping stop call because releaseObject has been called"); return; } } - this->local_stop(); + 
this->local_stop(this->_stopTimeout); } -void Application_impl::local_stop () +void Application_impl::local_stop (float timeout) throw (CORBA::SystemException, CF::Resource::StopError) { - if (CORBA::is_nil(assemblyController) and (_appStartSeq.size() == 0)) { - throw(CF::Resource::StopError(CF::CF_ENOTSUP, "No assembly controller and no Components with startorder set")); - return; + if (!_assemblyController && _startOrder.empty()) { + throw CF::Resource::StopError(CF::CF_ENOTSUP, "No assembly controller and no Components with startorder set"); } int failures = 0; // Stop the components in the reverse order they were started - for (int i = (int)(_appStartSeq.size()-1); i >= 0; i--){ - if (!stopComponent(_appStartSeq[i])) { + BOOST_REVERSE_FOREACH(redhawk::ApplicationComponent* component, _startOrder) { + RH_TRACE(_baseLog, "Calling stop for " << component->getIdentifier()); + if (!component->stop(timeout)) { failures++; } } - LOG_TRACE(Application_impl, "Calling stop on assembly controller"); - if (!stopComponent(assemblyController)) { - failures++; + if (_assemblyController) { + RH_TRACE(_baseLog, "Calling stop on assembly controller"); + if (!_assemblyController->stop(timeout)) { + failures++; + } } + if (failures > 0) { std::ostringstream oss; oss << failures << " component(s) failed to stop"; const std::string message = oss.str(); - LOG_ERROR(Application_impl, "Stopping " << _identifier << "; " << message); + RH_ERROR(_baseLog, "Stopping " << _identifier << "; " << message); throw CF::Resource::StopError(CF::CF_NOTSET, message.c_str()); } if (this->_started) { @@ -323,6 +368,13 @@ throw (CORBA::SystemException, CF::Resource::StopError) } } +CORBA::Float Application_impl::stopTimeout () throw (CORBA::SystemException) { + return this->_stopTimeout; +} + +void Application_impl::stopTimeout (CORBA::Float timeout) throw (CORBA::SystemException) { + this->_stopTimeout = timeout; +} void Application_impl::initializeProperties (const CF::Properties& configProperties) 
throw (CF::PropertySet::PartialConfiguration, CF::PropertySet::InvalidConfiguration, CORBA::SystemException) @@ -332,7 +384,7 @@ throw (CF::PropertySet::PartialConfiguration, CF::PropertySet::InvalidConfigurat boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping initializeProperties call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping initializeProperties call because releaseObject has been called"); return; } } @@ -341,16 +393,14 @@ throw (CF::PropertySet::PartialConfiguration, CF::PropertySet::InvalidConfigurat void Application_impl::configure (const CF::Properties& configProperties) throw (CF::PropertySet::PartialConfiguration, CF::PropertySet::InvalidConfiguration, CORBA::SystemException) { - int validProperties = 0; - CF::Properties invalidProperties; + redhawk::PropertyMap invalidProperties; // Creates a map from componentIdentifier -> (rsc_ptr, ConfigPropSet) // to allow for one batched configure call per component - - CF::Properties acProps; - const std::string acId = ossie::corba::returnString(assemblyController->identifier()); + const std::string& acId = _assemblyController->getIdentifier(); + CF::Resource_var ac_resource = _assemblyController->getResourcePtr(); std::map > batch; - batch[acId] = std::pair(assemblyController, acProps); + batch[acId] = std::pair(ac_resource, CF::Properties()); // Loop through each passed external property, mapping it with its respective resource for (unsigned int i = 0; i < configProperties.length(); ++i) { @@ -359,20 +409,17 @@ throw (CF::PropertySet::PartialConfiguration, CF::PropertySet::InvalidConfigurat if (_properties.count(extId)) { // Gets the component and its internal property id - const std::string propId = _properties[extId].id; + const std::string propId = _properties[extId].property_id; CF::Resource_ptr comp = _properties[extId].component; if (CORBA::is_nil(comp)) { - LOG_ERROR(Application_impl, "Unable to retrieve 
component for external property: " << extId); - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(extId.c_str()); - invalidProperties[count].value = configProperties[i].value; + RH_ERROR(_baseLog, "Unable to retrieve component for external property: " << extId); + invalidProperties.push_back(configProperties[i]); } else { // Key used for map const std::string compId = ossie::corba::returnString(comp->identifier()); - LOG_TRACE(Application_impl, "Configure external property: " << extId << " on " + RH_TRACE(_baseLog, "Configure external property: " << extId << " on " << compId << " (propid: " << propId << ")"); // Adds property to component ID mapping @@ -390,19 +437,16 @@ throw (CF::PropertySet::PartialConfiguration, CF::PropertySet::InvalidConfigurat batch[compId] = std::pair(comp, tempProp); } } - } else if (!CORBA::is_nil(assemblyController)) { + } else if (_assemblyController) { // Properties that are not external get batched with assembly controller - LOG_TRACE(Application_impl, "Calling configure on assembly controller for property: " << configProperties[i].id); + RH_TRACE(_baseLog, "Calling configure on assembly controller for property: " << configProperties[i].id); int count = batch[acId].second.length(); batch[acId].second.length(count + 1); batch[acId].second[count].id = configProperties[i].id; batch[acId].second[count].value = configProperties[i].value; } else { - LOG_ERROR(Application_impl, "Unable to retrieve assembly controller for external property: " << extId); - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(extId.c_str()); - invalidProperties[count].value = configProperties[i].value; + RH_ERROR(_baseLog, "Unable to retrieve assembly controller for external property: " << extId); + invalidProperties.push_back(configProperties[i]); } } @@ -410,33 +454,20 @@ throw 
(CF::PropertySet::PartialConfiguration, CF::PropertySet::InvalidConfigurat // -Catch any errors for (std::map >::const_iterator comp = batch.begin(); comp != batch.end(); ++comp) { - int propLength = comp->second.second.length(); try { comp->second.first->configure(comp->second.second); - validProperties += propLength; } catch (CF::PropertySet::InvalidConfiguration e) { // Add invalid properties to return list - for (unsigned int i = 0; i < e.invalidProperties.length(); ++i) { - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(e.invalidProperties[i].id); - invalidProperties[count].value = e.invalidProperties[i].value; - } + invalidProperties.extend(e.invalidProperties); } catch (CF::PropertySet::PartialConfiguration e) { // Add invalid properties to return list - for (unsigned int i = 0; i < e.invalidProperties.length(); ++i) { - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(e.invalidProperties[i].id); - invalidProperties[count].value = e.invalidProperties[i].value; - } - validProperties += propLength - e.invalidProperties.length(); + invalidProperties.extend(e.invalidProperties); } } // Throw appropriate exception if any configure errors were handled - if (invalidProperties.length () > 0) { - if (validProperties > 0) { + if (!invalidProperties.empty()) { + if (invalidProperties.size() < configProperties.length()) { throw CF::PropertySet::PartialConfiguration(invalidProperties); } else { throw CF::PropertySet::InvalidConfiguration("No matching external properties found", invalidProperties); @@ -448,41 +479,38 @@ throw (CF::PropertySet::PartialConfiguration, CF::PropertySet::InvalidConfigurat void Application_impl::query (CF::Properties& configProperties) throw (CF::UnknownProperties, CORBA::SystemException) { - CF::Properties invalidProperties; + redhawk::PropertyMap invalidProperties; // Creates a map 
from componentIdentifier -> (rsc_ptr, ConfigPropSet) // to allow for one batched query call per component - const std::string acId = ossie::corba::returnString(assemblyController->identifier()); + const std::string acId = _assemblyController->getIdentifier(); std::map > batch; // For queries of zero length, return all external properties if (configProperties.length() == 0) { - LOG_TRACE(Application_impl, "Query all external and assembly controller properties"); + RH_TRACE(_baseLog, "Query all external and assembly controller properties"); configProperties.length(0); // Loop through each external property and add it to the batch with its respective component - for (std::map::const_iterator prop = _properties.begin(); + for (std::map::const_iterator prop = _properties.begin(); prop != _properties.end(); ++prop) { // Gets the property mapping info std::string extId = prop->first; - std::string propId = prop->second.id; + std::string propId = prop->second.property_id; CF::Resource_ptr comp = prop->second.component; if (prop->second.access == "writeonly") continue; if (CORBA::is_nil(comp)) { - LOG_ERROR(Application_impl, "Unable to retrieve component for external property: " << extId); - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(extId.c_str()); - invalidProperties[count].value = CORBA::Any(); + RH_ERROR(_baseLog, "Unable to retrieve component for external property: " << extId); + invalidProperties.push_back(redhawk::PropertyType(extId)); } else { // Key used for map const std::string compId = ossie::corba::returnString(comp->identifier()); - LOG_TRACE(Application_impl, "Query external property: " << extId << " on " + RH_TRACE(_baseLog, "Query external property: " << extId << " on " << compId << " (propid: " << propId << ")"); // Adds property to component ID mapping @@ -517,27 +545,20 @@ throw (CF::UnknownProperties, CORBA::SystemException) configProperties[count].value = 
comp->second.second[i].value; } } catch (CF::UnknownProperties e) { - for (unsigned int i = 0; i < e.invalidProperties.length(); ++i) { - // Add invalid properties to return list - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(e.invalidProperties[i].id); - invalidProperties[count].value = e.invalidProperties[i].value; - } + invalidProperties.extend(e.invalidProperties); } } // Query Assembly Controller properties CF::Properties tempProp; try { - assemblyController->query(tempProp); + CF::Resource_var ac_resource = _assemblyController->getResourcePtr(); + ac_resource->query(tempProp); } catch (CF::UnknownProperties e) { - int count = invalidProperties.length(); - invalidProperties.length(count + e.invalidProperties.length()); for (unsigned int i = 0; i < e.invalidProperties.length(); ++i) { - LOG_ERROR(Application_impl, "Invalid assembly controller property name: " << e.invalidProperties[i].id); - invalidProperties[count + i] = e.invalidProperties[i]; + RH_ERROR(_baseLog, "Invalid assembly controller property name: " << e.invalidProperties[i].id); } + invalidProperties.extend(e.invalidProperties); } // Adds Assembly Controller properties @@ -554,7 +575,8 @@ throw (CF::UnknownProperties, CORBA::SystemException) // For queries of length > 0, return all requested pairs that are valid external properties // or are Assembly Controller Properties CF::Properties acProps; - batch[acId] = std::pair(assemblyController, acProps); + CF::Resource_var ac_resource = _assemblyController->getResourcePtr(); + batch[acId] = std::pair(ac_resource, acProps); for (unsigned int i = 0; i < configProperties.length(); ++i) { // Gets external ID for property mapping @@ -571,20 +593,17 @@ throw (CF::UnknownProperties, CORBA::SystemException) } // Gets the component and its property id - const std::string propId = _properties[extId].id; + const std::string propId = _properties[extId].property_id; CF::Resource_ptr 
comp = _properties[extId].component; if (CORBA::is_nil(comp)) { - LOG_ERROR(Application_impl, "Unable to retrieve component for external property: " << extId); - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(extId.c_str()); - invalidProperties[count].value = configProperties[i].value; + RH_ERROR(_baseLog, "Unable to retrieve component for external property: " << extId); + invalidProperties.push_back(configProperties[i]); } else { // Key used for map std::string compId = ossie::corba::returnString(comp->identifier()); - LOG_TRACE(Application_impl, "Query external property: " << extId << " on " + RH_TRACE(_baseLog, "Query external property: " << extId << " on " << compId << " (propid: " << propId << ")"); // Adds property to component ID mapping @@ -602,20 +621,17 @@ throw (CF::UnknownProperties, CORBA::SystemException) batch[compId] = std::pair(comp, tempProp); } } - } else if (!CORBA::is_nil(assemblyController)) { + } else if (_assemblyController) { // Properties that are not external get batched with assembly controller - LOG_TRACE(Application_impl, "Calling query on assembly controller for property: " + RH_TRACE(_baseLog, "Calling query on assembly controller for property: " << configProperties[i].id); int count = batch[acId].second.length(); batch[acId].second.length(count + 1); batch[acId].second[count].id = configProperties[i].id; batch[acId].second[count].value = configProperties[i].value; } else { - LOG_ERROR(Application_impl, "Unable to retrieve assembly controller for external property: " << extId); - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(extId.c_str()); - invalidProperties[count].value = configProperties[i].value; + RH_ERROR(_baseLog, "Unable to retrieve assembly controller for external property: " << extId); + invalidProperties.push_back(configProperties[i]); } } @@ -626,13 +642,7 @@ 
throw (CF::UnknownProperties, CORBA::SystemException) try { comp->second.first->query(comp->second.second); } catch (CF::UnknownProperties e) { - for (unsigned int i = 0; i < e.invalidProperties.length(); ++i) { - // Add invalid properties to return list - int count = invalidProperties.length(); - invalidProperties.length(count + 1); - invalidProperties[count].id = CORBA::string_dup(e.invalidProperties[i].id); - invalidProperties[count].value = e.invalidProperties[i].value; - } + invalidProperties.extend(e.invalidProperties); } } @@ -644,11 +654,11 @@ throw (CF::UnknownProperties, CORBA::SystemException) // Checks if property ID is external or AC property if (_properties.count(extId)) { - propId = _properties[extId].id; + propId = _properties[extId].property_id; compId = ossie::corba::returnString(_properties[extId].component->identifier()); } else { propId = extId; - compId = ossie::corba::returnString(assemblyController->identifier()); + compId = _assemblyController->getIdentifier(); } // Loops through batched query results finding requested property @@ -664,11 +674,11 @@ throw (CF::UnknownProperties, CORBA::SystemException) } } - if (invalidProperties.length () != 0) { + if (!invalidProperties.empty()) { throw CF::UnknownProperties(invalidProperties); } - LOG_TRACE(Application_impl, "Query returning " << configProperties.length() << + RH_TRACE(_baseLog, "Query returning " << configProperties.length() << " external and assembly controller properties"); } @@ -679,12 +689,12 @@ char *Application_impl::registerPropertyListener( CORBA::Object_ptr listener, co SCOPED_LOCK( releaseObjectLock ); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping registerPropertyListener call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping registerPropertyListener call because releaseObject has been called"); std::string regid; return CORBA::string_dup(regid.c_str()); } - LOG_TRACE(Application_impl, "Number of Properties to Register: " << 
prop_ids.length() ); + RH_TRACE(_baseLog, "Number of Properties to Register: " << prop_ids.length() ); typedef std::map< CF::Resource_ptr, std::vector< std::string > > CompRegs; CompRegs comp_regs; @@ -694,12 +704,13 @@ char *Application_impl::registerPropertyListener( CORBA::Object_ptr listener, co if (_properties.count(extId)) { CF::Resource_ptr comp = _properties[extId].component; - std::string prop_id = _properties[extId].id; - LOG_TRACE(Application_impl, " ---> Register ExternalID: " << extId << " Comp/Id " << + std::string prop_id = _properties[extId].property_id; + RH_TRACE(_baseLog, " ---> Register ExternalID: " << extId << " Comp/Id " << ossie::corba::returnString(comp->identifier()) << "/" << prop_id); comp_regs[ comp ].push_back( prop_id ); - } else if (!CORBA::is_nil(assemblyController)) { - comp_regs[ assemblyController ].push_back( extId ) ; + } else if (_assemblyController) { + CF::Resource_var ac_resource = _assemblyController->getResourcePtr(); + comp_regs[ac_resource].push_back(extId); } } @@ -714,7 +725,7 @@ char *Application_impl::registerPropertyListener( CORBA::Object_ptr listener, co for ( uint32_t i=0; i < reg_iter->second.size(); i++ ) reg_ids[i] = reg_iter->second[i].c_str(); std::string reg_id = ossie::corba::returnString( reg_iter->first->registerPropertyListener( listener, reg_ids, interval ) ); - LOG_TRACE(Application_impl, "Component-->PropertyChangeRegistryRegistry comp/id " << + RH_TRACE(_baseLog, "Component-->PropertyChangeRegistryRegistry comp/id " << ossie::corba::returnString(reg_iter->first->identifier()) << "/" << reg_id ); pc_recs.push_back( PropertyChangeRecord( reg_id, reg_iter->first ) ); @@ -723,7 +734,7 @@ char *Application_impl::registerPropertyListener( CORBA::Object_ptr listener, co } catch (...) 
{ - LOG_WARN(Application_impl, "PropertyChangeListener registration failed against Application: " << _identifier ); + RH_WARN(_baseLog, "PropertyChangeListener registration failed against Application: " << _identifier ); PropertyChangeRecords::iterator iter = pc_recs.begin(); try { iter->comp->unregisterPropertyListener( iter->reg_id.c_str() ); @@ -745,7 +756,7 @@ void Application_impl::unregisterPropertyListener( const char *reg_id ) { SCOPED_LOCK( releaseObjectLock ); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping unregisterPropertyListener call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping unregisterPropertyListener call because releaseObject has been called"); return; } @@ -765,7 +776,7 @@ void Application_impl::unregisterPropertyListener( const char *reg_id ) } } catch(...){ - LOG_WARN(Application_impl, "Unregister PropertyChangeListener operation failed. app/reg_id: " << _identifier << "/" << reg_id ); + RH_WARN(_baseLog, "Unregister PropertyChangeListener operation failed. 
app/reg_id: " << _identifier << "/" << reg_id ); } } @@ -783,65 +794,70 @@ throw (CORBA::SystemException, CF::LifeCycle::InitializeError) boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping initialize call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping initialize call because releaseObject has been called"); return; } } - if (CORBA::is_nil(assemblyController)) { return; } + + if (!_assemblyController) { + return; + } try { - LOG_TRACE(Application_impl, "Calling initialize on assembly controller") - assemblyController->initialize (); + RH_TRACE(_baseLog, "Calling initialize on assembly controller"); + CF::Resource_var resource = _assemblyController->getResourcePtr(); + resource->initialize(); } catch( CF::LifeCycle::InitializeError& ie ) { - LOG_ERROR(Application_impl, "Initialize failed with CF::LifeCycle::InitializeError") + RH_ERROR(_baseLog, "Initialize failed with CF::LifeCycle::InitializeError") throw; - } CATCH_THROW_LOG_ERROR(Application_impl, "Initialize failed", CF::LifeCycle::InitializeError()) + } CATCH_THROW_RH_ERROR(_baseLog, "Initialize failed", CF::LifeCycle::InitializeError()) } CORBA::Object_ptr Application_impl::getPort (const char* _id) throw (CORBA::SystemException, CF::PortSupplier::UnknownPort) { - SCOPED_LOCK( releaseObjectLock ); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping getPort because release has already been called"); + RH_DEBUG(_baseLog, "skipping getPort because release has already been called app_id :" << _identifier ); return CORBA::Object::_nil(); } const std::string identifier = _id; if (_ports.count(identifier)) { - return CORBA::Object::_duplicate(_ports[identifier]); + return CORBA::Object::_duplicate(_ports[identifier] ); } else { - LOG_ERROR(Application_impl, "Get port failed with unknown port " << _id) + RH_ERROR(_baseLog, "Get port failed with unknown port " << _id << " for application " << _appName << " 
application id " << _identifier ); throw(CF::PortSupplier::UnknownPort()); } } - CF::PortSet::PortInfoSequence* Application_impl::getPortSet () { - SCOPED_LOCK( releaseObjectLock ); CF::PortSet::PortInfoSequence_var retval = new CF::PortSet::PortInfoSequence(); - if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping getPortSet because release has alraeady been called"); - return retval._retn(); + { + SCOPED_LOCK( releaseObjectLock ); + RH_DEBUG(_baseLog, "skipping getPortSet because release has alraeady been called"); + if (_releaseAlreadyCalled) { + return retval._retn(); + } } - std::vector comp_portsets; - for (ossie::ComponentList::iterator _component_iter=this->_components.begin(); _component_iter!=this->_components.end(); _component_iter++) { - try { - CF::Resource_var comp = CF::Resource::_narrow(_component_iter->componentObject); - CF::PortSet::PortInfoSequence_var comp_ports = comp->getPortSet(); - comp_portsets.push_back(comp_ports); - } catch ( CORBA::COMM_FAILURE &ex ) { - LOG_ERROR(Application_impl, "Component getPortSet failed, application: " << _identifier << " comp:" << _component_iter->identifier << "/" << _component_iter->namingContext ); - // unable to add port reference - } catch ( ... ) { - LOG_ERROR(Application_impl, "Unhandled exception during getPortSet, application: " << _identifier << " comp:" << _component_iter->identifier << "/" << _component_iter->namingContext ); + { + boost::mutex::scoped_lock lock(_registrationMutex); + for (ComponentList::iterator _component_iter=this->_components.begin(); _component_iter!=this->_components.end(); _component_iter++) { + try { + CF::Resource_var comp = _component_iter->getResourcePtr(); + comp_portsets.push_back(comp->getPortSet()); + } catch ( CORBA::COMM_FAILURE &ex ) { + RH_ERROR(_baseLog, "Component getPortSet failed, application: " << _identifier << " comp:" << _component_iter->getIdentifier() << "/" << _component_iter->getNamingContext() ); + } catch ( ... 
) { + RH_ERROR(_baseLog, "Unhandled exception during getPortSet, application: " << _identifier << " comp:" << _component_iter->getIdentifier() << "/" << _component_iter->getNamingContext() ); + } } } + for (std::map::iterator _port_val=_ports.begin(); _port_val!=_ports.end(); _port_val++) { for (std::vector::iterator comp_portset=comp_portsets.begin(); comp_portset!=comp_portsets.end(); comp_portset++) { for (unsigned int i=0; i<(*comp_portset)->length(); i++) { @@ -861,6 +877,7 @@ CF::PortSet::PortInfoSequence* Application_impl::getPortSet () } } } + if (_ports.size() != retval->length()) { // some of the components are unreachable and the list is incomplete for (std::map::iterator _port_val=_ports.begin(); _port_val!=_ports.end(); _port_val++) { @@ -883,7 +900,8 @@ CF::PortSet::PortInfoSequence* Application_impl::getPortSet () } } } - return retval._retn(); + + return retval._retn(); } @@ -896,18 +914,20 @@ throw (CORBA::SystemException, CF::UnknownProperties, CF::TestableObject::Unknow boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping runTest call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping runTest call because releaseObject has been called"); return; } } - if (CORBA::is_nil(assemblyController)) { - LOG_ERROR(Application_impl, "Run test called with non SCA compliant assembly controller"); + + if (!_assemblyController) { + RH_ERROR(_baseLog, "Run test called with non SCA compliant assembly controller"); throw CF::TestableObject::UnknownTest(); } try { - LOG_TRACE(Application_impl, "Calling runTest on assembly controller") - assemblyController->runTest (_testId, _props); + RH_TRACE(_baseLog, "Calling runTest on assembly controller"); + CF::Resource_var resource = _assemblyController->getResourcePtr(); + resource->runTest(_testId, _props); } catch( CF::UnknownProperties& up ) { std::ostringstream eout; eout << "Run test failed with CF::UnknownProperties for Test ID " 
<< _testId << " for properties: "; @@ -918,20 +938,18 @@ throw (CORBA::SystemException, CF::UnknownProperties, CF::TestableObject::Unknow else eout << ", "; } - LOG_ERROR(Application_impl, eout.str()) + RH_ERROR(_baseLog, eout.str()) throw; } catch( CF::TestableObject::UnknownTest& ) { - LOG_ERROR(Application_impl, "Run test failed with CF::TestableObject::UnknownTest for Test ID " << _testId) + RH_ERROR(_baseLog, "Run test failed with CF::TestableObject::UnknownTest for Test ID " << _testId) throw; - } CATCH_RETHROW_LOG_ERROR(Application_impl, "Run test failed for Test ID " << _testId) + } CATCH_RETHROW_RH_ERROR(_baseLog, "Run test failed for Test ID " << _testId) } void Application_impl::releaseObject () throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) { - TRACE_ENTER(Application_impl); - try { // Make sure releaseObject hasn't already been called, but only hold the // lock long enough to check to prevent a potential priority inversion with @@ -940,37 +958,35 @@ throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping release because release has already been called"); + RH_DEBUG(_baseLog, "skipping release because release has already been called"); return; } else { _releaseAlreadyCalled = true; } } - LOG_DEBUG(Application_impl, "Releasing application"); + RH_DEBUG(_baseLog, "Releasing application"); // remove application from DomainManager's App Sequence try { _domainManager->removeApplication(_identifier); } catch (CF::DomainManager::ApplicationUninstallationError& ex) { - LOG_ERROR(Application_impl, ex.msg); + RH_ERROR(_baseLog, ex.msg); } // Stop all components on the application try { - this->local_stop(); + this->local_stop(DEFAULT_STOP_TIMEOUT); } catch ( ... ) { // error happened while stopping. 
Ignore the error and continue tear-down - LOG_TRACE(Application_impl, "Error occurred while stopping the application during tear-down. Ignoring the error and continuing") + RH_TRACE(_baseLog, "Error occurred while stopping the application during tear-down. Ignoring the error and continuing") } - assemblyController = CF::Resource::_nil (); - try { // Break all connections in the application ConnectionManager::disconnectAll(_connections, _domainManager); - LOG_DEBUG(Application_impl, "app->releaseObject finished disconnecting ports"); - } CATCH_LOG_ERROR(Application_impl, "Failure during disconnect operation"); + RH_DEBUG(_baseLog, "app->releaseObject finished disconnecting ports"); + } CATCH_RH_ERROR(_baseLog, "Failure during disconnect operation"); // Release all resources // Before releasing the components, all executed processes should be terminated, @@ -984,24 +1000,22 @@ throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) // - unbind from NS // - release each component // - unload and deallocate - for (ossie::ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { - - const std::string id = ii->identifier; + for (ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { - if (!ii->namingContext.empty()) { - std::string componentName = ii->namingContext; + if (ii->hasNamingContext()) { + const std::string& componentName = ii->getNamingContext(); // Unbind the component from the naming context. This assumes that the component is // bound into the waveform context, and its name inside of the context follows the // last slash in the fully-qualified name. 
std::string shortName = componentName.substr(componentName.rfind('/')+1); - LOG_TRACE(Application_impl, "Unbinding component " << shortName); + RH_TRACE(_baseLog, "Unbinding component " << shortName); CosNaming::Name_var componentBindingName = ossie::corba::stringToName(shortName); try { _waveformContext->unbind(componentBindingName); - } CATCH_LOG_ERROR(Application_impl, "Unable to unbind component") + } CATCH_RH_ERROR(_baseLog, "Unable to unbind component") } - LOG_DEBUG(Application_impl, "Next component") + RH_DEBUG(_baseLog, "Next component") } terminateComponents(); @@ -1019,14 +1033,13 @@ throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) } err << iad.invalidAllocationIds[ii]; } - LOG_ERROR(Application_impl, err.str()); + RH_ERROR(_baseLog, err.str()); } // Unbind the application's naming context using the fully-qualified name. - LOG_TRACE(Application_impl, "Unbinding application naming context " << _waveformContextName); + RH_TRACE(_baseLog, "Unbinding application naming context " << _waveformContextName); CosNaming::Name DNContextname; DNContextname.length(1); - std::string domainName = _domainManager->getDomainManagerName(); DNContextname[0].id = CORBA::string_dup(_waveformContextName.c_str()); try { if ( CORBA::is_nil(_domainContext) == false ) { @@ -1034,22 +1047,22 @@ throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) } } catch (const CosNaming::NamingContext::NotFound&) { // Someone else has removed the naming context; this is a non-fatal condition. - LOG_WARN(Application_impl, "Naming context has already been removed"); - } CATCH_LOG_ERROR(Application_impl, "Unbind context failed with CORBA::SystemException") + RH_WARN(_baseLog, "Naming context has already been removed"); + } CATCH_RH_ERROR(_baseLog, "Unbind context failed with CORBA::SystemException") // Destroy the waveform context; it should be empty by this point, assuming all // of the components were properly unbound. 
- LOG_TRACE(Application_impl, "Destroying application naming context " << _waveformContextName); + RH_TRACE(_baseLog, "Destroying application naming context " << _waveformContextName); try { _waveformContext->destroy(); } catch (const CosNaming::NamingContext::NotEmpty&) { const char* error = "Application naming context not empty"; - LOG_ERROR(Application_impl, error); + RH_ERROR(_baseLog, error); CF::StringSequence message; message.length(1); message[0] = CORBA::string_dup(error); throw CF::LifeCycle::ReleaseError(message); - } CATCH_LOG_ERROR(Application_impl, "Destory waveform context: " << _waveformContextName ); + } CATCH_RH_ERROR(_baseLog, "Destory waveform context: " << _waveformContextName ); _waveformContext = CosNaming::NamingContext::_nil(); // send application removed event notification @@ -1068,80 +1081,57 @@ throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) catch( boost::thread_resource_error &e) { std::stringstream errstr; errstr << "Error acquiring lock (errno=" << e.native_error() << " msg=\"" << e.what() << "\")"; - LOG_ERROR(Application_impl, errstr.str()); + RH_ERROR(_baseLog, errstr.str()); CF::StringSequence message; message.length(1); message[0] = CORBA::string_dup(errstr.str().c_str()); throw CF::LifeCycle::ReleaseError(message); } - - TRACE_EXIT(Application_impl); } void Application_impl::releaseComponents() { - for (ossie::ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { - if (CORBA::is_nil(ii->componentObject)) { - // Ignore components that never registered - continue; + for (ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { + if (ii->getChildren().empty()) { + // Release "real" components first + ii->releaseObject(); } + } - LOG_DEBUG(Application_impl, "Releasing component '" << ii->identifier << "'"); - try { - CF::Resource_var resource = CF::Resource::_narrow(ii->componentObject); - unsigned long timeout = 3; // seconds - 
omniORB::setClientCallTimeout(resource, timeout * 1000); - resource->releaseObject(); - } CATCH_LOG_WARN(Application_impl, "releaseObject failed for component '" << ii->identifier << "'"); + for (ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { + if (!ii->getChildren().empty()) { + // Release containers once all "real" components have been released + ii->releaseObject(); + } } } + void Application_impl::terminateComponents() { // Terminate any components that were executed on devices - for (ossie::ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { - const unsigned long pid = ii->processId; - if (pid == 0) { - continue; - } - - LOG_DEBUG(Application_impl, "Terminating component '" << ii->identifier << "' pid " << pid); - - CF::ExecutableDevice_var device = ossie::corba::_narrowSafe(ii->assignedDevice); - if (CORBA::is_nil(device)) { - LOG_WARN(Application_impl, "Cannot find device to terminate component " << ii->identifier); - } else { - try { - device->terminate(ii->processId); - } CATCH_LOG_WARN(Application_impl, "Unable to terminate process " << pid); + for (ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { + if ( !ii->getAssignedDevice() ) { + // no assigned device, try to resolve using device id + if ( _domainManager ) { + ossie::DeviceList _registeredDevices = _domainManager->getRegisteredDevices(); + for (ossie::DeviceList::iterator _dev=_registeredDevices.begin(); _dev!=_registeredDevices.end(); _dev++) { + if ( ii->getAssignedDeviceId() == (*_dev)->identifier) { + ii->setAssignedDevice( *_dev ); + break; + } + } + } } + ii->terminate(); } } void Application_impl::unloadComponents() { // Terminate any components that were executed on devices - for (ossie::ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { - if (ii->loadedFiles.empty()) { - continue; - } - - LOG_DEBUG(Application_impl, "Unloading " << ii->loadedFiles.size() << " 
file(s) for component '" - << ii->identifier << "'"); - - CF::LoadableDevice_var device = ossie::corba::_narrowSafe(ii->assignedDevice); - if (CORBA::is_nil(device)) { - LOG_WARN(Application_impl, "Cannot find device to unload files for component " << ii->identifier); - continue; - } - - for (std::vector::iterator file = ii->loadedFiles.begin(); file != ii->loadedFiles.end(); - ++file) { - LOG_TRACE(Application_impl, "Unloading file " << *file); - try { - device->unload(file->c_str()); - } CATCH_LOG_WARN(Application_impl, "Unable to unload file " << *file); - } + for (ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { + ii->unloadFiles(); } } @@ -1209,7 +1199,7 @@ throw (CORBA::SystemException) boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping componentProcessIds call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping componentProcessIds call because releaseObject has been called"); return result._retn(); } } @@ -1225,15 +1215,254 @@ CF::Components* Application_impl::registeredComponents () boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping registeredComponents call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping registeredComponents call because releaseObject has been called"); return result._retn(); } } boost::mutex::scoped_lock lock(_registrationMutex); - convert_sequence_if(result, _components, to_component_type, is_registered); + convert_sequence_if(result, _components, to_component_type, + std::mem_fun_ref(&redhawk::ApplicationComponent::isRegistered)); return result._retn(); } +bool Application_impl::haveAttribute(std::vector &atts, std::string att) +{ + if (std::find(atts.begin(), atts.end(), att) == atts.end()) { + return false; + } + return true; +} + +CF::Properties* Application_impl::metrics(const CF::StringSequence& components, const 
CF::StringSequence& attributes) +throw (CF::Application::InvalidMetric, CORBA::SystemException) +{ + CF::Properties_var result_ugly = new CF::Properties(); + // Make sure releaseObject hasn't already been called + { + boost::mutex::scoped_lock lock(releaseObjectLock); + + if (_releaseAlreadyCalled) { + RH_DEBUG(_baseLog, "skipping metrics call because releaseObject has been called"); + return result_ugly._retn(); + } + } + + boost::mutex::scoped_lock lock(metricsLock); + measuredDevices.clear(); + std::vector valid_attributes; + valid_attributes.push_back("valid"); + valid_attributes.push_back("shared"); + valid_attributes.push_back("cores"); + valid_attributes.push_back("memory"); + valid_attributes.push_back("processes"); + valid_attributes.push_back("threads"); + valid_attributes.push_back("files"); + valid_attributes.push_back("componenthost"); + std::vector mod_attributes; + mod_attributes.resize(attributes.length()); + for (unsigned int _att=0; _att component_map; + std::vector component_list; + for (ComponentList::iterator _component_iter=this->_components.begin(); _component_iter!=this->_components.end(); _component_iter++) { + if (_component_iter->isVisible()) { + component_list.push_back(_component_iter->getName()); + component_map[_component_iter->getName()] = &(*_component_iter); + } + } + ossie::DeviceList _registeredDevices = _domainManager->getRegisteredDevices(); + redhawk::PropertyMap& result = redhawk::PropertyMap::cast(result_ugly); + redhawk::PropertyMap measuredComponents; + std::vector mod_requests; + if (components.length() == 0) { + mod_requests.insert(mod_requests.begin(), component_list.begin(), component_list.end()); + mod_requests.push_back("application utilization"); + } else { + mod_requests.resize(components.length()); + for (unsigned int _req=0; _req::iterator _comp=component_list.begin();_comp!=component_list.end();_comp++) { + measuredComponents[*_comp] = measureComponent(*component_map[*_comp]); + if (not 
measuredComponents[*_comp].asProperties()["valid"].toBoolean()) + valid = false; + } + redhawk::PropertyMap _util; + if (valid) { + if (haveAttribute(mod_attributes, "cores")) + _util["cores"] = (float)0; + if (haveAttribute(mod_attributes, "memory")) + _util["memory"] = (float)0; + if (haveAttribute(mod_attributes, "processes")) + _util["processes"] = (unsigned long)0; + if (haveAttribute(mod_attributes, "threads")) + _util["threads"] = (unsigned long)0; + if (haveAttribute(mod_attributes, "files")) + _util["files"] = (unsigned long)0; + if (haveAttribute(mod_attributes, "valid")) + _util["valid"] = true; + std::vector already_measured; + for (std::vector::iterator _comp=component_list.begin();_comp!=component_list.end();_comp++) { + redhawk::PropertyMap _mC = measuredComponents[*_comp].asProperties(); + if (haveAttribute(already_measured, _mC["componenthost"].toString())) + continue; + already_measured.push_back(_mC["componenthost"].toString()); + if (haveAttribute(mod_attributes, "cores")) + _util["cores"] = _util["cores"].toFloat() + _mC["cores"].toFloat(); + if (haveAttribute(mod_attributes, "memory")) + _util["memory"] = _util["memory"].toFloat() + _mC["memory"].toFloat(); + if (haveAttribute(mod_attributes, "processes")) + _util["processes"] = _util["processes"].toULong() + _mC["processes"].toULong(); + if (haveAttribute(mod_attributes, "threads")) + _util["threads"] = _util["threads"].toULong() + _mC["threads"].toULong(); + if (haveAttribute(mod_attributes, "files")) + _util["files"] = _util["files"].toULong() + _mC["files"].toULong(); + } + result[_request] = _util; + } else { + redhawk::PropertyMap _util; + if (haveAttribute(mod_attributes, "valid")) + _util["valid"] = false; + result[_request] = _util; + } + } + } + // find out if all components need to be queried + for (unsigned int _req=0; _req::iterator _comp=component_list.begin();_comp!=component_list.end();_comp++) { + if (_request == *_comp) { + redhawk::PropertyMap tmp = 
measureComponent(*component_map[*_comp]); + result[_request] = filterAttributes(tmp, mod_attributes); + found_component = true; + break; + } + } + if (not found_component) { + CF::StringSequence _components; + CF::StringSequence _attributes; + ossie::corba::push_back(_components, _request.c_str()); + throw CF::Application::InvalidMetric(_components, _attributes); + } + } + } + } + return result_ugly._retn(); +} + +redhawk::PropertyMap Application_impl::filterAttributes(redhawk::PropertyMap &attributes, std::vector &filter) +{ + redhawk::PropertyMap retval; + if (haveAttribute(filter, "cores")) + retval["cores"] = attributes["cores"]; + if (haveAttribute(filter, "memory")) + retval["memory"] = attributes["memory"]; + if (haveAttribute(filter, "valid")) + retval["valid"] = attributes["valid"]; + if (haveAttribute(filter, "shared")) + retval["shared"] = attributes["shared"]; + if (haveAttribute(filter, "processes")) + retval["processes"] = attributes["processes"]; + if (haveAttribute(filter, "threads")) + retval["threads"] = attributes["threads"]; + if (haveAttribute(filter, "files")) + retval["files"] = attributes["files"]; + if (haveAttribute(filter, "componenthost")) + retval["componenthost"] = attributes["componenthost"]; + return retval; +} + +redhawk::PropertyMap Application_impl::measureComponent(redhawk::ApplicationComponent &component) +{ + redhawk::PropertyMap retval; + retval["valid"] = false; + if (component.getComponentHost() != NULL) { + retval["shared"] = true; + } else { + retval["shared"] = false; + } + ossie::DeviceList _registeredDevices = _domainManager->getRegisteredDevices(); + for (ossie::DeviceList::iterator _dev=_registeredDevices.begin(); _dev!=_registeredDevices.end(); _dev++) { + if (component.getAssignedDevice()->identifier == (*_dev)->identifier) { + retval["valid"] = false; + redhawk::PropertyMap query; + if (measuredDevices.find(component.getAssignedDevice()->identifier) == measuredDevices.end()) { + query["component_monitor"] = 
redhawk::Value(); + try { + (*_dev)->device->query(query); + } catch ( ... ) { + RH_WARN(_baseLog, "Unable to query 'component_monitor' on "<identifier); + continue; + } + measuredDevices[component.getAssignedDevice()->identifier] = query; + } else { + query = measuredDevices[component.getAssignedDevice()->identifier]; + } + const redhawk::ValueSequence& values = query["component_monitor"].asSequence(); + std::string target_id = component.getIdentifier(); + if (retval["shared"].toBoolean()) + target_id = component.getComponentHost()->getIdentifier(); + if (values.size()!=0) { + for (unsigned int i=0; iidentifier); + continue; + } + if (single["component_monitor::component_monitor::waveform_id"].toString() != _identifier) { + continue; + } + if (not single.contains("component_monitor::component_monitor::component_id")) { + RH_WARN(_baseLog, "Unable to query 'component_monitor' missing 'component_id' on "<identifier); + continue; + } + if (single["component_monitor::component_monitor::component_id"].toString() != target_id) { + continue; + } + if ((not single.contains("component_monitor::component_monitor::cores")) or + (not single.contains("component_monitor::component_monitor::mem_rss")) or + (not single.contains("component_monitor::component_monitor::num_processes")) or + (not single.contains("component_monitor::component_monitor::num_threads")) or + (not single.contains("component_monitor::component_monitor::num_files"))) { + RH_WARN(_baseLog, "Unable to query 'component_monitor' missing 'cores', 'mem_rss', 'num_processes', 'num_threads', or 'num_files' on "<identifier); + continue; + } + retval["cores"] = single["component_monitor::component_monitor::cores"].toFloat(); + retval["memory"] = single["component_monitor::component_monitor::mem_rss"].toFloat(); + retval["processes"] = single["component_monitor::component_monitor::num_processes"].toULong(); + retval["threads"] = single["component_monitor::component_monitor::num_threads"].toULong(); + retval["files"] = 
single["component_monitor::component_monitor::num_files"].toULong(); + retval["componenthost"] = target_id; + retval["valid"] = true; + break; + } + } + } + } + return retval; +} + CF::ApplicationRegistrar_ptr Application_impl::appReg (void) { return _registrar->_this(); @@ -1248,11 +1477,13 @@ throw (CORBA::SystemException) boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping componentNamingContexts call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping componentNamingContexts call because releaseObject has been called"); return result._retn(); } } - convert_sequence_if(result, _components, to_name_element, has_naming_context); + + convert_sequence_if(result, _components, to_name_element, + std::mem_fun_ref(&redhawk::ApplicationComponent::hasNamingContext)); return result._retn(); } @@ -1266,7 +1497,7 @@ throw (CORBA::SystemException) boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping componentImplementations call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping componentImplementations call because releaseObject has been called"); return result._retn(); } } @@ -1284,25 +1515,34 @@ throw (CORBA::SystemException) boost::mutex::scoped_lock lock(releaseObjectLock); if (_releaseAlreadyCalled) { - LOG_DEBUG(Application_impl, "skipping componentDevices call because releaseObject has been called"); + RH_DEBUG(_baseLog, "skipping componentDevices call because releaseObject has been called"); return result._retn(); } } - std::vector::const_iterator begin = _componentDevices.begin(); - const std::vector::const_iterator end = _componentDevices.end(); - for (; begin != end; ++begin) { - ossie::corba::push_back(result, begin->deviceAssignment); - } - return result._retn(); + + return new CF::DeviceAssignmentSequence(_componentDevices); +} + +const std::string& Application_impl::getIdentifier() const 
+{ + return _identifier; } +const std::string& Application_impl::getName() const +{ + return _appName; +} + +const std::string& Application_impl::getProfile() const +{ + return _sadProfile; +} void Application_impl::addExternalPort (const std::string& identifier, CORBA::Object_ptr port) { if (_ports.count(identifier)) { throw std::runtime_error("Port name " + identifier + " is already in use"); } - _ports[identifier] = CORBA::Object::_duplicate(port); } @@ -1312,8 +1552,11 @@ void Application_impl::addExternalProperty (const std::string& propId, const std throw std::runtime_error("External Property name " + externalId + " is already in use"); } - externalPropertyRecord external(propId, access, CF::Resource::_duplicate(comp)); - _properties.insert(std::pair(externalId, external)); + externalPropertyType external; + external.property_id = propId; + external.access = access; + external.component = CF::Resource::_duplicate(comp); + _properties.insert(std::pair(externalId, external)); } bool Application_impl::checkConnectionDependency (Endpoint::DependencyType type, const std::string& identifier) const @@ -1329,9 +1572,11 @@ bool Application_impl::checkConnectionDependency (Endpoint::DependencyType type, bool Application_impl::_checkRegistrations (std::set& identifiers) { - for (ossie::ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { - if (is_registered(*ii)) { - identifiers.erase(ii->identifier); + for (ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { + if (ii->isRegistered()) { + identifiers.erase(ii->getIdentifier()); + } else if (ii->isTerminated()) { + throw redhawk::ComponentTerminated(ii->getIdentifier()); } } return identifiers.empty(); @@ -1345,7 +1590,7 @@ bool Application_impl::waitForComponents (std::set& identifiers, in boost::mutex::scoped_lock lock(_registrationMutex); while (!_checkRegistrations(identifiers)) { - LOG_DEBUG(Application_impl, "Waiting for components....APP:" << _identifier 
<< " list " << identifiers.size() ); + RH_DEBUG(_baseLog, "Waiting for components....APP:" << _identifier << " list " << identifiers.size() ); if (!_registrationCondition.timed_wait(lock, end)) { break; } @@ -1374,42 +1619,53 @@ CF::DomainManager_ptr Application_impl::getComponentDomainManager () return CF::DomainManager::_duplicate(ret); } -void Application_impl::registerComponent (CF::Resource_ptr resource) +redhawk::ApplicationComponent* Application_impl::getComponent(const std::string& identifier) { - const std::string componentId = ossie::corba::returnString(resource->identifier()); - const std::string softwareProfile = ossie::corba::returnString(resource->softwareProfile()); + redhawk::ApplicationComponent* component = findComponent(identifier); + if (!component) { + throw std::logic_error("unknown component '" + identifier + "'"); + } + return component; +} +void Application_impl::registerComponent (CF::Resource_ptr resource) +{ + std::string componentId; + std::string softwareProfile; + try{ + componentId = ossie::corba::returnString(resource->identifier()); + softwareProfile = ossie::corba::returnString(resource->softwareProfile()); + } + catch(...) 
{ + throw CF::InvalidObjectReference(); + } boost::mutex::scoped_lock lock(_registrationMutex); - ossie::ApplicationComponent* comp = findComponent(componentId); + redhawk::ApplicationComponent* comp = findComponent(componentId); if (!comp) { - LOG_WARN(Application_impl, "Unexpected component '" << componentId + RH_WARN(_baseLog, "Unexpected component '" << componentId << "' registered with application '" << _appName << "'"); - _components.push_back(ossie::ApplicationComponent()); - comp = &(_components.back()); - comp->identifier = componentId; - comp->softwareProfile = softwareProfile; - comp->processId = 0; - } else if (softwareProfile != comp->softwareProfile) { + comp = addComponent(componentId, softwareProfile); + } else if (softwareProfile != comp->getSoftwareProfile()) { // Mismatch between expected and reported SPD path - LOG_WARN(Application_impl, "Component '" << componentId << "' software profile " << softwareProfile - << " does not match expected profile " << comp->softwareProfile); - comp->softwareProfile = softwareProfile; + RH_WARN(_baseLog, "Component '" << componentId << "' software profile " << softwareProfile + << " does not match expected profile " << comp->getSoftwareProfile()); + comp->setSoftwareProfile(softwareProfile); } - LOG_TRACE(Application_impl, "REGISTERING Component '" << componentId << "' software profile " << softwareProfile << " pid:" << comp->processId ); - comp->componentObject = CORBA::Object::_duplicate(resource); + RH_TRACE(_baseLog, "REGISTERING Component '" << componentId << "' software profile " << softwareProfile << " pid:" << comp->getProcessId()); + comp->setComponentObject(resource); _registrationCondition.notify_all(); } std::string Application_impl::getExternalPropertyId(std::string compIdIn, std::string propIdIn) { - for (std::map::const_iterator prop = _properties.begin(); + for (std::map::const_iterator prop = _properties.begin(); prop != _properties.end(); ++prop) { // Gets the property mapping info std::string 
extId = prop->first; - std::string propId = prop->second.id; + std::string propId = prop->second.property_id; // Gets the Resource identifier std::string compId = ossie::corba::returnString(prop->second.component->identifier()); @@ -1421,10 +1677,10 @@ std::string Application_impl::getExternalPropertyId(std::string compIdIn, std::s return ""; } -ossie::ApplicationComponent* Application_impl::findComponent(const std::string& identifier) +redhawk::ApplicationComponent* Application_impl::findComponent(const std::string& identifier) { - for (ossie::ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { - if (identifier == ii->identifier) { + for (ComponentList::iterator ii = _components.begin(); ii != _components.end(); ++ii) { + if (identifier == ii->getIdentifier()) { return &(*ii); } } @@ -1432,66 +1688,90 @@ ossie::ApplicationComponent* Application_impl::findComponent(const std::string& return 0; } -void Application_impl::addComponent(const std::string& identifier, const std::string& profile) +redhawk::ApplicationComponent* Application_impl::addComponent(const std::string& componentId, + const std::string& softwareProfile) { - if (findComponent(identifier)) { - LOG_ERROR(Application_impl, "Component '" << identifier << "' is already registered"); - return; - } - LOG_DEBUG(Application_impl, "Adding component '" << identifier << "' with profile " << profile); - ossie::ApplicationComponent component; - component.identifier = identifier; - component.softwareProfile = profile; - component.processId = 0; - _components.push_back(component); + _components.push_back(redhawk::ApplicationComponent(componentId)); + redhawk::ApplicationComponent* component = &(_components.back()); + component->setSoftwareProfile(softwareProfile); + component->setLogger(_baseLog); + return component; } -void Application_impl::setComponentPid(const std::string& identifier, unsigned long pid) +redhawk::ApplicationComponent* Application_impl::addContainer(const 
redhawk::ContainerDeployment* container) { - ossie::ApplicationComponent* component = findComponent(identifier); - if (!component) { - LOG_ERROR(Application_impl, "Setting process ID for unknown component '" << identifier << "'"); - } else { - component->processId = pid; - } + const std::string& identifier = container->getIdentifier(); + if (findComponent(identifier)) { + throw std::logic_error("container '" + identifier + "' is already registered"); + } + const std::string& profile = container->getSoftPkg()->getSPDFile(); + RH_DEBUG(_baseLog, "Adding container '" << identifier << "' with profile " << profile); + redhawk::ApplicationComponent* component = addComponent(identifier, profile); + component->setName(container->getInstantiation()->getID()); + component->setImplementationId(container->getImplementation()->getID()); + // Hide ComponentHost instances from the CORBA API + component->setVisible(false); + component->setAssignedDevice(container->getAssignedDevice()); + return component; } -void Application_impl::setComponentNamingContext(const std::string& identifier, const std::string& name) +redhawk::ApplicationComponent* Application_impl::addComponent(const redhawk::ComponentDeployment* deployment) { - ossie::ApplicationComponent* component = findComponent(identifier); - if (!component) { - LOG_ERROR(Application_impl, "Setting naming context for unknown component '" << identifier << "'"); - } else { - component->namingContext = name; - } + const std::string& identifier = deployment->getIdentifier(); + if (findComponent(identifier)) { + throw std::logic_error("component '" + identifier + "' is already registered"); + } + const std::string& profile = deployment->getSoftPkg()->getSPDFile(); + RH_DEBUG(_baseLog, "Adding component '" << identifier << "' with profile " << profile); + redhawk::ApplicationComponent* component = addComponent(identifier, profile); + component->setName(deployment->getInstantiation()->getID()); + 
component->setImplementationId(deployment->getImplementation()->getID()); + component->setAssignedDevice(deployment->getAssignedDevice()); + return component; } -void Application_impl::setComponentImplementation(const std::string& identifier, const std::string& implementationId) +void Application_impl::componentTerminated(const std::string& componentId, const std::string& deviceId) { - ossie::ApplicationComponent* component = findComponent(identifier); + boost::mutex::scoped_lock lock(_registrationMutex); + redhawk::ApplicationComponent* component = findComponent(componentId); if (!component) { - LOG_ERROR(Application_impl, "Setting implementation for unknown component '" << identifier << "'"); - } else { - component->implementationId = implementationId; + RH_WARN(_baseLog, "Unrecognized component '" << componentId << "' from application '" << _identifier + << "' terminated abnormally on device " << deviceId); + return; } -} - -void Application_impl::setComponentDevice(const std::string& identifier, CF::Device_ptr device) -{ - ossie::ApplicationComponent* component = findComponent(identifier); - if (!component) { - LOG_ERROR(Application_impl, "Setting device for unknown component '" << identifier << "'"); + if (!component->getChildren().empty()) { + RH_ERROR(_baseLog, "Component host from application '" << _appName + << "' containing " << component->getChildren().size() + << " component(s) terminated abnormally on device " << deviceId); + BOOST_FOREACH(redhawk::ApplicationComponent* child, component->getChildren()) { + _checkComponentConnections(child); + } } else { - component->assignedDevice = CF::Device::_duplicate(device); + RH_ERROR(_baseLog, "Component '" << component->getName() + << "' from application '" << _appName + << "' terminated abnormally on device " << deviceId); + _checkComponentConnections(component); } + component->setProcessId(0); + _registrationCondition.notify_all(); } -void Application_impl::addComponentLoadedFile(const std::string& 
identifier, const std::string& fileName) +void Application_impl::_checkComponentConnections(redhawk::ApplicationComponent* component) { - ossie::ApplicationComponent* component = findComponent(identifier); - if (!component) { - LOG_ERROR(Application_impl, "Adding loaded file for unknown component '" << identifier << "'"); - } else { - component->loadedFiles.push_back(fileName); + RH_DEBUG(_baseLog, "Checking for connections that depend on terminated component " + << component->getIdentifier()); + const std::string& name = component->getName(); + int connection_count = 0; + BOOST_FOREACH(ConnectionNode& connection, _connections) { + if (connection.dependencyTerminated(ossie::Endpoint::COMPONENT, name)) { + RH_TRACE(_baseLog, "Application '" << _appName << "' connection '" + << connection.identifier << "' depends on terminated component '" + << name << "'"); + connection_count++; + } + } + if (connection_count > 0) { + RH_DEBUG(_baseLog, "Application '" << _appName << "' has " << connection_count + << " connection(s) depending on terminated component '" << name << "'"); } } diff --git a/redhawk/src/control/sdr/dommgr/Application_impl.h b/redhawk/src/control/sdr/dommgr/Application_impl.h index de59c5f24..4f4adb54c 100644 --- a/redhawk/src/control/sdr/dommgr/Application_impl.h +++ b/redhawk/src/control/sdr/dommgr/Application_impl.h @@ -32,9 +32,11 @@ #include #include -#include "applicationSupport.h" +#include "Deployment.h" +#include "ApplicationDeployment.h" +#include "PersistenceStore.h" #include "connectionSupport.h" - +#include "ApplicationComponent.h" class DomainManager_impl; class ApplicationRegistrar_impl; @@ -45,21 +47,19 @@ class Application_impl : public virtual POA_CF::Application, public Logging_impl ENABLE_LOGGING friend class DomainManager_impl; -protected: - CF::Resource_var assemblyController; - public: Application_impl (const std::string& id, const std::string& name, const std::string& profile, DomainManager_impl* domainManager, const std::string& 
waveformContextName, - CosNaming::NamingContext_ptr waveformContext, bool aware, CosNaming::NamingContext_ptr DomainContext); + CosNaming::NamingContext_ptr waveformContext, bool aware, + float stopTimeout, CosNaming::NamingContext_ptr DomainContext); - void populateApplication (CF::Resource_ptr _assemblyController, - std::vector& _devSequence, - std::vector _startSeq, + void populateApplication (const CF::DeviceAssignmentSequence& deviceAssignments, std::vector& connections, std::vector allocationIDs); + void setStartOrder(const std::vector& startOrder); + ~Application_impl (); static PortableServer::ObjectId* Activate(Application_impl* application); @@ -72,7 +72,7 @@ class Application_impl : public virtual POA_CF::Application, public Logging_impl void stop () throw (CF::Resource::StopError, CORBA::SystemException); - void local_stop () + void local_stop (float timeout) throw (CF::Resource::StopError, CORBA::SystemException); // The core framework provides an implementation for this method. @@ -89,6 +89,10 @@ class Application_impl : public virtual POA_CF::Application, public Logging_impl void query (CF::Properties& configProperties) throw (CF::UnknownProperties, CORBA::SystemException); + // The core framework provides an implementation for this method. 
+ CF::Properties* metrics (const CF::StringSequence& components, const CF::StringSequence& attributes) + throw (CF::Application::InvalidMetric, CORBA::SystemException); + char *registerPropertyListener( CORBA::Object_ptr listener, const CF::StringSequence &prop_ids, const CORBA::Float interval) throw(CF::UnknownProperties, CF::InvalidObjectReference); @@ -117,6 +121,10 @@ class Application_impl : public virtual POA_CF::Application, public Logging_impl bool aware () throw (CORBA::SystemException); + CORBA::Float stopTimeout () throw (CORBA::SystemException); + + void stopTimeout (CORBA::Float timeout) throw (CORBA::SystemException); + CF::DeviceAssignmentSequence * componentDevices () throw (CORBA::SystemException); @@ -133,6 +141,13 @@ class Application_impl : public virtual POA_CF::Application, public Logging_impl CF::ApplicationRegistrar_ptr appReg (void); + void setAssemblyController (const std::string& assemblyControllerRef); + redhawk::ApplicationComponent* getAssemblyController(); + + const std::string& getIdentifier() const; + const std::string& getName() const; + const std::string& getProfile() const; + void addExternalPort (const std::string&, CORBA::Object_ptr); void addExternalProperty (const std::string&, const std::string&, const std::string &access, CF::Resource_ptr); @@ -142,37 +157,34 @@ class Application_impl : public virtual POA_CF::Application, public Logging_impl void _cleanupActivations(); // Set component state - void addComponent(const std::string& identifier, const std::string& profile); - void setComponentPid(const std::string& identifier, unsigned long pid); - void setComponentNamingContext(const std::string& identifier, const std::string& name); - void setComponentImplementation(const std::string& identifier, const std::string& implementationId); - void setComponentDevice(const std::string& identifier, CF::Device_ptr device); - void addComponentLoadedFile(const std::string& identifier, const std::string& fileName); + 
redhawk::ApplicationComponent* addComponent(const std::string& componentId, const std::string& softwareProfile); + redhawk::ApplicationComponent* addComponent(const redhawk::ComponentDeployment* deployment); + redhawk::ApplicationComponent* addContainer(const redhawk::ContainerDeployment* container); void releaseComponents(); void terminateComponents(); void unloadComponents(); + void componentTerminated(const std::string& componentId, const std::string& deviceId); + bool waitForComponents(std::set& identifiers, int timeout); CF::Application_ptr getComponentApplication(); CF::DomainManager_ptr getComponentDomainManager(); - struct externalPropertyRecord { - public: + redhawk::ApplicationComponent* getComponent(const std::string& identifier); - std::string id; - std::string access; - CF::Resource_var component; + // set the log level for one of the loggers on a component on the waveform + void setLogLevel( const char *logger_id, const CF::LogLevel newLevel ) throw (CF::UnknownIdentifier); - externalPropertyRecord() {}; + // get the log level from one of the loggers on a component on the waveform + CF::LogLevel getLogLevel( const char *logger_id ) throw (CF::UnknownIdentifier); - externalPropertyRecord(const std::string &_id, const std::string &_access, CF::Resource_ptr _component) { - id = _id; - access = _access; - component = CF::Resource::_duplicate(_component); - }; - }; + // retrieves the list of named loggers from all the components associated with the waveform + CF::StringSequence* getNamedLoggers(); + + // reset the loggers on all components on the waveform + void resetLog(); private: Application_impl (); // No default constructor @@ -190,49 +202,57 @@ class Application_impl : public virtual POA_CF::Application, public Logging_impl typedef std::vector< PropertyChangeRecord > PropertyChangeRecords; typedef std::map< std::string, PropertyChangeRecords > PropertyChangeRegistry; - void registerComponent(CF::Resource_ptr resource); + std::map measuredDevices; + 
bool haveAttribute(std::vector &atts, std::string att); + redhawk::PropertyMap measureComponent(redhawk::ApplicationComponent &component); + redhawk::PropertyMap filterAttributes(redhawk::PropertyMap &attributes, std::vector &filter); - bool stopComponent(CF::Resource_ptr component); + void registerComponent(CF::Resource_ptr resource); bool _checkRegistrations(std::set& identifiers); + void _checkComponentConnections(redhawk::ApplicationComponent* component); + + redhawk::ApplicationComponent* _assemblyController; const std::string _identifier; const std::string _sadProfile; const std::string _appName; - std::vector _componentDevices; + CF::DeviceAssignmentSequence _componentDevices; std::vector _connections; - std::vector _appStartSeq; + std::vector _startOrder; std::vector _allocationIDs; DomainManager_impl* _domainManager; const std::string _waveformContextName; CosNaming::NamingContext_var _waveformContext; bool _started; const bool _isAware; + float _stopTimeout; FakeApplication* _fakeProxy; ApplicationRegistrar_impl* _registrar; - ossie::ComponentList _components; + typedef std::list ComponentList; + ComponentList _components; CosNaming::NamingContext_var _domainContext; boost::mutex _registrationMutex; boost::condition_variable _registrationCondition; std::map _ports; - std::map _properties; + std::map _properties; bool _releaseAlreadyCalled; boost::mutex releaseObjectLock; + boost::mutex metricsLock; PropertyChangeRegistry _propertyChangeRegistrations; - ossie::ApplicationComponent* findComponent(const std::string& identifier); + redhawk::ApplicationComponent* findComponent(const std::string& identifier); // Returns externalpropid if one exists based off of compId and // internal propId, returns empty string if no external prop exists std::string getExternalPropertyId(std::string compId, std::string propId); - friend class ApplicationRegistrar_impl; }; diff --git a/redhawk/src/control/sdr/dommgr/ConnectionManager.h 
b/redhawk/src/control/sdr/dommgr/ConnectionManager.h index e03c1eeb5..fb934b4a9 100644 --- a/redhawk/src/control/sdr/dommgr/ConnectionManager.h +++ b/redhawk/src/control/sdr/dommgr/ConnectionManager.h @@ -39,10 +39,16 @@ class ConnectionManager_impl : public virtual POA_CF::ConnectionManager { virtual void listConnections(CORBA::ULong count, CF::ConnectionManager::ConnectionStatusSequence_out connections, CF::ConnectionStatusIterator_out iter); + void setLogger(rh_logger::LoggerPtr logptr) { + _connMgrLog = logptr; + }; + private: ossie::Endpoint* requestToEndpoint(const CF::ConnectionManager::EndpointRequest& request); DomainManager_impl* _domainManager; + + rh_logger::LoggerPtr _connMgrLog; }; #endif // CONNECTIONMANAGER_H diff --git a/redhawk/src/control/sdr/dommgr/Deployment.cpp b/redhawk/src/control/sdr/dommgr/Deployment.cpp new file mode 100644 index 000000000..716593ba0 --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/Deployment.cpp @@ -0,0 +1,873 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include +#include + +#include +#include + +#include "Application_impl.h" +#include "PersistenceStore.h" +#include "Deployment.h" +#include "DeploymentExceptions.h" +#include + +using namespace redhawk; +using namespace ossie; +namespace fs = boost::filesystem; + +rh_logger::LoggerPtr redhawk::deploymentLog; + +UsesDeviceDeployment::~UsesDeviceDeployment() +{ + for (AssignmentList::iterator assign = assignments.begin(); assign != assignments.end(); ++assign) { + delete *assign; + } +} + +void UsesDeviceDeployment::transferUsesDeviceAssignments(UsesDeviceDeployment& other) +{ + other.assignments.insert(other.assignments.end(), assignments.begin(), assignments.end()); + assignments.clear(); +} + +void UsesDeviceDeployment::addUsesDeviceAssignment(UsesDeviceAssignment* assignment) +{ + assignments.push_back(assignment); +} + +UsesDeviceAssignment* UsesDeviceDeployment::getUsesDeviceAssignment(const std::string identifier) +{ + for (AssignmentList::iterator assign = assignments.begin(); assign != assignments.end(); ++assign) { + if (identifier == (*assign)->getUsesDevice()->getID()) { + return *assign; + } + } + + return 0; +} + +const UsesDeviceDeployment::AssignmentList& UsesDeviceDeployment::getUsesDeviceAssignments() +{ + return assignments; +} + +UsesDeviceAssignment::UsesDeviceAssignment(const UsesDevice* usesDevice) : + usesDevice(usesDevice) +{ +} + +const UsesDevice* UsesDeviceAssignment::getUsesDevice() const +{ + return usesDevice; +} + +void UsesDeviceAssignment::setAssignedDevice(CF::Device_ptr device) +{ + assignedDevice = CF::Device::_duplicate(device); +} + +CF::Device_ptr UsesDeviceAssignment::getAssignedDevice() const +{ + return CF::Device::_duplicate(assignedDevice); +} + +SoftPkgDeployment::SoftPkgDeployment(const SoftPkg* softpkg, + const SPD::Implementation* implementation) : + softpkg(softpkg), + implementation(implementation) +{ +} + +SoftPkgDeployment::~SoftPkgDeployment() +{ + clearDependencies(); +} + +const SoftPkg* 
SoftPkgDeployment::getSoftPkg() const +{ + return &(*softpkg); +} + +void SoftPkgDeployment::setImplementation(const SPD::Implementation* implementation) +{ + this->implementation = implementation; +} + +const SPD::Implementation* SoftPkgDeployment::getImplementation() const +{ + return implementation; +} + +void SoftPkgDeployment::addDependency(SoftPkgDeployment* dependency) +{ + dependencies.push_back(dependency); +} + +const std::vector& SoftPkgDeployment::getDependencies() +{ + return dependencies; +} + +void SoftPkgDeployment::clearDependencies() +{ + for (DeploymentList::iterator dependency = dependencies.begin(); dependency != dependencies.end(); ++dependency) { + delete (*dependency); + } + dependencies.clear(); +} + +std::vector SoftPkgDeployment::getDependencyLocalFiles() +{ + std::vector files; + for (DeploymentList::iterator dependency = dependencies.begin(); dependency != dependencies.end(); ++dependency) { + std::vector depfiles = (*dependency)->getDependencyLocalFiles(); + std::copy(depfiles.begin(), depfiles.end(), std::back_inserter(files)); + files.push_back((*dependency)->getLocalFile()); + } + return files; +} + +void SoftPkgDeployment::load(redhawk::ApplicationComponent* appComponent, CF::FileSystem_ptr fileSystem, + CF::LoadableDevice_ptr device) +{ + if (!implementation) { + throw std::logic_error("no implementation selected for soft package " + softpkg->getName()); + } + + // Recursively load dependencies + if (!dependencies.empty()) { + RH_TRACE(deploymentLog, "Loading " << dependencies.size() << + " dependency(ies) for soft package " << softpkg->getName()); + for (DeploymentList::iterator dep = dependencies.begin(); dep != dependencies.end(); ++dep) { + (*dep)->load(appComponent, fileSystem, device); + } + } + + // Determine absolute path of local file + CF::LoadableDevice::LoadType codeType = getCodeType(); + const std::string fileName = getLocalFile(); + RH_DEBUG(deploymentLog, "Loading file " << fileName + << " for soft package " << 
softpkg->getName()); + try { + device->load(fileSystem, fileName.c_str(), codeType); + } catch (const CF::Device::InvalidState& exc) { + std::string message = "device is in invalid state: "; + message += exc.msg; + throw DeploymentError(message); + } catch (const CF::LoadableDevice::InvalidLoadKind& exc) { + throw DeploymentError("invalid load kind for file " + fileName); + } catch (const CF::InvalidFileName& exc) { + std::string message = "file name '" + fileName + "' is invalid: "; + message += exc.msg; + throw DeploymentError(message); + } catch (const CF::LoadableDevice::LoadFail& exc) { + std::string message = "failure loading file '" + fileName + "': "; + message += exc.msg; + throw DeploymentError(message); + } catch (const CORBA::SystemException& exc) { + std::string message = ossie::corba::describeException(exc); + message += " loading " + fileName; + throw DeploymentError(message); + } + appComponent->addLoadedFile(fileName); +} + +std::string SoftPkgDeployment::getLocalFile() +{ + fs::path codeLocalFile = fs::path(implementation->getCodeFile()); + if (!codeLocalFile.has_root_directory()) { + // Path is relative to SPD file location + fs::path base_dir = fs::path(softpkg->getSPDFile()).parent_path(); + codeLocalFile = base_dir / codeLocalFile; + } + codeLocalFile = codeLocalFile.normalize(); + if (codeLocalFile.has_leaf() && codeLocalFile.leaf() == ".") { + codeLocalFile = codeLocalFile.branch_path(); + } + + return codeLocalFile.string(); +} + +CF::LoadableDevice::LoadType SoftPkgDeployment::getCodeType() const +{ + switch (implementation->getCodeType()) { + case SPD::Code::KERNEL_MODULE: + return CF::LoadableDevice::KERNEL_MODULE; + case SPD::Code::SHARED_LIBRARY: + return CF::LoadableDevice::SHARED_LIBRARY; + case SPD::Code::EXECUTABLE: + return CF::LoadableDevice::EXECUTABLE; + case SPD::Code::DRIVER: + return CF::LoadableDevice::DRIVER; + default: + return CF::LoadableDevice::LoadType(); + } +} + +bool SoftPkgDeployment::isExecutable() const +{ + // 
REDHAWK extends section D.2.1.6.3 to support loading a directory + // and execute a file in that directory using a entrypoint + // 1. Executable means to use CF LoadableDevice::load and CF ExecutableDevice::execute operations. This is a "main" process. + // - A Executable that references a directory instead of a file means to recursively load the contents of the directory + // and then execute the program specified via entrypoint + // 2. Driver and Kernel Module means load only. + // 3. SharedLibrary means dynamic linking. + // 4. A (SharedLibrary) Without a code entrypoint element means load only. + // 5. A (SharedLibrary) With a code entrypoint element means load and CF Device::execute. + switch (implementation->getCodeType()) { + case SPD::Code::EXECUTABLE: + // Returns true if the entry point is non-null + return bool(implementation->getEntryPoint()); + case SPD::Code::SHARED_LIBRARY: + return true; + default: + return false; + } +} + +ComponentDeployment::ComponentDeployment(const SoftPkg* softpkg, + const ComponentInstantiation* instantiation, + const std::string& identifier) : + SoftPkgDeployment(softpkg), + instantiation(instantiation), + identifier(identifier), + assemblyController(false), + container(0), + appComponent(0) +{ + std::string sadLoggingConfig; + // If the SoftPkg has an associated Properties, check the overrides for + // validity + if (softpkg->getProperties()) { + BOOST_FOREACH(const ComponentProperty& override, instantiation->getProperties()) { + const Property* property = softpkg->getProperties()->getProperty(override.getID()); + if (!property) { + if (override.getID() == "LOGGING_CONFIG_URI") { + // It's legal to override the logging configuration, even + // if it isn't defined in the PRF + const SimplePropertyRef* ref = dynamic_cast(&override); + if ( ref ) { + CORBA::Any logging_any; + logging_any <<= ref->getValue(); + overrideProperty("LOGGING_CONFIG_URI", logging_any); + sadLoggingConfig = ref->getValue(); + } + } else if 
(override.getID() == "LOG_LEVEL") { + // It's legal to override the logging configuration, even + // if it isn't defined in the PRF + const SimplePropertyRef* ref = dynamic_cast(&override); + if ( ref ) { + CORBA::Any log_level_any; + log_level_any <<= ref->getValue(); + overrideProperty("LOG_LEVEL", log_level_any); + } + } else { + RH_WARN(deploymentLog, "Ignoring attempt to override property " + << override.getID() << " that does not exist in component"); + } + } else if (!property->canOverride()) { + RH_WARN(deploymentLog, "Ignoring attempt to override read-only property " + << property->getID()); + } + } + } + + + + ComponentInstantiation::LoggingConfig lcfg = instantiation->getLoggingConfig(); + // if a LOGGING_CONFIG_URI was provided in the SAD file, override the one from the component's profile + if (not sadLoggingConfig.empty()) { + lcfg.first = sadLoggingConfig; + } + if ( !lcfg.first.empty() ){ + RH_TRACE(deploymentLog, "Logging Config: <" << lcfg.first << ">" ); + loggingConfig["LOGGING_CONFIG_URI"] = lcfg.first; + } + if ( !lcfg.second.empty() ){ + RH_TRACE(deploymentLog, "Logging Level: <" << lcfg.second << ">" ); + loggingConfig["LOG_LEVEL"] = lcfg.second; + } + + if (!instantiation->getAffinity().empty()) { + RH_TRACE(deploymentLog, "Setting affinity options"); + affinityOptions = ossie::getAffinityOptions(instantiation->getAffinity()); + } + + if (!instantiation->getDeviceRequires().empty()) { + RH_TRACE(deploymentLog, "Getting devicerequires property set"); + ossie::convertComponentProperties(instantiation->getDeviceRequires(),deviceRequires); + } +} + +const std::string& ComponentDeployment::getIdentifier() const +{ + return identifier; +} + +const ComponentInstantiation* ComponentDeployment::getInstantiation() const +{ + return instantiation; +} + +void ComponentDeployment::setContainer(ComponentDeployment* container) +{ + this->container = container; +} + +ComponentDeployment* ComponentDeployment::getContainer() +{ + return container; +} + +bool 
ComponentDeployment::isResource() const +{ + return softpkg->getDescriptor()->isResource(); +} + +bool ComponentDeployment::isConfigurable() const +{ + return softpkg->getDescriptor()->isConfigurable(); +} + +bool ComponentDeployment::isAssemblyController() const +{ + return assemblyController; +} + +void ComponentDeployment::setIsAssemblyController(bool state) +{ + assemblyController = state; +} + +void ComponentDeployment::setAssignedDevice(const boost::shared_ptr& device) +{ + assignedDevice = device; +} + +const boost::shared_ptr& ComponentDeployment::getAssignedDevice() const +{ + return assignedDevice; +} + +std::string ComponentDeployment::getEntryPoint() +{ + const char* entryPoint = implementation->getEntryPoint(); + if (entryPoint) { + fs::path entryPointPath = fs::path(entryPoint); + if (!entryPointPath.has_root_directory()) { + // Path is relative to SPD file location + fs::path base_dir = fs::path(softpkg->getSPDFile()).parent_path(); + entryPointPath = base_dir / entryPointPath; + } + return entryPointPath.normalize().string(); + } + return std::string(); +} + +redhawk::PropertyMap ComponentDeployment::getOptions() +{ + // In prior versions, the options could be overridden, at least from the + // perspective of ComponentInfo; this may need to be re-implemented + redhawk::PropertyMap options; + + // Get the PRIORITY and STACK_SIZE from the SPD (if available) + const ossie::SPD::Code& code = implementation->code; + if (code.stacksize.isSet()) { + // 3.1.3.3.3.3.6 + // The specification says it's supposed to be an unsigned long, but the + // parser is set to unsigned long long + options[CF::ExecutableDevice::STACK_SIZE_ID] = static_cast(*code.stacksize); + } + if (code.priority.isSet()) { + // 3.1.3.3.3.3.7 + // The specification says it's supposed to be an unsigned long, but the + // parser is set to unsigned long long + options[CF::ExecutableDevice::PRIORITY_ID] = static_cast(*code.priority); + } + + redhawk::PropertyMap affinity = affinityOptions; + 
for (redhawk::PropertyMap::const_iterator prop = affinity.begin(); prop != affinity.end(); ++prop) { + RH_DEBUG(deploymentLog, "ComponentDeployment - Affinity Property: directive id:" + << prop->getId() << "/" << prop->getValue().toString()); + } + if (!nicAssignment.empty()) { + redhawk::PropertyMap::iterator nic_prop = affinity.find("nic"); + if (nic_prop == affinity.end() || (nic_prop->getId() != nicAssignment)) { + // No nic directive, or existing directive differs, append this one + affinity.push_back(redhawk::PropertyType("nic", nicAssignment)); + } + } + + if (!affinity.empty()) { + options["AFFINITY"] = affinity; + } + + return options; +} + +void ComponentDeployment::setNicAssignment(const std::string& nic) +{ + nicAssignment = nic; +} + +bool ComponentDeployment::hasNicAssignment() const +{ + return !nicAssignment.empty(); +} + +const std::string& ComponentDeployment::getNicAssignment() const +{ + return nicAssignment; +} + +void ComponentDeployment::setCpuReservation(float reservation) +{ + cpuReservation = reservation; +} + +bool ComponentDeployment::hasCpuReservation() const +{ + return cpuReservation.isSet(); +} + +float ComponentDeployment::getCpuReservation() const +{ + return *cpuReservation; +} + +redhawk::PropertyMap ComponentDeployment::getDeviceRequires() const { + return deviceRequires; +} + +void ComponentDeployment::setDeviceRequires( const redhawk::PropertyMap &devReqs ) { + deviceRequires = devReqs; +} + + +redhawk::PropertyMap ComponentDeployment::getAllocationContext() const +{ + redhawk::PropertyMap properties; + if (softpkg->getProperties()) { + BOOST_FOREACH(const Property* property, softpkg->getProperties()->getProperties()) { + // Old logic (2.0): + // * "configure" kind properties that are not read-only + // * "property" kind properties that are not command line (or execparams) + // New logic: + // * all "configure" and "property" kind properties + // Rationale: this is strictly used as context for math statements, + // so it 
doesn't matter how it gets initialized or whether it's + // writable. + if (property->isConfigure() || property->isProperty()) { + properties.push_back(getPropertyValue(property)); + } + } + } + return properties; +} + +redhawk::PropertyMap ComponentDeployment::getCommandLineParameters() const +{ + redhawk::PropertyMap properties; + bool has_LOGGING_CONFIG_URI = false; + bool has_LOG_LEVEL = false; + if (softpkg->getProperties()) { + BOOST_FOREACH(const Property* property, softpkg->getProperties()->getProperties()) { + if (property->isExecParam()) { + if (property->isReadOnly()) { + // NB: Not only can read-only execparams not be overridden, + // they are not included in the command line + continue; + } + } else if (!(property->isProperty() && property->isCommandLine())) { + continue; + } + std::string property_id = property->getID(); + if (property_id == "LOGGING_CONFIG_URI") { + has_LOGGING_CONFIG_URI = true; + } + if (property_id == "LOG_LEVEL") { + has_LOG_LEVEL = true; + } + CF::DataType dt = getPropertyValue(property); + if (!ossie::any::isNull(dt.value)) { + properties.push_back(dt); + } + } + } + if ((not has_LOGGING_CONFIG_URI) and appComponent->isVisible()) { + if (overrides.find("LOGGING_CONFIG_URI") != overrides.end()) { + CF::DataType dt; + dt.id = CORBA::string_dup("LOGGING_CONFIG_URI"); + dt.value <<= overrides["LOGGING_CONFIG_URI"].toString().c_str(); + properties.push_back(dt); + } + } + if ((not has_LOG_LEVEL) and appComponent->isVisible()) { + if (overrides.find("LOG_LEVEL") != overrides.end()) { + CF::DataType dt; + dt.id = CORBA::string_dup("LOG_LEVEL"); + dt.value <<= overrides["LOG_LEVEL"].toString().c_str(); + properties.push_back(dt); + } + } + + // Handle special Docker image property if set in component instantiation + const ComponentProperty* docker = getPropertyOverride("__DOCKER_IMAGE__"); + if (docker) { + properties["__DOCKER_IMAGE__"] = dynamic_cast(docker)->getValue(); + } + + return properties; +} + +redhawk::PropertyMap 
ComponentDeployment::getInitialConfigureProperties() const +{ + redhawk::PropertyMap properties; + if (softpkg->getProperties()) { + BOOST_FOREACH(const Property* property, softpkg->getProperties()->getProperties()) { + if (property->isConfigure() && !property->isReadOnly()) { + CF::DataType dt = getPropertyValue(property); + if (!ossie::any::isNull(dt.value)) { + properties.push_back(dt); + } + } + } + } + return properties; +} + +redhawk::PropertyMap ComponentDeployment::getInitializeProperties() const +{ + redhawk::PropertyMap properties; + if (softpkg->getProperties()) { + BOOST_FOREACH(const Property* property, softpkg->getProperties()->getProperties()) { + if (property->isProperty() ) { + CF::DataType dt = getPropertyValue(property); + if ( ossie::any::isNull(dt.value) ) { + continue; + } + if (!property->isCommandLine()) { + properties.push_back(dt); + } + else { + // allow cmd line params that can have empty values + CORBA::TypeCode_var vtype=dt.value.type(); + if ( vtype->kind() == CORBA::tk_char || + vtype->kind() == CORBA::tk_string ) { + std::string v=ossie::simpleAnyToString(dt.value); + if ( v == "" ) { + properties.push_back(dt); + } + } + } + } + } + } + return properties; +} + +void ComponentDeployment::overrideProperty(const std::string& id, const CORBA::Any& value) +{ + overrides[id] = value; +} + +CF::DataType ComponentDeployment::getPropertyValue(const Property* property) const +{ + if (property->canOverride()) { + // Check for a runtime override first + redhawk::PropertyMap::const_iterator override = overrides.find(property->getID()); + if (override != overrides.end()) { + return *override; + } + // Then, check for an override in the component instantiation + const ComponentProperty* propref = getPropertyOverride(property->getID()); + if (propref) { + return ossie::overridePropertyValue(property, propref); + } + } + // Default to the PRF value + return ossie::convertPropertyToDataType(property); +} + +const ComponentProperty* 
ComponentDeployment::getPropertyOverride(const std::string& id) const +{ + BOOST_FOREACH(const ComponentProperty& override, instantiation->getProperties()) { + if (override.getID() == id) { + return &override; + } + } + return 0; +} + +redhawk::PropertyMap ComponentDeployment::getAffinityOptionsWithAssignment() const +{ + redhawk::PropertyMap options = affinityOptions; + for (redhawk::PropertyMap::const_iterator prop = options.begin(); prop != options.end(); ++prop) { + RH_DEBUG(deploymentLog, "ComponentDeployment getAffinityOptionsWithAssignment ... Affinity Property: directive id:" << prop->getId() << "/" << prop->getValue().toString()); + } + + if (!nicAssignment.empty()) { + RH_DEBUG(deploymentLog, "ComponentDeployment getAffinityOptionsWithAssignment ... NIC AFFINITY: pol/value " << "nic" << "/" << nicAssignment); + options.push_back(redhawk::PropertyType("nic", nicAssignment)); + } + + return options; +} + +void ComponentDeployment::mergeAffinityOptions(const CF::Properties& properties) +{ + // Update existing settings with new ones + affinityOptions.update(properties); +} + +void ComponentDeployment::setResourcePtr(CF::Resource_ptr resource) +{ + this->resource = CF::Resource::_duplicate(resource); +} + +CF::Resource_ptr ComponentDeployment::getResourcePtr() const +{ + return CF::Resource::_duplicate(resource); +} + +void ComponentDeployment::load(CF::FileSystem_ptr fileSystem, CF::LoadableDevice_ptr device) +{ + if (!appComponent) { + throw std::logic_error("deployment is not assigned to an application component"); + } + SoftPkgDeployment::load(appComponent, fileSystem, device); +} + + +redhawk::PropertyMap ComponentDeployment::getLoggingConfiguration() const +{ + std::string logcfg_uri; + std::string debug_level; + + // check for a PRF value + if (softpkg->getProperties()) { + const Property* property = softpkg->getProperties()->getProperty("LOGGING_CONFIG_URI"); + if (property) { + const SimpleProperty* simple = dynamic_cast(property); + if (simple && 
simple->getValue()) { + logcfg_uri = simple->getValue(); + } + } + } + + // Check for a runtime override first + redhawk::PropertyMap::const_iterator override; + if (overrides.contains("LOGGING_CONFIG_URI") ) { + override = overrides.find("LOGGING_CONFIG_URI"); + if (!override->getValue().isNil()) { + logcfg_uri = override->getValue().toString(); + } + } + + if (overrides.contains("LOG_LEVEL") ) { + override = overrides.find("LOG_LEVEL"); + if (!override->getValue().isNil()) { + debug_level = override->getValue().toString(); + } + } + + // Then, check for an override in the component instantiation + const ComponentProperty* propref = getPropertyOverride("LOGGING_CONFIG_URI"); + if (propref) { + const SimplePropertyRef* simple = dynamic_cast(propref); + if (simple) { + logcfg_uri=simple->getValue(); + } + } + + propref = getPropertyOverride("LOG_LEVEL"); + if (propref) { + const SimplePropertyRef* simple = dynamic_cast(propref); + if (simple) { + debug_level=simple->getValue(); + } + } + + redhawk::PropertyMap ret; + + // prefer logging config if provide via sad, else use property setting + if ( loggingConfig.contains("LOGGING_CONFIG_URI") ) { + logcfg_uri = loggingConfig["LOGGING_CONFIG_URI"].toString(); + } + + if ( loggingConfig.contains("LOG_LEVEL") ) { + debug_level = loggingConfig["LOG_LEVEL"].toString(); + } + + if ( !logcfg_uri.empty() ) { + ret["LOGGING_CONFIG_URI"]=logcfg_uri; + } + + if ( !debug_level.empty() ) { + ret["LOG_LEVEL"]=debug_level; + } + + return ret; +} + +redhawk::ApplicationComponent* ComponentDeployment::getApplicationComponent() +{ + return appComponent; +} + +void ComponentDeployment::setApplicationComponent(redhawk::ApplicationComponent* component) +{ + appComponent = component; +} + +void ComponentDeployment::initializeProperties() +{ + redhawk::PropertyMap init_props = getInitializeProperties(); + + CF::Properties partials = ossie::getPartialStructs(init_props); + if (partials.length() > 0) { + std::ostringstream eout; + eout << 
"cannot be initialized due to " << partials.length(); + eout << " structure(s) with a mix of defined and nil values: "; + bool first = true; + for (size_t index = 0; index < partials.length(); ++index) { + if (!first) { + eout << ", "; + } + eout << partials[index].id; + } + throw ComponentError(this, eout.str()); + } + + RH_DEBUG(deploymentLog, "Initializing properties for component " << identifier); + try { + resource->initializeProperties(init_props); + } catch (const CF::PropertySet::InvalidConfiguration& exc) { + throw PropertiesError(this, exc.invalidProperties, "invalid configuration in property initialization"); + } catch (const CF::PropertySet::PartialConfiguration& exc) { + throw PropertiesError(this, exc.invalidProperties, "partial configuration in property initialization"); + } catch (const CF::PropertyEmitter::AlreadyInitialized&) { + // The component should never be initialized twice, at least not by the + // ApplicationFactory + throw ComponentError(this, "already initialized"); + } catch (const CORBA::SystemException& exc) { + throw ComponentError(this, "initializing properties raised " + ossie::corba::describeException(exc)); + } catch (...) 
{ + // Should never happen, but turn anything else into a ComponentError + // just in case + throw ComponentError(this, "unexpected error initializing properties"); + } +} + +void ComponentDeployment::initialize() +{ + if (isConfigurable()) { + initializeProperties(); + } + + RH_TRACE(deploymentLog, "Initializing component " << identifier); + try { + resource->initialize(); + } catch (const CF::LifeCycle::InitializeError& error) { + // Dump the detailed initialization failure to the log + std::ostringstream logmsg; + logmsg << "initialize error"; + for (CORBA::ULong index = 0; index < error.errorMessages.length(); ++index) { + if (index > 0) { + logmsg << ","; + } + logmsg << " '" << error.errorMessages[index] << "'"; + } + throw ComponentError(this, logmsg.str()); + } catch (const CORBA::SystemException& exc) { + throw ComponentError(this, "initialize raised " + ossie::corba::describeException(exc)); + } catch (...) { + // Should never happen, but turn anything else into a ComponentError + // just in case + throw ComponentError(this, "unexpected error in initialize"); + } +} + +void ComponentDeployment::configure() +{ + if (!softpkg->isScaCompliant()) { + // If the component is non-SCA compliant then we don't expect anything beyond this + RH_TRACE(deploymentLog, "Skipping configure of non SCA-compliant component " + << identifier); + return; + } else if (!isResource()) { + RH_TRACE(deploymentLog, "Skipping configure of non-resource component " + << identifier); + return; + } + + if (!instantiation->isNamingService()) { + // Per the old code, we only configure if the instantiation uses naming + // service to locate the component + return; + } + + if (CORBA::is_nil(resource)) { + // NB: I think having a valid CORBA reference is a pre-condition of + // getting to this point in the first place + RH_ERROR(deploymentLog, "Could not get component reference"); + throw redhawk::ComponentError(this, "no CORBA reference"); + } + + redhawk::PropertyMap config_props = 
getInitialConfigureProperties(); + + // Skip empty configure call + if (config_props.empty()) { + return; + } + + // Check and warn for partial structs + CF::Properties partials = ossie::getPartialStructs(config_props); + if (partials.length() > 0) { + std::ostringstream eout; + eout << "Component " << identifier << " contains " << partials.length() + << " structure(s) with a mix of defined and nil values: "; + bool first = true; + for (size_t index = 0; index < partials.length(); ++index) { + if (!first) { + eout << ", "; + } + eout << partials[index].id; + } + eout << ". The behavior for the component is undefined"; + RH_WARN(deploymentLog, eout.str()); + } + + RH_TRACE(deploymentLog, "Configuring component " << identifier); + try { + resource->configure(config_props); + } catch (const CF::PropertySet::InvalidConfiguration& exc) { + throw PropertiesError(this, exc.invalidProperties, "invalid configuration in configure"); + } catch (const CF::PropertySet::PartialConfiguration& exc) { + throw PropertiesError(this, exc.invalidProperties, "partial configuration in configure"); + } catch (const CORBA::SystemException& exc) { + throw ComponentError(this, "configure raised " + ossie::corba::describeException(exc)); + } catch (...) { + throw ComponentError(this, "unexpected error configuring component"); + } +} diff --git a/redhawk/src/control/sdr/dommgr/Deployment.h b/redhawk/src/control/sdr/dommgr/Deployment.h new file mode 100644 index 000000000..9856952ce --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/Deployment.h @@ -0,0 +1,231 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef DEPLOYMENT_H +#define DEPLOYMENT_H + +#include +#include + +#include + +#include +#include "PersistenceStore.h" + +namespace redhawk { + + extern rh_logger::LoggerPtr deploymentLog; + + class ApplicationComponent; + + class UsesDeviceAssignment + { + public: + UsesDeviceAssignment(const ossie::UsesDevice* usesDevice); + + const ossie::UsesDevice* getUsesDevice() const; + + void setAssignedDevice(CF::Device_ptr device); + CF::Device_ptr getAssignedDevice() const; + + private: + const ossie::UsesDevice* usesDevice; + CF::Device_var assignedDevice; + }; + + class UsesDeviceDeployment + { + public: + typedef std::vector AssignmentList; + + ~UsesDeviceDeployment(); + + void addUsesDeviceAssignment(UsesDeviceAssignment* assignment); + UsesDeviceAssignment* getUsesDeviceAssignment(const std::string identifier); + const AssignmentList& getUsesDeviceAssignments(); + + void transferUsesDeviceAssignments(UsesDeviceDeployment& other); + + protected: + AssignmentList assignments; + }; + + class SoftPkgDeployment + { + public: + typedef std::vector DeploymentList; + + SoftPkgDeployment(const ossie::SoftPkg* softpkg, const ossie::SPD::Implementation* implementation=0); + ~SoftPkgDeployment(); + + const ossie::SoftPkg* getSoftPkg() const; + + void setImplementation(const ossie::SPD::Implementation* implementation); + const ossie::SPD::Implementation* getImplementation() const; + + std::string getLocalFile(); + CF::LoadableDevice::LoadType getCodeType() const; + bool isExecutable() const; + + void addDependency(SoftPkgDeployment* dependency); + const 
DeploymentList& getDependencies(); + void clearDependencies(); + + std::vector getDependencyLocalFiles(); + + protected: + void load(redhawk::ApplicationComponent* appComponent, CF::FileSystem_ptr fileSystem, + CF::LoadableDevice_ptr device); + + const ossie::SoftPkg* softpkg; + const ossie::SPD::Implementation* implementation; + DeploymentList dependencies; + }; + + class ComponentDeployment : public SoftPkgDeployment, public UsesDeviceDeployment + { + public: + typedef ossie::ComponentInstantiation::LoggingConfig LoggingConfig; + + ComponentDeployment(const ossie::SoftPkg* softpkg, + const ossie::ComponentInstantiation* instantiation, + const std::string& identifier); + + /** + * @brief Returns the component's runtime identifier + */ + const std::string& getIdentifier() const; + + const ossie::ComponentInstantiation* getInstantiation() const; + + void setContainer(ComponentDeployment* container); + ComponentDeployment* getContainer(); + + bool isResource() const; + bool isConfigurable() const; + + bool isAssemblyController() const; + void setIsAssemblyController(bool state); + + std::string getEntryPoint(); + + redhawk::PropertyMap getOptions(); + + redhawk::PropertyMap getAffinityOptionsWithAssignment() const; + void mergeAffinityOptions(const CF::Properties& affinity); + + redhawk::PropertyMap getLoggingConfiguration() const; + + void setNicAssignment(const std::string& nic); + bool hasNicAssignment() const; + const std::string& getNicAssignment() const; + + void setCpuReservation(float reservation); + bool hasCpuReservation() const; + float getCpuReservation() const; + + /** + * Returns the properties used for evaluating math statements in + * allocation + */ + redhawk::PropertyMap getAllocationContext() const; + + /** + * Returns the properties whose values are passed on the command line + * in execute + */ + redhawk::PropertyMap getCommandLineParameters() const; + + void overrideProperty(const std::string& id, const CORBA::Any& value); + + void 
setAssignedDevice(const boost::shared_ptr& device); + const boost::shared_ptr& getAssignedDevice() const; + + void setResourcePtr(CF::Resource_ptr resource); + CF::Resource_ptr getResourcePtr() const; + + void load(CF::FileSystem_ptr fileSystem, CF::LoadableDevice_ptr device); + + redhawk::PropertyMap getDeviceRequires() const; + void setDeviceRequires( const redhawk::PropertyMap &devRequires ); + + redhawk::ApplicationComponent* getApplicationComponent(); + void setApplicationComponent(redhawk::ApplicationComponent* appComponent); + + /** + * @brief Initializes the deployed component + * @exception ossie::properties_error invalid properties in property + * initialization + * @exception ossie::component_error initialization failed + * + * Handles initialization of new-style 'property' kind properties and + * calls initialize on the component. + */ + void initialize(); + + /** + * @brief Configures legacy properties to initial values + * @exception ossie::properties_error invalid properties + * @exception ossie::component_error configure failed + * + * Handles configuration of legacy 'configure' kind properties. 
+ */ + void configure(); + + protected: + + /** + * Returns the properties used for the initial call to configure() + * during deployment + */ + redhawk::PropertyMap getInitialConfigureProperties() const; + + /** + * Returns the properties used for initializePropertes() during + * deployment + */ + redhawk::PropertyMap getInitializeProperties() const; + + CF::DataType getPropertyValue(const ossie::Property* property) const; + const ossie::ComponentProperty* getPropertyOverride(const std::string& id) const; + + void initializeProperties(); + + const ossie::ComponentInstantiation* instantiation; + const std::string identifier; + bool assemblyController; + + boost::shared_ptr assignedDevice; + ComponentDeployment* container; + CF::Resource_var resource; + + redhawk::ApplicationComponent* appComponent; + + redhawk::PropertyMap overrides; + std::string nicAssignment; + ossie::optional_value cpuReservation; + redhawk::PropertyMap affinityOptions; + redhawk::PropertyMap deviceRequires; + redhawk::PropertyMap loggingConfig; + }; + +} + +#endif // DEPLOYMENT_H diff --git a/redhawk/src/control/sdr/dommgr/DeploymentExceptions.cpp b/redhawk/src/control/sdr/dommgr/DeploymentExceptions.cpp new file mode 100644 index 000000000..d6a10101e --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/DeploymentExceptions.cpp @@ -0,0 +1,160 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include + +#include + +#include "DeploymentExceptions.h" +#include "Deployment.h" + +using namespace redhawk; + +UsesDeviceFailure::UsesDeviceFailure(const ApplicationDeployment&, const std::vector& ids) : + DeploymentError(CF::CF_ENOSPC, "failed to satisfy usesdevice dependencies"), + _context("application"), + _ids(ids) +{ +} + +UsesDeviceFailure::UsesDeviceFailure(const ComponentDeployment* component, const std::vector& ids) : + DeploymentError(CF::CF_ENOSPC, "failed to satisfy usesdevice dependencies"), + _context("component '" + component->getInstantiation()->getID() + "'"), + _ids(ids) +{ +} + +std::string UsesDeviceFailure::message() const +{ + std::ostringstream msg; + msg << "Failed to satisfy 'usesdevice' dependencies "; + bool first = true; + BOOST_FOREACH(const std::string& id, ids()) { + if (!first) { + msg << ", "; + } else { + first = false; + } + msg << id; + } + msg << " for " << context(); + return msg.str(); +} + +std::string ConnectionError::message() const +{ + std::ostringstream msg; + msg << "Unable to make connection '" << identifier() << "': " << what(); + return msg.str(); +} + +ComponentError::ComponentError(const ComponentDeployment* deployment, const std::string& message) : + DeploymentError(CF::CF_EINVAL, message), + _identifier(deployment->getInstantiation()->getID()) +{ + if (deployment->getImplementation()) { + _implementation = deployment->getImplementation()->getID(); + } +} + +std::string ComponentError::message() const +{ + std::ostringstream msg; + msg << "Deploying component " << identifier(); + if (!implementation().empty()) { + msg << " implementation " << implementation(); + } + msg << " failed: " << what(); + return msg.str(); +} + +ExecuteError::ExecuteError(const ComponentDeployment* 
deployment, const std::string& message) : + ComponentError(deployment, message), + _device(deployment->getAssignedDevice()) +{ + // Override the default ComponentError errorNumber; this is simpler than + // having an extra ComponentError constructor + errorNumber(CF::CF_EIO); +} + +std::string ExecuteError::message() const +{ + std::ostringstream msg; + msg << "Executing component " << identifier(); + msg << " implementation " << implementation(); + msg << " failed on device " << device()->identifier; + msg << ": " << what(); + return msg.str(); +} + +std::string PropertiesError::message() const +{ + std::ostringstream msg; + msg << "Component " << identifier(); + msg << " " << what(); + msg << " " << properties(); + return msg.str(); +} + +PlacementFailure::PlacementFailure(const ossie::ComponentInstantiation* instantiation, + const std::string& message) : + DeploymentError(CF::CF_EIO, message), + _name("component " + instantiation->getID()) +{ +} + +PlacementFailure::PlacementFailure(const ossie::SoftwareAssembly::HostCollocation& collocation, + const std::string& message) : + DeploymentError(CF::CF_EIO, message), + _name("host collocation " + collocation.getID() + " (" + collocation.getName() + ")") +{ +} + +std::string PlacementFailure::message() const +{ + std::ostringstream msg; + msg << "Failed to place " << name() << ": " << what(); + return msg.str(); +} + +BadExternalPort::BadExternalPort(const ossie::SoftwareAssembly::Port& port, const std::string& message) : + DeploymentError(CF::CF_EINVAL, message), + _name(port.getExternalName()), + _component(port.componentrefid) +{ +} + +std::string BadExternalPort::message() const +{ + std::ostringstream msg; + msg << "Could not create external port '" << name(); + msg << "' from component '" << component(); + msg << "': " << what(); + return msg.str(); +} + +std::string ComponentTerminated::message() const +{ + std::ostringstream msg; + msg << "Component '" << identifier() << "' terminated abnormally"; + return 
msg.str(); +} diff --git a/redhawk/src/control/sdr/dommgr/DeploymentExceptions.h b/redhawk/src/control/sdr/dommgr/DeploymentExceptions.h new file mode 100644 index 000000000..8e52521aa --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/DeploymentExceptions.h @@ -0,0 +1,273 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef DEPLOYMENTEXCEPTIONS_H +#define DEPLOYMENTEXCEPTIONS_H + +#include +#include +#include + +#include + +#include +#include + +namespace ossie { + class ComponentInstantiation; + class DeviceNode; +} + +namespace redhawk { + + class ApplicationDeployment; + class ComponentDeployment; + + class DeploymentError : public std::runtime_error { + public: + DeploymentError(const std::string& message) : + std::runtime_error(message), + _errorNumber(CF::CF_NOTSET) + { + } + + DeploymentError(CF::ErrorNumberType errorNum, const std::string& message) : + std::runtime_error(message), + _errorNumber(errorNum) + { + } + + virtual std::string message() const + { + return std::string(what()); + } + + CF::ErrorNumberType errorNumber() const + { + return _errorNumber; + } + + protected: + void errorNumber(CF::ErrorNumberType errorNum) + { + _errorNumber = errorNum; + } + + private: + CF::ErrorNumberType _errorNumber; + }; + + class NoExecutableDevices : public DeploymentError { + public: + NoExecutableDevices() : + DeploymentError(CF::CF_ENODEV, "Domain has no executable devices (GPPs) to run components") + { + } + }; + + class UsesDeviceFailure : public DeploymentError { + public: + UsesDeviceFailure(const ApplicationDeployment& application, const std::vector& ids); + UsesDeviceFailure(const ComponentDeployment* component, const std::vector& ids); + + virtual ~UsesDeviceFailure() throw() + { + } + + virtual std::string message() const; + + const std::string& context() const + { + return _context; + } + + const std::vector& ids() const + { + return _ids; + } + + private: + std::string _context; + std::vector _ids; + }; + + class ConnectionError : public DeploymentError { + public: + ConnectionError(const std::string& identifier, const std::string& message) : + DeploymentError(CF::CF_EIO, message), + _identifier(identifier) + { + } + + virtual ~ConnectionError() throw() + { + } + + virtual std::string message() const; + + const std::string& identifier() const + { + 
return _identifier; + } + + private: + const std::string _identifier; + }; + + class PlacementFailure : public DeploymentError { + public: + PlacementFailure(const ossie::ComponentInstantiation* instantiation, const std::string& message); + + PlacementFailure(const ossie::SoftwareAssembly::HostCollocation& collocation, const std::string& message); + + virtual ~PlacementFailure() throw () + { + } + + virtual std::string message() const; + + const std::string& name() const + { + return _name; + } + + private: + std::string _name; + }; + + class ComponentError : public DeploymentError { + public: + ComponentError(const ComponentDeployment* deployment, const std::string& message); + + virtual ~ComponentError() throw () + { + } + + virtual std::string message() const; + + const std::string& identifier() const + { + return _identifier; + } + + const std::string& implementation() const + { + return _implementation; + } + + private: + std::string _identifier; + std::string _implementation; + }; + + class ExecuteError : public ComponentError { + public: + ExecuteError(const ComponentDeployment* deployment, const std::string& message); + + const boost::shared_ptr& device() const + { + return _device; + } + + virtual ~ExecuteError() throw () + { + } + + virtual std::string message() const; + + private: + boost::shared_ptr _device; + }; + + class PropertiesError : public ComponentError { + public: + PropertiesError(const ComponentDeployment* deployment, + const CF::Properties& properties, + const std::string& message) : + ComponentError(deployment, message), + _properties(properties) + { + } + + virtual ~PropertiesError() throw () + { + } + + virtual std::string message() const; + + const redhawk::PropertyMap& properties() const + { + return _properties; + } + + private: + const redhawk::PropertyMap _properties; + }; + + class BadExternalPort : public DeploymentError { + public: + BadExternalPort(const ossie::SoftwareAssembly::Port& port, const std::string& message); + + 
virtual ~BadExternalPort() throw () + { + } + + virtual std::string message() const; + + const std::string& name() const + { + return _name; + } + + const std::string& component() const + { + return _component; + } + + private: + const std::string _name; + const std::string _component; + }; + + class ComponentTerminated : public DeploymentError { + public: + ComponentTerminated(const std::string& identifier) : + DeploymentError("component terminated abnormally"), + _identifier(identifier) + { + } + + virtual ~ComponentTerminated() throw () + { + } + + virtual std::string message() const; + + const std::string& identifier() const + { + return _identifier; + } + + private: + const std::string _identifier; + }; +} + +#endif // DEPLOYMENTEXCEPTIONS_H diff --git a/redhawk/src/control/sdr/dommgr/DomainManager.prf.xml b/redhawk/src/control/sdr/dommgr/DomainManager.prf.xml index 748e58d98..b74f19aba 100644 --- a/redhawk/src/control/sdr/dommgr/DomainManager.prf.xml +++ b/redhawk/src/control/sdr/dommgr/DomainManager.prf.xml @@ -45,12 +45,12 @@ with this program. If not, see http://www.gnu.org/licenses/. - + Enable CORBA persistence for the domain manager. 
true - + diff --git a/redhawk/src/control/sdr/dommgr/DomainManager_EventSupport.cpp b/redhawk/src/control/sdr/dommgr/DomainManager_EventSupport.cpp index c360bf272..96ab2fdf5 100644 --- a/redhawk/src/control/sdr/dommgr/DomainManager_EventSupport.cpp +++ b/redhawk/src/control/sdr/dommgr/DomainManager_EventSupport.cpp @@ -26,6 +26,7 @@ #include #include "DomainManager_EventSupport.h" #include "DomainManager_impl.h" +#include "Application_impl.h" using namespace ossie; @@ -61,7 +62,7 @@ DOM_Subscriber_ptr DomainManager_impl::subscriber( const std::string &cname ) try { ossie::events::EventChannel_var evt_channel; - RH_NL_DEBUG("DomainManager","Requesting Event Channel::" << cname); + RH_DEBUG(this->_baseLog, "Requesting Event Channel::" << cname); if ( _eventChannelMgr->isChannel( cname ) ) { evt_channel = ossie::events::EventChannel::_duplicate( _eventChannelMgr->findChannel( cname ) ); } @@ -77,7 +78,7 @@ DOM_Subscriber_ptr DomainManager_impl::subscriber( const std::string &cname ) } catch(...){ - RH_NL_WARN("DomainManager", "Unable to establish Subscriber interface to event channel:"<< cname); + RH_WARN(this->_baseLog, "Unable to establish Subscriber interface to event channel:"<< cname); } return ret; @@ -92,7 +93,7 @@ DOM_Publisher_ptr DomainManager_impl::publisher( const std::string &cname ) DOM_Publisher_ptr ret; try { - RH_NL_DEBUG("DomainManager","Requesting Event Channel::" << cname); + RH_DEBUG(this->_baseLog,"Requesting Event Channel::" << cname); ossie::events::EventChannel_var evt_channel; if ( _eventChannelMgr->isChannel( cname ) ) { evt_channel = ossie::events::EventChannel::_duplicate( _eventChannelMgr->findChannel( cname ) ); @@ -104,11 +105,11 @@ DOM_Publisher_ptr DomainManager_impl::publisher( const std::string &cname ) throw -1; } - RH_NL_DEBUG("DomainManager","Create DomainManager Publisher Object for:" << cname); + RH_DEBUG(this->_baseLog,"Create DomainManager Publisher Object for:" << cname); ret = DOM_Publisher_ptr( new DOM_Publisher( 
evt_channel ) ); } catch(...){ - RH_NL_WARN("DomainManager","Unable to establish Publisher interface to event channel:" << cname); + RH_WARN(this->_baseLog,"Unable to establish Publisher interface to event channel:" << cname); } return ret; @@ -171,22 +172,34 @@ void DomainManager_impl::sendResourceStateChange( const std::string &source_id, ewriter.sendResourceStateChange( evt ); } +void DomainManager_impl::idmTerminationMessages(const redhawk::events::ComponentTerminationEvent& termMsg) +{ + boost::recursive_mutex::scoped_lock lock(stateAccess); + Application_impl* application = findApplicationById(termMsg.application_id); + if (!application) { + ApplicationTable::iterator iter = _pendingApplications.find(termMsg.application_id); + if (iter != _pendingApplications.end()) { + application = iter->second; + } + } + // Make the device identification as useful as possible by providing the + // label first (if available) and then the unique identifier, which is + // often a UUID + std::string device_label; + DeviceList::iterator device = findDeviceById(termMsg.device_id); + if (device != _registeredDevices.end()) { + device_label = "'" + (*device)->label + "' (" + termMsg.device_id + ")"; + } else { + device_label = termMsg.device_id; + } -void DomainManager_impl::handleIDMChannelMessages( const CORBA::Any &msg ) { - - const StandardEvent::AbnormalComponentTerminationEventType *termMsg; - if ( msg >>= termMsg ) { - LOG_WARN(DomainManager_impl, "Abnormal Component Termination, Reporting Device: " << termMsg->deviceId << " Application/Component " << - termMsg->applicationId << "/" << termMsg->componentId ); - } - -} - - -void DomainManager_impl::idmTerminationMessages( const redhawk::events::ComponentTerminationEvent &termMsg ) { - LOG_WARN(DomainManager_impl, "Abnormal Component Termination, Reporting Device: " << termMsg.device_id << " Application/Component " << - termMsg.application_id << "/" << termMsg.component_id ); + if (application) { + 
application->componentTerminated(termMsg.component_id, device_label); + } else { + RH_WARN(this->_baseLog, "Abnormal Component Termination, Reporting Device: " << device_label + << " Application/Component " << termMsg.application_id << "/" << termMsg.component_id); + } } @@ -196,11 +209,17 @@ void DomainManager_impl::establishDomainManagementChannels( const std::string &d if ( _eventChannelMgr ){ if ( !dburi.empty() ) { - LOG_INFO(DomainManager_impl, "Restoring event channels file:" << dburi); + RH_INFO(this->_baseLog, "Restoring event channel manager state"); + restorePubProxies(dburi); + restoreSubProxies(dburi); + restoreEventChannelRegistrations(dburi); + RH_DEBUG(this->_baseLog, "Completed Restoring Event Channel Manager state"); + RH_INFO(this->_baseLog, "Restoring event channels file:" << dburi); restoreEventChannels(dburi); } + - LOG_TRACE(DomainManager_impl, "Establishing Domain Event Channels"); + RH_TRACE(this->_baseLog, "Establishing Domain Event Channels"); // // Create ODM Channel Publisher @@ -210,14 +229,14 @@ void DomainManager_impl::establishDomainManagementChannels( const std::string &d cname = redhawk::events::ODM_Channel_Spec; _odm_publisher = publisher( cname ); if ( _odm_publisher ) { - LOG_INFO(DomainManager_impl, "Domain Channel: " << cname << " created."); + RH_INFO(this->_baseLog, "Domain Channel: " << cname << " created."); } else { throw -1; } } catch(...) 
{ - LOG_WARN(DomainManager_impl, "ODM Channel create FAILED, Disabling outgoing events"); + RH_WARN(this->_baseLog, "ODM Channel create FAILED, Disabling outgoing events"); } // @@ -227,7 +246,7 @@ void DomainManager_impl::establishDomainManagementChannels( const std::string &d cname = redhawk::events::IDM_Channel_Spec; DOM_Subscriber_ptr idmSubscriber = subscriber( cname ); if ( idmSubscriber ) { - LOG_INFO(DomainManager_impl, "Domain Channel: " << cname << " created."); + RH_INFO(this->_baseLog, "Domain Channel: " << cname << " created."); _idm_reader.setTerminationListener( this, &DomainManager_impl::idmTerminationMessages ); _idm_reader.subscribe( subscriber( cname ) ); } @@ -236,13 +255,13 @@ void DomainManager_impl::establishDomainManagementChannels( const std::string &d } } catch(...) { - LOG_WARN(DomainManager_impl, "IDM Channel create FAILED, Disabling incoming events"); + RH_WARN(this->_baseLog, "IDM Channel create FAILED, Disabling incoming events"); } - LOG_DEBUG(DomainManager_impl, "Completed Creating Domain Event Channels"); + RH_DEBUG(this->_baseLog, "Completed Creating Domain Event Channels"); } else { - LOG_WARN(DomainManager_impl, "No EventChannelManager, Disabling event channel management operations."); + RH_WARN(this->_baseLog, "No EventChannelManager, Disabling event channel management operations."); } } @@ -250,7 +269,7 @@ void DomainManager_impl::establishDomainManagementChannels( const std::string &d void DomainManager_impl::disconnectDomainManagementChannels() { if ( _eventChannelMgr ) { - RH_NL_DEBUG("DomainManager", "Disconnect Domain Mananagment Event Channels. " ); + RH_DEBUG(this->_baseLog, "Disconnect Domain Mananagment Event Channels. " ); try { if ( _odm_publisher ) { _odm_publisher->disconnect(); @@ -258,14 +277,14 @@ void DomainManager_impl::disconnectDomainManagementChannels() { } } catch(...){ - RH_NL_ERROR("DomainManager", "Error disconnecting from ODM Channel. 
"); + RH_ERROR(this->_baseLog, "Error disconnecting from ODM Channel. "); } try { _idm_reader.unsubscribe(); } catch(...){ - RH_NL_ERROR("DomainManager", "Error disconnecting from IDM Channel. "); + RH_ERROR(this->_baseLog, "Error disconnecting from IDM Channel. "); } // reset channel objects used by persistence module @@ -280,17 +299,17 @@ void DomainManager_impl::disconnectDomainManagementChannels() { _eventChannels.clear(); - RH_NL_DEBUG("DomainManager", "Terminating EventChannelManager, but do not destroy channels " ); + RH_DEBUG(this->_baseLog, "Terminating EventChannelManager, but do not destroy channels " ); _eventChannelMgr->terminate( false ); } catch(...){ - RH_NL_ERROR("DomainManager", "Error disconnecting from all event channels. "); + RH_ERROR(this->_baseLog, "Error disconnecting from all event channels. "); } } - RH_NL_DEBUG("DomainManager", "Completed disconnectDomainManagementChannels" ); + RH_DEBUG(this->_baseLog, "Completed disconnectDomainManagementChannels" ); } @@ -298,32 +317,30 @@ void DomainManager_impl::disconnectDomainManagementChannels() { ::ossie::events::EventChannel_ptr DomainManager_impl::createEventChannel (const std::string& cname) { - TRACE_ENTER(DomainManager_impl); - ::ossie::events::EventChannel_var eventChannel = ::ossie::events::EventChannel::_nil(); if ( _eventChannelMgr ) { try { - RH_NL_DEBUG( "DomainManager", "Request event channel: " << cname << " from EventChannelManager" ); + RH_DEBUG(this->_baseLog, "Request event channel: " << cname << " from EventChannelManager" ); eventChannel = _eventChannelMgr->create( cname ); } catch( const CF::EventChannelManager::ServiceUnavailable &) { - RH_NL_ERROR( "DomainManager", "Service unvailable, Unable to create event channel: " << cname ); + RH_ERROR(this->_baseLog, "Service unvailable, Unable to create event channel: " << cname ); } catch( const CF::EventChannelManager::InvalidChannelName &) { - RH_NL_ERROR( "DomainManager", "Invalid Channel Name, Unable to create event channel: " 
<< cname ); + RH_ERROR(this->_baseLog, "Invalid Channel Name, Unable to create event channel: " << cname ); } catch( const CF::EventChannelManager::ChannelAlreadyExists &) { - RH_NL_ERROR( "DomainManager", "Channel already exists, Unable to create event channel: " << cname ); + RH_ERROR(this->_baseLog, "Channel already exists, Unable to create event channel: " << cname ); } catch( const CF::EventChannelManager::OperationNotAllowed &) { - RH_NL_ERROR( "DomainManager", "Operation not allowed, Unable to create event channel: " << cname ); + RH_ERROR(this->_baseLog, "Operation not allowed, Unable to create event channel: " << cname ); } catch( const CF::EventChannelManager::OperationFailed &) { - RH_NL_ERROR( "DomainManager", "Operation failed, Unable to create event channel: " << cname ); + RH_ERROR(this->_baseLog, "Operation failed, Unable to create event channel: " << cname ); } catch( ... ) { - RH_NL_ERROR( "DomainManager", "Unable to create event channel: " << cname ); + RH_ERROR(this->_baseLog, "Unable to create event channel: " << cname ); } } @@ -353,26 +370,24 @@ ::ossie::events::EventChannel_ptr DomainManager_impl::createEventChannel (const try { db.store("EVENT_CHANNELS", _eventChannels); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to event channels"); + RH_ERROR(this->_baseLog, "Error persisting change to event channels"); } - TRACE_EXIT(DomainManager_impl); return eventChannel._retn(); } void DomainManager_impl::destroyEventChannel (const std::string& name) { - TRACE_ENTER(DomainManager_impl); if ( _eventChannelMgr ) { try { - RH_NL_DEBUG("DomainManager", "Releasing channel: " << name ); + RH_DEBUG(this->_baseLog, "Releasing channel: " << name ); _eventChannelMgr->release(name); } catch(...){ - RH_NL_ERROR("DomainManager", "Error trying to release channel: " << name ); + RH_ERROR(this->_baseLog, "Error trying to release channel: " << name ); } } @@ -395,34 +410,32 @@ void 
DomainManager_impl::destroyEventChannel (const std::string& name) try { db.store("EVENT_CHANNELS", _eventChannels); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to event channels"); + RH_ERROR(this->_baseLog, "Error persisting change to event channels"); } - - TRACE_EXIT(DomainManager_impl); } void DomainManager_impl::destroyEventChannels() { if ( _eventChannelMgr ) { - RH_NL_DEBUG("DomainManager", "Delete Domain Mananagment Event Channels. " ); + RH_DEBUG(this->_baseLog, "Delete Domain Mananagment Event Channels. " ); try { - RH_NL_DEBUG("DomainManager", "Disconnect ODM CHANNEL. " ); + RH_DEBUG(this->_baseLog, "Disconnect ODM CHANNEL. " ); if ( _odm_publisher ) { _odm_publisher->disconnect(); _odm_publisher.reset(); } } catch(...){ - RH_NL_ERROR("DomainManager", "Error Destroying ODM Channel. "); + RH_ERROR(this->_baseLog, "Error Destroying ODM Channel. "); } try { - RH_NL_DEBUG("DomainManager", "Disconnect IDM CHANNEL. " ); + RH_DEBUG(this->_baseLog, "Disconnect IDM CHANNEL. " ); _idm_reader.unsubscribe(); } catch(...){ - RH_NL_ERROR("DomainManager", "Error Destroying IDM Channel. "); + RH_ERROR(this->_baseLog, "Error Destroying IDM Channel. "); } try{ @@ -437,18 +450,18 @@ void DomainManager_impl::destroyEventChannels() _eventChannels.clear(); - RH_NL_DEBUG("DomainManager", "Terminating all event channels within EventChannelManager" ); + RH_DEBUG(this->_baseLog, "Terminating all event channels within EventChannelManager" ); //boost::this_thread::sleep( boost::posix_time::milliseconds( 3000 ) ); _eventChannelMgr->terminate(); } catch(...){ - RH_NL_ERROR("DomainManager", "Error terminating all event channels. "); + RH_ERROR(this->_baseLog, "Error terminating all event channels. 
"); } } - RH_NL_DEBUG("DomainManager", "Completed destroyEventChannels" ); + RH_DEBUG(this->_baseLog, "Completed destroyEventChannels" ); } CosEventChannelAdmin::EventChannel_ptr DomainManager_impl::getEventChannel(const std::string &name) { @@ -479,35 +492,35 @@ bool DomainManager_impl::eventChannelExists(const std::string &name) { unsigned int DomainManager_impl::incrementEventChannelConnections(const std::string &EventChannelName) { - LOG_TRACE(DomainManager_impl, "Incrementing Event Channel " << EventChannelName); + RH_TRACE(this->_baseLog, "Incrementing Event Channel " << EventChannelName); std::vector < ossie::EventChannelNode >::iterator _iter = _eventChannels.begin(); while (_iter != _eventChannels.end()) { if ((*_iter).name == EventChannelName) { (*_iter).connectionCount++; - LOG_TRACE(DomainManager_impl, "Event Channel " << EventChannelName<<" count: "<<(*_iter).connectionCount); + RH_TRACE(this->_baseLog, "Event Channel " << EventChannelName<<" count: "<<(*_iter).connectionCount); return (*_iter).connectionCount; } _iter++; } - LOG_TRACE(DomainManager_impl, "Event Channel " << EventChannelName<<" does not exist"); + RH_TRACE(this->_baseLog, "Event Channel " << EventChannelName<<" does not exist"); return 0; } unsigned int DomainManager_impl::decrementEventChannelConnections(const std::string &EventChannelName) { - LOG_TRACE(DomainManager_impl, "Decrementing Event Channel " << EventChannelName); + RH_TRACE(this->_baseLog, "Decrementing Event Channel " << EventChannelName); std::vector < ossie::EventChannelNode >::iterator _iter = _eventChannels.begin(); while (_iter != _eventChannels.end()) { if ((*_iter).name == EventChannelName) { (*_iter).connectionCount--; - LOG_TRACE(DomainManager_impl, "Event Channel " << EventChannelName<<" count: "<<(*_iter).connectionCount); + RH_TRACE(this->_baseLog, "Event Channel " << EventChannelName<<" count: "<<(*_iter).connectionCount); return (*_iter).connectionCount; } _iter++; } - LOG_TRACE(DomainManager_impl, "Event 
Channel " << EventChannelName<<" does not exist"); + RH_TRACE(this->_baseLog, "Event Channel " << EventChannelName<<" does not exist"); return 0; } diff --git a/redhawk/src/control/sdr/dommgr/DomainManager_impl.cpp b/redhawk/src/control/sdr/dommgr/DomainManager_impl.cpp index 50c030aa4..583b263eb 100644 --- a/redhawk/src/control/sdr/dommgr/DomainManager_impl.cpp +++ b/redhawk/src/control/sdr/dommgr/DomainManager_impl.cpp @@ -26,6 +26,9 @@ #include #include #include + +#include + #include #include #include @@ -40,6 +43,7 @@ #include #include #include +#include #include "Application_impl.h" #include "ApplicationFactory_impl.h" @@ -52,10 +56,10 @@ using namespace ossie; using namespace std; -static const ComponentInstantiation* findComponentInstantiation (const std::vector& placements, +static const ComponentInstantiation* findComponentInstantiation (const std::vector& placements, const std::string& identifier) { - for (std::vector::const_iterator iter = placements.begin(); iter != placements.end(); ++iter) { + for (std::vector::const_iterator iter = placements.begin(); iter != placements.end(); ++iter) { const std::vector& instantiations = iter->getInstantiations(); for (std::vector::const_iterator ii = instantiations.begin(); ii != instantiations.end(); ++ii) { if (identifier == ii->getID()) { @@ -66,28 +70,43 @@ static const ComponentInstantiation* findComponentInstantiation (const std::vect return 0; } -PREPARE_CF_LOGGING(DomainManager_impl) +rh_logger::LoggerPtr DomainManager_impl::__logger; // If _overrideDomainName == NULL read the domain name from the DMD file DomainManager_impl::DomainManager_impl (const char* dmdFile, const char* _rootpath, const char* domainName, const char *db_uri, - const char* _logconfig_uri, bool useLogCfgResolver, bool bindToDomain ) : + const char* _logconfig_uri, bool useLogCfgResolver, bool bindToDomain, bool _persistence, int initialLogLevel) : + Logging_impl("DomainManager"), _eventChannelMgr(NULL), _domainName(domainName), 
_domainManagerProfile(dmdFile), _connectionManager(this, this, domainName), _useLogConfigUriResolver(useLogCfgResolver), _strict_spd_validation(false), + _initialLogLevel(initialLogLevel), _bindToDomain(bindToDomain) { - TRACE_ENTER(DomainManager_impl) - LOG_TRACE(DomainManager_impl, "Looking for DomainManager POA"); + std::string std_logconfig_uri; + if (_logconfig_uri) { + std::string _lu(_logconfig_uri); + std::string _rp(_rootpath); + std_logconfig_uri = ossie::logging::ResolveLocalUri(_lu, _rp, _lu); + } + std::string expanded_config = getExpandedLogConfig(std_logconfig_uri); + this->_baseLog->configureLogger(expanded_config, true); + + redhawk::setupParserLoggers(this->_baseLog); + PropertySet_impl::setLogger(this->_baseLog->getChildLogger("PropertySet", "")); + + RH_TRACE(this->_baseLog, "Looking for DomainManager POA"); + _connectionManager.setLogger(this->_baseLog->getChildLogger("ConnectionManager", "")); poa = ossie::corba::RootPOA()->find_POA("DomainManager", 1); // Initialize properties logging_config_prop = (StringProperty*)addProperty(logging_config_uri, "LOGGING_CONFIG_URI", "LOGGING_CONFIG_URI", "readonly", "", "external", "configure"); + if (_logconfig_uri) { logging_config_prop->setValue(_logconfig_uri); } @@ -97,6 +116,16 @@ DomainManager_impl::DomainManager_impl (const char* dmdFile, const char* _rootpa addProperty(redhawk_version, VERSION, "REDHAWK_VERSION", "redhawk_version", "readonly", "", "external", "configure"); + + addProperty(PERSISTENCE, + "PERSISTENCE", + "", + "readonly", + "", + "external", + "property"); + + PERSISTENCE = _persistence; addProperty(client_wait_times, client_wait_times_struct(), @@ -114,44 +143,52 @@ DomainManager_impl::DomainManager_impl (const char* dmdFile, const char* _rootpa PortableServer::ObjectId_var oid = ossie::corba::activatePersistentObject(poa, fileMgr_servant, fileManagerId); fileMgr_servant->_remove_ref(); _fileMgr = fileMgr_servant->_this(); - + 
fileMgr_servant->setLogger(_baseLog->getChildLogger("FileManager", "")); + // Create allocation manager and register with the parent POA _allocationMgr = new AllocationManager_impl (this); std::string allocationManagerId = _domainName + "/AllocationManager"; oid = ossie::corba::activatePersistentObject(poa, _allocationMgr, allocationManagerId); _allocationMgr->_remove_ref(); + _allocationMgr->setLogger(_baseLog->getChildLogger("AllocationManager", "")); + + ossie::proputilsLog = _baseLog->getChildLogger("proputils",""); + fileLog = _baseLog->getChildLogger("File",""); + redhawk::deploymentLog = _baseLog->getChildLogger("Deployment",""); + ossie::connectionSupportLog = _baseLog->getChildLogger("ConnectionSupport",""); // Likewise, create the domain-level connection manager _connectionMgr = new ConnectionManager_impl(this); std::string connectionManagerId = _domainName + "/ConnectionManager"; oid = ossie::corba::activatePersistentObject(poa, _connectionMgr, connectionManagerId); + _connectionMgr->setLogger(_baseLog->getChildLogger("ConnectionManager", "")); // Parse the DMD profile parseDMDProfile(); - LOG_TRACE(DomainManager_impl, "Establishing domain manager naming context") + RH_TRACE(this->_baseLog, "Establishing domain manager naming context") base_context = ossie::corba::stringToName(_domainName); CosNaming::NamingContext_ptr inc = CosNaming::NamingContext::_nil(); try { inc = ossie::corba::InitialNamingContext(); } catch ( ... 
) { - LOG_FATAL(DomainManager_impl, "Unable to find Naming Service; make sure that it is configured correctly and running."); + RH_FATAL(this->_baseLog, "Unable to find Naming Service; make sure that it is configured correctly and running."); _exit(EXIT_FAILURE); } try { rootContext = inc->bind_new_context (base_context); } catch (CosNaming::NamingContext::AlreadyBound&) { - LOG_TRACE(DomainManager_impl, "Naming context already exists"); + RH_TRACE(this->_baseLog, "Naming context already exists"); CORBA::Object_var obj = inc->resolve(base_context); rootContext = CosNaming::NamingContext::_narrow(obj); try { cleanupDomainNamingContext(rootContext); } catch (CORBA::Exception& e) { - LOG_FATAL(DomainManager_impl, "Stopping domain manager; error cleaning up context for domain due to: " << e._name()); + RH_FATAL(this->_baseLog, "Stopping domain manager; error cleaning up context for domain due to: " << e._name()); _exit(EXIT_FAILURE); } } catch ( ... ) { - LOG_FATAL(DomainManager_impl, "Stopping domain manager; error creating new context for domain " << this->_domainName.c_str()) + RH_FATAL(this->_baseLog, "Stopping domain manager; error creating new context for domain " << this->_domainName); _exit(EXIT_FAILURE); } @@ -164,26 +201,33 @@ DomainManager_impl::DomainManager_impl (const char* dmdFile, const char* _rootpa _eventChannelMgr = new EventChannelManager(this, true, true, true); std::string id = _domainName + "/EventChannelManager"; oid = ossie::corba::activatePersistentObject(poa, _eventChannelMgr, id ); + _allocationMgr->setLogger(_baseLog->getChildLogger("EventChannelManager", "")); _eventChannelMgr->_remove_ref(); - LOG_DEBUG(DomainManager_impl, "Started EventChannelManager for the domain."); + RH_DEBUG(this->_baseLog, "Started EventChannelManager for the domain."); // setup IDM and ODM Channels for this domain std::string dburi = (db_uri) ? db_uri : ""; establishDomainManagementChannels( dburi ); } catch ( ... 
) { - LOG_FATAL(DomainManager_impl, "Stopping domain manager; EventChannelManager - EventChannelFactory unavailable" ) + RH_FATAL(this->_baseLog, "Stopping domain manager; EventChannelManager - EventChannelFactory unavailable" ) _exit(EXIT_FAILURE); } - - // \todo lookup and install any services specified in the DMD - LOG_TRACE(DomainManager_impl, "Looking for ApplicationFactories POA"); + RH_TRACE(this->_baseLog, "Looking for ApplicationFactories POA"); appFact_poa = poa->find_POA("ApplicationFactories", 1); - LOG_TRACE(DomainManager_impl, "Done instantiating Domain Manager") - TRACE_EXIT(DomainManager_impl) + RH_TRACE(this->_baseLog, "Done instantiating Domain Manager") +} + +CF::LogLevel DomainManager_impl::log_level() { + int _level = this->_baseLog->getLevel()->toInt(); + if (_level == rh_logger::Level::OFF_INT) + _level = CF::LogLevels::OFF; + else if (_level == rh_logger::Level::ALL_INT) + _level = CF::LogLevels::ALL; + return _level; } void DomainManager_impl::parseDMDProfile() @@ -195,26 +239,26 @@ void DomainManager_impl::parseDMDProfile() dmdStream.close(); } catch (const parser_error& e) { std::string parser_error_line = ossie::retrieveParserErrorLineNumber(e.what()); - LOG_FATAL(DomainManager_impl, "Stopping domain manager; error parsing domain manager configuration DMD: " << _domainManagerProfile << ". " << parser_error_line << " The XML parser returned the following error: " << e.what()) + RH_FATAL(this->_baseLog, "Stopping domain manager; error parsing domain manager configuration DMD: " << _domainManagerProfile << ". 
" << parser_error_line << " The XML parser returned the following error: " << e.what()) _exit(EXIT_FAILURE); } catch (const std::ios_base::failure& e) { - LOG_FATAL(DomainManager_impl, "Stopping domain manager; domain manager configuration DMD: " << _domainManagerProfile << " IO failure exception: " << e.what()) + RH_FATAL(this->_baseLog, "Stopping domain manager; domain manager configuration DMD: " << _domainManagerProfile << " IO failure exception: " << e.what()) _exit(EXIT_FAILURE); } catch( CF::InvalidFileName& _ex ) { - LOG_FATAL(DomainManager_impl, "Stopping domain manager; domain manager configuration DMD: " << _domainManagerProfile << " Invalid file name exception: " << _ex.msg) + RH_FATAL(this->_baseLog, "Stopping domain manager; domain manager configuration DMD: " << _domainManagerProfile << " Invalid file name exception: " << _ex.msg) _exit(EXIT_FAILURE); } catch( CF::FileException& _ex ) { - LOG_FATAL(DomainManager_impl, "Stopping domain manager; domain manager configuration DMD: " << _domainManagerProfile << " File exception: " << _ex.msg) + RH_FATAL(this->_baseLog, "Stopping domain manager; domain manager configuration DMD: " << _domainManagerProfile << " File exception: " << _ex.msg) _exit(EXIT_FAILURE); } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()) _exit(EXIT_FAILURE); } catch ( CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()) _exit(EXIT_FAILURE); } @@ -225,50 +269,50 @@ void DomainManager_impl::parseDMDProfile() void DomainManager_impl::restoreEventChannels(const std::string& _db_uri) { boost::recursive_mutex::scoped_lock lock(stateAccess); - LOG_INFO(DomainManager_impl, "Restoring state from URL " << _db_uri); + RH_INFO(this->_baseLog, "Restoring state from URL " << _db_uri); try { db.open(_db_uri); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error 
loading persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading persistent state: " << e.what()); return; } - LOG_TRACE(DomainManager_impl, "Recovering event channels"); + RH_TRACE(this->_baseLog, "Recovering event channels"); // Recover the event channels std::vector _restoredEventChannels; try { db.fetch("EVENT_CHANNELS", _restoredEventChannels, true); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading event channels persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading event channels persistent state: " << e.what()); _restoredEventChannels.clear(); } for (std::vector::iterator i = _restoredEventChannels.begin(); i != _restoredEventChannels.end(); ++i) { - LOG_TRACE(DomainManager_impl, "Attempting to recover connection to Event Channel " << i->boundName); + RH_TRACE(this->_baseLog, "Attempting to recover connection to Event Channel " << i->boundName); try { if (ossie::corba::objectExists(i->channel)) { - LOG_INFO(DomainManager_impl, "Recovered connection to Event Channel: " << i->boundName); + RH_INFO(this->_baseLog, "Recovered connection to Event Channel: " << i->boundName); // try to restore channel with event channel manager.. 
try { if ( _eventChannelMgr ) _eventChannelMgr->restore( i->channel, i->name, i->boundName); } catch( CF::EventChannelManager::ChannelAlreadyExists){ - LOG_INFO(DomainManager_impl, "EventChannelManager::restore, Channel already exists: " << i->boundName); + RH_INFO(this->_baseLog, "EventChannelManager::restore, Channel already exists: " << i->boundName); } catch( CF::EventChannelManager::InvalidChannelName){ - LOG_WARN(DomainManager_impl, "EventChannelManager::restore, Invalid Channel Name, " << i->boundName); + RH_WARN(this->_baseLog, "EventChannelManager::restore, Invalid Channel Name, " << i->boundName); } catch( CF::EventChannelManager::ServiceUnavailable){ - LOG_WARN(DomainManager_impl, "EventChannelManager::restore, Event Service seems to be down. "); + RH_WARN(this->_baseLog, "EventChannelManager::restore, Event Service seems to be down. "); } catch( CF::EventChannelManager::OperationFailed){ - LOG_WARN(DomainManager_impl, "EventChannelManager::restore, Failed to recover Event Channel: " << i->boundName); + RH_WARN(this->_baseLog, "EventChannelManager::restore, Failed to recover Event Channel: " << i->boundName); } catch( ... 
){ - LOG_WARN(DomainManager_impl, "EventChannelManager, Failed to recover Event Channel: " << i->boundName); + RH_WARN(this->_baseLog, "EventChannelManager, Failed to recover Event Channel: " << i->boundName); } CosEventChannelAdmin::EventChannel_var channel = i->channel; CosNaming::Name_var cosName = ossie::corba::stringToName(i->boundName); @@ -290,239 +334,197 @@ void DomainManager_impl::restoreEventChannels(const std::string& _db_uri) { _eventChannels.push_back(*i); } } else { - LOG_WARN(DomainManager_impl, "Failed to recover Event Channel: " << i->boundName); + RH_WARN(this->_baseLog, "Failed to recover Event Channel: " << i->boundName); } - } CATCH_LOG_WARN(DomainManager_impl, "Unable to restore connection to Event Channel: " << i->boundName); + } CATCH_RH_WARN(this->_baseLog, "Unable to restore connection to Event Channel: " << i->boundName); } try { db.store("EVENT_CHANNELS", _restoredEventChannels); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error restoring event channels from persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error restoring event channels from persistent state: " << e.what()); } } void DomainManager_impl::restoreState(const std::string& _db_uri) { boost::recursive_mutex::scoped_lock lock(stateAccess); - LOG_INFO(DomainManager_impl, "Restoring state from URL " << _db_uri); + RH_INFO(this->_baseLog, "Restoring state from URL " << _db_uri); try { db.open(_db_uri); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading persistent state: " << e.what()); return; } - LOG_DEBUG(DomainManager_impl, "Recovering device manager connections"); + RH_DEBUG(this->_baseLog, "Recovering device manager connections"); // Recover device manager connections and consume the value so that // the persistence store no longer has any device manager stored DeviceManagerList _restoredDeviceManagers; 
try { db.fetch("DEVICE_MANAGERS", _restoredDeviceManagers, true); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading device managers persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading device managers persistent state: " << e.what()); _restoredDeviceManagers.clear(); } for (DeviceManagerList::iterator ii = _restoredDeviceManagers.begin(); ii != _restoredDeviceManagers.end(); ++ii) { - LOG_TRACE(DomainManager_impl, "Attempting to recover connection to Device Manager " << ii->identifier << " " << ii->label); + RH_TRACE(this->_baseLog, "Attempting to recover connection to Device Manager " << ii->identifier << " " << ii->label); try { if (ossie::corba::objectExists(ii->deviceManager)) { - LOG_INFO(DomainManager_impl, "Recovered connection to Device Manager: " << ii->identifier << " " << ii->label); + RH_INFO(this->_baseLog, "Recovered connection to Device Manager: " << ii->identifier << " " << ii->label); addDeviceMgr(ii->deviceManager); mountDeviceMgrFileSys(ii->deviceManager); } else { - LOG_WARN(DomainManager_impl, "Failed to recover connection to Device Manager: " << ii->label << ": device manager servant no longer exists"); + RH_WARN(this->_baseLog, "Failed to recover connection to Device Manager: " << ii->label << ": device manager servant no longer exists"); } - } CATCH_LOG_WARN(DomainManager_impl, "Unable to restore connection to DeviceManager: " << ii->label); + } CATCH_RH_WARN(this->_baseLog, "Unable to restore connection to DeviceManager: " << ii->label); } - LOG_DEBUG(DomainManager_impl, "Recovering device connections"); + RH_DEBUG(this->_baseLog, "Recovering device connections"); DeviceList _restoredDevices; try { db.fetch("DEVICES", _restoredDevices, true); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading devices persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading devices persistent state: " << e.what()); 
_restoredDevices.clear(); } for (DeviceList::iterator iter = _restoredDevices.begin(); iter != _restoredDevices.end(); ++iter) { boost::shared_ptr i = *iter; - LOG_TRACE(DomainManager_impl, "Attempting to recover connection to Device " << i->identifier << " " << i->label); + RH_TRACE(this->_baseLog, "Attempting to recover connection to Device " << i->identifier << " " << i->label); try { if (ossie::corba::objectExists(i->device)) { - LOG_INFO(DomainManager_impl, "Recovered connection to Device: " << i->identifier << " " << i->label); + RH_INFO(this->_baseLog, "Recovered connection to Device: " << i->identifier << " " << i->label); if (ossie::corba::objectExists(i->devMgr.deviceManager)) { storeDeviceInDomainMgr(i->device, i->devMgr.deviceManager); } else { - LOG_WARN(DomainManager_impl, "Failed to recover connection to Device: " << i->identifier << ": device manager no longer exists"); + RH_WARN(this->_baseLog, "Failed to recover connection to Device: " << i->identifier << ": device manager no longer exists"); } } else { - LOG_WARN(DomainManager_impl, "Failed to recover connection to Device: " << i->identifier << ": device servant no longer exists"); + RH_WARN(this->_baseLog, "Failed to recover connection to Device: " << i->identifier << ": device servant no longer exists"); } - } CATCH_LOG_WARN(DomainManager_impl, "Unable to restore connection to Device: " << i->identifier); + } CATCH_RH_WARN(this->_baseLog, "Unable to restore connection to Device: " << i->identifier); } - LOG_DEBUG(DomainManager_impl, "Recovering registered services"); + RH_DEBUG(this->_baseLog, "Recovering registered services"); ServiceList _restoredServices; try { db.fetch("SERVICES", _restoredServices, true); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading services persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading services persistent state: " << e.what()); _restoredServices.clear(); } for (ServiceList::iterator ii = 
_restoredServices.begin(); ii != _restoredServices.end(); ++ii) { - LOG_TRACE(DomainManager_impl, "Attempting to recover connection to Service " << ii->name); + RH_TRACE(this->_baseLog, "Attempting to recover connection to Service " << ii->name); try { if (ossie::corba::objectExists(ii->service)) { - LOG_INFO(DomainManager_impl, "Recovered connection to Service: " << ii->name); + RH_INFO(this->_baseLog, "Recovered connection to Service: " << ii->name); ossie::DeviceManagerList::iterator deviceManager = findDeviceManagerById(ii->deviceManagerId); if (deviceManager != _registeredDeviceManagers.end()) { - storeServiceInDomainMgr(ii->service, deviceManager->deviceManager, ii->name.c_str(), ii->serviceId.c_str()); + storeServiceInDomainMgr(ii->service, deviceManager->deviceManager, ii->name, ii->serviceId); } else { - LOG_WARN(DomainManager_impl, "Failed to recover connection to Service: " << ii->name << ": DeviceManager " + RH_WARN(this->_baseLog, "Failed to recover connection to Service: " << ii->name << ": DeviceManager " << ii->deviceManagerId << " no longer exists"); } } else { - LOG_WARN(DomainManager_impl, "Failed to recover connection to Service: " << ii->name << ": servant no longer exists"); + RH_WARN(this->_baseLog, "Failed to recover connection to Service: " << ii->name << ": servant no longer exists"); } - } CATCH_LOG_WARN(DomainManager_impl, "Unable to restore connection to Service: " << ii->name); + } CATCH_RH_WARN(this->_baseLog, "Unable to restore connection to Service: " << ii->name); } - LOG_DEBUG(DomainManager_impl, "Recovering DCD connections"); + RH_DEBUG(this->_baseLog, "Recovering DCD connections"); ConnectionTable _restoredConnections; try { db.fetch("CONNECTIONS", _restoredConnections, true); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading services persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading services persistent state: " << e.what()); _restoredConnections.clear(); } 
for (ConnectionTable::iterator ii = _restoredConnections.begin(); ii != _restoredConnections.end(); ++ii) { const std::string& deviceManagerId = ii->first; const ConnectionList& connections = ii->second; - LOG_TRACE(DomainManager_impl, "Restoring port connections for DeviceManager " << deviceManagerId); + RH_TRACE(this->_baseLog, "Restoring port connections for DeviceManager " << deviceManagerId); for (ConnectionList::const_iterator jj = connections.begin(); jj != connections.end(); ++jj) { - LOG_TRACE(DomainManager_impl, "Restoring port connection " << jj->identifier); + RH_TRACE(this->_baseLog, "Restoring port connection " << jj->identifier); _connectionManager.restoreConnection(deviceManagerId, *jj); } } - LOG_DEBUG(DomainManager_impl, "Recovering application factories"); + RH_DEBUG(this->_baseLog, "Recovering application factories"); std::set restoredSADs; try { db.fetch("APP_FACTORIES", restoredSADs, true); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error loading application factory persistent state: " << ex.what()); + RH_ERROR(this->_baseLog, "Error loading application factory persistent state: " << ex.what()); restoredSADs.clear(); } for (std::set::iterator profile = restoredSADs.begin(); profile != restoredSADs.end(); ++profile) { - LOG_TRACE(DomainManager_impl, "Attempting to restore application factory " << *profile); + RH_TRACE(this->_baseLog, "Attempting to restore application factory " << *profile); try { - _local_installApplication(profile->c_str()); - LOG_INFO(DomainManager_impl, "Restored application factory " << *profile); - } CATCH_LOG_WARN(DomainManager_impl, "Failed to restore application factory " << *profile); + _local_installApplication(*profile); + RH_INFO(this->_baseLog, "Restored application factory " << *profile); + } CATCH_RH_WARN(this->_baseLog, "Failed to restore application factory " << *profile); } - LOG_DEBUG(DomainManager_impl, "Recovering applications"); + RH_DEBUG(this->_baseLog, 
"Recovering applications"); std::vector _restoredApplications; try { db.fetch("APPLICATIONS", _restoredApplications, true); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading application persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading application persistent state: " << e.what()); _restoredApplications.clear(); } for (std::vector::iterator i = _restoredApplications.begin(); i != _restoredApplications.end(); ++i) { - LOG_TRACE(DomainManager_impl, "Attempting to restore application " << i->name << " " << i->identifier << " " << i->profile); + RH_TRACE(this->_baseLog, "Attempting to restore application " << i->name << " " << i->identifier << " " << i->profile); try { if (ossie::corba::objectExists(i->context)) { - LOG_TRACE(DomainManager_impl, "Creating application " << i->identifier << " " << _domainName << " " << i->contextName); - Application_impl* _application = new Application_impl (i->identifier.c_str(), - i->name.c_str(), i->profile.c_str(), - this, - i->contextName, - i->context, - i->aware_application, - CosNaming::NamingContext::_nil() ); - LOG_TRACE(DomainManager_impl, "Restored " << i->connections.size() << " connections"); - - _application->populateApplication(i->assemblyController, - i->componentDevices, - i->componentRefs, - i->connections, - i->allocationIDs); - - // Restore various state about the components in the waveform - _application->_components = i->components; - - // Add external ports - for (std::map::const_iterator it = i->ports.begin(); - it != i->ports.end(); - ++it) { - _application->addExternalPort(it->first, it->second); - } - - // Add external properties - for (std::map::const_iterator it = i->properties.begin(); - it != i->properties.end(); - ++it) { - std::string extId = it->first; - std::string propId = it->second.property_id; - std::string access = it->second.access; - std::string compId = it->second.component_id; - std::vector comps = i->componentRefs; - 
comps.push_back(i->assemblyController); - for (unsigned int ii = 0; ii < comps.size(); ++ii) { - if (compId == ossie::corba::returnString(comps[ii]->identifier())) { - _application->addExternalProperty(propId, extId, access, comps[ii]); - break; - } - } - } + RH_TRACE(this->_baseLog, "Creating application " << i->identifier << " " << _domainName << " " << i->contextName); + Application_impl* application = _restoreApplication(*i); + Application_impl::Activate(application); + addApplication(application); + application->_remove_ref(); - Application_impl::Activate(_application); - addApplication(_application); - _application->_remove_ref(); - - LOG_INFO(DomainManager_impl, "Restored application " << i->identifier); + RH_INFO(this->_baseLog, "Restored application " << application->getIdentifier()); } - } CATCH_LOG_WARN(DomainManager_impl, "Failed to restore application" << i->identifier); + } CATCH_RH_WARN(this->_baseLog, "Failed to restore application" << i->identifier); } - LOG_DEBUG(DomainManager_impl, "Recovering remote domains"); + RH_DEBUG(this->_baseLog, "Recovering remote domains"); // Recover domain manager connections and consume the value so that // the persistence store no longer has any domain manager stored DomainManagerList _restoredDomainManagers; try { db.fetch("DOMAIN_MANAGERS", _restoredDomainManagers, true); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading domain managers persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading domain managers persistent state: " << e.what()); _restoredDomainManagers.clear(); } for (DomainManagerList::iterator ii = _restoredDomainManagers.begin(); ii != _restoredDomainManagers.end(); ++ii) { - LOG_TRACE(DomainManager_impl, "Attempting to recover connection to domain '" << ii->name << "'"); + RH_TRACE(this->_baseLog, "Attempting to recover connection to domain '" << ii->name << "'"); try { if (ossie::corba::objectExists(ii->domainManager)) { - 
LOG_INFO(DomainManager_impl, "Recovered connection to domain '" << ii->name << "'"); + RH_INFO(this->_baseLog, "Recovered connection to domain '" << ii->name << "'"); addDomainMgr(ii->domainManager); } else { - LOG_WARN(DomainManager_impl, "Failed to recover connection to domain '" << ii->name << "': domain manager object no longer exists"); + RH_WARN(this->_baseLog, "Failed to recover connection to domain '" << ii->name << "': domain manager object no longer exists"); } - } CATCH_LOG_WARN(DomainManager_impl, "Unable to restore connection to domain '" << ii->name << "'"); + } CATCH_RH_WARN(this->_baseLog, "Unable to restore connection to domain '" << ii->name << "'"); } - LOG_DEBUG(DomainManager_impl, "Recovering allocation manager"); + RH_DEBUG(this->_baseLog, "Recovering allocation manager"); ossie::AllocationTable _restoredLocalAllocations; try { db.fetch("LOCAL_ALLOCATIONS", _restoredLocalAllocations); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading local allocation persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading local allocation persistent state: " << e.what()); } _allocationMgr->restoreLocalAllocations(_restoredLocalAllocations); @@ -530,11 +532,11 @@ void DomainManager_impl::restoreState(const std::string& _db_uri) { try { db.fetch("REMOTE_ALLOCATIONS", _restoredRemoteAllocations); } catch (const ossie::PersistenceException& e) { - LOG_ERROR(DomainManager_impl, "Error loading remote allocations persistent state: " << e.what()); + RH_ERROR(this->_baseLog, "Error loading remote allocations persistent state: " << e.what()); } _allocationMgr->restoreRemoteAllocations(_restoredRemoteAllocations); - LOG_DEBUG(DomainManager_impl, "Done restoring state from URL " << _db_uri); + RH_DEBUG(this->_baseLog, "Done restoring state from URL " << _db_uri); } void DomainManager_impl::cleanupDomainNamingContext (CosNaming::NamingContext_ptr nc) @@ -552,7 +554,7 @@ void 
DomainManager_impl::cleanupDomainNamingContext (CosNaming::NamingContext_pt ossie::corba::overrideBlockingCall(obj); if (obj->_non_existent()) { // If it no longer exists, unbind it - LOG_TRACE(DomainManager_impl, "Unbinding naming context which no longer exists; this is probably due to an omniNames bug") + RH_TRACE(this->_baseLog, "Unbinding naming context which no longer exists; this is probably due to an omniNames bug") nc->unbind(bl[ii].binding_name); } else { cleanupDomainNamingContext(new_context); @@ -594,7 +596,7 @@ void DomainManager_impl::releaseAllApplications() try { db.store("APP_FACTORIES", _installedApplications); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to installed applications"); + RH_ERROR(this->_baseLog, "Error persisting change to installed applications"); } std::vector releasedApps; @@ -608,7 +610,7 @@ void DomainManager_impl::releaseAllApplications() try { (*app)->releaseObject(); } catch ( ... ) { - LOG_TRACE(DomainManager_impl, "Error releasing application " << ossie::corba::returnString((*app)->name())); + RH_TRACE(this->_baseLog, "Error releasing application " << (*app)->getName()); } (*app)->_remove_ref(); } @@ -626,15 +628,15 @@ void DomainManager_impl::shutdownAllDeviceManagers() devMgr->shutdown(); } else { - LOG_WARN(DomainManager_impl, "A DeviceManager: " << dm_label << ", reference is empty, possible lingering process after shutdown completes."); + RH_WARN(this->_baseLog, "A DeviceManager: " << dm_label << ", reference is empty, possible lingering process after shutdown completes."); } } catch ( std::exception& ex ) { - LOG_ERROR(DomainManager_impl, "The following standard exception occurred: "<_baseLog, "The following standard exception occurred: "<_baseLog, "DeviceManager: " << dm_label << " failed shutdown, continuing shutdown process. 
Exception: " << e._name()); if (lenRegDevMgr == _registeredDeviceManagers.size()) { _registeredDeviceManagers.erase(_registeredDeviceManagers.begin()); } @@ -642,7 +644,7 @@ void DomainManager_impl::shutdownAllDeviceManagers() if (lenRegDevMgr == _registeredDeviceManagers.size()) { _registeredDeviceManagers.erase(_registeredDeviceManagers.begin()); } - LOG_ERROR(DomainManager_impl, "Error shutting down Device Manager: " << dm_label << ", an unknown exception occurred." ); + RH_ERROR(this->_baseLog, "Error shutting down Device Manager: " << dm_label << ", an unknown exception occurred." ); } } } @@ -650,9 +652,7 @@ void DomainManager_impl::shutdownAllDeviceManagers() void DomainManager_impl::shutdown (int signal) { - TRACE_ENTER(DomainManager_impl) - - RH_NL_DEBUG("DomainManager", "Shutdown: signal=" << signal); + RH_DEBUG(this->_baseLog, "Shutdown: signal=" << signal); if (!ossie::corba::isPersistenceEnabled() || (ossie::corba::isPersistenceEnabled()and (signal == SIGINT)) ) { releaseAllApplications(); @@ -724,14 +724,11 @@ DomainManager_impl::~DomainManager_impl () // the servant is being deleted after the ORB // has shutdown in the nodebooter - TRACE_ENTER(DomainManager_impl) /************************************************** * Save current state for configuration recall * * this is not supported by this version * **************************************************/ - - TRACE_EXIT(DomainManager_impl) } uint32_t DomainManager_impl::getManagerWaitTime() { @@ -748,9 +745,6 @@ char * DomainManager_impl::identifier (void) throw (CORBA::SystemException) { - TRACE_ENTER(DomainManager_impl) - - TRACE_EXIT(DomainManager_impl) return CORBA::string_dup(_identifier.c_str()); } @@ -758,9 +752,6 @@ char * DomainManager_impl::name (void) throw (CORBA::SystemException) { - TRACE_ENTER(DomainManager_impl) - - TRACE_EXIT(DomainManager_impl) return CORBA::string_dup(this->_domainName.c_str()); } @@ -769,9 +760,6 @@ char * DomainManager_impl::domainManagerProfile (void) throw 
(CORBA::SystemException) { - TRACE_ENTER(DomainManager_impl) - - TRACE_EXIT(DomainManager_impl) return CORBA::string_dup(_domainManagerProfile.c_str()); } @@ -779,9 +767,6 @@ throw (CORBA::SystemException) CF::AllocationManager_ptr DomainManager_impl::allocationMgr (void) throw (CORBA:: SystemException) { - TRACE_ENTER(DomainManager_impl) - - TRACE_EXIT(DomainManager_impl) return _allocationMgr->_this(); } @@ -789,10 +774,7 @@ CF::AllocationManager_ptr DomainManager_impl::allocationMgr (void) throw (CORBA: CF::EventChannelManager_ptr DomainManager_impl::eventChannelMgr (void) throw (CORBA:: SystemException) { - TRACE_ENTER(DomainManager_impl) - - TRACE_EXIT(DomainManager_impl) - return _eventChannelMgr->_this(); + return _eventChannelMgr->_this(); } namespace { @@ -815,17 +797,12 @@ namespace { CF::FileManager_ptr DomainManager_impl::fileMgr (void) throw (CORBA:: SystemException) { - TRACE_ENTER(DomainManager_impl) - - TRACE_EXIT(DomainManager_impl) return CF::FileManager::_duplicate(_fileMgr); } CF::ConnectionManager_ptr DomainManager_impl::connectionMgr (void) throw (CORBA::SystemException) { - TRACE_ENTER(DomainManager_impl); - TRACE_EXIT(DomainManager_impl); return _connectionMgr->_this(); } @@ -834,13 +811,11 @@ CF::DomainManager::ApplicationFactorySequence * DomainManager_impl::applicationFactories (void) throw (CORBA:: SystemException) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); CF::DomainManager::ApplicationFactorySequence_var result = new CF::DomainManager::ApplicationFactorySequence(); map_to_sequence(result, _applicationFactories); - TRACE_EXIT(DomainManager_impl) return result._retn(); } @@ -848,13 +823,11 @@ DomainManager_impl::applicationFactories (void) throw (CORBA:: CF::DomainManager::ApplicationSequence * DomainManager_impl::applications (void) throw (CORBA::SystemException) { - TRACE_ENTER(DomainManager_impl); boost::recursive_mutex::scoped_lock lock(stateAccess); 
CF::DomainManager::ApplicationSequence_var result = new CF::DomainManager::ApplicationSequence(); map_to_sequence(result, _applications); - TRACE_EXIT(DomainManager_impl) return result._retn(); } @@ -862,7 +835,6 @@ DomainManager_impl::applications (void) throw (CORBA::SystemException) CF::DomainManager::DeviceManagerSequence * DomainManager_impl::deviceManagers (void) throw (CORBA::SystemException) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); CF::DomainManager::DeviceManagerSequence_var result = new CF::DomainManager::DeviceManagerSequence(); @@ -872,7 +844,6 @@ DomainManager_impl::deviceManagers (void) throw (CORBA::SystemException) result[ii] = CF::DeviceManager::_duplicate(deviceManager->deviceManager); } - TRACE_EXIT(DomainManager_impl) return result._retn(); } @@ -880,7 +851,6 @@ DomainManager_impl::deviceManagers (void) throw (CORBA::SystemException) CF::DomainManager::DomainManagerSequence * DomainManager_impl::remoteDomainManagers (void) throw (CORBA::SystemException) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); CF::DomainManager::DomainManagerSequence_var result = new CF::DomainManager::DomainManagerSequence(); @@ -890,7 +860,6 @@ DomainManager_impl::remoteDomainManagers (void) throw (CORBA::SystemException) result[ii] = CF::DomainManager::_duplicate(dmnMgr->domainManager); } - TRACE_EXIT(DomainManager_impl) return result._retn(); } @@ -941,23 +910,23 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, DomainManagerList::iterator node = findDomainManagerById(identifier); if (node != _registeredDomainManagers.end()) { if (!ossie::corba::objectExists(node->domainManager)) { - LOG_WARN(DomainManager_impl, "Cleaning up registration of dead device manager: " << identifier); + RH_WARN(this->_baseLog, "Cleaning up registration of dead device manager: " << identifier); //catastrophicUnregisterDeviceManager(node); - LOG_TRACE(DomainManager_impl, "Continuing with 
registration of new device manager: " << identifier); + RH_TRACE(this->_baseLog, "Continuing with registration of new device manager: " << identifier); } else { bool DomMgr_alive = false; try { CORBA::String_var identifier = domainMgr->identifier(); DomMgr_alive = true; } catch ( ... ) { - LOG_WARN(DomainManager_impl, "Cleaning up registration of dead device manager: " << identifier); + RH_WARN(this->_baseLog, "Cleaning up registration of dead device manager: " << identifier); //catastrophicUnregisterDeviceManager(node); - LOG_TRACE(DomainManager_impl, "Continuing with registration of new device manager: " << identifier); + RH_TRACE(this->_baseLog, "Continuing with registration of new device manager: " << identifier); } if (DomMgr_alive) { ostringstream eout; eout << "Attempt re-register existing domain manager: " << identifier; - LOG_ERROR(DomainManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw CF::DomainManager::RegisterError(CF::CF_NOTSET, eout.str().c_str()); } } @@ -976,18 +945,18 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, { boost::mutex::scoped_lock lock(interfaceAccess); if (CORBA::is_nil(domainMgr)) { - LOG_ERROR(DomainManager_impl, "Cannot unregister nil DomainManager"); + RH_ERROR(this->_baseLog, "Cannot unregister nil DomainManager"); throw CF::InvalidObjectReference("Cannot unregister nil DomainManager"); } DomainManagerList::iterator domMgrIter = findDomainManagerByObject(domainMgr); if (domMgrIter == _registeredDomainManagers.end()) { - LOG_WARN(DomainManager_impl, "Ignoring attempt to unregister domain manager that was not registered with this domain"); + RH_WARN(this->_baseLog, "Ignoring attempt to unregister domain manager that was not registered with this domain"); return; } if (!domMgrIter->domainManager->_is_equivalent(domainMgr)) { - LOG_TRACE(DomainManager_impl, "Ignoring attempt to unregister domain manager with same identifier but different object"); + RH_TRACE(this->_baseLog, "Ignoring attempt to 
unregister domain manager with same identifier but different object"); return; } @@ -995,7 +964,7 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, try { db.store("DOMAIN_MANAGERS", _registeredDomainManagers); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to domain managers"); + RH_ERROR(this->_baseLog, "Error persisting change to domain managers"); } } @@ -1011,12 +980,11 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, CF::InvalidProfile, std::string identifier = ossie::corba::returnString(deviceMgr->identifier()); std::string label = ossie::corba::returnString(deviceMgr->label()); - sendAddEvent( _identifier.c_str(), identifier, label, deviceMgr, StandardEvent::DEVICE_MANAGER ); + sendAddEvent(_identifier, identifier, label, deviceMgr, StandardEvent::DEVICE_MANAGER); } void DomainManager_impl::_local_registerDeviceManager (CF::DeviceManager_ptr deviceMgr) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); if (CORBA::is_nil (deviceMgr)) { @@ -1033,72 +1001,84 @@ void DomainManager_impl::_local_registerDeviceManager (CF::DeviceManager_ptr dev DeviceManagerList::iterator node = findDeviceManagerById(identifier); if (node != _registeredDeviceManagers.end()) { if (!ossie::corba::objectExists(node->deviceManager)) { - LOG_WARN(DomainManager_impl, "Cleaning up registration of dead device manager: " << identifier); + RH_WARN(this->_baseLog, "Cleaning up registration of dead device manager: " << identifier); catastrophicUnregisterDeviceManager(node); - LOG_TRACE(DomainManager_impl, "Continuing with registration of new device manager: " << identifier); + RH_TRACE(this->_baseLog, "Continuing with registration of new device manager: " << identifier); } else { bool DevMgr_alive = false; try { CORBA::String_var identifier = deviceMgr->identifier(); DevMgr_alive = true; } catch ( ... 
) { - LOG_WARN(DomainManager_impl, "Cleaning up registration of dead device manager: " << identifier); + RH_WARN(this->_baseLog, "Cleaning up registration of dead device manager: " << identifier); catastrophicUnregisterDeviceManager(node); - LOG_TRACE(DomainManager_impl, "Continuing with registration of new device manager: " << identifier); + RH_TRACE(this->_baseLog, "Continuing with registration of new device manager: " << identifier); } if (DevMgr_alive) { ostringstream eout; eout << "Attempt re-register existing device manager: " << identifier; - LOG_ERROR(DomainManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw CF::DomainManager::RegisterError(CF::CF_NOTSET, eout.str().c_str()); } } } addDeviceMgr (deviceMgr); - + node = findDeviceManagerById(identifier); CORBA::String_var devMgrLabel; try { mountDeviceMgrFileSys(deviceMgr); - LOG_TRACE(DomainManager_impl, "Getting connections from DeviceManager DCD"); - DeviceManagerConfiguration dcdParser; + RH_TRACE(this->_baseLog, "Getting connections from DeviceManager DCD"); + DeviceManagerConfiguration *dcdParser; + DeviceManagerConfiguration _dcdParser; try { CF::FileSystem_var devMgrFileSys = deviceMgr->fileSys(); CORBA::String_var profile = deviceMgr->deviceConfigurationProfile(); - File_stream dcd(devMgrFileSys, profile); - dcdParser.load(dcd); - dcd.close(); + if ( node != _registeredDeviceManagers.end() ) { + if ( node->dcd.isLoaded() == false ) { + File_stream dcd(devMgrFileSys, profile); + node->dcd.load(dcd); + dcd.close(); + } + dcdParser = &(node->dcd); + } + else { + File_stream dcd(devMgrFileSys, profile); + _dcdParser.load(dcd); + dcdParser = &_dcdParser; + dcd.close(); + } } catch ( ossie::parser_error& e ) { std::string parser_error_line = ossie::retrieveParserErrorLineNumber(e.what()); - LOG_ERROR(DomainManager_impl, "Failed device manager registration; error parsing device manager DCD: " << deviceMgr->deviceConfigurationProfile() << ". 
" << parser_error_line << " The XML parser returned the following error: " << e.what()) + RH_ERROR(this->_baseLog, "Failed device manager registration; error parsing device manager DCD: " << deviceMgr->deviceConfigurationProfile() << ". " << parser_error_line << " The XML parser returned the following error: " << e.what()) throw(CF::DomainManager::RegisterError()); } - const std::vector& connections = dcdParser.getConnections(); + const std::vector& connections = dcdParser->getConnections(); for (size_t ii = 0; ii < connections.size(); ++ii) { try { - _connectionManager.addConnection(dcdParser.getName(), connections[ii]); + _connectionManager.addConnection(dcdParser->getName(), connections[ii]); } catch (const ossie::InvalidConnection& ex) { - LOG_ERROR(DomainManager_impl, "Ignoring unresolvable connection: " << ex.what()); + RH_ERROR(this->_baseLog, "Ignoring unresolvable connection: " << ex.what()); } } try { db.store("CONNECTIONS", _connectionManager.getConnections()); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device manager connections"); + RH_ERROR(this->_baseLog, "Error persisting change to device manager connections"); } } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()) removeDeviceManager(findDeviceManagerById(identifier)); throw CF::DomainManager::RegisterError(CF::CF_NOTSET, eout.str().c_str()); } catch ( CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()) removeDeviceManager(findDeviceManagerById(identifier)); throw CF::DomainManager::RegisterError(CF::CF_NOTSET, eout.str().c_str()); } catch ( ... 
) { @@ -1107,42 +1087,49 @@ void DomainManager_impl::_local_registerDeviceManager (CF::DeviceManager_ptr dev throw CF::DomainManager::RegisterError(CF::CF_NOTSET, "Unexpected error registering device manager"); } - LOG_TRACE(DomainManager_impl, "Leaving DomainManager::registerDeviceManager"); + RH_TRACE(this->_baseLog, "Leaving DomainManager::registerDeviceManager"); } void DomainManager_impl::addDeviceMgr (CF::DeviceManager_ptr deviceMgr) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); if (!deviceMgrIsRegistered (deviceMgr)) { - LOG_TRACE(DomainManager_impl, "Adding DeviceManager ref to list") + RH_TRACE(this->_baseLog, "Adding DeviceManager ref to list") DeviceManagerNode tmp_devMgr; CORBA::String_var identifier = deviceMgr->identifier(); CORBA::String_var label = deviceMgr->label(); tmp_devMgr.deviceManager = CF::DeviceManager::_duplicate(deviceMgr); tmp_devMgr.identifier = static_cast(identifier); tmp_devMgr.label = static_cast(label); + try{ + // preload DCD + CF::FileSystem_var devMgrFileSys = tmp_devMgr.deviceManager->fileSys(); + CORBA::String_var profile = tmp_devMgr.deviceManager->deviceConfigurationProfile(); + File_stream dcd(devMgrFileSys, profile); + tmp_devMgr.dcd.load(dcd); + dcd.close(); + } + catch(...){ + } _registeredDeviceManagers.push_back(tmp_devMgr); try { db.store("DEVICE_MANAGERS", _registeredDeviceManagers); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device managers"); + RH_ERROR(this->_baseLog, "Error persisting change to device managers"); } } - TRACE_EXIT(DomainManager_impl) } void DomainManager_impl::addDomainMgr (CF::DomainManager_ptr domainMgr) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); if (!domainMgrIsRegistered (domainMgr)) { - LOG_TRACE(DomainManager_impl, "Adding DomainManager ref to list") + RH_TRACE(this->_baseLog, "Adding DomainManager ref to list") DomainManagerNode 
node; node.domainManager = CF::DomainManager::_duplicate(domainMgr); node.identifier = ossie::corba::returnString(domainMgr->identifier()); @@ -1152,10 +1139,9 @@ void DomainManager_impl::addDomainMgr (CF::DomainManager_ptr domainMgr) try { db.store("DOMAIN_MANAGERS", _registeredDomainManagers); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to domain managers"); + RH_ERROR(this->_baseLog, "Error persisting change to domain managers"); } } - TRACE_EXIT(DomainManager_impl) } void DomainManager_impl::mountDeviceMgrFileSys (CF::DeviceManager_ptr deviceMgr) { @@ -1164,7 +1150,7 @@ void DomainManager_impl::mountDeviceMgrFileSys (CF::DeviceManager_ptr deviceMgr) // mount filesystem under "//" devMgrLabel = deviceMgr->label(); mountPoint += devMgrLabel; - LOG_TRACE(DomainManager_impl, "Mounting DeviceManager FileSystem at " << mountPoint) + RH_TRACE(this->_baseLog, "Mounting DeviceManager FileSystem at " << mountPoint) CF::FileSystem_var devMgrFileSys = deviceMgr->fileSys(); _fileMgr->mount(mountPoint.c_str(), devMgrFileSys); @@ -1178,18 +1164,18 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, boost::mutex::scoped_lock lock(interfaceAccess); if (CORBA::is_nil(deviceMgr)) { - LOG_ERROR(DomainManager_impl, "Cannot unregister nil DeviceManager"); + RH_ERROR(this->_baseLog, "Cannot unregister nil DeviceManager"); throw CF::InvalidObjectReference("Cannot unregister nil DeviceManager"); } DeviceManagerList::iterator devMgrIter = findDeviceManagerByObject(deviceMgr); if (devMgrIter == _registeredDeviceManagers.end()) { - LOG_WARN(DomainManager_impl, "Ignoring attempt to unregister device manager that was not registered with this domain"); + RH_WARN(this->_baseLog, "Ignoring attempt to unregister device manager that was not registered with this domain"); return; } if (!devMgrIter->deviceManager->_is_equivalent(deviceMgr)) { - LOG_TRACE(DomainManager_impl, "Ignoring attempt to unregister device manager with 
same identifier but different object"); + RH_TRACE(this->_baseLog, "Ignoring attempt to unregister device manager with same identifier but different object"); return; } @@ -1199,14 +1185,13 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, try { _local_unregisterDeviceManager(devMgrIter); - } CATCH_LOG_ERROR(DomainManager_impl, "Exception unregistering device manager"); + } CATCH_RH_ERROR(this->_baseLog, "Exception unregistering device manager"); - sendRemoveEvent( _identifier.c_str(), identifier.c_str(), label.c_str(), StandardEvent::DEVICE_MANAGER ); + sendRemoveEvent(_identifier, identifier, label, StandardEvent::DEVICE_MANAGER); } ossie::DeviceManagerList::iterator DomainManager_impl::_local_unregisterDeviceManager (ossie::DeviceManagerList::iterator deviceManager) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); // For this function, an exception will be raised only when the Device Manager cannot be found @@ -1218,7 +1203,7 @@ ossie::DeviceManagerList::iterator DomainManager_impl::_local_unregisterDeviceMa try { db.store("CONNECTIONS", _connectionManager.getConnections()); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device manager connections"); + RH_ERROR(this->_baseLog, "Error persisting change to device manager connections"); } // Release all devices and services, which may break connections from other @@ -1229,10 +1214,10 @@ ossie::DeviceManagerList::iterator DomainManager_impl::_local_unregisterDeviceMa // Unmount all DeviceManager FileSystems from the FileManager; currently just // the DeviceManager's root file system. 
string mountPoint = "/" + deviceManager->label; - LOG_TRACE(DomainManager_impl, "Unmounting DeviceManager FileSystem at " << mountPoint) + RH_TRACE(this->_baseLog, "Unmounting DeviceManager FileSystem at " << mountPoint) try { _fileMgr->unmount(mountPoint.c_str()); - } CATCH_LOG_ERROR(DomainManager_impl, "Unmounting DeviceManager FileSystem failed during unregistration"); + } CATCH_RH_ERROR(this->_baseLog, "Unmounting DeviceManager FileSystem failed during unregistration"); // Remove the DeviceManager from the domain. deviceManager = removeDeviceManager(deviceManager); @@ -1245,45 +1230,38 @@ ossie::DeviceManagerList::iterator DomainManager_impl::_local_unregisterDeviceMa // 3. The sourceName shall be the label attribute of the unregistered DeviceManager. // 4. The sourceCategory shall be DEVICE_MANAGER. - TRACE_EXIT(DomainManager_impl); return deviceManager; } void DomainManager_impl::removeDeviceManagerDevices (const std::string& deviceManagerId) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); // Unregister all devices for the DeviceManager for (DeviceList::iterator device = _registeredDevices.begin(); device != _registeredDevices.end(); ) { if ((*device)->devMgr.identifier == deviceManagerId) { - LOG_TRACE(DomainManager_impl, "Unregistering device " << (*device)->label << " " << (*device)->identifier); + RH_TRACE(this->_baseLog, "Unregistering device " << (*device)->label << " " << (*device)->identifier); device = _local_unregisterDevice(device); } else { ++device; } } - - TRACE_EXIT(DomainManager_impl) } void DomainManager_impl::removeDeviceManagerServices (const std::string& deviceManagerId) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); for (ServiceList::iterator service = _registeredServices.begin(); service != _registeredServices.end(); ) { if (service->deviceManagerId == deviceManagerId) { - LOG_TRACE(DomainManager_impl, "Unregistering service " << service->name); + 
RH_TRACE(this->_baseLog, "Unregistering service " << service->name); service = _local_unregisterService(service); } else { ++service; } } - - TRACE_EXIT(DomainManager_impl) } @@ -1291,18 +1269,16 @@ ossie::DeviceManagerList::iterator DomainManager_impl::removeDeviceManager (ossi { // This function must work regardless of whether the DeviceManager object // is reachable or not. Therefore, no CORBA calls can be made. - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); deviceManager = _registeredDeviceManagers.erase(deviceManager); try { db.store("DEVICE_MANAGERS", _registeredDeviceManagers); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device managers"); + RH_ERROR(this->_baseLog, "Error persisting change to device managers"); } return deviceManager; - TRACE_EXIT(DomainManager_impl) } void @@ -1319,13 +1295,12 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, CF::InvalidProfile, std::string identifier = ossie::corba::returnString(registeringDevice->identifier()); std::string label = ossie::corba::returnString(registeringDevice->label()); - sendAddEvent( _identifier.c_str(), identifier, label, registeringDevice, StandardEvent::DEVICE ); + sendAddEvent(_identifier, identifier, label, registeringDevice, StandardEvent::DEVICE); } void DomainManager_impl::_local_registerDevice (CF::Device_ptr registeringDevice, CF::DeviceManager_ptr registeredDeviceMgr) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); //Verify they are not a nil reference @@ -1341,20 +1316,20 @@ void DomainManager_impl::_local_registerDevice (CF::Device_ptr registeringDevice } std::string devId = ossie::corba::returnString(registeringDevice->identifier()); - LOG_TRACE(DomainManager_impl, "Registering Device " << devId); + RH_TRACE(this->_baseLog, "Registering Device " << devId); DeviceList::iterator deviceNode = findDeviceById(devId); if (deviceNode != 
_registeredDevices.end()) { - LOG_TRACE(DomainManager_impl, "Device <" << devId << "> already registered; checking existence"); + RH_TRACE(this->_baseLog, "Device <" << devId << "> already registered; checking existence"); if (!ossie::corba::objectExists((*deviceNode)->device)) { - LOG_WARN(DomainManager_impl, "Cleaning up registration; device <" << devId << "> is registered and no longer exists"); + RH_WARN(this->_baseLog, "Cleaning up registration; device <" << devId << "> is registered and no longer exists"); try { _local_unregisterDevice(deviceNode); - } CATCH_LOG_WARN(DomainManager_impl, "_local_unregisterDevice failed"); + } CATCH_RH_WARN(this->_baseLog, "_local_unregisterDevice failed"); } else { ostringstream eout; eout << "Attempt re-register existing device : " << devId; - LOG_ERROR(DomainManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw CF::DomainManager::RegisterError(CF::CF_NOTSET, eout.str().c_str()); } } @@ -1364,15 +1339,15 @@ void DomainManager_impl::_local_registerDevice (CF::Device_ptr registeringDevice //Check the DCD for connections and establish them try { - LOG_TRACE(DomainManager_impl, "Establishing Service Connections"); + RH_TRACE(this->_baseLog, "Establishing Service Connections"); _connectionManager.deviceRegistered(devId.c_str()); try { db.store("CONNECTIONS", _connectionManager.getConnections()); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device manager connections"); + RH_ERROR(this->_baseLog, "Error persisting change to device manager connections"); } } catch ( ... ) { - LOG_ERROR(DomainManager_impl, "Service connections could not be established") + RH_ERROR(this->_baseLog, "Service connections could not be established") } //NOTE: This function only checks that the input references are valid and the device manager is registered. 
@@ -1386,27 +1361,25 @@ void DomainManager_impl::_local_registerDevice (CF::Device_ptr registeringDevice void DomainManager_impl::storeDeviceInDomainMgr (CF::Device_ptr registeringDevice, CF::DeviceManager_ptr registeredDeviceMgr) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); //check if device is already registered if (deviceIsRegistered (registeringDevice)) { - LOG_TRACE(DomainManager_impl, "Device already registered, refusing to store into domain manager") - TRACE_EXIT(DomainManager_impl) + RH_TRACE(this->_baseLog, "Device already registered, refusing to store into domain manager") return; } std::string devMgrId; try { devMgrId = ossie::corba::returnString(registeredDeviceMgr->identifier()); - } CATCH_LOG_ERROR(DomainManager_impl, "DeviceManager is unreachable during device registrations") + } CATCH_RH_ERROR(this->_baseLog, "DeviceManager is unreachable during device registrations") if (devMgrId.empty()){ return; } DeviceManagerList::iterator pDevMgr = findDeviceManagerById(devMgrId); if (pDevMgr == _registeredDeviceManagers.end()) { - LOG_ERROR(DomainManager_impl, "Device Manager for Device is not registered") + RH_ERROR(this->_baseLog, "Device Manager for Device is not registered") return; } @@ -1419,8 +1392,8 @@ void DomainManager_impl::storeDeviceInDomainMgr (CF::Device_ptr registeringDevic newDeviceNode->softwareProfile = ossie::corba::returnString(registeringDevice->softwareProfile()); newDeviceNode->identifier = ossie::corba::returnString(registeringDevice->identifier()); newDeviceNode->implementationId = ossie::corba::returnString(registeredDeviceMgr->getComponentImplementationId(newDeviceNode->identifier.c_str())); - newDeviceNode->isLoadable = registeringDevice->_is_a(CF::LoadableDevice::_PD_repoId); - newDeviceNode->isExecutable = registeringDevice->_is_a(CF::ExecutableDevice::_PD_repoId); + newDeviceNode->loadableDevice = ossie::corba::_narrowSafe(registeringDevice); + newDeviceNode->executableDevice = 
ossie::corba::_narrowSafe(registeringDevice); parseDeviceProfile(*newDeviceNode); @@ -1429,32 +1402,30 @@ void DomainManager_impl::storeDeviceInDomainMgr (CF::Device_ptr registeringDevic try { db.store("DEVICES", _registeredDevices); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device managers"); + RH_ERROR(this->_baseLog, "Error persisting change to device managers"); } - - TRACE_EXIT(DomainManager_impl) } //This function adds the registeringService and its name to the DomainMgr. //if the service already exists it does nothing void -DomainManager_impl::storeServiceInDomainMgr (CORBA::Object_ptr registeringService, CF::DeviceManager_ptr registeredDeviceMgr, const char* name, const char * serviceId) +DomainManager_impl::storeServiceInDomainMgr (CORBA::Object_ptr registeringService, CF::DeviceManager_ptr registeredDeviceMgr, const std::string& name, const std::string& serviceId) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); // If a service is already registered with that name, do nothing. if (serviceIsRegistered(name)) { - LOG_INFO(DomainManager_impl, "Ignoring duplicate registration of service " << name); - TRACE_EXIT(DomainManager_impl) - return; + RH_INFO(this->_baseLog, "Ignoring duplicate registration of service " << name); + TRACE_EXIT(DomainManager_impl); + std::string message = "A service is already registered with the name '" + std::string(name) + "'"; + throw CF::DomainManager::RegisterError(CF::CF_EEXIST, message.c_str()); } // The service needs to be added to the list. 
std::string devMgrId; try { devMgrId = ossie::corba::returnString(registeredDeviceMgr->identifier()); - } CATCH_LOG_ERROR(DomainManager_impl, "DeviceManager is unreachable during service registration") + } CATCH_RH_ERROR(this->_baseLog, "DeviceManager is unreachable during service registration") ServiceNode node; node.service = CORBA::Object::_duplicate(registeringService); @@ -1467,7 +1438,7 @@ DomainManager_impl::storeServiceInDomainMgr (CORBA::Object_ptr registeringServic try { db.store("SERVICES", _registeredServices); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to services"); + RH_ERROR(this->_baseLog, "Error persisting change to services"); } } @@ -1486,12 +1457,11 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, try { _local_unregisterDevice(deviceNode); - } CATCH_RETHROW_LOG_ERROR(DomainManager_impl, "Error unregistering a Device") // rethrow for calling object's benefit + } CATCH_RETHROW_RH_ERROR(this->_baseLog, "Error unregistering a Device") // rethrow for calling object's benefit } ossie::DeviceList::iterator DomainManager_impl::_local_unregisterDevice (ossie::DeviceList::iterator deviceNode) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); // Reset the last successful device pointer for deployments @@ -1504,7 +1474,7 @@ ossie::DeviceList::iterator DomainManager_impl::_local_unregisterDevice (ossie:: try { db.store("CONNECTIONS", _connectionManager.getConnections()); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device manager connections"); + RH_ERROR(this->_baseLog, "Error persisting change to device manager connections"); } try { @@ -1520,17 +1490,16 @@ ossie::DeviceList::iterator DomainManager_impl::_local_unregisterDevice (ossie:: } for (std::vector::iterator iter = releasedApps.begin(); iter != releasedApps.end(); ++iter) { - LOG_WARN(DomainManager_impl, "Releasing 
application that depends on registered device " << (*deviceNode)->identifier); + RH_WARN(this->_baseLog, "Releasing application that depends on registered device " << (*deviceNode)->identifier); Application_impl* app = *iter; app->releaseObject(); app->_remove_ref(); } - } CATCH_LOG_ERROR(DomainManager_impl, "Releasing stale applications from stale device failed"); + } CATCH_RH_ERROR(this->_baseLog, "Releasing stale applications from stale device failed"); // Sent event here (as opposed to unregisterDevice), so we see the event on regular // unregisterDevice calls, and on cleanup (deviceManager shutdown, catastropic cleanup, etc.) - sendRemoveEvent( _identifier.c_str(), (*deviceNode)->identifier.c_str(), (*deviceNode)->label.c_str(), - StandardEvent::DEVICE ); + sendRemoveEvent(_identifier, (*deviceNode)->identifier, (*deviceNode)->label, StandardEvent::DEVICE); // Remove the device from the internal list. deviceNode = _registeredDevices.erase(deviceNode); @@ -1539,9 +1508,8 @@ ossie::DeviceList::iterator DomainManager_impl::_local_unregisterDevice (ossie:: try { db.store("DEVICES", _registeredDevices); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to devices"); + RH_ERROR(this->_baseLog, "Error persisting change to devices"); } - TRACE_EXIT(DomainManager_impl); return deviceNode; } @@ -1549,30 +1517,26 @@ ossie::DeviceList::iterator DomainManager_impl::_local_unregisterDevice (ossie:: //This function returns TRUE if the input registeredDevice is contained in the _registeredDevices bool DomainManager_impl::deviceIsRegistered (CF::Device_ptr registeredDevice) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); DeviceList::iterator device = findDeviceByObject(registeredDevice); - TRACE_EXIT(DomainManager_impl); return (device !=_registeredDevices.end()); } -bool DomainManager_impl::serviceIsRegistered (const char* serviceName) +bool 
DomainManager_impl::serviceIsRegistered (const std::string& serviceName) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); ossie::ServiceList::iterator service = findServiceByName(serviceName); - TRACE_EXIT(DomainManager_impl); return (service != _registeredServices.end()); } void DomainManager_impl::closeAllOpenFileHandles() { - LOG_INFO(DomainManager_impl, "Received SIGUSR1. Closing all open file handles"); + RH_INFO(this->_baseLog, "Received SIGUSR1. Closing all open file handles"); this->fileMgr_servant->closeAllFiles(); } @@ -1580,24 +1544,20 @@ void DomainManager_impl::closeAllOpenFileHandles() //This function returns TRUE if the input registeredDeviceMgr is contained in the _deviceManagers list attribute bool DomainManager_impl::deviceMgrIsRegistered (CF::DeviceManager_ptr registeredDeviceMgr) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); DeviceManagerList::iterator node = findDeviceManagerByObject(registeredDeviceMgr); - TRACE_EXIT(DomainManager_impl); return (node != _registeredDeviceManagers.end());; } //This function returns TRUE if the input registeredDomainMgr is contained in the _domainManagers list attribute bool DomainManager_impl::domainMgrIsRegistered (CF::DomainManager_ptr registeredDomainMgr) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); DomainManagerList::iterator node = findDomainManagerByObject(registeredDomainMgr); - TRACE_EXIT(DomainManager_impl); return (node != _registeredDomainManagers.end());; } @@ -1607,21 +1567,11 @@ CF::Application_ptr DomainManager_impl::createApplication(const char* profileFil const CF::Properties& initConfiguration, const CF::DeviceAssignmentSequence& deviceAssignments) { - TRACE_ENTER(DomainManager_impl); - - try { - ApplicationFactory_impl factory(profileFileName, _domainName, this); - CF::Application_var application = factory.create(name, initConfiguration, deviceAssignments); - 
TRACE_EXIT(DomainManager_impl); - return application._retn(); - } - catch( CF::DomainManager::ApplicationInstallationError& ex ) { - LOG_ERROR(DomainManager_impl, "Create application FAILED, reason: " << ex.msg ); - // rethrow as invalid profile... - throw CF::InvalidProfile(); - } + ApplicationFactory_impl factory(profileFileName, _domainName, this); + factory.setLogger(_baseLog->getChildLogger("ApplicationFactory", "")); + CF::Application_var application = factory.create(name, initConfiguration, deviceAssignments); - TRACE_EXIT(DomainManager_impl); + return application._retn(); } @@ -1640,40 +1590,41 @@ throw (CORBA::SystemException, CF::DomainManager::ApplicationInstallationError, CF::DomainManager::ApplicationAlreadyInstalled) { - boost::mutex::scoped_lock lock(interfaceAccess); - _local_installApplication(profileFileName); + boost::mutex::scoped_lock lock(interfaceAccess); + _local_installApplication(profileFileName); - ApplicationFactoryTable::iterator appFact = _applicationFactories.find(profileFileName); - if (appFact != _applicationFactories.end()) { - std::string identifier = ossie::corba::returnString(appFact->first.c_str()); - std::string name = ossie::corba::returnString(appFact->second->name()); - CF::ApplicationFactory_var appFactRef = appFact->second->_this(); - sendAddEvent( _identifier.c_str(), identifier, name, appFactRef, StandardEvent::APPLICATION_FACTORY ); - } + ApplicationFactoryTable::iterator appFact = _applicationFactories.find(profileFileName); + if (appFact != _applicationFactories.end()) { + const std::string& identifier = appFact->first; + const std::string& name = appFact->second->getName(); + CF::ApplicationFactory_var appFactRef = appFact->second->_this(); + sendAddEvent(_identifier, identifier, name, appFactRef, StandardEvent::APPLICATION_FACTORY); + } } -void DomainManager_impl::_local_installApplication (const char* profileFileName) +void DomainManager_impl::_local_installApplication (const std::string& profileFileName) { - 
TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); // NOTE: the name attribute is the name of the App Factory // that is currently installed because it is the installed factory that // provides the value of profileFileName - try { - // check the profile ends with .sad.xml, warn if it doesn't - if ((strstr (profileFileName, ".sad.xml")) == NULL) - { LOG_WARN(DomainManager_impl, "File " << profileFileName << " should end with .sad.xml."); } + // check the profile ends with .sad.xml, warn if it doesn't + if (profileFileName.find(".sad.xml") == std::string::npos) { + RH_WARN(this->_baseLog, "File " << profileFileName << " should end with .sad.xml."); + } - LOG_TRACE(DomainManager_impl, "installApplication: Createing new AppFac"); + try { + RH_TRACE(this->_baseLog, "installApplication: Createing new AppFac"); ApplicationFactory_impl* appFact = new ApplicationFactory_impl(profileFileName, this->_domainName, this); - const std::string appFactoryId = appFact->getID(); + appFact->setLogger(_baseLog->getChildLogger("ApplicationFactory", "")); + const std::string& appFactoryId = appFact->getIdentifier(); // Check if application factory already exists for this profile - LOG_TRACE(DomainManager_impl, "Installing application ID " << appFactoryId); + RH_TRACE(this->_baseLog, "Installing application ID " << appFactoryId); if (_applicationFactories.count(appFactoryId)) { - LOG_INFO(DomainManager_impl, "Application " << appFact->getName() << " with id " << appFact->getID() + RH_INFO(this->_baseLog, "Application " << appFact->getName() << " with id " << appFactoryId << " already installed (Application Factory already exists)"); delete appFact; appFact=NULL; @@ -1688,35 +1639,33 @@ void DomainManager_impl::_local_installApplication (const char* profileFileName) try { db.store("APP_FACTORIES", _installedApplications); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device managers"); + 
RH_ERROR(this->_baseLog, "Error persisting change to device managers"); } } catch (CF::FileException& ex) { - LOG_ERROR(DomainManager_impl, "installApplication: While validating the SAD profile: " << ex.msg); + RH_ERROR(this->_baseLog, "installApplication: While validating the SAD profile: " << ex.msg); throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, ex.msg); } catch( CF::InvalidFileName& ex ) { - LOG_ERROR(DomainManager_impl, "installApplication: Invalid file name: " << profileFileName); + RH_ERROR(this->_baseLog, "installApplication: Invalid file name: " << profileFileName); throw CF::DomainManager::ApplicationInstallationError (CF::CF_EBADF, "Invalid file name"); } catch (CF::DomainManager::ApplicationInstallationError& e) { - LOG_TRACE(DomainManager_impl, "rethrowing ApplicationInstallationError" << e.msg); + RH_TRACE(this->_baseLog, "rethrowing ApplicationInstallationError" << e.msg); throw; } catch (CF::DomainManager::ApplicationAlreadyInstalled &) { throw; } catch ( std::exception& ex ) { std::ostringstream eout; eout << "The following standard exception occurred: "<_baseLog, eout.str()) throw CF::DomainManager::ApplicationInstallationError (CF::CF_NOTSET, eout.str().c_str()); } catch ( const CORBA::Exception& ex ) { std::ostringstream eout; eout << "The following CORBA exception occurred: "<_baseLog, eout.str()) throw CF::DomainManager::ApplicationInstallationError (CF::CF_NOTSET, eout.str().c_str()); } catch (...) 
{ - LOG_ERROR(DomainManager_impl, "unexpected exception occurred while installing application"); + RH_ERROR(this->_baseLog, "unexpected exception occurred while installing application"); throw CF::DomainManager::ApplicationInstallationError (CF::CF_NOTSET, "unknown exception"); } - - TRACE_EXIT(DomainManager_impl) } @@ -1725,7 +1674,7 @@ DomainManager_impl::uninstallApplication (const char* applicationId) throw (CORBA::SystemException, CF::DomainManager::InvalidIdentifier, CF::DomainManager::ApplicationUninstallationError) { - LOG_INFO(DomainManager_impl, "Uninstalling application " << applicationId); + RH_INFO(this->_baseLog, "Uninstalling application " << applicationId); boost::mutex::scoped_lock lock(interfaceAccess); std::string appFactory_id; @@ -1734,7 +1683,7 @@ throw (CORBA::SystemException, CF::DomainManager::InvalidIdentifier, ApplicationFactoryTable::iterator appFact = _applicationFactories.find(applicationId); if (appFact != _applicationFactories.end()) { appFactory_id = appFact->first; - appFactory_name = ossie::corba::returnString(appFact->second->name()); + appFactory_name = appFact->second->getName(); } _local_uninstallApplication(applicationId); @@ -1752,28 +1701,26 @@ throw (CORBA::SystemException, CF::DomainManager::InvalidIdentifier, // sourceCategory = APPLICATION_FACTORY // StandardEvent enumeration - sendRemoveEvent(_identifier.c_str(), appFactory_id.c_str(), appFactory_name.c_str(),StandardEvent::APPLICATION_FACTORY); + sendRemoveEvent(_identifier, appFactory_id, appFactory_name, StandardEvent::APPLICATION_FACTORY); } void DomainManager_impl::_local_uninstallApplication (const char* applicationId) { - TRACE_ENTER(DomainManager_impl); boost::recursive_mutex::scoped_lock lock(stateAccess); // Find the factory in the table, which also validates the identifier ApplicationFactoryTable::iterator appFact = _applicationFactories.find(applicationId); if (appFact == _applicationFactories.end()) { - TRACE_EXIT(DomainManager_impl); throw 
CF::DomainManager::InvalidIdentifier(); } // Update the persistence database - const std::string sad_file = ossie::corba::returnString(appFact->second->softwareProfile()); + const std::string sad_file = appFact->second->getSoftwareProfile(); _installedApplications.erase(sad_file); try { db.store("APP_FACTORIES", _installedApplications); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to installed applications"); + RH_ERROR(this->_baseLog, "Error persisting change to installed applications"); } // Deactivate the servant @@ -1783,105 +1730,90 @@ void DomainManager_impl::_local_uninstallApplication (const char* applicationId) // Remove the servant from the list and clean up the reference _applicationFactories.erase(appFact); appFact->second->_remove_ref(); - - TRACE_EXIT(DomainManager_impl); } void DomainManager_impl::updateLocalAllocations(const ossie::AllocationTable& localAllocations) { - TRACE_ENTER(DomainManager_impl) try { db.store("LOCAL_ALLOCATIONS", localAllocations); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting local allocations"); + RH_ERROR(this->_baseLog, "Error persisting local allocations"); } - TRACE_EXIT(DomainManager_impl) } void DomainManager_impl::updateRemoteAllocations(const ossie::RemoteAllocationTable& remoteAllocations) { - TRACE_ENTER(DomainManager_impl) try { db.store("REMOTE_ALLOCATIONS", remoteAllocations); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting remote allocation"); + RH_ERROR(this->_baseLog, "Error persisting remote allocation"); } - TRACE_EXIT(DomainManager_impl) } void DomainManager_impl::addApplication(Application_impl* new_app) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); - LOG_TRACE(DomainManager_impl, "Attempting to add application to AppSeq with id: " << ossie::corba::returnString(new_app->identifier())); - + const 
std::string& identifier = new_app->getIdentifier(); + RH_TRACE(this->_baseLog, "Attempting to add application to AppSeq with id: " << identifier); try { - const std::string identifier = ossie::corba::returnString(new_app->identifier()); _applications[identifier] = new_app; new_app->_add_ref(); - ApplicationNode appNode; - appNode.name = ossie::corba::returnString(new_app->name()); - appNode.identifier = ossie::corba::returnString(new_app->identifier()); - appNode.profile = ossie::corba::returnString(new_app->profile()); - appNode.contextName = new_app->_waveformContextName; - appNode.context = CosNaming::NamingContext::_duplicate(new_app->_waveformContext); - appNode.componentDevices = new_app->_componentDevices; - appNode.components = new_app->_components; - appNode.assemblyController = CF::Resource::_duplicate(new_app->assemblyController); - appNode.componentRefs.clear(); - for (unsigned int i = 0; i < new_app->_appStartSeq.size(); ++i) { - appNode.componentRefs.push_back(CF::Resource::_duplicate(new_app->_appStartSeq[i])); - } - appNode.allocationIDs = new_app->_allocationIDs; - appNode.connections = new_app->_connections; - appNode.aware_application = new_app->_isAware; - appNode.ports = new_app->_ports; - // Adds external properties - for (std::map::const_iterator it = new_app->_properties.begin(); - it != new_app->_properties.end(); - ++it) { - std::string extId = it->first; - externalPropertyType extProp; - extProp.property_id = it->second.id; - extProp.component_id = ossie::corba::returnString(it->second.component->identifier()); - extProp.access = it->second.access; - appNode.properties.insert(std::pair(extId, extProp)); - } - - _runningApplications.push_back(appNode); - // Make any deferred connections dependent on this application _connectionManager.applicationRegistered(identifier); - try { - db.store("APPLICATIONS", _runningApplications); - } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to 
device managers"); - } - + _persistApplication(new_app); } catch (...) { - const std::string identifier = ossie::corba::returnString(new_app->identifier()); ostringstream eout; eout << "Could not add new application to AppSeq; "; eout << " application id: " << identifier << "; "; eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; - LOG_ERROR(DomainManager_impl, eout.str()); + RH_ERROR(this->_baseLog, eout.str()); throw CF::DomainManager::ApplicationInstallationError(CF::CF_EFAULT, eout.str().c_str()); } +} + +void DomainManager_impl::addPendingApplication(Application_impl* application) +{ + boost::recursive_mutex::scoped_lock lock(stateAccess); + application->_add_ref(); + _pendingApplications[application->getIdentifier()] = application; +} + +void DomainManager_impl::cancelPendingApplication(Application_impl* application) +{ + boost::recursive_mutex::scoped_lock lock(stateAccess); + ApplicationTable::iterator iter = _pendingApplications.find(application->getIdentifier()); + if (iter == _pendingApplications.end()) { + RH_ERROR(this->_baseLog, "No pending application '" << application->getIdentifier() << "' to cancel"); + } else { + _pendingApplications.erase(iter); + application->_remove_ref(); + } +} - TRACE_EXIT(DomainManager_impl) +void DomainManager_impl::completePendingApplication(Application_impl* application) +{ + boost::recursive_mutex::scoped_lock lock(stateAccess); + ApplicationTable::iterator iter = _pendingApplications.find(application->getIdentifier()); + if (iter == _pendingApplications.end()) { + RH_ERROR(this->_baseLog, "No pending application '" << application->getIdentifier() + << "' to move to active state"); + } else { + addApplication(application); + application->_remove_ref(); + _pendingApplications.erase(iter); + } } void DomainManager_impl::removeApplication(std::string app_id) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); - LOG_TRACE(DomainManager_impl, "Attempting 
to remove application from AppSeq with id: " << app_id) + RH_TRACE(this->_baseLog, "Attempting to remove application from AppSeq with id: " << app_id) ApplicationTable::iterator app = _applications.find(app_id); // remove the application from the sequence @@ -1903,7 +1835,7 @@ DomainManager_impl::removeApplication(std::string app_id) try { db.store("APPLICATIONS", _runningApplications); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device managers"); + RH_ERROR(this->_baseLog, "Error persisting change to device managers"); } } else { ostringstream eout; @@ -1912,8 +1844,6 @@ DomainManager_impl::removeApplication(std::string app_id) eout << " error occurred near line:" <<__LINE__ << " in file:" << __FILE__ << ";"; throw CF::DomainManager::ApplicationUninstallationError(CF::CF_EFAULT, eout.str().c_str()); } - - TRACE_EXIT(DomainManager_impl) } @@ -1929,17 +1859,13 @@ throw (CORBA::SystemException, CF::InvalidObjectReference, CF::DomainManager::AlreadyConnected) { boost::mutex::scoped_lock lock(interfaceAccess); - std::string tmp_id = ossie::corba::returnString(registeringId); - std::string eventchannel_name = ossie::corba::returnString(eventChannelName); - _local_registerWithEventChannel(registeringObject, tmp_id, eventchannel_name); + _local_registerWithEventChannel(registeringObject, registeringId, eventChannelName); } void DomainManager_impl::_local_registerWithEventChannel (CORBA::Object_ptr registeringObject, - std::string ®isteringId, - std::string &eventChannelName) + const std::string& registeringId, + const std::string& eventChannelName) { - TRACE_ENTER(DomainManager_impl) - if (registeredConsumers.find(registeringId) != registeredConsumers.end()) { throw CF::DomainManager::AlreadyConnected (); } @@ -1977,16 +1903,12 @@ throw (CORBA::SystemException, CF::DomainManager::InvalidEventChannelName, CF::DomainManager::NotConnected) { boost::mutex::scoped_lock lock(interfaceAccess); - std::string 
tmp_id = ossie::corba::returnString(unregisteringId); - std::string eventchannel_name = ossie::corba::returnString(eventChannelName); - _local_unregisterFromEventChannel(tmp_id, eventchannel_name); + _local_unregisterFromEventChannel(unregisteringId, eventChannelName); } -void DomainManager_impl::_local_unregisterFromEventChannel (std::string &unregisteringId, - std::string &eventChannelName) +void DomainManager_impl::_local_unregisterFromEventChannel (const std::string& unregisteringId, + const std::string& eventChannelName) { - TRACE_ENTER(DomainManager_impl) - if (!eventChannelExists(eventChannelName)) { throw CF::DomainManager::InvalidEventChannelName (); } @@ -2014,40 +1936,52 @@ void DomainManager_impl::registerService (CORBA::Object_ptr registeringService, void DomainManager_impl::_local_registerService (CORBA::Object_ptr registeringService, CF::DeviceManager_ptr registeredDeviceMgr, const char* name) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); // Verify that the service and DeviceManager are not nil references if (CORBA::is_nil(registeringService)) { - LOG_ERROR(DomainManager_impl, "Ignoring registration of nil Service"); + RH_ERROR(this->_baseLog, "Ignoring registration of nil Service"); throw CF::InvalidObjectReference("Registering Service is nil"); } else if (CORBA::is_nil(registeredDeviceMgr)) { - LOG_ERROR(DomainManager_impl, "Ignoring registration of Service with nil DeviceManager"); + RH_ERROR(this->_baseLog, "Ignoring registration of Service with nil DeviceManager"); throw CF::InvalidObjectReference("Registered DeviceManager is nil"); } // Verify that DeviceManager is registered if (!deviceMgrIsRegistered(registeredDeviceMgr)) { - LOG_WARN(DomainManager_impl, "Ignoring attempt to register a Service from an unregistered DeviceManager"); + RH_WARN(this->_baseLog, "Ignoring attempt to register a Service from an unregistered DeviceManager"); throw CF::DomainManager::DeviceManagerNotRegistered(); } - 
DeviceManagerConfiguration _DCDParser; + DeviceManagerConfiguration *_DCDParser=0; + DeviceManagerConfiguration _dcdParser; bool readDCD = true; std::string serviceId(""); std::string inputUsageName(name); try { CF::FileSystem_var devMgrFileSys = registeredDeviceMgr->fileSys(); CORBA::String_var profile = registeredDeviceMgr->deviceConfigurationProfile(); - File_stream _dcd(devMgrFileSys, profile); - _DCDParser.load(_dcd); - _dcd.close(); + DeviceManagerList::iterator node = findDeviceManagerByObject(registeredDeviceMgr); + if ( node != _registeredDeviceManagers.end() ) { + if ( node->dcd.isLoaded() == false ) { + File_stream dcd(devMgrFileSys, profile); + node->dcd.load(dcd); + dcd.close(); + } + _DCDParser = &(node->dcd); + } + else { + File_stream dcd(devMgrFileSys, profile); + _dcdParser.load(dcd); + _DCDParser = &_dcdParser; + dcd.close(); + } } catch ( ... ) { readDCD = false; } if (readDCD) { - const std::vector& componentPlacements = _DCDParser.getComponentPlacements(); + const std::vector& componentPlacements = _DCDParser->getComponentPlacements(); bool foundId = false; for (unsigned int i = 0; i < componentPlacements.size(); i++) { for (unsigned int j=0; jrebind(service_name, registeringService); + } catch (...) { + LOG_WARN(DomainManager_impl, "Unable to bind service to name " << name); + } + //The registerService operation shall, upon successful service registration, establish any pending //connection requests for the registeringService. 
The registerService operation shall, upon //successful service registration, write an ADMINISTRATIVE_EVENT log record to a @@ -2090,7 +2035,7 @@ void DomainManager_impl::_local_registerService (CORBA::Object_ptr registeringSe try { db.store("CONNECTIONS", _connectionManager.getConnections()); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device manager connections"); + RH_ERROR(this->_baseLog, "Error persisting change to device manager connections"); } //The registerService operation shall, upon unsuccessful service registration, write a @@ -2105,12 +2050,130 @@ void DomainManager_impl::_local_registerService (CORBA::Object_ptr registeringSe //3. The sourceName shall be the input name parameter for the registering service. //4. The sourceIOR shall be the registered service object reference. //5. The sourceCategory shall be SERVICE. - sendAddEvent( _identifier.c_str(), serviceId.c_str(), name, registeringService, StandardEvent::SERVICE ); + sendAddEvent(_identifier, serviceId, name, registeringService, StandardEvent::SERVICE); //The registerService operation shall raise the RegisterError exception when an internal error //exists which causes an unsuccessful registration. 
} +void DomainManager_impl::storePubProxies() +{ + EventProxies _proxies; + EventChannelManager::PubProxyMap _retVal = _eventChannelMgr->getPubProxies(); + for (EventChannelManager::PubProxyMap::iterator it = _retVal.begin(); it != _retVal.end(); it++) { + _proxies[it->first] = _orbCtx.orb->object_to_string(it->second); + } + try { + db.store("EVTCHMGR_PUBPROXIES", _proxies); + } catch (const ossie::PersistenceException& ex) { + RH_ERROR(this->_baseLog, "Error persisting change to event channel manager publisher proxies"); + } +} + +void DomainManager_impl::storeSubProxies() +{ + EventProxies _proxies; + EventChannelManager::SubProxyMap _retVal = _eventChannelMgr->getSubProxies(); + for (EventChannelManager::SubProxyMap::iterator it = _retVal.begin(); it != _retVal.end(); it++) { + _proxies[it->first] = _orbCtx.orb->object_to_string(it->second); + } + try { + db.store("EVTCHMGR_SUBPROXIES", _proxies); + } catch (const ossie::PersistenceException& ex) { + RH_ERROR(this->_baseLog, "Error persisting change to event channel manager subscriber proxies"); + } +} + +void DomainManager_impl::storeEventChannelRegistrations() +{ + ChannelRegistrationNodes _nodes; + EventChannelManager::ChannelRegistrationTable _retVal = _eventChannelMgr->getChannelRegistrations(); + for (EventChannelManager::ChannelRegistrationTable::iterator it = _retVal.begin(); it != _retVal.end(); it++) { + ChannelRegistrationNode _tmp; + _tmp.channel_name = it->second.channel_name; + _tmp.fqn = it->second.fqn; + _tmp.channel = _orbCtx.orb->object_to_string(it->second.channel); + _tmp.autoRelease = it->second.autoRelease; + _tmp.release = it->second.release; + _tmp.registrants = it->second.registrants; + _nodes[it->first] = _tmp; + } + try { + db.store("EVTCHMGR_CHANNELREGISTRATIONS", _nodes); + } catch (const ossie::PersistenceException& ex) { + RH_ERROR(this->_baseLog, "Error persisting change to event channel manager event channel registrations"); + } +} + +void 
DomainManager_impl::restorePubProxies(const std::string& _db_uri) +{ + try { + db.open(_db_uri); + } catch (const ossie::PersistenceException& e) { + RH_ERROR(this->_baseLog, "Error loading persistent state: " << e.what()); + return; + } + EventProxies _proxies; + try { + db.fetch("EVTCHMGR_PUBPROXIES", _proxies, true); + } catch (const ossie::PersistenceException& ex) { + RH_ERROR(this->_baseLog, "Error persisting change to event channel manager publisher proxies"); + } + EventChannelManager::PubProxyMap _newVal; + for (EventProxies::iterator it = _proxies.begin(); it != _proxies.end(); it++) { + _newVal[it->first] = ossie::corba::_narrowSafe(_orbCtx.orb->string_to_object(it->second.c_str())); + } + _eventChannelMgr->setPubProxies(_newVal); +} + +void DomainManager_impl::restoreSubProxies(const std::string& _db_uri) +{ + try { + db.open(_db_uri); + } catch (const ossie::PersistenceException& e) { + RH_ERROR(this->_baseLog, "Error loading persistent state: " << e.what()); + return; + } + EventProxies _proxies; + try { + db.fetch("EVTCHMGR_SUBPROXIES", _proxies, true); + } catch (const ossie::PersistenceException& ex) { + RH_ERROR(this->_baseLog, "Error persisting change to event channel manager subscriber proxies"); + } + EventChannelManager::SubProxyMap _newVal; + for (EventProxies::iterator it = _proxies.begin(); it != _proxies.end(); it++) { + _newVal[it->first] = ossie::corba::_narrowSafe(_orbCtx.orb->string_to_object(it->second.c_str())); + } + _eventChannelMgr->setSubProxies(_newVal); +} + +void DomainManager_impl::restoreEventChannelRegistrations(const std::string& _db_uri) +{ + try { + db.open(_db_uri); + } catch (const ossie::PersistenceException& e) { + RH_ERROR(this->_baseLog, "Error loading persistent state: " << e.what()); + return; + } + ChannelRegistrationNodes _nodes; + try { + db.fetch("EVTCHMGR_CHANNELREGISTRATIONS", _nodes, true); + } catch (const ossie::PersistenceException& ex) { + RH_ERROR(this->_baseLog, "Error persisting change to event 
channel manager event channel registrations"); + } + EventChannelManager::ChannelRegistrationTable _newVal;// = _eventChannelMgr->getChannelRegistrations(); + for (ChannelRegistrationNodes::iterator it = _nodes.begin(); it != _nodes.end(); it++) { + EventChannelManager::ChannelRegistration _tmp; + _tmp.channel_name = it->second.channel_name; + _tmp.fqn = it->second.fqn; + _tmp.channel = ossie::corba::_narrowSafe(_orbCtx.orb->string_to_object(it->second.channel.c_str())); + _tmp.autoRelease = it->second.autoRelease; + _tmp.release = it->second.release; + _tmp.registrants = it->second.registrants; + _newVal[it->first] = _tmp; + } + _eventChannelMgr->setChannelRegistrations(_newVal); +} void DomainManager_impl::unregisterService(CORBA::Object_ptr unregisteringService, const char* name) throw (CF::DomainManager::UnregisterError, CF::InvalidObjectReference, CORBA::SystemException) @@ -2120,14 +2183,14 @@ void DomainManager_impl::unregisterService(CORBA::Object_ptr unregisteringServic // Try to find a service registered with the given name. ossie::ServiceList::iterator service = findServiceByName(name); if (service == _registeredServices.end()) { - LOG_ERROR(DomainManager_impl, "Cannot unregister service '" << name << "' not registered with domain"); + RH_ERROR(this->_baseLog, "Cannot unregister service '" << name << "' not registered with domain"); throw CF::InvalidObjectReference("Service is not registered with domain"); } // Refuse to unregister if the unregistering object is not the same as the // registered one. 
if (!service->service->_is_equivalent(unregisteringService)) { - LOG_ERROR(DomainManager_impl, "Not unregistering service '" << name << "' because object does not match prior registration"); + RH_ERROR(this->_baseLog, "Not unregistering service '" << name << "' because object does not match prior registration"); throw CF::InvalidObjectReference("Unregistering service is not the same as registered object"); } @@ -2137,7 +2200,6 @@ void DomainManager_impl::unregisterService(CORBA::Object_ptr unregisteringServic ossie::ServiceList::iterator DomainManager_impl::_local_unregisterService(ossie::ServiceList::iterator service) { - TRACE_ENTER(DomainManager_impl) boost::recursive_mutex::scoped_lock lock(stateAccess); // Disconnect any connections involving this service. @@ -2145,21 +2207,25 @@ ossie::ServiceList::iterator DomainManager_impl::_local_unregisterService(ossie: try { db.store("CONNECTIONS", _connectionManager.getConnections()); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to device manager connections"); + RH_ERROR(this->_baseLog, "Error persisting change to device manager connections"); } std::string serviceName(service->name); std::string serviceId(service->serviceId); + // Remove the naming service binding, ignoring exceptions + try { + CosNaming::Name_var service_name = ossie::corba::stringToName(serviceName); + rootContext->unbind(service_name); + } catch (...) { + LOG_WARN(DomainManager_impl, "Unable to remove name binding for service " << serviceName); + } + // Remove the service from the internal list. 
service = _registeredServices.erase(service); if (!_applications.empty()) { std::vector appsToRelease; - - PortableServer::POA_var dm_poa = ossie::corba::RootPOA()->find_POA("DomainManager", 0); - PortableServer::POA_var poa = dm_poa->find_POA("Applications", 1); - for (ApplicationTable::iterator app = _applications.begin(); app != _applications.end(); ++app) { if (app->second->checkConnectionDependency(ossie::Endpoint::SERVICENAME, serviceName)) { app->second->_add_ref(); @@ -2167,10 +2233,10 @@ ossie::ServiceList::iterator DomainManager_impl::_local_unregisterService(ossie: } } - LOG_DEBUG(DomainManager_impl, "Releasing " << appsToRelease.size() << " applications"); + RH_DEBUG(this->_baseLog, "Releasing " << appsToRelease.size() << " applications"); for (std::vector::iterator iter = appsToRelease.begin(); iter != appsToRelease.end(); ++iter) { Application_impl* app = *iter; - LOG_DEBUG(DomainManager_impl, "Releasing " << ossie::corba::returnString(app->identifier())); + RH_DEBUG(this->_baseLog, "Releasing " << app->getIdentifier()); app->releaseObject(); app->_remove_ref(); } @@ -2180,7 +2246,7 @@ ossie::ServiceList::iterator DomainManager_impl::_local_unregisterService(ossie: try { db.store("SERVICES", _registeredServices); } catch (const ossie::PersistenceException& ex) { - LOG_ERROR(DomainManager_impl, "Error persisting change to services"); + RH_ERROR(this->_baseLog, "Error persisting change to services"); } //The unregisterService operation shall, upon successful service unregistration, send an event to @@ -2193,7 +2259,7 @@ ossie::ServiceList::iterator DomainManager_impl::_local_unregisterService(ossie: //4. The sourceCategory shall be SERVICE. // Sent event here (as opposed to unregisterDevice), so we see the event on regular // unregisterDevice calls, and on cleanup (deviceManager shutdown, catastropic cleanup, etc.) 
- sendRemoveEvent( _identifier.c_str(), serviceId.c_str(), serviceName.c_str(), StandardEvent::SERVICE ); + sendRemoveEvent(_identifier, serviceId, serviceName, StandardEvent::SERVICE); return service; } @@ -2209,16 +2275,32 @@ CF::Resource_ptr DomainManager_impl::lookupComponentByInstantiationId(const std: // Search for a Component matching this id // This needs to reconcile the fact that the application id is :, the component id is : // and the unambiguous endpoint for a component is :: - std::size_t pos = identifier.find(":"); + std::size_t begin_pos = 0; + if (identifier.size() > 4) { + if (identifier.substr(0,3) == "DCE") { + begin_pos = 4; + } + } + std::size_t pos = identifier.find(":", begin_pos); if (pos != std::string::npos) { std::string appid = identifier.substr(pos+1); if (_applications.find(appid) == _applications.end()) { - return CF::Resource::_nil(); + // search by id substr instead of id + ApplicationTable::iterator it = _applications.begin(); + while (it != _applications.end()) { + if (appid == it->first.substr(it->first.size()-appid.size(),appid.size())) { + appid = it->first; + break; + } + it++; + } + if (it == _applications.end()) + return CF::Resource::_nil(); } std::string normalized_comp_id = identifier.substr(0,pos)+std::string(":")+appid.substr(appid.rfind(":")+1); - for (ossie::ComponentList::iterator _comp=_applications[appid]->_components.begin(); _comp!=_applications[appid]->_components.end(); _comp++) {\ - if (normalized_comp_id == _comp->identifier) { - return CF::Resource::_duplicate(CF::Resource::_narrow(_comp->componentObject)); + for (Application_impl::ComponentList::iterator _comp=_applications[appid]->_components.begin(); _comp!=_applications[appid]->_components.end(); _comp++) {\ + if (normalized_comp_id == _comp->getIdentifier()) { + return _comp->getResourcePtr(); } } } @@ -2239,11 +2321,11 @@ CF::DeviceManager_ptr DomainManager_impl::lookupDeviceManagerByInstantiationId(c CORBA::Object_ptr 
DomainManager_impl::lookupDomainObject (const std::string& type, const std::string& name) { - LOG_TRACE(DomainManager_impl, "Resolving domainfinder type='" << type << "' name='" << name << "'"); + RH_TRACE(this->_baseLog, "Resolving domainfinder type='" << type << "' name='" << name << "'"); if (type == "filemanager") { return CF::FileManager::_duplicate(_fileMgr); } else if (type == "log") { - LOG_WARN(DomainManager_impl, "No support for log in domainfinder element"); + RH_WARN(this->_baseLog, "No support for log in domainfinder element"); } else if (type == "eventchannel") { // If no name is given, return the IDM channel. std::string channelName = name; @@ -2264,28 +2346,31 @@ CORBA::Object_ptr DomainManager_impl::lookupDomainObject (const std::string& typ incrementEventChannelConnections(channelName); return channelObj._retn(); } else if (type == "namingservice") { - LOG_WARN(DomainManager_impl, "No support for namingservice in domainfinder element"); + RH_WARN(this->_baseLog, "No support for namingservice in domainfinder element"); } else if (type == "servicename") { ServiceList::iterator serviceNode = findServiceByName(name); if (serviceNode != _registeredServices.end()) { - LOG_TRACE(DomainManager_impl, "Found service " << name); + RH_TRACE(this->_baseLog, "Found service " << name); return CORBA::Object::_duplicate(serviceNode->service); } - LOG_WARN(DomainManager_impl, "No service found for servicename '" << name << "'"); + throw ossie::LookupError("no service '" + name + "' found"); } else if (type == "servicetype") { ServiceList::iterator serviceNode = findServiceByType(name); if (serviceNode != _registeredServices.end()) { - LOG_TRACE(DomainManager_impl, "Found service " << serviceNode->name << " supporting '" << name << "'"); + RH_TRACE(this->_baseLog, "Found service " << serviceNode->name << " supporting '" << name << "'"); return CORBA::Object::_duplicate(serviceNode->service); } - LOG_WARN(DomainManager_impl, "No service found for servicetype '" << 
name << "'"); + throw ossie::LookupError("no service found for type '" + name + "'"); } else if (type == "domainmanager") { return _this(); } else if (type == "application") { Application_impl* application = findApplicationById(name); - if (application) { - return application->_this(); + if (!application) { + throw ossie::LookupError("no application '" + name + "' found"); } + return application->_this(); + } else { + throw ossie::LookupError("invalid domainfinder type '" + type + "'"); } return CORBA::Object::_nil(); } @@ -2293,26 +2378,23 @@ CORBA::Object_ptr DomainManager_impl::lookupDomainObject (const std::string& typ void DomainManager_impl::catastrophicUnregisterDeviceManager (ossie::DeviceManagerList::iterator deviceManager) { - TRACE_ENTER(DomainManager_impl); boost::recursive_mutex::scoped_lock lock(stateAccess); // NOTE: Assume that the DeviceManager doesn't exist, so make no CORBA calls to it. // Release all devices associated with the DeviceManager that is being unregistered. - LOG_TRACE(DomainManager_impl, "Finding devices for device manager " << deviceManager->identifier); + RH_TRACE(this->_baseLog, "Finding devices for device manager " << deviceManager->identifier); for (DeviceList::iterator device = _registeredDevices.begin(); device != _registeredDevices.end(); ++device) { if ((*device)->devMgr.identifier == deviceManager->identifier) { - LOG_TRACE(DomainManager_impl, "Releasing registered device " << (*device)->label); + RH_TRACE(this->_baseLog, "Releasing registered device " << (*device)->label); try { (*device)->device->releaseObject(); - } CATCH_LOG_WARN(DomainManager_impl, "Failed to release device " << (*device)->label); + } CATCH_RH_WARN(this->_baseLog, "Failed to release device " << (*device)->label); } } // Continue with the normal unregistration code path. 
_local_unregisterDeviceManager(deviceManager); - - TRACE_EXIT(DomainManager_impl); } @@ -2470,28 +2552,28 @@ void DomainManager_impl::parseDeviceProfile (ossie::DeviceNode& node) CF::FileSystem_var devMgrFS = node.devMgr.deviceManager->fileSys(); // Parse and cache the device's SPD - LOG_TRACE(DomainManager_impl, "Parsing SPD for device " << node.identifier); + RH_TRACE(this->_baseLog, "Parsing SPD for device " << node.identifier); try { File_stream spd(devMgrFS, node.softwareProfile.c_str()); node.spd.load(spd, node.softwareProfile); } catch (const ossie::parser_error& error) { std::string parser_error_line = ossie::retrieveParserErrorLineNumber(error.what()); - LOG_WARN(DomainManager_impl, "Error parsing SPD: " << node.softwareProfile << " Device: " << node.identifier << ". " << parser_error_line << " The XML parser returned the following error: " << error.what()); + RH_WARN(this->_baseLog, "Error parsing SPD: " << node.softwareProfile << " Device: " << node.identifier << ". " << parser_error_line << " The XML parser returned the following error: " << error.what()); } catch (...) { - LOG_WARN(DomainManager_impl, "Unable to cache SPD for device " << node.identifier); + RH_WARN(this->_baseLog, "Unable to cache SPD for device " << node.identifier); } // Parse and cache the device's PRF, if it has one if (node.spd.getPRFFile()) { - LOG_TRACE(DomainManager_impl, "Parsing PRF for device " << node.identifier); + RH_TRACE(this->_baseLog, "Parsing PRF for device " << node.identifier); try { File_stream prf(devMgrFS, node.spd.getPRFFile()); node.prf.load(prf); } catch (const ossie::parser_error& error) { std::string parser_error_line = ossie::retrieveParserErrorLineNumber(error.what()); - LOG_WARN(DomainManager_impl, "Error parsing PRF: " << node.spd.getPRFFile() << " Device: " << node.identifier << ". 
" << parser_error_line << " The XML parser returned the following error: " << error.what()); + RH_WARN(this->_baseLog, "Error parsing PRF: " << node.spd.getPRFFile() << " Device: " << node.identifier << ". " << parser_error_line << " The XML parser returned the following error: " << error.what()); } catch (...) { - LOG_WARN(DomainManager_impl, "Unable to cache PRF for device " << node.identifier); + RH_WARN(this->_baseLog, "Unable to cache PRF for device " << node.identifier); } } @@ -2501,38 +2583,161 @@ void DomainManager_impl::parseDeviceProfile (ossie::DeviceNode& node) continue; } if (impl->getPRFFile()) { - LOG_TRACE(DomainManager_impl, "Parsing implementation-specific PRF for device " << node.identifier); + RH_TRACE(this->_baseLog, "Parsing implementation-specific PRF for device " << node.identifier); try { File_stream prf_file(devMgrFS, impl->getPRFFile()); node.prf.join(prf_file); } catch (const ossie::parser_error& error) { std::string parser_error_line = ossie::retrieveParserErrorLineNumber(error.what()); - LOG_WARN(DomainManager_impl, "Error parsing implementation-specific PRF for Device: " << node.identifier << ". " << parser_error_line << " The XML parser returned the following error: " << error.what()); + RH_WARN(this->_baseLog, "Error parsing implementation-specific PRF for Device: " << node.identifier << ". " << parser_error_line << " The XML parser returned the following error: " << error.what()); } catch (...) 
{ - LOG_WARN(DomainManager_impl, "Unable to cache implementation-specific PRF for Device: " << node.identifier); + RH_WARN(this->_baseLog, "Unable to cache implementation-specific PRF for Device: " << node.identifier); } } } // Override with values from the DCD - LOG_TRACE(DomainManager_impl, "Parsing DCD overrides for device " << node.identifier); - ossie::DeviceManagerConfiguration dcd; + RH_TRACE(this->_baseLog, "Parsing DCD overrides for device " << node.identifier); const std::string deviceManagerProfile = ossie::corba::returnString(node.devMgr.deviceManager->deviceConfigurationProfile()); - try { - File_stream dcd_file(devMgrFS, deviceManagerProfile.c_str()); - dcd.load(dcd_file); - } catch (const ossie::parser_error& error) { - std::string parser_error_line = ossie::retrieveParserErrorLineNumber(error.what()); - LOG_WARN(DomainManager_impl, "Error parsing DCD: " << deviceManagerProfile.c_str() << " overrides for Device: " << node.identifier << ". " << parser_error_line << " The XML parser returned the following error: " << error.what()); - } catch (...) 
{ - LOG_WARN(DomainManager_impl, "Unable to cache DCD overrides for Device: " << node.identifier); + if ( node.devMgr.dcd.isLoaded() == false ) { + RH_WARN(this->_baseLog, "DCD file was not loaded for node: " << node.identifier ); } + else { + const ComponentInstantiation* instantiation = findComponentInstantiation(node.devMgr.dcd.getComponentPlacements(), node.identifier); + if (instantiation) { + node.prf.override(instantiation->properties); + ossie::convertComponentProperties( instantiation->getDeployerRequires(), + node.requiresProps ); + } else { + RH_WARN(this->_baseLog, "Unable to find device " << node.identifier << " in DCD"); + } + } +} +Application_impl* DomainManager_impl::_restoreApplication(ossie::ApplicationNode& node) +{ + Application_impl* application = new Application_impl(node.identifier, + node.name, + node.profile, + this, + node.contextName, + node.context, + node.aware_application, + node.stop_timeout, + CosNaming::NamingContext::_nil()); + RH_TRACE(this->_baseLog, "Restored " << node.connections.size() << " connections"); + + application->populateApplication(node.componentDevices, + node.connections, + node.allocationIDs); + + // Restore various state about the components in the waveform + BOOST_FOREACH(ossie::ComponentNode& compNode, node.components) { + redhawk::ApplicationComponent* component = application->addComponent(compNode.identifier, compNode.softwareProfile); + component->setName(compNode.name); + component->setNamingContext(compNode.namingContext); + component->setImplementationId(compNode.implementationId); + component->setVisible(compNode.isVisible); + BOOST_FOREACH(const std::string& filename, compNode.loadedFiles) { + component->addLoadedFile(filename); + } + component->setProcessId(compNode.processId); + component->setComponentObject(compNode.componentObject); + component->setAssignedDeviceId(compNode.assignedDeviceId); + ossie::DeviceList::iterator device = findDeviceById(compNode.assignedDeviceId); + if (device == 
_registeredDevices.end()) { + RH_WARN(this->_baseLog, "Could not find assigned device '" << compNode.assignedDeviceId + << "' for application '" << node.name + << "' component '" << compNode.identifier); + } else { + component->setAssignedDevice(*device); + } + if (!compNode.componentHostId.empty()) { + redhawk::ApplicationComponent* host = application->findComponent(compNode.componentHostId); + if (!host) { + RH_WARN(this->_baseLog, "Could not find component host " << compNode.componentHostId); + } else { + component->setComponentHost(host); + } + } + } + application->setAssemblyController(node.assemblyControllerId); + application->setStartOrder(node.startOrder); + + // Add external ports + for (std::map::const_iterator it = node.ports.begin(); + it != node.ports.end(); + ++it) { + application->addExternalPort(it->first, it->second); + } + + // Add external properties + for (std::map::const_iterator it = node.properties.begin(); + it != node.properties.end(); + ++it) { + std::string extId = it->first; + std::string propId = it->second.property_id; + std::string access = it->second.access; + application->addExternalProperty(propId, extId, access, it->second.component); + } + + return application; +} + +void DomainManager_impl::_persistApplication(Application_impl* application) +{ + ApplicationNode appNode; + appNode.name = application->getName(); + appNode.identifier = application->getIdentifier(); + appNode.profile = application->getProfile(); + appNode.contextName = application->_waveformContextName; + appNode.context = CosNaming::NamingContext::_duplicate(application->_waveformContext); + appNode.componentDevices = application->_componentDevices; + BOOST_FOREACH(redhawk::ApplicationComponent& component, application->_components) { + ossie::ComponentNode compNode; + compNode.identifier = component.getIdentifier(); + compNode.name = component.getName(); + compNode.softwareProfile = component.getSoftwareProfile(); + compNode.namingContext = 
component.getNamingContext(); + compNode.implementationId = component.getImplementationId(); + compNode.isVisible = component.isVisible(); + compNode.loadedFiles = component.getLoadedFiles(); + compNode.processId = component.getProcessId(); + compNode.componentObject = component.getComponentObject(); + if (component.getAssignedDevice()) { + compNode.assignedDeviceId = component.getAssignedDevice()->identifier; + } else { + compNode.assignedDeviceId = component.getAssignedDeviceId(); + } + if (component.getComponentHost()) { + compNode.componentHostId = component.getComponentHost()->getIdentifier(); + } + appNode.components.push_back(compNode); + } - const ComponentInstantiation* instantiation = findComponentInstantiation(dcd.getComponentPlacements(), node.identifier); - if (instantiation) { - node.prf.override(instantiation->properties); - } else { - LOG_WARN(DomainManager_impl, "Unable to find device " << node.identifier << " in DCD"); + // If an assembly controller is set, store it by identifier + redhawk::ApplicationComponent* assembly_controller = application->getAssemblyController(); + if (assembly_controller) { + appNode.assemblyControllerId = assembly_controller->getIdentifier(); + } + + // Save the start order, storing just the component identifiers + BOOST_FOREACH(redhawk::ApplicationComponent* component, application->_startOrder) { + appNode.startOrder.push_back(component->getIdentifier()); + } + + appNode.allocationIDs = application->_allocationIDs; + appNode.connections = application->_connections; + appNode.aware_application = application->_isAware; + appNode.stop_timeout = application->_stopTimeout; + appNode.ports = application->_ports; + appNode.properties = application->_properties; + + _runningApplications.push_back(appNode); + + try { + db.store("APPLICATIONS", _runningApplications); + } catch (const ossie::PersistenceException& ex) { + RH_ERROR(this->_baseLog, "Error persisting change to device managers"); } } diff --git 
a/redhawk/src/control/sdr/dommgr/DomainManager_impl.h b/redhawk/src/control/sdr/dommgr/DomainManager_impl.h index f01741b36..77d6f2be0 100644 --- a/redhawk/src/control/sdr/dommgr/DomainManager_impl.h +++ b/redhawk/src/control/sdr/dommgr/DomainManager_impl.h @@ -33,16 +33,23 @@ #include #include +#include #include #include #include #include +#include +#include +#include #include "PersistenceStore.h" #include "connectionSupport.h" #include "DomainManager_EventSupport.h" #include "EventChannelManager.h" #include "struct_props.h" +#include "struct_props.h" + +#include "../../parser/internal/dcd-pimpl.h" class Application_impl; @@ -51,7 +58,7 @@ class AllocationManager_impl; class ConnectionManager_impl; -class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertySet_impl, public ossie::ComponentLookup, public ossie::DomainLookup, public ossie::Runnable +class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertySet_impl, public Logging_impl, public ossie::ComponentLookup, public ossie::DomainLookup, public ossie::Runnable { ENABLE_LOGGING @@ -60,7 +67,7 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS /////////////////////////// public: - DomainManager_impl (const char*, const char*, const char*, const char *, const char*, bool, bool ); + DomainManager_impl (const char*, const char*, const char*, const char *, const char*, bool, bool, bool, int); ~DomainManager_impl (); friend class ODM_Channel_Supplier_i; @@ -118,7 +125,7 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS void installApplication (const char* profileFileName) throw (CF::DomainManager::ApplicationInstallationError, CF::InvalidFileName, CF::InvalidProfile, CORBA::SystemException, CF::DomainManager::ApplicationAlreadyInstalled); - void _local_installApplication (const char* profileFileName); + void _local_installApplication (const std::string& profileFileName); void uninstallApplication (const char* 
applicationId) throw (CF::DomainManager::ApplicationUninstallationError, CF::DomainManager::InvalidIdentifier, CORBA::SystemException); @@ -133,11 +140,9 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS void registerWithEventChannel (CORBA::Object_ptr registeringObject, const char* registeringId, const char* eventChannelName) throw (CF::DomainManager::AlreadyConnected, CF::DomainManager::InvalidEventChannelName, CF::InvalidObjectReference, CORBA::SystemException); - void _local_registerWithEventChannel (CORBA::Object_ptr registeringObject, std::string ®isteringId, std::string &eventChannelName); void unregisterFromEventChannel (const char* unregisteringId, const char* eventChannelName) throw (CF::DomainManager::NotConnected, CF::DomainManager::InvalidEventChannelName, CORBA::SystemException); - void _local_unregisterFromEventChannel (std::string &unregisteringId, std::string &eventChannelName); void registerRemoteDomainManager (CF::DomainManager_ptr registeringRemoteDomainManager) throw (CF::DomainManager::RegisterError, CF::InvalidObjectReference, CORBA::SystemException); @@ -156,6 +161,10 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS void restoreEventChannels(const std::string& _db_uri); void addApplication(Application_impl* new_app); + + void addPendingApplication(Application_impl* application); + void cancelPendingApplication(Application_impl* application); + void completePendingApplication(Application_impl* application); void releaseAllApplications(); @@ -232,6 +241,15 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS rh_logger::LoggerPtr getLogger() const { return __logger; }; + rh_logger::LoggerPtr getInstanceLogger(const char *name) { + std::string n(name); + return getInstanceLogger(n); + }; + + rh_logger::LoggerPtr getInstanceLogger(std::string &name) { + return this->_baseLog->getChildLogger(name, ""); + }; + bool bindToDomain() { return 
_bindToDomain; }; std::string getRedhawkVersion() { return redhawk_version; }; @@ -241,6 +259,10 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS uint32_t getManagerWaitTime(); uint32_t getDeviceWaitTime(); uint32_t getServiceWaitTime(); + CF::LogLevel log_level(); + int getInitialLogLevel() { + return _initialLogLevel; + }; ///////////////////////////// // Internal Helper Functions @@ -251,13 +273,16 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS ossie::DeviceList::iterator _local_unregisterDevice (ossie::DeviceList::iterator device); ossie::ServiceList::iterator _local_unregisterService (ossie::ServiceList::iterator service); + void _local_registerWithEventChannel (CORBA::Object_ptr registeringObject, const std::string& registeringId, const std::string& eventChannelName); + void _local_unregisterFromEventChannel (const std::string& unregisteringId, const std::string& eventChannelName); + void parseDMDProfile(); void storeDeviceInDomainMgr (CF::Device_ptr, CF::DeviceManager_ptr); - void storeServiceInDomainMgr (CORBA::Object_ptr, CF::DeviceManager_ptr, const char*, const char*); + void storeServiceInDomainMgr (CORBA::Object_ptr, CF::DeviceManager_ptr, const std::string&, const std::string&); bool deviceMgrIsRegistered (CF::DeviceManager_ptr); bool domainMgrIsRegistered (CF::DomainManager_ptr); bool deviceIsRegistered (CF::Device_ptr); - bool serviceIsRegistered (const char*); + bool serviceIsRegistered (const std::string&); void addDeviceMgr (CF::DeviceManager_ptr deviceMgr); void mountDeviceMgrFileSys (CF::DeviceManager_ptr deviceMgr); void addDomainMgr (CF::DomainManager_ptr domainMgr); @@ -287,9 +312,14 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS // void establishDomainManagementChannels( const std::string &db_uri ); void disconnectDomainManagementChannels(); - void handleIDMChannelMessages( const CORBA::Any &msg ); void idmTerminationMessages( const 
redhawk::events::ComponentTerminationEvent &msg ); void destroyEventChannels (void); + void storePubProxies(); + void storeSubProxies(); + void storeEventChannelRegistrations(); + void restorePubProxies(const std::string& _db_uri); + void restoreSubProxies(const std::string& _db_uri); + void restoreEventChannelRegistrations(const std::string& _db_uri); bool applicationDependsOnDevice (Application_impl* application, const std::string& deviceId); @@ -312,11 +342,13 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS ossie::ServiceList _registeredServices; std::vector < ossie::EventChannelNode > _eventChannels; - + Application_impl* _restoreApplication(ossie::ApplicationNode& node); + void _persistApplication(Application_impl* application); // // Handle to EventChannelManager for the Domain // + friend class EventChannelManager; EventChannelManager* _eventChannelMgr; @@ -325,6 +357,7 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS DOM_Publisher_ptr _odm_publisher; redhawk::events::DomainEventReader _idm_reader; + bool PERSISTENCE; /////////////////////// // Private Domain State @@ -348,6 +381,7 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS typedef std::map ApplicationTable; ApplicationTable _applications; + ApplicationTable _pendingApplications; typedef std::map ApplicationFactoryTable; ApplicationFactoryTable _applicationFactories; @@ -362,6 +396,9 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS bool _useLogConfigUriResolver; bool _strict_spd_validation; + // orb context + ossie::corba::OrbContext _orbCtx; + void _exit(int __status) { ossie::logging::Terminate(); //no more logging.... 
exit(__status); @@ -369,6 +406,7 @@ class DomainManager_impl: public virtual POA_CF::DomainManager, public PropertyS FileManager_impl* fileMgr_servant; client_wait_times_struct client_wait_times; + int _initialLogLevel; bool _bindToDomain; }; /* END CLASS DEFINITION DomainManager */ diff --git a/redhawk/src/control/sdr/dommgr/Endpoints.h b/redhawk/src/control/sdr/dommgr/Endpoints.h index 13c03e8c4..561854696 100644 --- a/redhawk/src/control/sdr/dommgr/Endpoints.h +++ b/redhawk/src/control/sdr/dommgr/Endpoints.h @@ -50,6 +50,11 @@ namespace ossie { return ((type == APPLICATION) && (identifier == identifier_)); } + virtual std::string description() const + { + return "application '" + identifier_ + "'"; + } + virtual ApplicationEndpoint* clone() const { return new ApplicationEndpoint(*this); @@ -96,6 +101,11 @@ namespace ossie { return ((type == COMPONENT) && (identifier == identifier_)); } + virtual std::string description() const + { + return "component '" + identifier_ + "'"; + } + virtual ComponentEndpoint* clone() const { return new ComponentEndpoint(*this); @@ -146,6 +156,11 @@ namespace ossie { return false; } + virtual std::string description() const + { + return "device that loaded component '" + identifier_ + "'"; + } + virtual DeviceLoadedEndpoint* clone() const { return new DeviceLoadedEndpoint(*this); @@ -203,6 +218,11 @@ namespace ossie { return new DeviceUsedEndpoint(*this); } + virtual std::string description() const + { + return "usesdevice '" + usesIdentifier_ + "' for component '" + componentIdentifier_ + "'"; + } + private: virtual CORBA::Object_ptr resolve_(ConnectionManager& manager) { @@ -249,6 +269,11 @@ namespace ossie { return false; } + virtual std::string description() const + { + return "application usesdevice '" + usesIdentifier_ + "'"; + } + virtual ApplicationUsesDeviceEndpoint* clone() const { return new ApplicationUsesDeviceEndpoint(*this); @@ -298,6 +323,11 @@ namespace ossie { return false; } + virtual std::string description() 
const + { + return "find by naming service '" + name_ + "'"; + } + virtual FindByNamingServiceEndpoint* clone() const { return new FindByNamingServiceEndpoint(*this); @@ -347,6 +377,15 @@ namespace ossie { return ((type == Endpoint::SERVICENAME) && (identifier == name_)); } + virtual std::string description() const + { + std::string desc = "domain object type " + type_; + if (!name_.empty()) { + desc += " '" + name_ + "'"; + } + return desc; + } + virtual FindByDomainFinderEndpoint* clone() const { return new FindByDomainFinderEndpoint(*this); @@ -405,6 +444,11 @@ namespace ossie { return ((type == Endpoint::SERVICENAME) && (identifier == name_)); } + virtual std::string description() const + { + return "service '" + name_ + "'"; + } + virtual ServiceEndpoint* clone() const { return new ServiceEndpoint(*this); @@ -457,6 +501,11 @@ namespace ossie { return false; } + virtual std::string description() const + { + return "event channel '" + name_ + "'"; + } + virtual EventChannelEndpoint* clone() const { return new EventChannelEndpoint(*this); @@ -509,6 +558,11 @@ namespace ossie { return false; } + virtual std::string description() const + { + return "object reference"; + } + virtual ObjectrefEndpoint* clone() const { return new ObjectrefEndpoint(*this); @@ -583,6 +637,11 @@ namespace ossie { return supplier_->checkDependency(type, identifier); } + virtual std::string description() const + { + return supplier_->description() + " port '" + name_ + "'"; + } + virtual PortEndpoint* clone() const { return new PortEndpoint(*this); @@ -592,17 +651,24 @@ namespace ossie { virtual CORBA::Object_ptr resolve_(ConnectionManager& manager) { CORBA::Object_var supplierObject = supplier_->resolve(manager); - CF::PortSupplier_var portSupplier = ossie::corba::_narrowSafe(supplierObject); - if (!CORBA::is_nil(portSupplier)) { - try { - return portSupplier->getPort(name_.c_str()); - } catch (const CF::PortSupplier::UnknownPort&) { - LOG_ERROR(PortEndpoint, "Port supplier reports no 
port with name " << name_); - } CATCH_LOG_ERROR(PortEndpoint, "Failure in getPort"); - - invalidPort_ = true; - } else { + if (CORBA::is_nil(supplierObject)) { LOG_DEBUG(PortEndpoint, "Unable to resolve port supplier"); + } else { + CF::PortSupplier_var portSupplier = ossie::corba::_narrowSafe(supplierObject); + if (!CORBA::is_nil(portSupplier)) { + try { + return portSupplier->getPort(name_.c_str()); + } catch (const CF::PortSupplier::UnknownPort&) { + LOG_ERROR(PortEndpoint, "Port supplier reports no port with name " << name_); + } CATCH_LOG_ERROR(PortEndpoint, "Failure in getPort"); + } else { + if (manager.exceptionsEnabled()) { + throw LookupError(supplier_->description() + " is not a port supplier"); + } else { + LOG_ERROR(PortEndpoint, "Object " << supplier_->description() << " is not a port supplier"); + } + } + invalidPort_ = true; } return CORBA::Object::_nil(); } diff --git a/redhawk/src/control/sdr/dommgr/EventChannelManager.cpp b/redhawk/src/control/sdr/dommgr/EventChannelManager.cpp index c5ea61ad6..e03ebf696 100644 --- a/redhawk/src/control/sdr/dommgr/EventChannelManager.cpp +++ b/redhawk/src/control/sdr/dommgr/EventChannelManager.cpp @@ -33,15 +33,6 @@ #include "EventChannelManager.h" -#define ECM_TRACE( fname, expression ) RH_NL_TRACE( "EventChannelManager", fname << "--"<< expression ) -#define ECM_DEBUG( fname, expression ) RH_NL_DEBUG( "EventChannelManager", fname << "--"<< expression ) -#define ECM_INFO( fname, expression ) RH_NL_INFO( "EventChannelManager", fname << "--"<< expression ) -#define ECM_WARN( fname, expression ) RH_NL_WARN( "EventChannelManager", fname << "--"<< expression ) -#define ECM_ERROR( fname, expression ) RH_NL_ERROR( "EventChannelManager", fname << "--" << expression ) -#define ECM_FATAL( fname, expression ) RH_NL_FATAL( "EventChannelManager", fname << "--"<< expression ) - - - typedef ossie::corba::Iterator< CF::EventChannelManager::EventChannelInfo, CF::EventChannelManager::EventChannelInfo_out, 
CF::EventChannelManager::EventChannelInfoList, @@ -68,12 +59,8 @@ typedef ossie::corba::Iterator< CF::EventChannelManager::EventRegistrant, _allow_es_resolve(allow_es), _default_poll_period(20000000) { - ECM_DEBUG("CTOR", "Creating EventChannel Manager "); - - ECM_DEBUG("CTOR", "Initializing EventChannel Manager ... "); try { _getEventChannelFactory(); - ECM_DEBUG("CTOR", "Establish Event Channel Factory Reference ... "); } catch(...){ } @@ -92,14 +79,11 @@ typedef ossie::corba::Iterator< CF::EventChannelManager::EventRegistrant, _allow_es_resolve(allow_es), _default_poll_period(20000000) { - ECM_TRACE("CTOR", "Creating EventChannel Manager "); if ( domainManager ) _domain_context = domainManager->getDomainManagerName(); - ECM_TRACE("CTOR", "Initializing EventChannel Manager ... "); try { _getEventChannelFactory(); - ECM_TRACE("CTOR", "Establish Event Channel Factory Reference ... "); } catch(...){ } @@ -109,10 +93,8 @@ typedef ossie::corba::Iterator< CF::EventChannelManager::EventRegistrant, EventChannelManager::~EventChannelManager () { - ECM_TRACE("DTOR", "DTOR ENTER"); // call terminate to clean up resources terminate(); - ECM_TRACE("DTOR", "DTOR EXIT"); } @@ -126,48 +108,48 @@ void EventChannelManager::_initialize () { void EventChannelManager::terminate ( const bool destroyChannels ) { - ECM_DEBUG("terminate", " Terminate START" ); + RH_DEBUG(_eventChannelMgrLog, " Terminate START" ); SCOPED_LOCK(_mgrlock); - ECM_DEBUG("terminate", "Event Channel Registry Size:" << _channels.size() ); + RH_DEBUG(_eventChannelMgrLog, "Event Channel Registry Size:" << _channels.size() ); ChannelRegistrationTable::iterator iter = _channels.begin(); for ( ; iter != _channels.end(); iter++ ) { try { - ECM_DEBUG("terminate", "Removing Channel: " << iter->first ); - ECM_DEBUG("terminate", " ChannelRecord: name:" << iter->second.channel_name ); - ECM_DEBUG("terminate", " ChannelRecord: fqn:" << iter->second.fqn ); - ECM_DEBUG("terminate", " ChannelRecord: autoRelease:" << 
iter->second.autoRelease ); - ECM_DEBUG("terminate", " ChannelRecord: release:" << iter->second.release ); - ECM_DEBUG("terminate", " ChannelRecord: registrants:" << iter->second.registrants.size()); - ECM_DEBUG("terminate", " ChannelRecord: channel:" << iter->second.channel); + RH_DEBUG(_eventChannelMgrLog, "Removing Channel: " << iter->first ); + RH_DEBUG(_eventChannelMgrLog, " ChannelRecord: name:" << iter->second.channel_name ); + RH_DEBUG(_eventChannelMgrLog, " ChannelRecord: fqn:" << iter->second.fqn ); + RH_DEBUG(_eventChannelMgrLog, " ChannelRecord: autoRelease:" << iter->second.autoRelease ); + RH_DEBUG(_eventChannelMgrLog, " ChannelRecord: release:" << iter->second.release ); + RH_DEBUG(_eventChannelMgrLog, " ChannelRecord: registrants:" << iter->second.registrants.size()); + RH_DEBUG(_eventChannelMgrLog, " ChannelRecord: channel:" << iter->second.channel); //if ( CORBA::is_nil(iter->second.channel) == true ) { if ( ossie::corba::objectExists(iter->second.channel) == false ) { - ECM_DEBUG("terminate", " Channel is NIL : " << iter->first ); + RH_DEBUG(_eventChannelMgrLog, " Channel is NIL : " << iter->first ); } else { - ECM_DEBUG("terminate", " Destroy EventChannel PRE: " << iter->first ); + RH_DEBUG(_eventChannelMgrLog, " Destroy EventChannel PRE: " << iter->first ); if ( destroyChannels ) { iter->second.channel->destroy(); } - ECM_DEBUG("terminate", " Destroy EventChannel POST: " << iter->first ); + RH_DEBUG(_eventChannelMgrLog, " Destroy EventChannel POST: " << iter->first ); } iter->second.channel = ossie::events::EventChannel::_nil(); } catch(CORBA::OBJECT_NOT_EXIST){ // only report issue when event service was available if ( !CORBA::is_nil(_event_channel_factory) ) { - ECM_WARN("Terminate", "Remove Channel FAILED, CHANNEL:" << iter->first << " REASON: Object does not exists"); + RH_WARN(_eventChannelMgrLog, "Remove Channel FAILED, CHANNEL:" << iter->first << " REASON: Object does not exists"); } } catch(...){ if ( 
!CORBA::is_nil(_event_channel_factory) ) { - ECM_WARN("Terminate","Remove Channel FAILED, CHANNEL:" << iter->first << " Possible legacy channels in event service "); + RH_WARN(_eventChannelMgrLog,"Remove Channel FAILED, CHANNEL:" << iter->first << " Possible legacy channels in event service "); } } } _channels.clear(); - ECM_DEBUG("terminate", " Terminate COMPLETED " ); + RH_DEBUG(_eventChannelMgrLog, " Terminate COMPLETED " ); } @@ -226,10 +208,43 @@ const ossie::events::EventChannel_ptr EventChannelManager::findChannel( const st } reg->autoRelease = true; - ECM_DEBUG( "markForRegistrationr", " EventChannel: " << cname << " marked for autoRelease" ); + RH_DEBUG(_eventChannelMgrLog, " EventChannel: " << cname << " marked for autoRelease" ); } + void EventChannelManager::forceRelease( const char *channel_name) + throw ( CF::EventChannelManager::ChannelDoesNotExist, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ) { + + SCOPED_LOCK(_mgrlock); + // get the event channel factory... 
throws ServiceUnavailable if factory is not resolved + _getEventChannelFactory(); + + RH_DEBUG(_eventChannelMgrLog, " Delete event channel: " << channel_name ); + std::string cname(channel_name); + + RH_DEBUG(_eventChannelMgrLog, " Check registration for event channel: " << channel_name ); + ChannelRegistrationPtr reg = _getChannelRegistration( cname ); + + // channel registration entry does not exists + if ( reg == NULL ) { + RH_DEBUG(_eventChannelMgrLog, " Registration DOES NOT EXISTS event channel: " << channel_name ); + throw (CF::EventChannelManager::ChannelDoesNotExist()); + } + + // check if anyone is still registered + CF::EventChannelManager::EventRegistration_var tmp_evt = new CF::EventChannelManager::EventRegistration(); + tmp_evt->channel_name = channel_name; + while ( reg->nregistrants() > 0 ) { + tmp_evt->reg_id = reg->registrants.begin()->first.c_str(); + _unregister(tmp_evt); + } + _release(channel_name); + + } + void EventChannelManager::release( const std::string &channel_name) throw ( CF::EventChannelManager::ChannelDoesNotExist, CF::EventChannelManager::RegistrationsExists, @@ -263,21 +278,21 @@ const ossie::events::EventChannel_ptr EventChannelManager::findChannel( const st // get the event channel factory... 
throws ServiceUnavailable if factory is not resolved _getEventChannelFactory(); - ECM_DEBUG( "release", " Delete event channel: " << channel_name ); + RH_DEBUG(_eventChannelMgrLog, " Delete event channel: " << channel_name ); std::string cname(channel_name); - ECM_DEBUG( "release", " Check registration for event channel: " << channel_name ); + RH_DEBUG(_eventChannelMgrLog, " Check registration for event channel: " << channel_name ); ChannelRegistrationPtr reg = _getChannelRegistration( cname ); // channel registration entry does not exists if ( reg == NULL ) { - ECM_DEBUG( "release", " Registration DOES NOT EXISTS event channel: " << channel_name ); + RH_DEBUG(_eventChannelMgrLog, " Registration DOES NOT EXISTS event channel: " << channel_name ); throw (CF::EventChannelManager::ChannelDoesNotExist()); } // check if anyone is still registered if ( reg->nregistrants() > 0 ) { - ECM_DEBUG( "release", " Registrations still exists: " << channel_name ); + RH_DEBUG(_eventChannelMgrLog, " Registrations still exists: " << channel_name ); // mark channel for deletion when registrations are emptied reg->autoRelease = true; reg->release = true; @@ -288,27 +303,28 @@ const ossie::events::EventChannel_ptr EventChannelManager::findChannel( const st // if naming service registry is used then unbind the object // if ( _use_naming_service ) { - ECM_DEBUG( "release", " Remove channel from NamingService: " << channel_name ); + RH_DEBUG(_eventChannelMgrLog, " Remove channel from NamingService: " << channel_name ); ossie::corba::Unbind( cname, _domain_context ); } // destroy the event channel and delete the registration try { - ECM_DEBUG( "release", " Delete the registration: " << channel_name ); + RH_DEBUG(_eventChannelMgrLog, " Delete the registration: " << channel_name ); // delete the registration _deleteChannelRegistration( cname ); } catch(CORBA::SystemException& ex) { // this will happen if channel is destroyed but - ECM_WARN("release", " Event Channel RELEASE operation, System 
exception occured, ex " << ex._name() ); + RH_WARN(_eventChannelMgrLog, " Event Channel RELEASE operation, System exception occured, ex " << ex._name() ); throw (CF::EventChannelManager::OperationFailed()); } catch(CORBA::Exception& ex) { - ECM_WARN("release", " Event Channel RELEASE operation, CORBA exception occured, ex " << ex._name() ); + RH_WARN(_eventChannelMgrLog, " Event Channel RELEASE operation, CORBA exception occured, ex " << ex._name() ); throw (CF::EventChannelManager::OperationFailed()); } - - ECM_DEBUG( "release", " Released EventChannel: " << cname ); + _domainManager->sendRemoveEvent(channel_name.c_str(), channel_name.c_str(), channel_name.c_str(), StandardEvent::EVENT_CHANNEL); + + RH_DEBUG(_eventChannelMgrLog, " Released EventChannel: " << cname ); } @@ -337,6 +353,30 @@ ossie::events::EventChannel_ptr EventChannelManager::create( const std::string & } +ossie::events::EventChannel_ptr EventChannelManager::get( const std::string &channel_name) + throw ( CF::EventChannelManager::ChannelDoesNotExist, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ) + { + SCOPED_LOCK(_mgrlock); + return _get( channel_name ); + + } + + + + ossie::events::EventChannel_ptr EventChannelManager::get( const char *channel_name) + throw ( CF::EventChannelManager::ChannelDoesNotExist, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ) + { + SCOPED_LOCK(_mgrlock); + return _get( channel_name ); + + } + ossie::events::EventChannel_ptr EventChannelManager::createForRegistrations( const char *channel_name) throw ( CF::EventChannelManager::ChannelAlreadyExists, @@ -370,7 +410,7 @@ ossie::events::EventChannel_ptr EventChannelManager::create( const std::string & // check if channel name is already registered // if ( _channelExists( channel_name ) ) { - ECM_DEBUG("create", "Event channel: "<< channel_name 
<< " exists, in the local domain"); + RH_DEBUG(_eventChannelMgrLog, "Event channel: "<< channel_name << " exists, in the local domain"); throw (CF::EventChannelManager::ChannelAlreadyExists()); } @@ -388,17 +428,17 @@ ossie::events::EventChannel_ptr EventChannelManager::create( const std::string & // if naming service disabled then throw if ( _use_naming_service == false && _allow_es_resolve == false ) { - ECM_WARN("EventChannelManager", "Event Channel: "<< channel_name << " exists in EventService!"); + RH_WARN(_eventChannelMgrLog, "Event Channel: "<< channel_name << " exists in EventService!"); throw (CF::EventChannelManager::ChannelAlreadyExists()); } else { if ( _allow_es_resolve == false ) { // set next search method to require use of NS require_ns=true; - ECM_DEBUG("EventChannelManager", "Event Channel: "<< channel_name << " exists, requiring NamingService resolution."); + RH_DEBUG(_eventChannelMgrLog, "Event Channel: "<< channel_name << " exists, requiring NamingService resolution."); } else { - ECM_DEBUG("EventChannelManager", "Event Channel: "<< channel_name << " exists, using EventService resolution."); + RH_DEBUG(_eventChannelMgrLog, "Event Channel: "<< channel_name << " exists, using EventService resolution."); } } } @@ -407,7 +447,7 @@ ossie::events::EventChannel_ptr EventChannelManager::create( const std::string & // try and resolve with naming service (if enabled) // if ( require_ns ) { - ECM_TRACE( "_createChannel", " Checking NamingService for:" << fqn ); + RH_TRACE(_eventChannelMgrLog, " Checking NamingService for:" << fqn ); event_channel = _resolve_ns( cname, fqn, _domain_context ); // if NamingService is enable and we require its use, but channel evaluation failed @@ -425,21 +465,84 @@ ossie::events::EventChannel_ptr EventChannelManager::create( const std::string & } if ( CORBA::is_nil(event_channel) == true ) { - ECM_ERROR("EventChannelManager", "Create Event Channel failed. 
channel: "<< channel_name ); + RH_ERROR(_eventChannelMgrLog, "Create Event Channel failed. channel: "<< channel_name ); throw (CF::EventChannelManager::OperationFailed()); } - ECM_TRACE("create", + RH_TRACE(_eventChannelMgrLog, "ADD Channel Registration, Event Channel: "<< channel_name << " fqn:" << fqn ); ChannelRegistrationPtr reg __attribute__((unused)) = _addChannelRegistration( channel_name, fqn, event_channel, autoRelease ); + this->_domainManager->sendAddEvent(cname, cname, cname, event_channel, StandardEvent::EVENT_CHANNEL); + // // return pointer the channel... we maintain a separate copy // - ECM_TRACE("create", "Completed create Event Channel: "<< channel_name ); + RH_TRACE(_eventChannelMgrLog, "Completed create Event Channel: "<< channel_name ); return event_channel._retn(); } + ossie::events::EventChannel_ptr EventChannelManager::_get( const std::string &channel_name ) + throw ( CF::EventChannelManager::ChannelDoesNotExist, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ) + { + // + // validate channel name... 
+ // + if ( _validateChannelName( channel_name ) == false ) { + throw ( CF::EventChannelManager::InvalidChannelName()); + } + + // + // check if channel name is already registered + // + if ( _channelExists( channel_name ) ) { + ChannelRegistrationPtr cr_ptr = _getChannelRegistration( channel_name ); + return(ossie::events::EventChannel::_duplicate(cr_ptr->channel)); + } + + // + // check if channel name is already exists in the event service + // + ossie::events::EventChannel_var event_channel = ossie::events::EventChannel::_nil(); + std::string cname(channel_name); + std::string fqn = _getFQN(cname); + bool require_ns = false; // if channel exists and use_nameing_service is enabled + event_channel = _resolve_es( cname, fqn ); + + // if we found a matching channel + if ( !CORBA::is_nil(event_channel) ) { + + // if naming service disabled then throw + if ( _use_naming_service == false && _allow_es_resolve == false ) { + return(ossie::events::EventChannel::_duplicate(event_channel)); + } + else { + if ( _allow_es_resolve == false ) { + // set next search method to require use of NS + require_ns=true; + } + } + } + + // + // try and resolve with naming service (if enabled) + // + if ( require_ns ) { + RH_TRACE(_eventChannelMgrLog, " Checking NamingService for:" << fqn ); + event_channel = _resolve_ns( cname, fqn, _domain_context ); + // if NamingService is enable and we require its use, but channel evaluation failed + if ( CORBA::is_nil(event_channel) && require_ns) { + throw (CF::EventChannelManager::OperationFailed()); + } + return(ossie::events::EventChannel::_duplicate(event_channel)); + } + RH_DEBUG(_eventChannelMgrLog, "Event channel: "<< channel_name << " does not exist in the local domain"); + throw (CF::EventChannelManager::ChannelDoesNotExist()); + } + void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, @@ -464,7 +567,7 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, // check if channel name 
is already registered // if ( _channelExists( channel_name ) ) { - ECM_DEBUG("restore", "Event channel: "<< channel_name << " exists, in the local domain"); + RH_DEBUG(_eventChannelMgrLog, "Event channel: "<< channel_name << " exists, in the local domain"); throw (CF::EventChannelManager::ChannelAlreadyExists()); } @@ -486,9 +589,9 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, // try and resolve with naming service (if enabled) // if ( _use_naming_service && CORBA::is_nil(event_channel) ) { - ECM_TRACE( "restore", " Rebind Exiting Channel to:" << fqn ); + RH_TRACE(_eventChannelMgrLog, " Rebind Exiting Channel to:" << fqn ); if ( ossie::corba::Bind( fqn, savedChannel, _domain_context ) != 0 ) { - ECM_TRACE( "restore", " Checking NamingService for:" << fqn ); + RH_TRACE(_eventChannelMgrLog, " Checking NamingService for:" << fqn ); event_channel = _resolve_ns( cname, fqn, _domain_context ); // if NamingService is enable and we require its use, but channel evaluation failed if ( CORBA::is_nil(event_channel) && require_ns) { @@ -509,32 +612,37 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, } if ( CORBA::is_nil(event_channel) == true ) { - ECM_ERROR("EventChannelManager", "Create Event Channel failed. channel: "<< channel_name ); + RH_ERROR(_eventChannelMgrLog, "Create Event Channel failed. channel: "<< channel_name ); throw (CF::EventChannelManager::OperationFailed()); } - ECM_DEBUG("restore", - "ADD Channel Registration, Event Channel: "<< channel_name << " fqn:" << fqn ); - ChannelRegistrationPtr reg __attribute__((unused)) = _addChannelRegistration( channel_name, fqn, event_channel, false ); - // // return pointer the channel... 
we maintain a separate copy // - ECM_DEBUG("restore", "Completed create Event Channel: "<< channel_name ); + RH_DEBUG(_eventChannelMgrLog, "Completed create Event Channel: "<< channel_name ); } +ossie::events::EventChannelReg_ptr EventChannelManager::registerResource( const ossie::events::EventRegistration &request) + throw ( CF::EventChannelManager::InvalidChannelName, + CF::EventChannelManager::RegistrationAlreadyExists, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ) +{ + SCOPED_LOCK(_mgrlock); + return _registerResource( request ); +} - ossie::events::EventChannelReg_ptr EventChannelManager::registerResource( const ossie::events::EventRegistration &request) +ossie::events::EventChannelReg_ptr EventChannelManager::_registerResource( const ossie::events::EventRegistration &request) throw ( CF::EventChannelManager::InvalidChannelName, - CF::EventChannelManager::RegistrationAlreadyExists, - CF::EventChannelManager::OperationFailed, - CF::EventChannelManager::OperationNotAllowed, - CF::EventChannelManager::ServiceUnavailable ) + CF::EventChannelManager::RegistrationAlreadyExists, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ) { - ECM_DEBUG("registerResource", "REQUEST REGISTRATION , REG-ID:" << request.reg_id << " CHANNEL:" << request.channel_name ); - SCOPED_LOCK(_mgrlock); + RH_DEBUG(_eventChannelMgrLog, "REQUEST REGISTRATION , REG-ID:" << request.reg_id << " CHANNEL:" << request.channel_name ); // get the event channel factory... 
throws ServiceUnavailable _getEventChannelFactory(); @@ -575,7 +683,7 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, ossie::events::EventChannel_var ch __attribute__((unused)) = _create(channel_name); // adds to list of channel registrations automatically } catch( CF::EventChannelManager::ChannelAlreadyExists ){ - ECM_ERROR("registerResource", "REGISTRATION ERROR, REG-ID:" << regid << " CHANNEL:" << channel_name << " Channel exists in EventService" ); + RH_ERROR(_eventChannelMgrLog, "REGISTRATION ERROR, REG-ID:" << regid << " CHANNEL:" << channel_name << " Channel exists in EventService" ); throw (CF::EventChannelManager::OperationFailed()); } creg = _getChannelRegistration( channel_name ); @@ -588,16 +696,166 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, reg->channel = ossie::events::EventChannel::_duplicate(creg->channel); std::string ior = ossie::corba::objectToString( reg->channel ); - ECM_DEBUG("register", "NEW REGISTRATION FOR:" << ior ); creg->registrants.insert( RegRecord( regid, ior ) ); + this->_domainManager->storeEventChannelRegistrations(); - ECM_DEBUG("registerResource", "NEW REGISTRATION REG-ID:" << regid << " CHANNEL:" << channel_name ); + RH_DEBUG(_eventChannelMgrLog, "NEW REGISTRATION REG-ID:" << regid << " CHANNEL:" << channel_name ); // // Release memory to the caller....they will clean up // return reg; } + + ossie::events::EventChannelReg_ptr EventChannelManager::registerConsumer( CosEventComm::PushConsumer_ptr consumer, const ossie::events::EventRegistration &req) + throw ( CF::EventChannelManager::InvalidChannelName, + CF::EventChannelManager::RegistrationAlreadyExists, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ) { + SCOPED_LOCK(_mgrlock); + int retries = 10; + int retry_wait = 10; + int tries = retries; + CosEventChannelAdmin::ConsumerAdmin_var consumer_admin; + 
std::string _channel_name(req.channel_name); + ossie::events::EventChannel_var channel = _get(_channel_name); + do + { + try { + consumer_admin = channel->for_consumers (); + break; + } + catch (CORBA::COMM_FAILURE& ex) { + } + if ( retry_wait > 0 ) { + boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); + } else { + boost::this_thread::yield(); + } + tries--; + } while ( tries ); + + ossie::events::EventSubscriber_var _proxy; + + if ( CORBA::is_nil(consumer_admin) ) + throw (CF::EventChannelManager::OperationFailed()); + tries=retries; + do { + try { + _proxy = consumer_admin->obtain_push_supplier (); + break; + } + catch (CORBA::COMM_FAILURE& ex) { + } + if ( retry_wait > 0 ) { + boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); + } else { + boost::this_thread::yield(); + } + tries--; + } while ( tries ); + if ( CORBA::is_nil(_proxy) ) + throw (CF::EventChannelManager::OperationFailed()); + tries=retries; + do { + try { + _proxy->connect_push_consumer(consumer); + } + catch (CORBA::BAD_PARAM& ex) { + RH_DEBUG(_eventChannelMgrLog, "Unable to connect consumer to the Event channel" ); + throw (CF::EventChannelManager::OperationFailed()); + } + catch (CosEventChannelAdmin::AlreadyConnected& ex) { + break; + } + catch (CORBA::COMM_FAILURE& ex) { + } + if ( retry_wait > 0 ) { + boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); + } else { + boost::this_thread::yield(); + } + tries--; + } while ( tries ); + ossie::events::EventChannelReg_ptr ret_reg; + try { + ret_reg = _registerResource(req); + } catch ( ... ) { + try { + _proxy->disconnect_push_supplier(); + } catch ( ... 
) { + } + throw; + } + std::string _reg_id(ret_reg->reg.reg_id); + _subProxies[_reg_id] = ossie::events::EventSubscriber::_duplicate(_proxy); + this->_domainManager->storeSubProxies(); + return ret_reg; + } + + ossie::events::PublisherReg_ptr EventChannelManager::registerPublisher( const ossie::events::EventRegistration &req, CosEventComm::PushSupplier_ptr disconnectReceiver) + throw ( CF::EventChannelManager::InvalidChannelName, + CF::EventChannelManager::RegistrationAlreadyExists, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ) { + SCOPED_LOCK(_mgrlock); + int retries = 10; + int retry_wait = 10; + int tries = retries; + + CosEventChannelAdmin::SupplierAdmin_var supplier_admin; + ossie::events::PublisherReg_ptr reg = new ossie::events::PublisherReg(); + std::string _channel_name(req.channel_name); + ossie::events::EventChannel_var channel = _get(_channel_name); + do + { + try { + supplier_admin = channel->for_suppliers (); + break; + } catch (CORBA::COMM_FAILURE& ex) {} + if ( retry_wait > 0 ) { + boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); + } else { + boost::this_thread::yield(); + } + tries--; + } while ( tries ); + + if ( CORBA::is_nil(supplier_admin) ) + throw (CF::EventChannelManager::OperationFailed());; + + tries=retries; + do { + try { + reg->proxy_consumer = supplier_admin->obtain_push_consumer (); + break; + } catch (CORBA::COMM_FAILURE& ex) {} + if ( retry_wait > 0 ) { + boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); + } else { + boost::this_thread::yield(); + } + tries--; + } while ( tries ); + + ossie::events::EventChannelReg_var ret_reg; + try { + ret_reg = _registerResource(req); + } catch ( ... 
) { + throw; + } + reg->proxy_consumer->connect_push_supplier( disconnectReceiver ); + + reg->reg.channel_name = CORBA::string_dup(req.channel_name); + reg->reg.reg_id = CORBA::string_dup(ret_reg->reg.reg_id); + reg->channel = ossie::events::EventChannel::_duplicate(ret_reg->channel); + + std::string _reg_id(ret_reg->reg.reg_id); + _pubProxies[_reg_id] = CF::EventPublisher::_duplicate(reg->proxy_consumer); + return reg; + } @@ -612,13 +870,13 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, { SCOPED_LOCK(_mgrlock); - ECM_DEBUG("unregister", "REQUEST TO UNREGISTER, REG-ID:" << reg.reg_id << " CHANNEL:" << reg.channel_name); + RH_DEBUG(_eventChannelMgrLog, "REQUEST TO UNREGISTER, REG-ID:" << reg.reg_id << " CHANNEL:" << reg.channel_name); // get the event channel factory... throw ServiceUnavailable _getEventChannelFactory(); _unregister(reg); - ECM_DEBUG("unregister", "UNREGISTER COMPLETED, REG-ID:" << reg.reg_id << " CHANNEL:" << reg.channel_name); + RH_DEBUG(_eventChannelMgrLog, "UNREGISTER COMPLETED, REG-ID:" << reg.reg_id << " CHANNEL:" << reg.channel_name); } @@ -630,34 +888,70 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, CF::EventChannelManager::RegistrationDoesNotExist, CF::EventChannelManager::ServiceUnavailable ) { - ECM_DEBUG("unregister", "START: ID:" << reg.reg_id ); + RH_DEBUG(_eventChannelMgrLog, "START: ID:" << reg.reg_id ); // // check channel and registration existence // std::string regid(reg.reg_id); std::string cname(reg.channel_name); - - ECM_DEBUG("unregister", "CHECK REGISTRATION, ID:" << regid<< " CHANNEL:" << cname); + + if (_subProxies.find(regid) != _subProxies.end()) { + try { + _subProxies[regid]->disconnect_push_supplier(); + } catch ( ... 
) { + } + _subProxies.erase(regid); + } + + int retries = 10; + int retry_wait = 10; + if (_pubProxies.find(regid) != _pubProxies.end()) { + int tries = retries; + do { + try { + _pubProxies[regid]->disconnect_push_consumer(); + break; + } + catch (CORBA::COMM_FAILURE& ex) { + if ( tries == retries ) { + RH_WARN(_eventChannelMgrLog, "::disconnect, Caught COMM_FAILURE Exception " << "disconnecting Push Consumer! Retrying..." ); + } + } catch (...) { + if ( tries == retries ) { + RH_WARN(_eventChannelMgrLog, "::disconnect, UNKNOWN Exception " << "disconnecting Push Consumer! Retrying..." ); + } + } + if ( retry_wait > 0 ) { + boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); + } else { + boost::this_thread::yield(); + } + tries--; + } while(tries); + _pubProxies.erase(regid); + } + + RH_DEBUG(_eventChannelMgrLog, "CHECK REGISTRATION, ID:" << regid<< " CHANNEL:" << cname); _regExists( cname, regid ); // get the registration record for this channel - ECM_DEBUG("unregister", "GET REGISTRATION RECORD, ID:" << reg.reg_id ); + RH_DEBUG(_eventChannelMgrLog, "GET REGISTRATION RECORD, ID:" << reg.reg_id ); ChannelRegistrationPtr creg = _getChannelRegistration( cname ); if (creg) { // search for registration entry and remove registration from the list, RegIdList::iterator itr = creg->registrants.find(regid); if ( itr != creg->registrants.end() ) { - ECM_DEBUG("unregister", "ERASE REGISTRATION RECORD, REG-ID:" << reg.reg_id ); + RH_DEBUG(_eventChannelMgrLog, "ERASE REGISTRATION RECORD, REG-ID:" << reg.reg_id ); creg->registrants.erase(itr); } if ( creg->registrants.size()==0 and creg->autoRelease ) { - ECM_DEBUG("EventChannelManager", "NO MORE REGISTRATIONS, (AUTO-RELEASE IS ON), DELETING CHANNEL:" << cname ); + RH_DEBUG(_eventChannelMgrLog, "NO MORE REGISTRATIONS, (AUTO-RELEASE IS ON), DELETING CHANNEL:" << cname ); _deleteChannelRegistration(cname); } } - ECM_DEBUG("unregister", "UNREGISTER END: REG-ID:" << reg.reg_id ); + 
RH_DEBUG(_eventChannelMgrLog, "UNREGISTER END: REG-ID:" << reg.reg_id ); } @@ -669,7 +963,7 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, SCOPED_LOCK(_mgrlock); uint64_t size = _channels.size(); - ECM_TRACE( "listChannels", " listChannel context " << this << ", how_many " << how_many << ", size " << size ); + RH_TRACE(_eventChannelMgrLog, " listChannel context " << this << ", how_many " << how_many << ", size " << size ); // create copy of entire table... ossie::events::EventChannelInfoList* all = new ossie::events::EventChannelInfoList(size); @@ -680,7 +974,7 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, std::string cname(iter->first.c_str()); (*all)[i].channel_name = CORBA::string_dup(cname.c_str()); (*all)[i].reg_count = iter->second.registrants.size(); - ECM_DEBUG("listChannels", " list channel context: (" << i << ") channel_name: " << iter->first << + RH_DEBUG(_eventChannelMgrLog, " list channel context: (" << i << ") channel_name: " << iter->first << " N registrants: " << iter->second.registrants.size() ); } @@ -694,13 +988,13 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, CF::EventRegistrantIterator_out riter) { SCOPED_LOCK(_mgrlock); ChannelRegistrationPtr reg = _getChannelRegistration( channel_name ); - ECM_TRACE( "listRegistrants", " channel_name " << channel_name << " reg : " << reg ); + RH_TRACE(_eventChannelMgrLog, " channel_name " << channel_name << " reg : " << reg ); uint64_t size = 0; ossie::events::EventRegistrantList* all = new ossie::events::EventRegistrantList(size); if ( reg != 0 ) { // get number of registrants to size = reg->nregistrants(); - ECM_TRACE( "listRegistrants", " list channel registrants context " << this << ", how_many " << how_many << ", size " << size ); + RH_TRACE(_eventChannelMgrLog, " list channel registrants context " << this << ", how_many " << how_many << ", size " << size ); all->length(size); 
RegIdList::iterator iter = reg->registrants.begin(); @@ -708,7 +1002,7 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, std::string cname(channel_name); (*all)[i].channel_name = CORBA::string_dup(cname.c_str()); (*all)[i].reg_id = CORBA::string_dup( iter->first.c_str() ); - ECM_DEBUG("listRegistrants", " Registrant : (" << i << ") regid: " << iter->first ); + RH_DEBUG(_eventChannelMgrLog, " Registrant : (" << i << ") regid: " << iter->first ); } } @@ -749,7 +1043,7 @@ void EventChannelManager::restore( ossie::events::EventChannel_ptr savedChannel, } - ECM_TRACE("_getFQN", "CHANNEL:" << cname << " FQN:" << fqn); + RH_TRACE(_eventChannelMgrLog, "CHANNEL:" << cname << " FQN:" << fqn); return fqn; } @@ -759,9 +1053,9 @@ void EventChannelManager::_getEventChannelFactory () throw ( CF::EventChannelManager::ServiceUnavailable ) { - ECM_TRACE("_getEventChannelFactory", " .. Checking ORB Context"); + RH_TRACE(_eventChannelMgrLog, " .. Checking ORB Context"); if ( CORBA::is_nil(_orbCtx.orb) == true ) { - ECM_TRACE("_getEventChannelFactory", " ... ORB Context is invalid..."); + RH_TRACE(_eventChannelMgrLog, " ... ORB Context is invalid..."); throw (CF::EventChannelManager::ServiceUnavailable() ); } @@ -771,13 +1065,13 @@ void EventChannelManager::_getEventChannelFactory () // First, check for an initial reference in the omniORB configuration; if it cannot be // resolved in this manner, look it up via the naming service. CORBA::Object_var factoryObj; - ECM_TRACE("_getEventChannelFactory", " ... Get EventChannelFactory..."); + RH_TRACE(_eventChannelMgrLog, " ... 
Get EventChannelFactory..."); try { factoryObj = _orbCtx.namingServiceCtx->resolve_str("EventChannelFactory"); } catch (const CosNaming::NamingContext::NotFound&) { - ECM_DEBUG("_getEventChannelFactory", "No naming service entry for 'EventChannelFactory'"); + RH_DEBUG(_eventChannelMgrLog, "No naming service entry for 'EventChannelFactory'"); } catch (const CORBA::Exception& e) { - ECM_WARN( "_getEventChannel", "CORBA " << e._name() << " exception looking up EventChannelFactory in name service"); + RH_WARN(_eventChannelMgrLog, "CORBA " << e._name() << " exception looking up EventChannelFactory in name service"); } if (!CORBA::is_nil(factoryObj)) { @@ -785,16 +1079,16 @@ void EventChannelManager::_getEventChannelFactory () ossie::corba::overrideBlockingCall(factoryObj); if (!factoryObj->_non_existent()) { _event_channel_factory = CosLifeCycle::GenericFactory::_narrow(factoryObj); - ECM_TRACE("_getEventChannelFactory", "Resolved EventChannelFactory in NameService"); + RH_TRACE(_eventChannelMgrLog, "Resolved EventChannelFactory in NameService"); } } catch (const CORBA::TRANSIENT&) { - ECM_WARN( "_getEventChannelEvent", "Could not contact EventChannelFactory"); + RH_WARN(_eventChannelMgrLog, "Could not contact EventChannelFactory"); } } } if ( CORBA::is_nil(_event_channel_factory) ) { - ECM_TRACE( "_getEventChannelEvent", "EventChannelFactory unavailable."); + RH_TRACE(_eventChannelMgrLog, "EventChannelFactory unavailable."); throw (CF::EventChannelManager::ServiceUnavailable() ); } @@ -824,9 +1118,9 @@ void EventChannelManager::_getEventChannelFactory () key[0].id = "EventChannel"; key[0].kind = "object interface"; - ECM_TRACE( "_createChannel", " Check EventChannelFactory API" ); + RH_TRACE(_eventChannelMgrLog, " Check EventChannelFactory API" ); if(!_event_channel_factory->supports(key)) { - ECM_WARN( "Create Event Channel", " EventChannelFactory does not support Event Channel Interface!" 
); + RH_WARN(_eventChannelMgrLog, " EventChannelFactory does not support Event Channel Interface!" ); throw (CF::EventChannelManager::OperationNotAllowed()); } @@ -842,31 +1136,31 @@ void EventChannelManager::_getEventChannelFactory () // // Create Event Channel Object. // - ECM_TRACE( "_createChannel", " Create CHANNEL:" << cname << " AS FQN:" << fqn ); + RH_TRACE(_eventChannelMgrLog, " Create CHANNEL:" << cname << " AS FQN:" << fqn ); CORBA::Object_var obj; try { obj =_event_channel_factory->create_object(key, criteria); } catch (CosLifeCycle::CannotMeetCriteria& ex) /* create_object() */ { - ECM_ERROR( "Create Event Channel", "Create failed, CHANNEL: " << cname << " REASON: CannotMeetCriteria " ); + RH_ERROR(_eventChannelMgrLog, "Create failed, CHANNEL: " << cname << " REASON: CannotMeetCriteria " ); throw (CF::EventChannelManager::OperationFailed()); } catch (CosLifeCycle::InvalidCriteria& ex) /* create_object() */ { - ECM_ERROR( "Create Event Channel", "Create failed, CHANNEL: " << cname << " REASON: InvalidCriteria " ); + RH_ERROR(_eventChannelMgrLog, "Create failed, CHANNEL: " << cname << " REASON: InvalidCriteria " ); if(ex.invalid_criteria.length()>0) { int j; for ( j=0; (unsigned int)j < ex.invalid_criteria.length(); j++ ) { - ECM_TRACE( "_createChannel", "--- Criteria Name: " << ex.invalid_criteria[j].name ); + RH_TRACE(_eventChannelMgrLog, "--- Criteria Name: " << ex.invalid_criteria[j].name ); if ( j == 0 ) { const char * xx; ex.invalid_criteria[j].value >>= xx; - ECM_TRACE( "_createChannel", "--- Criteria Value : " << xx ); + RH_TRACE(_eventChannelMgrLog, "--- Criteria Value : " << xx ); } else { CORBA::ULong xx; ex.invalid_criteria[j].value >>= xx; - ECM_TRACE( "_createChannel", "--- Criteria Value : " << xx ); + RH_TRACE(_eventChannelMgrLog, "--- Criteria Value : " << xx ); } } } @@ -874,23 +1168,23 @@ void EventChannelManager::_getEventChannelFactory () throw (CF::EventChannelManager::OperationFailed()); } catch( CORBA::Exception &ex ) { - 
ECM_ERROR( "Create Event Channel", " Create failed, CHANNEL:" << cname << " REASON: corba exception" ); + RH_ERROR(_eventChannelMgrLog, " Create failed, CHANNEL:" << cname << " REASON: corba exception" ); throw CF::EventChannelManager::OperationFailed(); } if (CORBA::is_nil(obj)) { - ECM_ERROR( "Create Event Channel", " Create failed, CHANNEL:" << cname << " REASON: Factory failed to create channel"); + RH_ERROR(_eventChannelMgrLog, " Create failed, CHANNEL:" << cname << " REASON: Factory failed to create channel"); throw CF::EventChannelManager::OperationFailed(); } try { - ECM_TRACE( "_createChannel", " action - Narrow EventChannel" ); + RH_TRACE(_eventChannelMgrLog, " action - Narrow EventChannel" ); event_channel = ossie::events::EventChannel::_narrow(obj); } catch( CORBA::Exception &ex ) { - ECM_ERROR( "Create Event Channel", " Create failed, CHANNEL:" << cname << " REASON: Failed to narrow to EventChannel"); + RH_ERROR(_eventChannelMgrLog, " Create failed, CHANNEL:" << cname << " REASON: Failed to narrow to EventChannel"); } - ECM_TRACE( "_createChannel", " created event channel " << cname ); + RH_TRACE(_eventChannelMgrLog, " created event channel " << cname ); if ( _use_naming_service ){ try { @@ -898,11 +1192,11 @@ void EventChannelManager::_getEventChannelFactory () ossie::corba::Bind( cname, event_channel.in(), nc_name, false ); } catch (const CORBA::Exception& ex) { - ECM_ERROR( "Create Event Channel", " Bind Failed, CHANNEL:" << cname << " REASON: CORBA " << ex._name() ); + RH_ERROR(_eventChannelMgrLog, " Bind Failed, CHANNEL:" << cname << " REASON: CORBA " << ex._name() ); } } - ECM_TRACE( "_createChannel", " completed create event channel : " << cname ); + RH_TRACE(_eventChannelMgrLog, " completed create event channel : " << cname ); return event_channel._retn(); } @@ -910,7 +1204,7 @@ void EventChannelManager::_getEventChannelFactory () ossie::events::EventChannel_ptr EventChannelManager::_resolve_ns( const std::string &cname, const std::string &fqn, 
const std::string &nc_name) { - ECM_DEBUG("_resolve", " : resolve event channel with NamingService cname/fqn... " << cname << "/" << fqn ); + RH_DEBUG(_eventChannelMgrLog, " : resolve event channel with NamingService cname/fqn... " << cname << "/" << fqn ); // return value if no event channel was found or error occured ossie::events::EventChannel_var event_channel = ossie::events::EventChannel::_nil(); @@ -920,15 +1214,15 @@ ossie::events::EventChannel_ptr EventChannelManager::_resolve_ns( const std::str // try to resolve using channel name as InitRef and resolve_initial_references // try { - ECM_TRACE( "_resolve_ns", " : Trying InitRef Lookup " << fqn); + RH_TRACE(_eventChannelMgrLog, " : Trying InitRef Lookup " << fqn); CORBA::Object_var obj = _orbCtx.orb->resolve_initial_references(fqn.c_str()); if ( CORBA::is_nil(obj) == false ){ event_channel = ossie::events::EventChannel::_narrow(obj); found =true; - ECM_TRACE( "_resolve_ns", " : FOUND EXISTING (InitRef), Channel " << cname ); + RH_TRACE(_eventChannelMgrLog, " : FOUND EXISTING (InitRef), Channel " << cname ); } }catch (const CORBA::Exception& e) { - ECM_DEBUG( "_resolve_ns", " Unable to lookup with InitRef:" << fqn << ", CORBA RETURNED(" << e._name() << ")" ); + RH_DEBUG(_eventChannelMgrLog, " Unable to lookup with InitRef:" << fqn << ", CORBA RETURNED(" << e._name() << ")" ); } /* @@ -944,16 +1238,16 @@ ossie::events::EventChannel_ptr EventChannelManager::_resolve_ns( const std::str CosNaming::NamingContext_ptr context = ossie::corba::ResolveNamingContextPath( nc_name ); if ( !CORBA::is_nil(context) ) { CORBA::Object_var obj = context->resolve(boundName); - ECM_TRACE( "_resolve_ns", " : FOUND EXISTING (NamingService - domain/domain.channel), Channel/FQN " << cname << "/" << fqn ); + RH_TRACE(_eventChannelMgrLog, " : FOUND EXISTING (NamingService - domain/domain.channel), Channel/FQN " << cname << "/" << fqn ); event_channel = ossie::events::EventChannel::_narrow(obj); found = true; } } catch (const 
CosNaming::NamingContext::NotFound&) { // The channel does not exist and can be safely created. } catch (const CORBA::Exception& e) { - ECM_WARN("Name Resolution", " Naming Service failed for CHANNEL:" << cname << " REASON: CORBA (" << e._name() << ")" ); + RH_WARN(_eventChannelMgrLog, " Naming Service failed for CHANNEL:" << cname << " REASON: CORBA (" << e._name() << ")" ); } catch (...) { - ECM_DEBUG("Name Resolution", " Naming Service failed ... (NamingService - domain/domain.channel) Channel/FQN " << cname << "/" << fqn ); + RH_DEBUG(_eventChannelMgrLog, " Naming Service failed ... (NamingService - domain/domain.channel) Channel/FQN " << cname << "/" << fqn ); } } @@ -963,16 +1257,16 @@ ossie::events::EventChannel_ptr EventChannelManager::_resolve_ns( const std::str CosNaming::NamingContext_ptr context = ossie::corba::ResolveNamingContextPath( nc_name ); if ( !CORBA::is_nil(context) ) { CORBA::Object_var obj = context->resolve(boundName); - ECM_TRACE( "_resolve_ns", " : FOUND EXISTING (NamingService - domaincontext/channel ), Channel/FQN " << cname << "/" << fqn ); + RH_TRACE(_eventChannelMgrLog, " : FOUND EXISTING (NamingService - domaincontext/channel ), Channel/FQN " << cname << "/" << fqn ); event_channel = ossie::events::EventChannel::_narrow(obj); found = true; } } catch (const CosNaming::NamingContext::NotFound&) { // The channel does not exist and can be safely created. } catch (const CORBA::Exception& e) { - ECM_WARN("Name Resolution", " Naming Service failed for CHANNEL:" << cname << " REASON: CORBA (" << e._name() << ")" ); + RH_WARN(_eventChannelMgrLog, " Naming Service failed for CHANNEL:" << cname << " REASON: CORBA (" << e._name() << ")" ); } catch (...) { - ECM_DEBUG("Name Resolution", " Naming Service failed ... (NamingService - domaincontext/channel) Channel/FQN " << cname << "/" << fqn ); + RH_DEBUG(_eventChannelMgrLog, " Naming Service failed ... 
(NamingService - domaincontext/channel) Channel/FQN " << cname << "/" << fqn ); } } @@ -983,16 +1277,16 @@ ossie::events::EventChannel_ptr EventChannelManager::_resolve_ns( const std::str CosNaming::NamingContext_ptr context = ossie::corba::ResolveNamingContextPath(""); if ( !CORBA::is_nil(context) ) { CORBA::Object_var obj = context->resolve(boundName); - ECM_TRACE( "_resolve_ns", " : FOUND EXISTING (NamingService - root context/dommain.channel), Channel/FQN " << cname << "/" << fqn ); + RH_TRACE(_eventChannelMgrLog, " : FOUND EXISTING (NamingService - root context/dommain.channel), Channel/FQN " << cname << "/" << fqn ); event_channel = ossie::events::EventChannel::_narrow(obj); found = true; } } catch (const CosNaming::NamingContext::NotFound&) { // The channel does not exist and can be safely created. } catch (const CORBA::Exception& e) { - ECM_WARN("Name Resolution", " Naming Service failed for CHANNEL:" << cname << " REASON: CORBA (" << e._name() << ")" ); + RH_WARN(_eventChannelMgrLog, " Naming Service failed for CHANNEL:" << cname << " REASON: CORBA (" << e._name() << ")" ); } catch (...) { - ECM_WARN("Name Resolution", " Naming Service failed for CHANNEL:" << cname ); + RH_WARN(_eventChannelMgrLog, " Naming Service failed for CHANNEL:" << cname ); } } @@ -1006,7 +1300,7 @@ ossie::events::EventChannel_ptr EventChannelManager::_resolve_es ( const std::s const std::string& fqn, bool suppress ) { - ECM_DEBUG("_resolve_es", " : resolve event channel ... " << cname ); + RH_DEBUG(_eventChannelMgrLog, " : resolve event channel ... 
" << cname ); // return value if no event channel was found or error occured ossie::events::EventChannel_var event_channel = ossie::events::EventChannel::_nil(); @@ -1022,22 +1316,22 @@ ossie::events::EventChannel_ptr EventChannelManager::_resolve_es ( const std::s // os << "corbaloc::localhost:11169/"<< fqn; tname=os.str(); - ECM_TRACE( "_resolve_es"," : Trying corbaloc resolution " << tname ); + RH_TRACE(_eventChannelMgrLog," : Trying corbaloc resolution " << tname ); CORBA::Object_var obj = _orbCtx.orb->string_to_object(tname.c_str()); if ( !CORBA::is_nil(obj) ) { event_channel = ossie::events::EventChannel::_narrow(obj); if ( CORBA::is_nil(event_channel) == false ){ - ECM_TRACE( "_resolve_es", " : FOUND EXISTING (corbaloc), Channel " << tname ); + RH_TRACE(_eventChannelMgrLog, " : FOUND EXISTING (corbaloc), Channel " << tname ); } else { - ECM_TRACE( "_resolve_es", " : RESOLVE FAILED VIA (corbaloc), Channel " << tname ); + RH_TRACE(_eventChannelMgrLog, " : RESOLVE FAILED VIA (corbaloc), Channel " << tname ); } } else { - ECM_TRACE( "_resolve_es", " : SEARCH FOR Channel " << tname << " FAILED"); + RH_TRACE(_eventChannelMgrLog, " : SEARCH FOR Channel " << tname << " FAILED"); } }catch (const CORBA::Exception& e) { - if (!suppress) ECM_WARN( "Event Service Lookup", + if (!suppress) RH_WARN(_eventChannelMgrLog, " Unable to lookup with corbaloc URI:" << tname << ", CHANNEL:" << cname << " REASON: CORBA RETURNED(" << e._name() << ")" ); } @@ -1186,9 +1480,8 @@ EventChannelManager::ChannelRegistrationPtr EventChannelManager::_addChannelRegi ossie::events::EventChannel_ptr channel, bool autoRelease ) { - - ECM_TRACE("_addChannelRegistration", "Created ChannelRegistrationRecord, Event Channel/FQN : "<< cname << "/" << fqn ); - ECM_TRACE("_addChannelRegistration", "Created ChannelRegistrationRecord, Event Channel : "<< channel << "/" << autoRelease ); + RH_TRACE(_eventChannelMgrLog, "Created ChannelRegistrationRecord, Event Channel/FQN : "<< cname << "/" << fqn ); + 
RH_TRACE(_eventChannelMgrLog, "Created ChannelRegistrationRecord, Event Channel : "<< channel << "/" << autoRelease ); ChannelRegistrationTable::iterator itr = _channels.find( cname ); ChannelRegistrationPtr ret=NULL; @@ -1201,15 +1494,16 @@ EventChannelManager::ChannelRegistrationPtr EventChannelManager::_addChannelRegi _channels[cname].release = false; _channels[cname].autoRelease = autoRelease; ret = &(_channels[cname]); - ECM_TRACE("_addChannelRegistration", "Created ChannelRegistrationRecord, Event Channel/FQN : "<< cname << "/" << fqn ); - ECM_TRACE("addChannelRegistration", " ChannelRecord: name:" << _channels[cname].channel_name ); - ECM_TRACE("addChannelRegistration", " ChannelRecord: fqn:" << _channels[cname].fqn ); - ECM_TRACE("addChannelRegistration", " ChannelRecord: autoRelease:" << _channels[cname].autoRelease ); - ECM_TRACE("addChannelRegistration", " ChannelRecord: release:" << _channels[cname].release ); - ECM_TRACE("addChannelRegistration", " ChannelRecord: registrants:" << _channels[cname].registrants.size()); - ECM_TRACE("addChannelRegistration", " ChannelRecord: channel:" << _channels[cname].channel); - ECM_TRACE("_addChannelRegistration", "Registration Table Size: "<< _channels.size() ); + RH_TRACE(_eventChannelMgrLog, "Created ChannelRegistrationRecord, Event Channel/FQN : "<< cname << "/" << fqn ); + RH_TRACE(_eventChannelMgrLog, " ChannelRecord: name:" << _channels[cname].channel_name ); + RH_TRACE(_eventChannelMgrLog, " ChannelRecord: fqn:" << _channels[cname].fqn ); + RH_TRACE(_eventChannelMgrLog, " ChannelRecord: autoRelease:" << _channels[cname].autoRelease ); + RH_TRACE(_eventChannelMgrLog, " ChannelRecord: release:" << _channels[cname].release ); + RH_TRACE(_eventChannelMgrLog, " ChannelRecord: registrants:" << _channels[cname].registrants.size()); + RH_TRACE(_eventChannelMgrLog, " ChannelRecord: channel:" << _channels[cname].channel); + RH_TRACE(_eventChannelMgrLog, "Registration Table Size: "<< _channels.size() ); } + 
this->_domainManager->storeEventChannelRegistrations(); return ret; } @@ -1225,23 +1519,24 @@ EventChannelManager::ChannelRegistrationPtr EventChannelManager::_addChannelRegi ChannelRegistrationPtr reg = &(itr->second); if ( reg ) { if ( CORBA::is_nil( reg->channel) == false ){ - ECM_DEBUG("_deleteChannelRegistration", "Calling Destroy, Channel/EventChannel: "<< cname << "/" << reg->fqn); + RH_DEBUG(_eventChannelMgrLog, "Calling Destroy, Channel/EventChannel: "<< cname << "/" << reg->fqn); try { reg->channel->destroy(); } catch(...){ - ECM_DEBUG("_deleteChannelRegistration", "Exception during destroy EventService.. channel/EventChannel: "<< cname << "/" << reg->fqn); + RH_DEBUG(_eventChannelMgrLog, "Exception during destroy EventService.. channel/EventChannel: "<< cname << "/" << reg->fqn); } reg->channel = ossie::events::EventChannel::_nil(); - ECM_DEBUG("_deleteChannelRegistration", "Destory Completed, Channel/EventChannel: "<< cname << "/" << reg->fqn); + RH_DEBUG(_eventChannelMgrLog, "Destory Completed, Channel/EventChannel: "<< cname << "/" << reg->fqn); } } // remove the channel registration entry - ECM_DEBUG("_deleteChannelRegistration", "Deleting Registration for EventChannel: "<< cname ); + RH_DEBUG(_eventChannelMgrLog, "Deleting Registration for EventChannel: "<< cname ); _channels.erase(itr); } - ECM_DEBUG("_deleteChannelRegistration", "Completed delete registration...EventChannel: "<< cname ); + RH_DEBUG(_eventChannelMgrLog, "Completed delete registration...EventChannel: "<< cname ); + this->_domainManager->storeEventChannelRegistrations(); return; } diff --git a/redhawk/src/control/sdr/dommgr/EventChannelManager.h b/redhawk/src/control/sdr/dommgr/EventChannelManager.h index 7722f25d9..12c4a6a19 100644 --- a/redhawk/src/control/sdr/dommgr/EventChannelManager.h +++ b/redhawk/src/control/sdr/dommgr/EventChannelManager.h @@ -83,6 +83,18 @@ class EventChannelManager: public virtual EventChannelManagerBase { CF::EventChannelManager::OperationNotAllowed, 
CF::EventChannelManager::ServiceUnavailable ); + ossie::events::EventChannel_ptr get( const char *channel_name ) + throw ( CF::EventChannelManager::ChannelDoesNotExist, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ); + + ossie::events::EventChannel_ptr get( const std::string &channel_name ) + throw ( CF::EventChannelManager::ChannelDoesNotExist, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ); + /* Return an Event Channel in the Domain associated with the Manager from the specified channel_name parameter. @@ -107,6 +119,19 @@ class EventChannelManager: public virtual EventChannelManagerBase { CF::EventChannelManager::OperationNotAllowed, CF::EventChannelManager::ServiceUnavailable ); + /* + void setLogger(rh_logger::LoggerPtr logptr) { + _allocMgrLog = logptr; + }; + + Force the removal of the event channel from the Domain + */ + void forceRelease( const char *channel_name ) + throw ( CF::EventChannelManager::ChannelDoesNotExist, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ); + /* Remove the event channel from the Domain */ @@ -146,6 +171,13 @@ class EventChannelManager: public virtual EventChannelManagerBase { */ + ossie::events::EventChannelReg_ptr _registerResource( const ossie::events::EventRegistration &req) + throw ( CF::EventChannelManager::InvalidChannelName, + CF::EventChannelManager::RegistrationAlreadyExists, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ); + ossie::events::EventChannelReg_ptr registerResource( const ossie::events::EventRegistration &req) throw ( CF::EventChannelManager::InvalidChannelName, CF::EventChannelManager::RegistrationAlreadyExists, @@ -153,6 +185,19 @@ class 
EventChannelManager: public virtual EventChannelManagerBase { CF::EventChannelManager::OperationNotAllowed, CF::EventChannelManager::ServiceUnavailable ); + ossie::events::EventChannelReg_ptr registerConsumer( CosEventComm::PushConsumer_ptr consumer, const ossie::events::EventRegistration &req) + throw ( CF::EventChannelManager::InvalidChannelName, + CF::EventChannelManager::RegistrationAlreadyExists, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ); + ossie::events::PublisherReg_ptr registerPublisher( const ossie::events::EventRegistration &req, CosEventComm::PushSupplier_ptr disconnectReceiver) + throw ( CF::EventChannelManager::InvalidChannelName, + CF::EventChannelManager::RegistrationAlreadyExists, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ); + /* Unregister a publisher or subcriber from an event channel and invalidates the context */ @@ -180,11 +225,7 @@ class EventChannelManager: public virtual EventChannelManagerBase { bool isChannel( const std::string &channel_name ); void setPollingPeriod( const int64_t period ); - private: - - typedef std::pair< std::string, std::string > RegRecord; typedef std::map< std::string, std::string > RegIdList; - struct ChannelRegistration { std::string channel_name; std::string fqn; @@ -197,11 +238,58 @@ class EventChannelManager: public virtual EventChannelManagerBase { return registrants.size(); } }; + typedef std::map< std::string, ChannelRegistration > ChannelRegistrationTable; + + typedef std::map SubProxyMap; + typedef std::map PubProxyMap; + + SubProxyMap getSubProxies() { return _subProxies;}; + PubProxyMap getPubProxies() { return _pubProxies;}; + ChannelRegistrationTable getChannelRegistrations() { return _channels;}; + void setSubProxies(SubProxyMap &_inval) { _subProxies = _inval;}; + void setPubProxies(PubProxyMap &_inval) { 
_pubProxies = _inval;}; + void setChannelRegistrations(ChannelRegistrationTable &_inval) { _channels = _inval; }; + void setLogger(rh_logger::LoggerPtr logptr) { + _eventChannelMgrLog = logptr; + }; + private: + + // type definitions + typedef std::pair< std::string, std::string > RegRecord; typedef ChannelRegistration* ChannelRegistrationPtr; + rh_logger::LoggerPtr _eventChannelMgrLog; - typedef std::map< std::string, ChannelRegistration > ChannelRegistrationTable; + // event channel manager state + SubProxyMap _subProxies; + PubProxyMap _pubProxies; + // + // Channel Registration database + // + ChannelRegistrationTable _channels; + + // configuration and default state information +#ifndef CPPUNIT_TEST + // Handle to the Resource that owns us + DomainManager_impl* _domainManager; +#endif + // naming context directory to bind event channgels to... + std::string _domain_context; + // orb context + ossie::corba::OrbContext _orbCtx; + // Handle to factory interface to create EventChannels + ossie::events::EventChannelFactory_var _event_channel_factory; + // if enabled, events will show up in the NamingService + bool _use_naming_service; + // use fully qualified domain names when creating channels. 
+ bool _use_fqn; + // allow event service to resolve channels + bool _allow_es_resolve; + // default polling period to assign to a channel + int64_t _default_poll_period; + // synchronize access to member variables + redhawk::Mutex _mgrlock; void _initialize(); @@ -226,6 +314,11 @@ class EventChannelManager: public virtual EventChannelManagerBase { CF::EventChannelManager::OperationNotAllowed, CF::EventChannelManager::ServiceUnavailable ); + ossie::events::EventChannel_ptr _get( const std::string &channel_name ) + throw ( CF::EventChannelManager::ChannelDoesNotExist, + CF::EventChannelManager::OperationFailed, + CF::EventChannelManager::OperationNotAllowed, + CF::EventChannelManager::ServiceUnavailable ); ossie::events::EventChannel_ptr _create( const std::string &channel_name, const bool autoRelease=false ) throw ( CF::EventChannelManager::ChannelAlreadyExists, @@ -312,42 +405,6 @@ class EventChannelManager: public virtual EventChannelManagerBase { */ void _deleteChannelRegistration( const std::string &cname ); - - -#ifndef CPPUNIT_TEST - // Handle to the Resource that owns us - DomainManager_impl* _domainManager; -#endif - - // naming context directory to bind event channgels to... - std::string _domain_context; - - // orb context - ossie::corba::OrbContext _orbCtx; - - // Handle to factory interface to create EventChannels - ossie::events::EventChannelFactory_var _event_channel_factory; - - // - // Channel Registration database - // - ChannelRegistrationTable _channels; - - // if enabled, events will show up in the NamingService - bool _use_naming_service; - - // use fully qualified domain names when creating channels. 
- bool _use_fqn; - - // allow event service to resolve channels - bool _allow_es_resolve; - - // default polling period to assign to a channel - int64_t _default_poll_period; - - // synchronize access to member variables - redhawk::Mutex _mgrlock; - }; #endif diff --git a/redhawk/src/control/sdr/dommgr/FakeApplication.cpp b/redhawk/src/control/sdr/dommgr/FakeApplication.cpp index 2027c1472..84d921db8 100644 --- a/redhawk/src/control/sdr/dommgr/FakeApplication.cpp +++ b/redhawk/src/control/sdr/dommgr/FakeApplication.cpp @@ -22,6 +22,7 @@ #include "Application_impl.h" FakeApplication::FakeApplication (Application_impl* app) : + Logging_impl(app->getName()), _app(app) { } @@ -59,6 +60,12 @@ void FakeApplication::query (CF::Properties& configProperties) throw CF::UnknownProperties(); } +CF::Properties* FakeApplication::metrics(const CF::StringSequence& components, const CF::StringSequence& attributes) + throw (CF::Application::InvalidMetric, CORBA::SystemException) +{ + throw CF::Application::InvalidMetric(components, attributes); +} + char * FakeApplication::registerPropertyListener( CORBA::Object_ptr listener, const CF::StringSequence &prop_ids, const CORBA::Float interval) throw(CF::UnknownProperties, CF::InvalidObjectReference) { @@ -116,6 +123,13 @@ bool FakeApplication::aware () return false; } +CORBA::Float FakeApplication::stopTimeout () throw (CORBA::SystemException) { + return -1; +} + +void FakeApplication::stopTimeout (CORBA::Float timeout) throw (CORBA::SystemException) { +} + CF::DeviceAssignmentSequence * FakeApplication::componentDevices () { return new CF::DeviceAssignmentSequence(); diff --git a/redhawk/src/control/sdr/dommgr/FakeApplication.h b/redhawk/src/control/sdr/dommgr/FakeApplication.h index 1a5b0f888..9b33669a2 100644 --- a/redhawk/src/control/sdr/dommgr/FakeApplication.h +++ b/redhawk/src/control/sdr/dommgr/FakeApplication.h @@ -43,6 +43,8 @@ class FakeApplication : public virtual POA_CF::Application, public Logging_impl void 
initializeProperties (const CF::Properties& configProperties){}; void configure (const CF::Properties& configProperties); void query (CF::Properties& configProperties); + CF::Properties* metrics (const CF::StringSequence& components, const CF::StringSequence& attributes) + throw (CF::Application::InvalidMetric, CORBA::SystemException); char *registerPropertyListener( CORBA::Object_ptr listener, const CF::StringSequence &prop_ids, const CORBA::Float interval) throw(CF::UnknownProperties, CF::InvalidObjectReference); void unregisterPropertyListener( const char *reg_id ) @@ -66,6 +68,10 @@ class FakeApplication : public virtual POA_CF::Application, public Logging_impl bool aware (); + CORBA::Float stopTimeout () throw (CORBA::SystemException); + + void stopTimeout (CORBA::Float timeout) throw (CORBA::SystemException); + CF::DeviceAssignmentSequence * componentDevices (); CF::Application::ComponentElementSequence * componentImplementations (); CF::Application::ComponentElementSequence * componentNamingContexts (); diff --git a/redhawk/src/control/sdr/dommgr/Makefile.am b/redhawk/src/control/sdr/dommgr/Makefile.am index 40f5face9..32869fcd5 100644 --- a/redhawk/src/control/sdr/dommgr/Makefile.am +++ b/redhawk/src/control/sdr/dommgr/Makefile.am @@ -22,8 +22,7 @@ domainmgrdir = $(SDR_ROOT)/dom/mgr dist_domainmgr_DATA = DomainManager.spd.xml DomainManager.scd.xml DomainManager.prf.xml domainmgr_PROGRAMS = DomainManager -DomainManager_SOURCES = applicationSupport.cpp \ - connectionSupport.cpp \ +DomainManager_SOURCES = connectionSupport.cpp \ AllocationManager_impl.cpp \ EventChannelManager.cpp \ Application_impl.cpp \ @@ -34,9 +33,18 @@ DomainManager_SOURCES = applicationSupport.cpp \ RH_NamingContext.cpp \ DomainManager_impl.cpp \ FakeApplication.cpp \ + Deployment.cpp \ + DeploymentExceptions.cpp \ + ApplicationDeployment.cpp \ + ApplicationValidator.cpp \ + ApplicationComponent.cpp \ + RH_LogEventAppender.cpp \ + RH_SyncRollingAppender.cpp \ + ProfileCache.cpp \ 
main.cpp + DomainManager_CPPFLAGS = -I../../include -I../../parser -I$(top_srcdir)/base/include -I$(top_srcdir)/base $(BOOST_CPPFLAGS) $(OMNIORB_CFLAGS) $(LOG4CXX_FLAGS) DomainManager_CXXFLAGS = -Wall DomainManager_LDADD = ../../framework/libossiedomain.la ../../parser/libossieparser.la $(top_builddir)/base/framework/libossiecf.la $(top_builddir)/base/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_FILESYSTEM_LIB) $(BOOST_SERIALIZATION_LIB) $(BOOST_REGEX_LIB) $(BOOST_THREAD_LIB) $(BOOST_SYSTEM_LIB) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(LOG4CXX_LIBS) $(PERSISTENCE_LIBS) -DomainManager_LDFLAGS = -static +DomainManager_LDFLAGS = -static -lrt diff --git a/redhawk/src/control/sdr/dommgr/PersistenceStore.h b/redhawk/src/control/sdr/dommgr/PersistenceStore.h index 35e054a40..a8f01af54 100644 --- a/redhawk/src/control/sdr/dommgr/PersistenceStore.h +++ b/redhawk/src/control/sdr/dommgr/PersistenceStore.h @@ -25,7 +25,11 @@ #include #include -#include "applicationSupport.h" +#include +#include +#include +#include + #include "connectionSupport.h" @@ -42,6 +46,7 @@ namespace ossie { std::string identifier; std::string label; CF::DeviceManager_var deviceManager; + DeviceManagerConfiguration dcd; }; class DomainManagerNode { @@ -68,8 +73,19 @@ namespace ossie { ossie::SoftPkg spd; ossie::Properties prf; std::string implementationId; - bool isLoadable; - bool isExecutable; + redhawk::PropertyMap requiresProps; + CF::LoadableDevice_var loadableDevice; + CF::ExecutableDevice_var executableDevice; + + bool isLoadable() const + { + return !CORBA::is_nil(loadableDevice); + } + + bool isExecutable() const + { + return !CORBA::is_nil(executableDevice); + } }; typedef std::list > DeviceList; @@ -92,11 +108,36 @@ namespace ossie { typedef std::map AllocationTable; typedef std::map RemoteAllocationTable; + typedef std::map EventProxies; + struct ChannelRegistrationNode { + std::string channel_name; + std::string fqn; + std::string channel; + bool autoRelease; + bool release; + std::map< 
std::string, std::string > registrants; + }; + typedef std::map ChannelRegistrationNodes; + + struct ComponentNode { + std::string identifier; + std::string name; + std::string softwareProfile; + std::string namingContext; + std::string implementationId; + bool isVisible; + std::vector loadedFiles; + unsigned long processId; + CORBA::Object_var componentObject; + std::string assignedDeviceId; + std::string componentHostId; + }; + typedef std::list ComponentList; struct externalPropertyType { public: std::string property_id; - std::string component_id; + CF::Resource_var component; std::string access; }; @@ -106,16 +147,16 @@ namespace ossie { std::string identifier; std::string contextName; CosNaming::NamingContext_var context; - std::vector componentDevices; + CF::DeviceAssignmentSequence componentDevices; ossie::ComponentList components; - CF::Resource_var assemblyController; + std::string assemblyControllerId; std::vector connections; std::vector allocationIDs; - std::vector componentRefs; + std::vector startOrder; std::map ports; - // Ext Props map : extid -> (propid, access, compid) std::map properties; bool aware_application; + float stop_timeout; }; class ServiceNode { @@ -235,7 +276,7 @@ namespace boost { } ar & (*ptr); } - + template void serialize(Archive& ar, ossie::EventChannelNode& node, const unsigned int version) { ar & (node.channel); @@ -245,21 +286,34 @@ namespace boost { } template - void serialize(Archive& ar, ossie::ApplicationComponent& node, const unsigned int version) { + void serialize(Archive& ar, ossie::ChannelRegistrationNode& node, const unsigned int version) { + ar & (node.channel_name); + ar & (node.fqn); + ar & (node.channel); + ar & (node.autoRelease); + ar & (node.release); + ar & (node.registrants); + } + + template + void serialize(Archive& ar, ossie::ComponentNode& node, const unsigned int version) { ar & node.identifier; + ar & node.name; ar & node.softwareProfile; ar & node.namingContext; ar & node.implementationId; + ar & 
node.isVisible; ar & node.loadedFiles; ar & node.processId; ar & node.componentObject; - ar & node.assignedDevice; + ar & node.assignedDeviceId; + ar & node.componentHostId; } template void serialize(Archive& ar, ossie::externalPropertyType& prop, const unsigned int version) { ar & prop.property_id; - ar & prop.component_id; + ar & prop.component; ar & prop.access; } @@ -272,10 +326,10 @@ namespace boost { ar & (node.context); ar & (node.componentDevices); ar & (node.components); - ar & (node.assemblyController); + ar & (node.assemblyControllerId); ar & (node.allocationIDs); ar & (node.connections); - ar & (node.componentRefs); + ar & (node.startOrder); ar & (node.ports); ar & (node.properties); } @@ -295,12 +349,6 @@ namespace boost { ar & (node.connected); } - template - void serialize(Archive& ar, ossie::DeviceAssignmentInfo& dai, const unsigned int version) { - ar & (dai.deviceAssignment); - ar & (dai.device); - } - template void serialize(Archive& ar, ossie::AllocationType& at, const unsigned int version) { ar & (at.allocationID); diff --git a/redhawk/src/control/sdr/dommgr/ProfileCache.cpp b/redhawk/src/control/sdr/dommgr/ProfileCache.cpp new file mode 100644 index 000000000..382195f9b --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/ProfileCache.cpp @@ -0,0 +1,143 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include +#include +#include + +#include "ProfileCache.h" + +using namespace redhawk; +using namespace ossie; + +PREPARE_CF_LOGGING(ProfileCache); + +namespace { + static std::string getVersionMismatchMessage(const std::string& version) + { + if (redhawk::compareVersions(VERSION, version) > 0) { + return " (attempting to load a profile from version " + version + " on REDHAWK version " VERSION ")"; + } else { + return std::string(); + } + } +} + +ProfileCache::ProfileCache(CF::FileSystem_ptr fileSystem, rh_logger::LoggerPtr log) : + fileSystem(CF::FileSystem::_duplicate(fileSystem)), + _profilecache_log(log) +{ +} + +const SoftPkg* ProfileCache::loadProfile(const std::string& spdFilename) +{ + // Use basic load to find or load the SPD, then cast the const away so that + // the PRF and SCD can be loaded if needed + SoftPkg* softpkg = const_cast(loadSoftPkg(spdFilename)); + + // If the SPD has a PRF reference, and it hasn't already been loaded, try + // to load it + if (softpkg->getPRFFile() && !softpkg->getProperties()) { + const std::string prf_file = softpkg->getPRFFile(); + RH_TRACE(_profilecache_log, "Loading PRF file " << prf_file); + try { + File_stream prf_stream(fileSystem, prf_file.c_str()); + softpkg->loadProperties(prf_stream); + } catch (const std::exception& exc) { + std::string message = spdFilename + " has invalid PRF file " + prf_file + ": " + exc.what(); + message += ::getVersionMismatchMessage(softpkg->getSoftPkgType()); + throw invalid_profile(spdFilename, message); + } + } + + // If the SPD has an SCD reference, and it hasn't already been loaded, try + // to load it + if (softpkg->getSCDFile() && !softpkg->getDescriptor()) { + const std::string scd_file = softpkg->getSCDFile(); + RH_TRACE(_profilecache_log, "Loading SCD file " << scd_file); + try { + File_stream 
scd_stream(fileSystem, scd_file.c_str()); + softpkg->loadDescriptor(scd_stream); + } catch (const std::exception& exc) { + std::string message = spdFilename + " has invalid SCD file " + scd_file + ": " + exc.what(); + message += ::getVersionMismatchMessage(softpkg->getSoftPkgType()); + throw invalid_profile(spdFilename, message); + } + } + + return softpkg; +} + +const SoftPkg* ProfileCache::loadSoftPkg(const std::string& filename) +{ + // Check the cache first + BOOST_FOREACH(SoftPkg& softpkg, profiles) { + if (softpkg.getSPDFile() == filename) { + RH_TRACE(_profilecache_log, "Found existing SPD " << filename); + return &softpkg; + } + } + + RH_TRACE(_profilecache_log, "Loading SPD file " << filename); + SoftPkg* softpkg = 0; + try { + File_stream spd_stream(fileSystem, filename.c_str()); + softpkg = new SoftPkg(spd_stream, filename); + } catch (const std::exception& exc) { + std::string message = filename + " is invalid: " + exc.what(); + std::string softpkg_version = _extractVersion(filename); + if (!softpkg_version.empty()) { + message += ::getVersionMismatchMessage(softpkg_version); + } + throw invalid_profile(filename, message); + } + + profiles.push_back(softpkg); + return softpkg; +} + +std::string ProfileCache::_extractVersion(const std::string& filename) +{ + // When the SPD itself cannot be parsed, try to recover the type attribute + // from the element manually. If the SPD is from a newer version + // of REDHAWK that has extended the XSD, this allows for a more helpful + // error message. 
+ try { + File_stream stream(fileSystem, filename.c_str()); + std::string line; + while (std::getline(stream, line)) { + std::string::size_type type_idx = line.find("type"); + if (type_idx != std::string::npos) { + std::string::size_type first_quote = line.find('"', type_idx); + if (first_quote != std::string::npos) { + size_t second_quote = line.find('"', first_quote + 1); + if (second_quote != std::string::npos) { + return line.substr(first_quote + 1, second_quote-(first_quote+1)); + } + } + } + } + } catch (...) { + // Ignore all errors + } + return std::string(); +} diff --git a/redhawk/src/control/sdr/dommgr/ProfileCache.h b/redhawk/src/control/sdr/dommgr/ProfileCache.h new file mode 100644 index 000000000..9a85aa0d3 --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/ProfileCache.h @@ -0,0 +1,124 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef PROFILECACHE_H +#define PROFILECACHE_H + +#include +#include + +#include + +#include +#include +#include + +namespace redhawk { + + /** + * @brief An exception raised when a SoftPkg profile is invalid + */ + class invalid_profile : public std::runtime_error + { + public: + invalid_profile(const std::string& filename, const std::string& message) : + std::runtime_error(message), + filename(filename) + { + } + + virtual ~invalid_profile() throw() + { + // Only defined because the compiler-generated destructor has a + // looser throw specification than runtime_error's destructor + } + + /** + * @brief Returns the filename of the invalid SoftPkg profile + */ + const std::string& get_filename() const + { + return filename; + } + + private: + const std::string filename; + }; + + /** + * @brief Caching softpkg profile loader + */ + class ProfileCache + { + ENABLE_LOGGING; + + public: + /** + * @brief Creates a new cache + * @param fileSystem the CF::FileSystem used to load files + * + * Creates a new empty cache. When this cache is destroyed, all loaded + * profiles are deleted. + */ + ProfileCache(CF::FileSystem_ptr fileSystem, rh_logger::LoggerPtr log); + + /** + * @brief Loads an SPD file and its PRF and SCD, if available + * @param spdFilename the path to the SPD file + * @return a pointer to the loaded SoftPkg + * @exception redhawk::invalid_profile a file is invalid or cannot be + * parsed + * + * Reads and parses the SPD file @a spdFilename and its referenced PRF + * and SCD files (if any), caching the result. Subsequent calls with + * the same filename will return the cached object. + * + * The returned SoftPkg is owned by this object, not the caller. 
+ */ + const ossie::SoftPkg* loadProfile(const std::string& spdFilename); + + /** + * @brief Loads an SPD file + * @param spdFilename the path to the SPD file + * @return a pointer to the loaded SoftPkg + * @exception redhawk::invalid_profile file is invalid or cannot be + * parsed + * + * Reads and parses the SPD file @a spdFilename and its referenced PRF + * and SCD files (if any), caching the result. Subsequent calls with + * the same filename will return the cached object. + + * The returned SoftPkg is owned by this object, not the caller. + */ + const ossie::SoftPkg* loadSoftPkg(const std::string& filename); + + protected: + ossie::SoftPkg* findSoftPkg(const std::string& filename); + + std::string _extractVersion(const std::string& filename); + + CF::FileSystem_var fileSystem; + boost::ptr_vector profiles; + + rh_logger::LoggerPtr _profilecache_log; + }; +} + +#endif // PROFILECACHE_H diff --git a/redhawk/src/control/sdr/dommgr/RH_LogEventAppender.cpp b/redhawk/src/control/sdr/dommgr/RH_LogEventAppender.cpp new file mode 120000 index 000000000..6f6d8f53a --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/RH_LogEventAppender.cpp @@ -0,0 +1 @@ +../../../base/framework/logging/RH_LogEventAppender.cpp \ No newline at end of file diff --git a/redhawk/src/control/sdr/dommgr/RH_LogEventAppender.h b/redhawk/src/control/sdr/dommgr/RH_LogEventAppender.h new file mode 120000 index 000000000..5f4033ea4 --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/RH_LogEventAppender.h @@ -0,0 +1 @@ +../../../base/framework/logging/RH_LogEventAppender.h \ No newline at end of file diff --git a/redhawk/src/control/sdr/dommgr/RH_SyncRollingAppender.cpp b/redhawk/src/control/sdr/dommgr/RH_SyncRollingAppender.cpp new file mode 120000 index 000000000..02a0f84bd --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/RH_SyncRollingAppender.cpp @@ -0,0 +1 @@ +../../../base/framework/logging/RH_SyncRollingAppender.cpp \ No newline at end of file diff --git 
a/redhawk/src/control/sdr/dommgr/RH_SyncRollingAppender.h b/redhawk/src/control/sdr/dommgr/RH_SyncRollingAppender.h new file mode 120000 index 000000000..d37415bb9 --- /dev/null +++ b/redhawk/src/control/sdr/dommgr/RH_SyncRollingAppender.h @@ -0,0 +1 @@ +../../../base/framework/logging/RH_SyncRollingAppender.h \ No newline at end of file diff --git a/redhawk/src/control/sdr/dommgr/applicationSupport.cpp b/redhawk/src/control/sdr/dommgr/applicationSupport.cpp index ea9c6fe89..1c9f4df23 100644 --- a/redhawk/src/control/sdr/dommgr/applicationSupport.cpp +++ b/redhawk/src/control/sdr/dommgr/applicationSupport.cpp @@ -604,6 +604,9 @@ ComponentInfo* ComponentInfo::buildComponentInfoFromSPDFile(CF::FileManager_ptr if (not cprop[i]->isNone()) { newComponent->addExecParameter(convertPropertyToDataType(cprop[i])); } + if ( cprop[i]->isProperty() ) { + newComponent->addConstructProperty(convertPropertyToDataType(cprop[i])); + } } else { newComponent->addConstructProperty(convertPropertyToDataType(cprop[i])); } @@ -953,7 +956,8 @@ void ComponentInfo::fillSeqForStructProperty(CF::Properties &props) { if (_inner_id == ossie::corba::returnString(structIter->id)) { const ossie::SimpleSequenceProperty* _type = dynamic_cast(*internal_iter); std::vector empty_string_vector; - structProps[ossie::corba::returnString(structIter->id)] = ossie::strings_to_any(empty_string_vector, ossie::getTypeKind(_type->getType())); + CORBA::TypeCode_ptr _typecode = ossie::getTypeCode(static_cast(_type->getType())); + structProps[ossie::corba::returnString(structIter->id)] = ossie::strings_to_any(empty_string_vector, ossie::getTypeKind(_type->getType()), _typecode); } } } diff --git a/redhawk/src/control/sdr/dommgr/connectionSupport.cpp b/redhawk/src/control/sdr/dommgr/connectionSupport.cpp index cf2248414..a6358abdc 100644 --- a/redhawk/src/control/sdr/dommgr/connectionSupport.cpp +++ b/redhawk/src/control/sdr/dommgr/connectionSupport.cpp @@ -43,10 +43,12 @@ PREPARE_CF_LOGGING(ConnectionManager); 
ConnectionManager::ConnectionManager(DomainLookup* domainLookup, ComponentLookup* componentLookup, - const std::string& namingContext) : + const std::string& namingContext, + bool enableExceptions) : _domainLookup(domainLookup), _componentLookup(componentLookup), - _namingContext(namingContext) + _namingContext(namingContext), + _enableExceptions(enableExceptions) { assert(_domainLookup != 0); assert(_componentLookup != 0); @@ -59,14 +61,10 @@ ConnectionManager::~ConnectionManager() void ConnectionManager::disconnectAll(std::vector& connections, ossie::DomainLookup* domainLookup) { // Disconnect all connections made for the application in the reverse order of their creation. - LOG_TRACE(ConnectionManager, "Disconnecting " << connections.size() << " ports"); for (std::vector::reverse_iterator connection = connections.rbegin(); connection != connections.rend(); ++connection) { - LOG_TRACE(ConnectionManager, "Disconnecting connection " << connection->identifier); connection->disconnect(domainLookup); } connections.clear(); - - LOG_TRACE(ConnectionManager, "All connected ports disconnected"); } CORBA::Object_ptr ConnectionManager::resolveComponent(const std::string& identifier) @@ -76,14 +74,28 @@ CORBA::Object_ptr ConnectionManager::resolveComponent(const std::string& identif target = _domainLookup->lookupDeviceManagerByInstantiationId(identifier); } if (CORBA::is_nil(target)) { - LOG_DEBUG(ConnectionManager, "Could not locate component with instantiation id " << identifier); + if (exceptionsEnabled()) { + throw ossie::LookupError("component '" + identifier + "' not found"); + } else { + RH_DEBUG(_connectionLog, "Could not locate component with instantiation id " << identifier); + } } return target._retn(); } CORBA::Object_ptr ConnectionManager::resolveDomainObject(const std::string& type, const std::string& name) { - return _domainLookup->lookupDomainObject(type, name); + try { + return _domainLookup->lookupDomainObject(type, name); + } catch (const LookupError& 
error) { + if (exceptionsEnabled()) { + // Pass the exception on to the caller + throw; + } else { + RH_WARN(_connectionLog, "Failed to resolve domain object: " << error.what()); + } + } + return CORBA::Object::_nil(); } CORBA::Object_ptr ConnectionManager::resolveFindByNamingService(const std::string& name) @@ -97,23 +109,28 @@ CORBA::Object_ptr ConnectionManager::resolveFindByNamingService(const std::strin findbyName = _namingContext + "/" + name; } - LOG_TRACE(ConnectionManager, "resolveFindByNamingService: The findname that I'm using is: " << findbyName); + RH_TRACE(_connectionLog, "resolveFindByNamingService: The findname that I'm using is: " << findbyName); try { return ossie::corba::objectFromName(findbyName); } catch (CosNaming::NamingContext::NotFound) { // The name was not found, continue on and return nil. - } CATCH_LOG_ERROR(ConnectionManager, "Exception trying to resolve findbynamingservice \"" << findbyName << "\""); + } CATCH_RH_ERROR(_connectionLog, "Exception trying to resolve findbynamingservice \"" << findbyName << "\""); return CORBA::Object::_nil(); } +bool ConnectionManager::exceptionsEnabled() +{ + return _enableExceptions; +} + PREPARE_CF_LOGGING(AppConnectionManager); AppConnectionManager::AppConnectionManager(DomainLookup* domainLookup, ComponentLookup* componentLookup, DeviceLookup* deviceLookup, const std::string& namingContext) : - ConnectionManager(domainLookup, componentLookup, namingContext), + ConnectionManager(domainLookup, componentLookup, namingContext, true), _deviceLookup(deviceLookup), _connections() { @@ -128,11 +145,11 @@ bool AppConnectionManager::resolveConnection(const Connection& connection) { std::auto_ptr connectionNode(ConnectionNode::ParseConnection(connection)); if (!connectionNode.get()) { - LOG_ERROR(AppConnectionManager, "Unable to parse connection"); + RH_ERROR(_connectionLog, "Unable to parse connection"); return false; } - LOG_TRACE(AppConnectionManager, "Attempting to resolve connection " << 
connectionNode->identifier); + RH_TRACE(_connectionLog, "Attempting to resolve connection " << connectionNode->identifier); if (connectionNode->connect(*this)) { addConnection_(*connectionNode); return true; @@ -152,43 +169,31 @@ CORBA::Object_ptr AppConnectionManager::resolveFindByNamingService(const std::st std::string::size_type islash = _namingContext.find('/'); if (islash != std::string::npos) { std::string findbyName = _namingContext.substr(0, islash + 1) + name; - LOG_TRACE(AppConnectionManager, "resolveFindBy: The findname that I'm using is: " << findbyName); + RH_TRACE(_connectionLog, "resolveFindBy: The findname that I'm using is: " << findbyName); try { return ossie::corba::objectFromName(findbyName); } catch (CosNaming::NamingContext::NotFound) { // The name was not found, continue on and return nil. - } CATCH_LOG_ERROR(AppConnectionManager, "Exception trying to resolve findbynamingservice \"" << findbyName << "\""); + } CATCH_RH_ERROR(_connectionLog, "Exception trying to resolve findbynamingservice \"" << findbyName << "\""); } return CORBA::Object::_nil(); } CF::Device_ptr AppConnectionManager::resolveDeviceThatLoadedThisComponentRef(const std::string& refid) { - CF::Device_ptr device = _deviceLookup->lookupDeviceThatLoadedComponentInstantiationId(refid); - if (CORBA::is_nil(device)) { - LOG_ERROR(AppConnectionManager, "devicethatloadedthiscomponentref not found"); - } - return device; + return _deviceLookup->lookupDeviceThatLoadedComponentInstantiationId(refid); } CF::Device_ptr AppConnectionManager::resolveDeviceUsedByThisComponentRef(const std::string& refid, const std::string& usesrefid) { - CF::Device_ptr device = _deviceLookup->lookupDeviceUsedByComponentInstantiationId(refid, usesrefid); - if (CORBA::is_nil(device)) { - LOG_ERROR(AppConnectionManager, "deviceusedbythiscomponentref not found"); - } - return device; + return _deviceLookup->lookupDeviceUsedByComponentInstantiationId(refid, usesrefid); } CF::Device_ptr 
AppConnectionManager::resolveDeviceUsedByApplication(const std::string& usesrefid) { - CF::Device_ptr device = _deviceLookup->lookupDeviceUsedByApplication(usesrefid); - if (CORBA::is_nil(device)) { - LOG_ERROR(AppConnectionManager, "deviceusedbyapplication not found"); - } - return device; + return _deviceLookup->lookupDeviceUsedByApplication(usesrefid); } const std::vector& AppConnectionManager::getConnections() { @@ -197,7 +202,7 @@ const std::vector& AppConnectionManager::getConnections() { void AppConnectionManager::addConnection_(const ConnectionNode& connection) { - LOG_TRACE(AppConnectionManager, "Adding connection " << connection.identifier << " to connection list"); + RH_TRACE(_connectionLog, "Adding connection " << connection.identifier << " to connection list"); _connections.push_back(connection); } @@ -207,7 +212,7 @@ PREPARE_CF_LOGGING(DomainConnectionManager); DomainConnectionManager::DomainConnectionManager(DomainLookup* domainLookup, ComponentLookup* componentLookup, const std::string& domainName) : - ConnectionManager(domainLookup, componentLookup, domainName), + ConnectionManager(domainLookup, componentLookup, domainName, false), _connectionsByRequester() { } @@ -218,19 +223,19 @@ DomainConnectionManager::~DomainConnectionManager() CF::Device_ptr DomainConnectionManager::resolveDeviceThatLoadedThisComponentRef(const std::string&) { - LOG_ERROR(DomainConnectionManager, "Not supported in this context: Port is devicethatloadedthiscomponentref"); + RH_ERROR(_connectionLog, "Not supported in this context: Port is devicethatloadedthiscomponentref"); return CF::Device::_nil(); } CF::Device_ptr DomainConnectionManager::resolveDeviceUsedByThisComponentRef(const std::string&, const std::string&) { - LOG_ERROR(DomainConnectionManager, "Not supported in this context: Port is deviceusedbythiscomponentref "); + RH_ERROR(_connectionLog, "Not supported in this context: Port is deviceusedbythiscomponentref "); return CF::Device::_nil(); } CF::Device_ptr 
DomainConnectionManager::resolveDeviceUsedByApplication(const std::string&) { - LOG_ERROR(DomainConnectionManager, "Not supported in this context: Port is deviceusedbyapplication"); + RH_ERROR(_connectionLog, "Not supported in this context: Port is deviceusedbyapplication"); return CF::Device::_nil(); } @@ -238,19 +243,19 @@ std::string DomainConnectionManager::addConnection(const std::string& deviceMana { boost::scoped_ptr connectionNode(ConnectionNode::ParseConnection(connection)); if (!connectionNode.get()) { - LOG_ERROR(DomainConnectionManager, "Skipping invalid connection for DeviceManager " << deviceManagerId); + RH_ERROR(_connectionLog, "Skipping invalid connection for DeviceManager " << deviceManagerId); return ""; } if (!connectionNode->connect(*this)) { if (!connectionNode->allowDeferral()) { - LOG_ERROR(DomainConnectionManager, "Connection " << connectionNode->identifier << " is not resolvable"); + RH_ERROR(_connectionLog, "Connection " << connectionNode->identifier << " is not resolvable"); return ""; } } connectionNode.get()->setrequesterId(deviceManagerId); - LOG_DEBUG(DomainConnectionManager, "Connection " << connectionNode->identifier << " could not be resolved, marked as pending"); + RH_DEBUG(_connectionLog, "Connection " << connectionNode->identifier << " could not be resolved, marked as pending"); std::string connectionRecordId = addConnection_(deviceManagerId, *connectionNode); return connectionRecordId; } @@ -302,7 +307,6 @@ void DomainConnectionManager::breakConnection(const std::string& connectionRecor void DomainConnectionManager::deviceManagerUnregistered(const std::string& deviceManagerName) { - TRACE_ENTER(DomainConnectionManager); boost::mutex::scoped_lock lock(_connectionLock); ConnectionTable::iterator devMgr = _connectionsByRequester.find(deviceManagerName); if (devMgr == _connectionsByRequester.end()) { @@ -310,7 +314,7 @@ void DomainConnectionManager::deviceManagerUnregistered(const std::string& devic return; } ConnectionList& 
connectionList = devMgr->second; - LOG_TRACE(DomainConnectionManager, "Deleting " << connectionList.size() << " connection(s) from DeviceManager " << deviceManagerName); + RH_TRACE(_connectionLog, "Deleting " << connectionList.size() << " connection(s) from DeviceManager " << deviceManagerName); for (ConnectionList::iterator connection = connectionList.begin(); connection != connectionList.end(); ++connection) { connection->disconnect(_domainLookup); try { @@ -319,73 +323,60 @@ void DomainConnectionManager::deviceManagerUnregistered(const std::string& devic } } _connectionsByRequester.erase(devMgr); - TRACE_EXIT(DomainConnectionManager); } void DomainConnectionManager::deviceRegistered(const std::string& deviceId) { - TRACE_ENTER(DomainConnectionManager); try { tryPendingConnections_(Endpoint::COMPONENT, deviceId); } catch ( ossie::InvalidConnection &e ) { std::ostringstream err; err << "Invalid connection: "<identifier); + RH_TRACE(_connectionLog, "Resolving pending connection " << connection->identifier); if (!connection->connect(*this)) { if (!connection->allowDeferral()) { // This connection needs to be removed from the list - LOG_ERROR(DomainConnectionManager, "Connection " << connection->identifier << " cannot be resolved"); + RH_ERROR(_connectionLog, "Connection " << connection->identifier << " cannot be resolved"); connection = connections.erase(connection); } else { - LOG_TRACE(DomainConnectionManager, "Connection " << connection->identifier << " still has pending dependencies"); + RH_TRACE(_connectionLog, "Connection " << connection->identifier << " still has pending dependencies"); connection++; } } else { - LOG_DEBUG(DomainConnectionManager, "Connection " << connection->identifier << " resolved"); + RH_DEBUG(_connectionLog, "Connection " << connection->identifier << " resolved"); connection++; } } } - TRACE_EXIT(DomainConnectionManager); } void DomainConnectionManager::breakConnections_(Endpoint::DependencyType type, const std::string& identifier) { - 
TRACE_ENTER(DomainConnectionManager); - boost::mutex::scoped_lock lock(_connectionLock); for (ConnectionTable::iterator devMgr = _connectionsByRequester.begin(); devMgr != _connectionsByRequester.end(); ++devMgr) { // Go through the list of connections for each DeviceManager to check @@ -468,20 +454,18 @@ void DomainConnectionManager::breakConnections_(Endpoint::DependencyType type, c } // If the connection is still connected, break it if (connection->connected) { - LOG_TRACE(DomainConnectionManager, "Breaking connection " << connection->identifier); + RH_TRACE(_connectionLog, "Breaking connection " << connection->identifier); connection->disconnect(_domainLookup); } } if (remove) { - LOG_TRACE(DomainConnectionManager, "Removing connection " << connection->identifier << " that does not allow deferral"); + RH_TRACE(_connectionLog, "Removing connection " << connection->identifier << " that does not allow deferral"); connection = connections.erase(connection); } else { ++connection; } } } - - TRACE_EXIT(DomainConnectionManager); } @@ -502,6 +486,8 @@ EXPORT_CLASS_SERIALIZATION(PortEndpoint); PREPARE_CF_LOGGING(Endpoint); PREPARE_CF_LOGGING(PortEndpoint); +rh_logger::LoggerPtr ossie::connectionSupportLog; + Endpoint* Endpoint::ParsePortSupplier(const Port* port) { if (port->isFindBy()) { @@ -509,21 +495,21 @@ Endpoint* Endpoint::ParsePortSupplier(const Port* port) } else if (port->isComponentInstantiationRef()) { assert(port->getComponentInstantiationRefID() != 0); std::string identifier = port->getComponentInstantiationRefID(); - LOG_TRACE(Endpoint, "ComponentEndpoint refid=" << identifier); + RH_TRACE(connectionSupportLog, "ComponentEndpoint refid=" << identifier); return new ComponentEndpoint(identifier); } else if (port->isDeviceThatLoadedThisComponentRef()) { return new DeviceLoadedEndpoint(port->getDeviceThatLoadedThisComponentRef()); } else if (port->isDeviceUsedByThisComponentRef()) { std::string refid = port->getDeviceUsedByThisComponentRefID(); std::string 
usesrefid = port->getDeviceUsedByThisComponentRefUsesRefID(); - LOG_TRACE(Endpoint, "DeviceUsedEndpoint refid=" << refid << " usesrefid=" << usesrefid); + RH_TRACE(connectionSupportLog, "DeviceUsedEndpoint refid=" << refid << " usesrefid=" << usesrefid); return new DeviceUsedEndpoint(refid, usesrefid); } else if (port->isDeviceUsedByApplication()) { std::string usesrefid = port->getDeviceUsedByApplicationUsesRefID(); - LOG_TRACE(Endpoint, "ApplicationDeviceUsedEndpoint usesrefid=" << usesrefid); + RH_TRACE(connectionSupportLog, "ApplicationDeviceUsedEndpoint usesrefid=" << usesrefid); return new ApplicationUsesDeviceEndpoint(usesrefid); } else { - LOG_ERROR(Endpoint, "Unknown port location type"); + RH_ERROR(connectionSupportLog, "Unknown port location type"); } return 0; @@ -540,7 +526,7 @@ Endpoint* Endpoint::ParseProvidesEndpoint(const Connection& connection) } else if (connection.isComponentSupportedInterface()) { return Endpoint::ParsePortSupplier(connection.getComponentSupportedInterface()); } else { - LOG_ERROR(Endpoint, "Cannot find port information for provides port"); + RH_ERROR(connectionSupportLog, "Cannot find port information for provides port"); } return 0; @@ -552,7 +538,7 @@ Endpoint* Endpoint::ParsePort(const Port* port) if (supplier) { assert(port->getID() != 0); std::string name = port->getID(); - LOG_TRACE(Endpoint, "PortEndpoint name=" << name); + RH_TRACE(connectionSupportLog, "PortEndpoint name=" << name); return new PortEndpoint(supplier, name); } return 0; @@ -563,7 +549,7 @@ Endpoint* Endpoint::ParseFindBy(const FindBy* findby) if (findby->isFindByNamingService()) { assert(findby->getFindByNamingServiceName() != 0); std::string name = findby->getFindByNamingServiceName(); - LOG_TRACE(Endpoint, "FindByNamingServiceEndpoint name=" << name); + RH_TRACE(connectionSupportLog, "FindByNamingServiceEndpoint name=" << name); return new FindByNamingServiceEndpoint(name); } else if (findby->isFindByDomainFinder()) { 
assert(findby->getFindByDomainFinderType() != 0); @@ -571,16 +557,16 @@ Endpoint* Endpoint::ParseFindBy(const FindBy* findby) std::string type = findby->getFindByDomainFinderType(); std::string name = findby->getFindByDomainFinderName(); if (type == "servicename") { - LOG_TRACE(Endpoint, "ServiceEndpoint name=" << name); + RH_TRACE(connectionSupportLog, "ServiceEndpoint name=" << name); return new ServiceEndpoint(name); } else if (type == "eventchannel") { - LOG_TRACE(Endpoint, "EventChannelEndpoint name=" << name); + RH_TRACE(connectionSupportLog, "EventChannelEndpoint name=" << name); return new EventChannelEndpoint(name); } - LOG_TRACE(Endpoint, "FindByDomainFinderEndpoint type=" << type << " name=" << name); + RH_TRACE(connectionSupportLog, "FindByDomainFinderEndpoint type=" << type << " name=" << name); return new FindByDomainFinderEndpoint(type, name); } else { - LOG_ERROR(Endpoint, "Unknown findby type"); + RH_ERROR(connectionSupportLog, "Unknown findby type"); } return 0; } @@ -597,15 +583,20 @@ CF::ConnectionManager::EndpointStatusType Endpoint::toEndpointStatusType() const return status; } -bool Endpoint::isResolved() +bool Endpoint::isResolved() const { return !(CORBA::is_nil(object_)); } +bool Endpoint::isTerminated() const +{ + return terminated_; +} + CORBA::Object_ptr Endpoint::resolve(ConnectionManager& manager) { if (!isResolved()) { - LOG_TRACE(Endpoint, "Resolving endpoint"); + RH_TRACE(connectionSupportLog, "Resolving endpoint"); object_ = resolve_(manager); } return CORBA::Object::_duplicate(object_); @@ -626,6 +617,11 @@ void Endpoint::setIdentifier(std::string identifier) identifier__ = identifier; } +void Endpoint::dependencyTerminated() +{ + terminated_ = true; +} + void Endpoint::release() { // Call the subclass-specific release method. @@ -642,14 +638,14 @@ ConnectionNode* ConnectionNode::ParseConnection(const Connection& connection) // Parse the uses port. 
std::auto_ptr usesEndpoint(Endpoint::ParsePort(connection.getUsesPort())); if (!usesEndpoint.get()) { - LOG_ERROR(ConnectionNode, "Unable to parse uses endpoint"); + RH_ERROR(connectionSupportLog, "Unable to parse uses endpoint"); return 0; } // Parse the provides port. std::auto_ptr providesEndpoint(Endpoint::ParseProvidesEndpoint(connection)); if (!providesEndpoint.get()) { - LOG_ERROR(ConnectionNode, "Unable to parse provides endpoint"); + RH_ERROR(connectionSupportLog, "Unable to parse provides endpoint"); return 0; } @@ -696,31 +692,39 @@ bool ConnectionNode::connect(ConnectionManager& manager) try { usesObject = uses->resolve(manager); } catch ( ... ) { - LOG_TRACE(ConnectionNode, "Unable to resolve the uses object"); + if (manager.exceptionsEnabled()) { + throw; + } else { + RH_TRACE(connectionSupportLog, "Unable to resolve the uses object"); + } } try { providesPort = provides->resolve(manager); } catch ( ... ) { - LOG_TRACE(ConnectionNode, "Unable to resolve the provides object"); + if (manager.exceptionsEnabled()) { + throw; + } else { + RH_TRACE(connectionSupportLog, "Unable to resolve the provides object"); + } } if (CORBA::is_nil(usesObject) || CORBA::is_nil(providesPort)) { - LOG_TRACE(ConnectionNode, "Unable to establish a connection because one or more objects cannot be resolved (i.e.: cannot create an event channel or device is not available)"); + RH_TRACE(connectionSupportLog, "Unable to establish a connection because one or more objects cannot be resolved (i.e.: cannot create an event channel or device is not available)"); if (allowDeferral()) { - LOG_DEBUG(ConnectionNode, "Connection is deferred to a later date"); + RH_DEBUG(connectionSupportLog, "Connection is deferred to a later date"); return false; } else { if (!uses->isResolved() && !uses->allowDeferral()) { - throw InvalidConnection("Uses endpoint for "+identifier+" cannot be resolved or deferred"); + throw InvalidConnection(uses->description() + " cannot be resolved or deferred"); } else 
{ - throw InvalidConnection("Provides endpoint for "+identifier+" cannot be resolved or deferred"); + throw InvalidConnection(provides->description() + " cannot be resolved or deferred"); } } } CF::Port_var usesPort = ossie::corba::_narrowSafe(usesObject); if (CORBA::is_nil(usesPort)) { - LOG_ERROR(ConnectionNode, "Uses port is not a CF::Port"); + RH_ERROR(connectionSupportLog, "Uses port is not a CF::Port"); throw InvalidConnection("Uses port is not a CF::Port"); } @@ -731,12 +735,12 @@ bool ConnectionNode::connect(ConnectionManager& manager) } catch (const CF::Port::InvalidPort& ip) { std::ostringstream err; err << "Invalid port: " << ip.msg; - LOG_ERROR(ConnectionNode, err.str()); + RH_ERROR(connectionSupportLog, err.str()); throw InvalidConnection(err.str()); } catch (const CF::Port::OccupiedPort& op) { - LOG_ERROR(ConnectionNode, "Port is occupied"); + RH_ERROR(connectionSupportLog, "Port is occupied"); throw InvalidConnection("Port is occupied"); - } CATCH_LOG_ERROR(ConnectionNode, "Port connection failed for connection " << identifier); + } CATCH_RH_ERROR(connectionSupportLog, "Port connection failed for connection " << identifier); throw InvalidConnection("Unknown error"); } @@ -757,7 +761,11 @@ void ConnectionNode::disconnect(DomainLookup* domainLookup) uses->release(); provides->release(); if (CORBA::is_nil(usesPort)) { - LOG_ERROR(ConnectionNode, "Uses port is not a CF::Port"); + if (uses->isTerminated()) { + RH_DEBUG(connectionSupportLog, "Uses port provider terminated"); + } else { + RH_ERROR(connectionSupportLog, "Uses port is not a CF::Port"); + } return; } @@ -765,7 +773,15 @@ void ConnectionNode::disconnect(DomainLookup* domainLookup) unsigned long timeout = 500; // milliseconds omniORB::setClientCallTimeout(usesPort, timeout); usesPort->disconnectPort(identifier.c_str()); - } CATCH_LOG_WARN(ConnectionNode, "Unable to disconnect port for connection " << identifier); + } catch (const CORBA::SystemException& exc) { + if (uses->isTerminated()) { + 
RH_DEBUG(connectionSupportLog, "Disconnecting port for connection " << identifier + << " failed, but uses port provider terminated"); + } else { + RH_WARN(connectionSupportLog, "Unable to disconnect port for connection " << identifier + << ": " << ossie::corba::describeException(exc)); + } + } CATCH_RH_WARN(connectionSupportLog, "Unable to disconnect port for connection " << identifier); FindByDomainFinderEndpoint* endpoint = dynamic_cast(provides.get()); if (endpoint && endpoint->type() == "eventchannel") { @@ -810,6 +826,20 @@ bool ConnectionNode::checkDependency(Endpoint::DependencyType type, const std::s return (uses->checkDependency(type, identifier) || provides->checkDependency(type, identifier)); } +bool ConnectionNode::dependencyTerminated(Endpoint::DependencyType type, const std::string& identifier) +{ + bool terminated = false; + if (uses->checkDependency(type, identifier)) { + uses->dependencyTerminated(); + terminated = true; + } + if (provides->checkDependency(type, identifier)) { + provides->dependencyTerminated(); + terminated = true; + } + return terminated; +} + CREATE_LOGGER(connectionSupport); std::string ossie::eventChannelName(const FindBy* findby) @@ -829,20 +859,20 @@ std::string ossie::eventChannelName(const FindBy* findby) CORBA::Object_ptr ossie::getPort(CORBA::Object_ptr obj, const std::string& portId) { - LOG_TRACE(connectionSupport, "Finding port"); + RH_TRACE(connectionSupportLog, "Finding port"); CF::PortSupplier_var portSupplier; - LOG_TRACE(connectionSupport, "Narrowing resource"); + RH_TRACE(connectionSupportLog, "Narrowing resource"); try { portSupplier = CF::PortSupplier::_narrow (obj); } catch( ... 
) { - LOG_ERROR(connectionSupport, "Failed to narrow CF::Resource before obtaining Port with Unknown Exception"); + RH_ERROR(connectionSupportLog, "Failed to narrow CF::Resource before obtaining Port with Unknown Exception"); return CORBA::Object::_nil(); } - LOG_TRACE(connectionSupport, "Getting port with id - " << portId); + RH_TRACE(connectionSupportLog, "Getting port with id - " << portId); try { return portSupplier->getPort(portId.c_str()); } catch( ... ) { - LOG_ERROR(connectionSupport, "getPort failed with Unknown Exception"); + RH_ERROR(connectionSupportLog, "getPort failed with Unknown Exception"); return CORBA::Object::_nil(); } } diff --git a/redhawk/src/control/sdr/dommgr/connectionSupport.h b/redhawk/src/control/sdr/dommgr/connectionSupport.h index b4e07376b..f467679f2 100644 --- a/redhawk/src/control/sdr/dommgr/connectionSupport.h +++ b/redhawk/src/control/sdr/dommgr/connectionSupport.h @@ -40,6 +40,8 @@ namespace ossie { + extern rh_logger::LoggerPtr connectionSupportLog; + // Exception type for connections that cannot be parsed into our internal // structures. class InvalidConnection : public std::runtime_error { @@ -50,11 +52,24 @@ namespace ossie } }; + // Exception type that may be thrown when an implementation of one of the + // lookup interfaces cannot find the requested object. + class LookupError : public std::runtime_error { + public: + LookupError(const std::string& message) : + std::runtime_error(message) + { + } + }; + // Interface to look up components by their identifier. 
class ComponentLookup { public: virtual ~ComponentLookup() {}; + + /* Given a component instantiation id, returns the associated CORBA Resource pointer + */ virtual CF::Resource_ptr lookupComponentByInstantiationId(const std::string& identifier) = 0; }; @@ -74,8 +89,17 @@ namespace ossie { public: virtual ~DeviceLookup() {}; + + /* Given a component instantiation id, returns the associated CORBA Device pointer + */ virtual CF::Device_ptr lookupDeviceThatLoadedComponentInstantiationId(const std::string& componentId) = 0; + + /* Given a component instantiation id and uses id, returns the associated CORBA Device pointer + */ virtual CF::Device_ptr lookupDeviceUsedByComponentInstantiationId(const std::string& componentId, const std::string& usesId) = 0; + + /* Given a uses id, returns the associated CORBA Device pointer + */ virtual CF::Device_ptr lookupDeviceUsedByApplication(const std::string& usesRefId) = 0; }; @@ -93,29 +117,38 @@ namespace ossie APPLICATION } DependencyType; - Endpoint() { } + Endpoint() : + terminated_(false) + { + } + virtual ~Endpoint() { } CORBA::Object_ptr resolve(ConnectionManager& manager); CORBA::Object_ptr object(); std::string getIdentifier(); void setIdentifier(std::string identifier); - bool isResolved(); + + bool isResolved() const; + bool isTerminated() const; virtual CF::ConnectionManager::EndpointStatusType toEndpointStatusType() const; virtual bool allowDeferral() = 0; virtual bool checkDependency(DependencyType type, const std::string& identifier) const = 0; + void dependencyTerminated(); + void release(); - // Virtual copy contstructor + virtual std::string description() const = 0; + + // Virtual copy constructor virtual Endpoint* clone() const = 0; static Endpoint* ParsePortSupplier(const Port* port); static Endpoint* ParsePort(const Port* port); static Endpoint* ParseProvidesEndpoint(const ossie::Connection& connection); static Endpoint* ParseFindBy(const ossie::FindBy* findby); - private: // Subclasses must implement their 
own resolution method. virtual CORBA::Object_ptr resolve_(ConnectionManager& manager) = 0; @@ -130,10 +163,12 @@ namespace ossie void serialize(Archive& ar, const unsigned int version) { ar & object_; + ar & terminated_; } #endif CORBA::Object_var object_; + bool terminated_; protected: std::string identifier__; @@ -156,6 +191,8 @@ namespace ossie bool allowDeferral(Endpoint::DependencyType type, const std::string& identifier); bool checkDependency(Endpoint::DependencyType type, const std::string& identifier) const; + bool dependencyTerminated(Endpoint::DependencyType type, const std::string& identifier); + // Default ctor and assignment exist only for deserialization support. ConnectionNode() { } const ConnectionNode& operator=(const ConnectionNode& other) @@ -195,9 +232,6 @@ namespace ossie ENABLE_LOGGING; public: - ConnectionManager(DomainLookup* domainLookup, - ComponentLookup* componentLookup, - const std::string& namingContext); virtual ~ConnectionManager(); static void disconnectAll(ConnectionList& connections, ossie::DomainLookup* domainLookup); @@ -215,11 +249,24 @@ namespace ossie virtual CF::Device_ptr resolveDeviceUsedByThisComponentRef(const std::string& refid, const std::string& usesid) = 0; virtual CF::Device_ptr resolveDeviceUsedByApplication(const std::string& usesrefid) = 0; + bool exceptionsEnabled(); + + void setLogger(rh_logger::LoggerPtr logptr) + { + _connectionLog = logptr; + }; + protected: + ConnectionManager(DomainLookup* domainLookup, + ComponentLookup* componentLookup, + const std::string& namingContext, + bool enableExceptions); + ossie::DomainLookup* _domainLookup; ossie::ComponentLookup* _componentLookup; std::string _namingContext; - + bool _enableExceptions; + rh_logger::LoggerPtr _connectionLog; }; class AppConnectionManager : public ConnectionManager diff --git a/redhawk/src/control/sdr/dommgr/createHelper.h b/redhawk/src/control/sdr/dommgr/createHelper.h new file mode 100644 index 000000000..c46dc2e47 --- /dev/null +++ 
b/redhawk/src/control/sdr/dommgr/createHelper.h @@ -0,0 +1,214 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef CREATEHELPER_H +#define CREATEHELPER_H + +#include +#include +#include + +#include + +#include "PersistenceStore.h" +#include "ApplicationDeployment.h" +#include "ProfileCache.h" + +class Application_impl; +class AllocationManager_impl; + +class ScopedAllocations { +public: + ScopedAllocations(AllocationManager_impl& allocator); + ~ScopedAllocations(); + + void push_back(const std::string& allocationID); + + template + void transfer(T& dest); + + void transfer(ScopedAllocations& dest); + + void deallocate(); + +private: + AllocationManager_impl& _allocator; + std::list _allocations; +}; + +class createHelper +{ + +public: + typedef std::map DeviceAssignmentMap; + + createHelper (const ApplicationFactory_impl& appFact, + std::string waveformContextName, + std::string base_naming_context, + CosNaming::NamingContext_ptr WaveformContext, + CosNaming::NamingContext_ptr DomainContext); + ~createHelper (); + + CF::Application_ptr create (const char* name, + const CF::Properties& initConfiguration, + const DeviceAssignmentMap& deviceAssignments); + +private: + 
// Used for storing the current state of the OE & create process + const ApplicationFactory_impl& _appFact; + + const rh_logger::LoggerPtr _createHelperLog; + + // Local pointer to the allocation manager + AllocationManager_impl* _allocationMgr; + + // Tracks allocation IDs made during creation, and automates cleanup on + // failure + ScopedAllocations _allocations; + + ossie::DeviceList _registeredDevices; + ossie::DeviceList _executableDevices; + + // waveform instance-specific naming context (unique to the instance of the waveform) + std::string _waveformContextName; + + // full (includes full context path) waveform instance-specific naming context + std::string _baseNamingContext; + + // CORBA naming context + CosNaming::NamingContext_var _waveformContext; + CosNaming::NamingContext_ptr _domainContext; + + redhawk::ProfileCache _profileCache; + + typedef std::vector DeploymentList; + typedef std::vector ContainerList; + typedef std::vector ProcessorList; + typedef std::vector OSList; + typedef std::vector ReservationList; + + // createHelper helper methods + void assignPlacementsToDevices(redhawk::ApplicationDeployment& appDeployment, + const DeviceAssignmentMap& devices, + const std::map& specialized_reservations); + void _resolveAssemblyController(redhawk::ApplicationDeployment& appDeployment); + void _validateDAS(redhawk::ApplicationDeployment& appDeployment, const DeviceAssignmentMap& deviceAssignments); + void checkOptions(); + void setUpExternalPorts(redhawk::ApplicationDeployment& appDeployment, Application_impl* application); + void setUpExternalProperties(redhawk::ApplicationDeployment& appDeployment, Application_impl* application); + std::vector overloadReservations(const ossie::SoftwareAssembly::HostCollocation& collocation, + const std::map& specialized_reservations); + void _placeHostCollocation(redhawk::ApplicationDeployment& appDeployment, + const ossie::SoftwareAssembly::HostCollocation& collocation, + const DeviceAssignmentMap& devices, + const 
std::map& specialized_reservations); + bool placeHostCollocation(redhawk::ApplicationDeployment& appDeployment, + const DeploymentList& components, + DeploymentList::const_iterator current, + ossie::DeviceList& deploymentDevices, + const redhawk::PropertyMap& deviceRequires=redhawk::PropertyMap(), + const ReservationList& reservations=ReservationList(), + const ProcessorList& processorDeps=ProcessorList(), + const OSList& osDeps=OSList()); + + void _handleUsesDevices(redhawk::ApplicationDeployment& appDeployment, + const std::string& appName); + std::vector _getFailedUsesDevices(const std::vector& usesDevices, + redhawk::UsesDeviceDeployment& assignedDevices); + bool _allDevicesBusy(ossie::DeviceList& devices); + + redhawk::PropertyMap _consolidateAllocations(const DeploymentList& implementations, std::map& nicAllocs); + void _evaluateMATHinRequest(CF::Properties &request, const CF::Properties &configureProperties); + void _castRequestProperties(CF::Properties& allocationProperties, const std::vector &prop_refs, unsigned int offset=0); + + redhawk::PropertyMap _getComponentAllocations(const redhawk::ComponentDeployment* deployment); + std::string _getNicAllocationId(redhawk::PropertyMap& allocationProperties); + void _applyNicAllocation(redhawk::ComponentDeployment* deployment, const std::string& allocId, CF::Device_ptr device); + + // Supports allocation + bool allocateUsesDevices(const std::vector& usesDevices, + const CF::Properties& configureProperties, + redhawk::UsesDeviceDeployment& assignedDevices, + ScopedAllocations& allocations); + CF::AllocationManager::AllocationResponseSequence* allocateUsesDeviceProperties( + const std::vector& component, + const CF::Properties& configureProperties); + void allocateComponent(redhawk::ApplicationDeployment& appDeployment, + redhawk::ComponentDeployment* deployment, + const std::string& assignedDeviceId, + const std::map& specialized_reservations); + + ossie::AllocationResult 
allocateComponentToDevice(redhawk::ComponentDeployment* deployment, + const std::string& assignedDeviceId, + const std::string& appIdentifier, + const std::map& specialized_reservations); + + bool checkPartitionMatching( ossie::DeviceNode& node, + const CF::Properties& devicerequires ); + + bool allocateHostCollocation(redhawk::ApplicationDeployment& appDeployment, + const DeploymentList& components, + ossie::DeviceList& deploymentDevices, + const ProcessorList& processorDeps, + const OSList& osDeps, + const redhawk::PropertyMap &, + const ReservationList& reservations); + + bool resolveSoftpkgDependencies(redhawk::ApplicationDeployment& appDeployment, + redhawk::SoftPkgDeployment* deployment, + ossie::DeviceNode& device); + redhawk::SoftPkgDeployment* resolveDependencyImplementation(redhawk::ApplicationDeployment& appDeployment, + const ossie::SPD::SoftPkgRef& ref, + ossie::DeviceNode& device); + + // Supports loading, executing, initializing, configuring, & connecting + void loadAndExecuteContainers(const ContainerList& containers, + CF::ApplicationRegistrar_ptr _appReg); + void waitForContainerRegistration(redhawk::ApplicationDeployment& appDeployment); + + void loadAndExecuteComponents(const DeploymentList& deployments, + CF::ApplicationRegistrar_ptr _appReg); + void applyApplicationAffinityOptions(const DeploymentList& deployments); + + void attemptComponentExecution(CF::ApplicationRegistrar_ptr registrar, redhawk::ComponentDeployment* deployment); + + void waitForComponentRegistration(redhawk::ApplicationDeployment& appDeployment); + void initializeComponents(const DeploymentList& deployments); + + void configureComponents(const DeploymentList& deployments); + void connectComponents(redhawk::ApplicationDeployment& appDeployment, + std::vector& connections, + std::string base_naming_context); + + int resolveDebugLevel( const std::string &level_in ); + void resolveLoggingConfiguration(redhawk::ComponentDeployment* deployment, redhawk::PropertyMap &execParams ); 
+ std::vector getStartOrder(const DeploymentList& deployments); + void verifyNoCpuSpecializationCollisions(const ossie::SoftwareAssembly& sad, std::map specialized_reservations); + std::vector getComponentUsageNames(redhawk::ApplicationDeployment& appDeployment); + std::vector getHostCollocationsIds(); + + // Cleanup - used when create fails/doesn't succeed for some reason + bool _isComplete; + void _cleanupFailedCreate(); + Application_impl* _application; + float _stopTimeout; + bool _aware; +}; +#endif diff --git a/redhawk/src/control/sdr/dommgr/main.cpp b/redhawk/src/control/sdr/dommgr/main.cpp index 248da28bd..2544c3bea 100644 --- a/redhawk/src/control/sdr/dommgr/main.cpp +++ b/redhawk/src/control/sdr/dommgr/main.cpp @@ -114,7 +114,8 @@ int old_main(int argc, char* argv[]) string logfile_uri(""); string db_uri(""); string domainName(""); - int debugLevel = 3; + int debugLevel = -1; + int initialDebugLevel = -1; std::string dpath(""); std::string name_binding("DomainManager"); bool useLogCfgResolver = false; @@ -168,6 +169,7 @@ int old_main(int argc, char* argv[]) std::cout<<"Logging level "< 0 ) { // any other argument besides the first one is part of the execparams execparams[param] = argv[ii]; } @@ -220,84 +224,44 @@ int old_main(int argc, char* argv[]) return(EXIT_FAILURE); } - std::ostringstream os; - os << domainName << "/" << domainName; - dpath= os.str(); + dpath= domRootPath.string(); + std::string logname("DomainManagerLoader"); // setup logging context for a component resource - ossie::logging::ResourceCtxPtr ctx( new ossie::logging::DomainCtx(name_binding, domainName, dpath ) ); - std::string logcfg_uri = logfile_uri; - if ( !logfile_uri.empty() ) { - // Determine the scheme, if any. This isn't a full fledged URI parser so we can - // get tripped up on complex URIs. 
We should probably incorporate a URI parser - // library for this sooner rather than later - std::string scheme; - fs::path path; - - std::string::size_type colonIdx = logfile_uri.find(":"); // Find the scheme separator - if (colonIdx == std::string::npos) { - - scheme = "file"; - path = logfile_uri; - // Make the path absolute - fs::path logfile_path(path); - if (! logfile_path.is_complete()) { - // Get the root path so we can resolve relative paths - fs::path root = fs::initial_path(); - logfile_path = fs::path(root / path); - } - path = logfile_path; - logfile_uri = "file://" + path.string(); - - } else { - - scheme = logfile_uri.substr(0, colonIdx); - colonIdx += 1; - if ((logfile_uri.at(colonIdx + 1) == '/') && (logfile_uri.at(colonIdx + 2) == '/')) { - colonIdx += 2; - } - path = logfile_uri.substr(colonIdx, logfile_uri.length() - colonIdx); - } - - if (scheme == "file") { - std::string fpath((char*)path.string().c_str()); - logcfg_uri = "file://" + fpath; - } - if (scheme == "sca") { - std::string fpath((char*)fs::path(domRootPath / path).string().c_str()); - logcfg_uri = "file://" + fpath; - } - } + ossie::logging::DomainCtx *ctx_=new ossie::logging::DomainCtx( name_binding, domainName, dpath ); + ctx_->configure( logcfg_uri, debugLevel, logfile_uri ); + ossie::logging::ResourceCtxPtr ctx(ctx_); // configure the logging library - ossie::logging::Configure(logcfg_uri, debugLevel, ctx); // This log statement is exempt from the "NO LOG STATEMENTS" warning below if ( logfile_uri == "") { - LOG_INFO(DomainManager, "Loading DEFAULT logging configuration. " ); + RH_NL_INFO(logname, "Loading DEFAULT logging configuration. " ); } else { - LOG_INFO(DomainManager, "Loading log configuration from uri:" << logfile_uri); + RH_NL_INFO(logname, "Loading log configuration from uri:" << logfile_uri); } -#if ! 
defined ENABLE_PERSISTENCE +#ifdef ENABLE_PERSISTENCE + if (!db_uri.empty()) { + // Validate the path by trying to open the database file; it will be + // automatically closed when it goes out of scope, and catches path or + // permissions errors without complicated checks + ossie::PersistenceStore db; + try { + db.open(db_uri); + } catch (const ossie::PersistenceException& exc) { + RH_NL_FATAL(logname, "Cannot open persistence store " << db_uri << ": " << exc.what()); + return EXIT_FAILURE; + } + } +#else if (!db_uri.empty()) { // reset db_uri to empty... to force ignore of restore operations db_uri.clear(); } #endif -#if 0 - // test logger configuration.... - LOG_FATAL(DomainManager, "FATAL MESSAGE " ); - LOG_ERROR(DomainManager, "ERROR MESSAGE " ); - LOG_WARN(DomainManager, "WARN MESSAGE " ); - LOG_INFO(DomainManager, "INFO MESSAGE " ); - LOG_DEBUG(DomainManager, "DEBUG MESSAGE " ); - LOG_TRACE(DomainManager, "TRACE MESSAGE " ); - std::cout << " END OF TEST LOGGER MESSAGES " << std::endl; -#endif - /////////////////////////////////////////////////////////////////////////// // NO LOG_ STATEMENTS ABOVE THIS POINT /////////////////////////////////////////////////////////////////////////// @@ -315,25 +279,25 @@ int old_main(int argc, char* argv[]) // Associate SIGUSR1 to signal_catcher interrupt handler if (sigaction(SIGUSR1, &fp_sa, NULL) == -1) { - LOG_ERROR(DomainManager, "sigaction(SIGUSR1): " << strerror(errno)); + RH_NL_ERROR(logname, "sigaction(SIGUSR1): " << strerror(errno)); return(EXIT_FAILURE); } // Associate SIGINT to signal_catcher interrupt handler if (sigaction(SIGINT, &sa, NULL) == -1) { - LOG_ERROR(DomainManager, "sigaction(SIGINT): " << strerror(errno)); + RH_NL_ERROR(logname, "sigaction(SIGINT): " << strerror(errno)); return(EXIT_FAILURE); } // Associate SIGQUIT to signal_catcher interrupt handler if (sigaction(SIGQUIT, &sa, NULL) == -1) { - LOG_ERROR(DomainManager, "sigaction(SIGQUIT): " << strerror(errno)); + RH_NL_ERROR(logname, 
"sigaction(SIGQUIT): " << strerror(errno)); return(EXIT_FAILURE); } // Associate SIGTERM to signal_catcher interrupt handler if (sigaction(SIGTERM, &sa, NULL) == -1) { - LOG_ERROR(DomainManager, "sigaction(SIGTERM): " << strerror(errno)); + RH_NL_ERROR(logname, "sigaction(SIGTERM): " << strerror(errno)); return(EXIT_FAILURE); } @@ -362,7 +326,7 @@ int old_main(int argc, char* argv[]) // Install an adaptor to automatically create our own POAs. root_poa = ossie::corba::RootPOA(); } catch ( CORBA::INITIALIZE& ex ) { - LOG_FATAL(DomainManager, "Failed to initialize the POA. Is there a Domain Manager already running?"); + RH_NL_FATAL(logname, "Failed to initialize the POA. Is there a Domain Manager already running?"); return(EXIT_FAILURE); } ossie::corba::POACreator *activator_servant = new ossie::corba::POACreator(); @@ -378,29 +342,29 @@ int old_main(int argc, char* argv[]) // Map i686 to SCA x86 struct utsname un; if (uname(&un) != 0) { - LOG_FATAL(DomainManager, "Unable to determine system information: " << strerror(errno)); + RH_NL_FATAL(logname, "Unable to determine system information: " << strerror(errno)); return(EXIT_FAILURE); } if (strcmp("i686", un.machine) == 0) { strcpy(un.machine, "x86"); } - LOG_DEBUG(DomainManager, "Machine " << un.machine); - LOG_DEBUG(DomainManager, "Version " << un.release); - LOG_DEBUG(DomainManager, "OS " << un.sysname); + RH_NL_DEBUG(logname, "Machine " << un.machine); + RH_NL_DEBUG(logname, "Version " << un.release); + RH_NL_DEBUG(logname, "OS " << un.sysname); struct rlimit limit; if (getrlimit(RLIMIT_NPROC, &limit) == 0) { - LOG_DEBUG(DomainManager, "Process limit " << limit.rlim_cur); + RH_NL_DEBUG(logname, "Process limit " << limit.rlim_cur); } if (getrlimit(RLIMIT_NOFILE, &limit) == 0) { - LOG_DEBUG(DomainManager, "File descriptor limit " << limit.rlim_cur); + RH_NL_DEBUG(logname, "File descriptor limit " << limit.rlim_cur); } // Create Domain Manager servant and object - LOG_INFO(DomainManager, "Starting Domain Manager"); - 
LOG_DEBUG(DomainManager, "Root of DomainManager FileSystem set to " << domRootPath); - LOG_DEBUG(DomainManager, "DMD path set to " << dmdFile); - LOG_DEBUG(DomainManager, "Domain Name set to " << domainName); - if ( bindToDomain ) { LOG_INFO(DomainManager, "Binding applications to the domain." ); } + RH_NL_INFO(logname, "Starting Domain Manager"); + RH_NL_DEBUG(logname, "Root of DomainManager FileSystem set to " << domRootPath); + RH_NL_DEBUG(logname, "DMD path set to " << dmdFile); + RH_NL_DEBUG(logname, "Domain Name set to " << domainName); + if ( bindToDomain ) { RH_NL_INFO(logname, "Binding applications to the domain." ); } try { DomainManager_servant = new DomainManager_impl(dmdFile.c_str(), @@ -409,22 +373,24 @@ int old_main(int argc, char* argv[]) (db_uri.empty()) ? NULL : db_uri.c_str(), (logfile_uri.empty()) ? NULL : logfile_uri.c_str(), useLogCfgResolver, - bindToDomain + bindToDomain, + enablePersistence, + initialDebugLevel ); // set logging level for the DomainManager's logger if ( DomainManager_servant ) { - DomainManager_servant->getLogger()->setLevel( ossie::logging::ConvertDebugToRHLevel(debugLevel) ); + DomainManager_servant->saveLoggingContext( logfile_uri, initialDebugLevel, ctx ); } } catch (const CORBA::Exception& ex) { - LOG_ERROR(DomainManager, "Terminated with CORBA::" << ex._name() << " exception"); + RH_NL_ERROR(logname, "Terminated with CORBA::" << ex._name() << " exception"); return(-1); } catch (const std::exception& ex) { - LOG_ERROR(DomainManager, "Terminated with exception: " << ex.what()); + RH_NL_ERROR(logname, "Terminated with exception: " << ex.what()); return(-1); } catch (...) 
{ - LOG_ERROR(DomainManager, "Terminated with unknown exception"); + RH_NL_ERROR(logname, "Terminated with unknown exception"); return(EXIT_FAILURE); } @@ -442,13 +408,13 @@ int old_main(int argc, char* argv[]) try { DomainManager_servant->restoreState(db_uri); } catch (const CORBA::Exception& ex) { - LOG_FATAL(DomainManager, "Unable to restore state: CORBA::" << ex._name()); + RH_NL_FATAL(logname, "Unable to restore state: CORBA::" << ex._name()); return(EXIT_FAILURE); } catch (const std::exception& ex) { - LOG_FATAL(DomainManager, "Unable to restore state: " << ex.what()); + RH_NL_FATAL(logname, "Unable to restore state: " << ex.what()); return(EXIT_FAILURE); } catch (...) { - LOG_FATAL(DomainManager, "Unrecoverable error restoring state"); + RH_NL_FATAL(logname, "Unrecoverable error restoring state"); return(EXIT_FAILURE); } } @@ -456,12 +422,12 @@ int old_main(int argc, char* argv[]) try { // Activate the DomainManager servant into its own POA, giving the POA responsibility // for its deletion. - LOG_DEBUG(DomainManager, "Activating DomainManager into POA"); + RH_NL_DEBUG(logname, "Activating DomainManager into POA"); PortableServer::POA_var dommgr_poa = root_poa->find_POA("DomainManager", 1); PortableServer::ObjectId_var oid = ossie::corba::activatePersistentObject(dommgr_poa, DomainManager_servant, DomainManager_servant->getFullDomainManagerName()); // Bind the DomainManager object to its full name (e.g. "DomainName/DomainName") in the NameService. 
- LOG_DEBUG(DomainManager, "Binding DomainManager to NamingService name " << DomainManager_servant->getFullDomainManagerName()); + RH_NL_DEBUG(logname, "Binding DomainManager to NamingService name " << DomainManager_servant->getFullDomainManagerName()); CF::DomainManager_var DomainManager_obj = DomainManager_servant->_this(); CosNaming::Name_var name = ossie::corba::stringToName(DomainManager_servant->getFullDomainManagerName()); try { @@ -471,40 +437,40 @@ int old_main(int argc, char* argv[]) } catch (const CosNaming::NamingContext::AlreadyBound&) { if (forceRebind) { // Forcibly replace the existing name binding. - LOG_INFO(DomainManager, "Replacing existing name binding " << DomainManager_servant->getFullDomainManagerName()); + RH_NL_INFO(logname, "Replacing existing name binding " << DomainManager_servant->getFullDomainManagerName()); ossie::corba::InitialNamingContext()->rebind(name, DomainManager_obj); } else { - LOG_FATAL(DomainManager, "A DomainManager is already running as " << DomainManager_servant->getFullDomainManagerName()); + RH_NL_FATAL(logname, "A DomainManager is already running as " << DomainManager_servant->getFullDomainManagerName()); return(-1); } } CORBA::String_var ior_str = orb->object_to_string(DomainManager_obj); - LOG_DEBUG(DomainManager, ior_str); + RH_NL_DEBUG(logname, ior_str); DomainManager_servant->_remove_ref(); - LOG_INFO(DomainManager, "Starting ORB!"); + RH_NL_INFO(logname, "Starting ORB!"); DomainManager_servant->run(); - LOG_DEBUG(DomainManager, "Shutting down DomainManager"); + RH_NL_DEBUG(logname, "Shutting down DomainManager"); DomainManager_servant->shutdown(received_signal); - LOG_INFO(DomainManager, "Requesting ORB shutdown"); + RH_NL_INFO(logname, "Requesting ORB shutdown"); ossie::corba::OrbShutdown(true); - LOG_INFO(DomainManager, "Farewell!"); + RH_NL_INFO(logname, "Farewell!"); ossie::logging::Terminate(); //no more logging.... 
} catch (const CORBA::Exception& ex) { - LOG_FATAL(DomainManager, "Terminated with CORBA::" << ex._name() << " exception"); + RH_NL_FATAL(logname, "Terminated with CORBA::" << ex._name() << " exception"); return(-1); } catch (const std::exception& ex) { - LOG_FATAL(DomainManager, "Terminated with exception: " << ex.what()); + RH_NL_FATAL(logname, "Terminated with exception: " << ex.what()); return(-1); } catch (...) { - LOG_FATAL(DomainManager, "Terminated with unknown exception"); + RH_NL_FATAL(logname, "Terminated with unknown exception"); DomainManager_servant->shutdown(-1); - LOG_INFO(DomainManager, "ORB shutdown.... short startup.."); + RH_NL_INFO(logname, "ORB shutdown.... short startup.."); ossie::corba::OrbShutdown(true); return(-1); } diff --git a/redhawk/src/etc/bash_completion.d/nodeBooter b/redhawk/src/etc/bash_completion.d/nodeBooter index 53f474eaf..6627f4f83 100644 --- a/redhawk/src/etc/bash_completion.d/nodeBooter +++ b/redhawk/src/etc/bash_completion.d/nodeBooter @@ -58,7 +58,7 @@ _nodeBooter() COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" prev="${COMP_WORDS[COMP_CWORD-1]}" - opts="--help -D -d -sdrroot -sdrcache -debug -logcfgfile --dburl --nopersist --force-rebind --daemon --pidfile --user --group --ORBport --ORBInitRef --domainname --version" + opts="--help -D -d -sdrroot -sdrcache -debug -logcfgfile --dburl --force-rebind --daemon --pidfile --user --group --ORBport --ORBInitRef --domainname --version" # Determine the SDRROOT to use, if any sdrroot=`_nodeBooter_get_sdrroot` @@ -92,7 +92,7 @@ _nodeBooter() COMPREPLY=( $(compgen -W "0 1 2 3 4 5" -- "$cur") ) return 0 ;; - --nopersist|--force-rebind|--daemon|--version) + --force-rebind|--daemon|--version) # These are no-arg; suggest a new option COMPREPLY=( $(compgen -W "$opts" -- "$cur") ) return 0 diff --git a/redhawk/src/idl/Makefile.am b/redhawk/src/idl/Makefile.am index dcefd48e8..ffb40b659 100644 --- a/redhawk/src/idl/Makefile.am +++ b/redhawk/src/idl/Makefile.am @@ -19,4 +19,4 @@ # cfidldir = 
$(datadir)/idl/ossie/CF -dist_cfidl_DATA = ossie/CF/cf.idl ossie/CF/DataType.idl ossie/CF/Port.idl ossie/CF/PortTypes.idl ossie/CF/StandardEvent.idl ossie/CF/AggregateDevices.idl ossie/CF/ExtendedEvent.idl ossie/CF/QueryablePort.idl ossie/CF/WellKnownProperties.idl ossie/CF/sandbox.idl ossie/CF/LogInterfaces.idl ossie/CF/EventChannelManager.idl +dist_cfidl_DATA = ossie/CF/cf.idl ossie/CF/DataType.idl ossie/CF/Port.idl ossie/CF/PortTypes.idl ossie/CF/StandardEvent.idl ossie/CF/AggregateDevices.idl ossie/CF/ExtendedEvent.idl ossie/CF/QueryablePort.idl ossie/CF/NegotiablePort.idl ossie/CF/WellKnownProperties.idl ossie/CF/sandbox.idl ossie/CF/LogInterfaces.idl ossie/CF/EventChannelManager.idl diff --git a/redhawk/src/idl/ossie/CF/DataType.idl b/redhawk/src/idl/ossie/CF/DataType.idl index 6fd235990..59fe3fa2a 100644 --- a/redhawk/src/idl/ossie/CF/DataType.idl +++ b/redhawk/src/idl/ossie/CF/DataType.idl @@ -39,6 +39,13 @@ module CF { /* This type defines a sequence of strings */ typedef sequence StringSequence; + struct UTCTime { + short tcstatus; // timecode status + double twsec; // J1970 GMT + double tfsec; // 0.0 to 1.0 + }; + typedef sequence UTCTimeSequence; + }; #endif diff --git a/redhawk/src/idl/ossie/CF/EventChannelManager.idl b/redhawk/src/idl/ossie/CF/EventChannelManager.idl index df48404d6..865ec653f 100644 --- a/redhawk/src/idl/ossie/CF/EventChannelManager.idl +++ b/redhawk/src/idl/ossie/CF/EventChannelManager.idl @@ -96,7 +96,7 @@ module CF { typedef CosEventChannelAdmin::EventChannel EventChannel; typedef CosEventChannelAdmin::ProxyPushConsumer EventPublisher; typedef CosEventChannelAdmin::ProxyPushSupplier EventSubscriber; - + /** EventChannelManager Interface */ @@ -125,7 +125,13 @@ module CF { struct EventChannelReg { EventRegistration reg; CosEventChannelAdmin::EventChannel channel; - }; + }; + + struct PublisherReg { + EventRegistration reg; + CosEventChannelAdmin::EventChannel channel; + CF::EventPublisher proxy_consumer; + }; /** @@ -158,6 +164,23 
@@ module CF { OperationFailed, ServiceUnavailable ); + /** + create + + Creates an Event Channel construct with the contents of the parameter channel_name + in the Domain associate with this Manager. Event channel names must be unique across the entire domain. + Event Channels created with this manager's interface will remain available for use during the entire duration the Domain + Manager's execution. Event channels can be removed with the release method. + + @param channel_name name of channel to create + @return EventChannel returns an EventChannel + */ + EventChannel get( in string channel_name ) + raises ( ChannelDoesNotExist, + OperationNotAllowed, + OperationFailed, + ServiceUnavailable ); + /** Create an Event Channel in the Domain assocaited with the Manager. Event Channel names must be unique across the Domain. If this channels is used by registrations when all registrants have unregistered the channel resources @@ -189,12 +212,46 @@ module CF { OperationFailed, ServiceUnavailable ); + /** + Force the release of the event channel from the domain. + */ + void forceRelease( in string channel_name ) + raises ( ChannelDoesNotExist, + OperationNotAllowed, + OperationFailed, + ServiceUnavailable ); + + /** + Attach a consumer to the event channel + - a registration is created automatically (use an empty string for automatic registration id) + */ + EventChannelReg registerConsumer( in CosEventComm::PushConsumer consumer, in EventRegistration req) + raises ( InvalidChannelName, + RegistrationAlreadyExists, + OperationFailed, + OperationNotAllowed, + ServiceUnavailable ); + + /** + Attach a publisher (supplier) to the event channel + - a registration is created automatically (use an empty string for automatic registration id) + - disconnectReceiver is to receive the notification that the de-registration of the supplier was + successful. This argument is optional. Use a nil reference to void the argument. 
+ */ + PublisherReg registerPublisher( in EventRegistration req, in CosEventComm::PushSupplier disconnectReceiver) + raises ( InvalidChannelName, + RegistrationAlreadyExists, + OperationFailed, + OperationNotAllowed, + ServiceUnavailable ); /** Register an association with an Event Channel. Look up the registration against the current list for a match, if one exists then throw RegistrationAlreadyExists + + To generate a new unique registration id, pass an empty string in the registration request Look for an existing Event Channel object being managed. If the Event Channel is not found then add a new Event Channel object @@ -209,6 +266,8 @@ module CF { /** Unregister a from an event channel and invalidates the context + - if attachConsumer or attachProducer to create the registration, + the consumer or producer is automatically disconnected */ void unregister( in EventRegistration reg ) raises ( ChannelDoesNotExist, diff --git a/redhawk/src/idl/ossie/CF/LogInterfaces.idl b/redhawk/src/idl/ossie/CF/LogInterfaces.idl index 35bfb71c0..0a1aaaea6 100644 --- a/redhawk/src/idl/ossie/CF/LogInterfaces.idl +++ b/redhawk/src/idl/ossie/CF/LogInterfaces.idl @@ -20,6 +20,8 @@ #ifndef MODULE_CF_LOGGING_INF_IDL #define MODULE_CF_LOGGING_INF_IDL +#include + // // // Common Namespace for RedHawk Logging interface...in lue of CosLwLog @@ -255,16 +257,30 @@ module CF { */ attribute LogLevel log_level; + /** + * Retrieves the named logger level + */ + LogLevel getLogLevel( in string logger_id ) raises (CF::UnknownIdentifier); /** * Assigns the named logger to the new level */ - void setLogLevel( in string logger_id, in LogLevel newLevel ) raises (CF::UnknownIdentifier); - + void setLogLevel( in string logger_id, in LogLevel newLevel ) raises (CF::UnknownIdentifier); + + /** + * Retrieves the list of named loggers associated with the logger + */ + CF::StringSequence getNamedLoggers(); + + /** + * Reset the logger to its initial configuration state + */ + void resetLog(); + /** - * Return 
the current contents of the logging configuration as a string object. + * Return the current contents of the logging configuration as a string object. */ - string getLogConfig( ); + string getLogConfig( ); /** * Set the logging configuration context diff --git a/redhawk/src/idl/ossie/CF/NegotiablePort.idl b/redhawk/src/idl/ossie/CF/NegotiablePort.idl new file mode 100644 index 000000000..6b05d937c --- /dev/null +++ b/redhawk/src/idl/ossie/CF/NegotiablePort.idl @@ -0,0 +1,71 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef _NEGOTIABLE_PORT_IDL_ +#define _NEGOTIABLE_PORT_IDL_ + +#include "ossie/CF/QueryablePort.idl" + +module ExtendedCF { + + exception NegotiationError { + string msg; + }; + + struct TransportInfo { + string transportType; + CF::Properties transportProperties; + }; + + typedef sequence TransportInfoSequence; + + interface NegotiablePort { + readonly attribute TransportInfoSequence supportedTransports; + }; + + struct ConnectionStatus { + string connectionId; + Object port; + boolean alive; + string transportType; + CF::Properties transportInfo; + }; + + typedef sequence ConnectionStatusSequence; + + interface NegotiableUsesPort : ExtendedCF::QueryablePort, NegotiablePort { + readonly attribute ConnectionStatusSequence connectionStatus; + }; + + struct NegotiationResult { + string transportId; + CF::Properties properties; + }; + + interface NegotiableProvidesPort : NegotiablePort { + NegotiationResult negotiateTransport(in string transportType, in CF::Properties transportProperties) + raises (ExtendedCF::NegotiationError); + + void disconnectTransport(in string transportId) + raises (ExtendedCF::NegotiationError); + }; +}; + +#endif diff --git a/redhawk/src/idl/ossie/CF/StandardEvent.idl b/redhawk/src/idl/ossie/CF/StandardEvent.idl index 9bf6d2b9f..5051b200f 100644 --- a/redhawk/src/idl/ossie/CF/StandardEvent.idl +++ b/redhawk/src/idl/ossie/CF/StandardEvent.idl @@ -58,6 +58,7 @@ module StandardEvent { DEVICE_MANAGER, DEVICE, APPLICATION_FACTORY, + EVENT_CHANNEL, APPLICATION, SERVICE }; diff --git a/redhawk/src/idl/ossie/CF/WellKnownProperties.idl b/redhawk/src/idl/ossie/CF/WellKnownProperties.idl index 3dfdd9af8..2d3937f33 100644 --- a/redhawk/src/idl/ossie/CF/WellKnownProperties.idl +++ b/redhawk/src/idl/ossie/CF/WellKnownProperties.idl @@ -28,7 +28,8 @@ module ExtendedCF { const string OS_NAME = "DCE:4a23ad60-0b25-4121-a630-68803a498f75"; const string OS_VERSION = "DCE:0f3a9a37-a342-43d8-9b7f-78dc6da74192"; const string PROCESSOR_NAME = 
"DCE:fefb9c66-d14a-438d-ad59-2cfd1adb272b"; - const string AWARE_APPLICATION = "AWARE_APPLICATION"; + const string AWARE_APPLICATION = "RH::AWARE_APPLICATION"; + const string STOP_TIMEOUT = "RH::STOP_TIMEOUT"; }; }; #endif diff --git a/redhawk/src/idl/ossie/CF/cf.idl b/redhawk/src/idl/ossie/CF/cf.idl index cf680ad2a..811767fa2 100644 --- a/redhawk/src/idl/ossie/CF/cf.idl +++ b/redhawk/src/idl/ossie/CF/cf.idl @@ -413,6 +413,7 @@ module CF { string reg_id; string resource_id; CF::Properties properties; + CF::UTCTime timestamp; }; void propertyChange( in PropertyChangeEvent prop_event ); @@ -701,7 +702,7 @@ module CF { }; /* The DomainManager interface is for the control and configuration of the radio domain. */ - interface DomainManager : PropertyEmitter { + interface DomainManager : PropertyEmitter, Logging { /* This exception is raised when an Application installation has not completed correctly. The message provides additional information describing the reason for the error. */ exception ApplicationInstallationError { CF::ErrorNumberType errorNumber; @@ -805,6 +806,7 @@ module CF { in CF::DeviceAssignmentSequence deviceAssignments ) raises (CF::InvalidProfile,CF::InvalidFileName, + CF::DomainManager::ApplicationInstallationError, CF::ApplicationFactory::CreateApplicationError, CF::ApplicationFactory::CreateApplicationRequestError, CF::ApplicationFactory::CreateApplicationInsufficientCapacityError, @@ -888,6 +890,12 @@ module CF { string description; string direction; }; + /* Constant for PortInfoType direction field for provides ports */ + const string DIRECTION_PROVIDES = "Provides"; + /* Constant for PortInfoType direction field for uses ports */ + const string DIRECTION_USES = "Uses"; + /* Constant for PortInfoType direction field for bi-directional ports */ + const string DIRECTION_BIDIR = "Bidir"; /* The PortInfoSequence type defines an unbounded sequence of ports' information for getPortSet function. 
*/ typedef sequence PortInfoSequence; /* The getPortSet operation provides a mechanism to obtain information about all ports in the resource. */ @@ -1032,6 +1040,14 @@ module CF { /*readonly attribute ApplicationRegistrar appReg;*/ /* This boolean attribute contains the aware state of Application. This attribute shows whether the Components in the Application are given a pointer to the Application and Domain Manager. */ readonly attribute boolean aware; + /* This readwrite float attribute is the stop timeout for the Application. This is how long the framework will wait on each component when stop is called (not during releaseObject). */ + attribute float stopTimeout; + exception InvalidMetric { + CF::StringSequence components; + CF::StringSequence attributes; + }; + /* The metrics method returns the requested metrics for the Application. An empty sequence returns all metrics. */ + CF::Properties metrics(in CF::StringSequence components, in CF::StringSequence attributes) raises (CF::Application::InvalidMetric); }; /* This interface extends the Device interface by adding software loading and unloading behavior to a Device. */ interface LoadableDevice : Device { @@ -1127,7 +1143,7 @@ module CF { CF::ExecutableDevice::ExecuteFail); }; /* The DeviceManager interface is used to manage a set of logical Devices and services. */ - interface DeviceManager : PropertyEmitter, PortSet { + interface DeviceManager : PropertyEmitter, PortSet, Logging { /* This structure provides the object reference and name of services that have registered with the DeviceManager. 
*/ struct ServiceType { Object serviceObject; diff --git a/redhawk/src/omnijni/pom.xml b/redhawk/src/omnijni/pom.xml deleted file mode 100644 index 708f75eba..000000000 --- a/redhawk/src/omnijni/pom.xml +++ /dev/null @@ -1,15 +0,0 @@ - - 4.0.0 - - redhawk.coreframework - parent - 2.0.9-SNAPSHOT - ../../../pom.xml - - omnijni - bundle - - src/java - - diff --git a/redhawk/src/omnijni/src/cpp/omnijni.cpp b/redhawk/src/omnijni/src/cpp/omnijni.cpp index 77b662a3f..4b5b8b3b6 100644 --- a/redhawk/src/omnijni/src/cpp/omnijni.cpp +++ b/redhawk/src/omnijni/src/cpp/omnijni.cpp @@ -31,23 +31,26 @@ namespace { extern "C" JNIEXPORT jint JNICALL JNI_OnLoad (JavaVM* jvm, void* reserved) { - // Map to the JVM enviroment. - JNIEnv* env; - if (jvm->GetEnv((void**)&env, JNI_VERSION_1_2)) { - return false; + // Only initialize if the shared mutex doesn't exist + if (!sharedMutex_) { + // Map to the JVM enviroment. + JNIEnv* env; + if (jvm->GetEnv((void**)&env, JNI_VERSION_1_4)) { + return false; + } + + // Must initialize the CORBA ORB before anything else happens. + omnijni::ORB::Init(env); + + // Initialize the omniORB/JVM thread interface code. + omnijni::threading::Init(env); + + // Create the shared mutex at JNI initialization time (rather than whenever + // static initializers are called) + sharedMutex_ = new omni_mutex(); } - // Must initialize the CORBA ORB before anything else happens. - omnijni::ORB::Init(env); - - // Initialize the omniORB/JVM thread interface code. 
- omnijni::threading::Init(env); - - // Create the shared mutex at JNI initialization time (rather than whenever - // static initializers are called) - sharedMutex_ = new omni_mutex(); - - return JNI_VERSION_1_2; + return JNI_VERSION_1_4; } namespace omnijni { diff --git a/redhawk/src/omnijni/src/cpp/orb.cpp b/redhawk/src/omnijni/src/cpp/orb.cpp index 04576de36..b1a384369 100644 --- a/redhawk/src/omnijni/src/cpp/orb.cpp +++ b/redhawk/src/omnijni/src/cpp/orb.cpp @@ -46,6 +46,10 @@ void omnijni::ORB::Init (JNIEnv* env) // Initialize JNI references cls_ = omnijni::loadClass(env, "omnijni.ORB"); object_to_string_ = env->GetStaticMethodID(cls_, "object_to_string", "(Lorg/omg/CORBA/Object;)Ljava/lang/String;"); + + // Set up for shutdown on JVM exit + jmethodID register_shutdown = env->GetStaticMethodID(cls_, "register_shutdown", "()V"); + env->CallStaticVoidMethod(cls_, register_shutdown); } CORBA::Object_ptr omnijni::ORB::object_to_native (JNIEnv* env, jobject obj) @@ -57,7 +61,7 @@ CORBA::Object_ptr omnijni::ORB::object_to_native (JNIEnv* env, jobject obj) return object; } -extern "C" JNIEXPORT jlong JNICALL Java_omnijni_ORB_string_1to_1object_1ref (JNIEnv* env, jclass, jstring jior) +extern "C" JNIEXPORT jlong JNICALL Java_omnijni_ORB_00024NativeORB_string_1to_1object_1ref (JNIEnv* env, jclass, jstring jior) { const char* ior = env->GetStringUTFChars(jior, NULL); CORBA::Object_ptr object = orb->string_to_object(ior); @@ -65,14 +69,14 @@ extern "C" JNIEXPORT jlong JNICALL Java_omnijni_ORB_string_1to_1object_1ref (JNI return reinterpret_cast(object); } -extern "C" JNIEXPORT jstring JNICALL Java_omnijni_ORB_objectref_1to_1string (JNIEnv* env, jclass, jlong ref) +extern "C" JNIEXPORT jstring JNICALL Java_omnijni_ORB_00024NativeORB_objectref_1to_1string (JNIEnv* env, jclass, jlong ref) { CORBA::Object_ptr object = reinterpret_cast(ref); CORBA::String_var ior = orb->object_to_string(object); return env->NewStringUTF(ior); } -extern "C" JNIEXPORT void JNICALL 
Java_omnijni_ORB_shutdown (JNIEnv* env, jclass) +extern "C" JNIEXPORT void JNICALL Java_omnijni_ORB_00024NativeORB_shutdown (JNIEnv* env, jclass) { if (!CORBA::is_nil(orb)) { orb->shutdown(true); diff --git a/redhawk/src/omnijni/src/java/omnijni/ORB.java b/redhawk/src/omnijni/src/java/omnijni/ORB.java index c38d95f6b..cc9006071 100644 --- a/redhawk/src/omnijni/src/java/omnijni/ORB.java +++ b/redhawk/src/omnijni/src/java/omnijni/ORB.java @@ -32,26 +32,41 @@ public static org.omg.CORBA.portable.OutputStream create_output_stream () public static org.omg.CORBA.Object string_to_object (String ior) { - long ref = string_to_object_ref(ior); + long ref = NativeORB.string_to_object_ref(ior); return new CORBAObject(ref); } public static String object_to_string (org.omg.CORBA.Object obj) { if (obj instanceof omnijni.ObjectImpl) { - return objectref_to_string(((omnijni.ObjectImpl)obj)._get_object_ref()); + return NativeORB.objectref_to_string(((omnijni.ObjectImpl)obj)._get_object_ref()); } else { org.omg.CORBA.ORB orb = ((org.omg.CORBA.portable.ObjectImpl)obj)._orb(); return orb.object_to_string(obj); } } - public static native void shutdown (); + public static void shutdown () + { + NativeORB.shutdown(); + } - static { - System.loadLibrary("omnijni"); + private static void register_shutdown () + { + Runtime.getRuntime().addShutdownHook(new Thread() { + public void run() { + omnijni.ORB.shutdown(); + } + }); } - private static native long string_to_object_ref (String ior); - private static native String objectref_to_string (long ref); + private static class NativeORB { + static { + System.loadLibrary("omnijni"); + } + + private static native void shutdown(); + private static native long string_to_object_ref (String ior); + private static native String objectref_to_string (long ref); + } } diff --git a/redhawk/src/releng/redhawk.spec b/redhawk/src/releng/redhawk.spec index e92c7305d..c307faf88 100644 --- a/redhawk/src/releng/redhawk.spec +++ b/redhawk/src/releng/redhawk.spec @@ 
-17,17 +17,19 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # - -%{!?_ossiehome: %define _ossiehome /usr/local/redhawk/core} -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_ossiehome} +%if 0%{?fedora} >= 17 || 0%{?rhel} >=7 +%global with_systemd 1 +%endif +%{!?_ossiehome: %global _ossiehome /usr/local/redhawk/core} +%{!?_sdrroot: %global _sdrroot /var/redhawk/sdr} +%global _prefix %{_ossiehome} Prefix: %{_ossiehome} Prefix: %{_sdrroot} Prefix: %{_sysconfdir} Name: redhawk -Version: 2.0.9 -Release: 1%{?dist} +Version: 2.2.1 +Release: 2%{?dist} Summary: REDHAWK is a Software Defined Radio framework Group: Applications/Engineering @@ -36,41 +38,55 @@ URL: http://redhawksdr.org/ Source: %{name}-%{version}.tar.gz Vendor: REDHAWK -%define __arch_install_post %{nil} +%global __arch_install_post %{nil} Requires: util-linux-ng +Requires: java >= 1:1.8.0 %if 0%{?rhel} >= 7 || 0%{?fedora} >= 17 -Requires: java >= 1.7 Requires: python-matplotlib-qt4 Requires: gstreamer-python +Requires: numactl-libs %else -Requires: java7 >= 1.7 Requires: python-matplotlib %endif Requires: python -Requires: numpy +%if 0%{?fedora} == 16 || 0%{?rhel} == 6 +Requires: python-lxml Requires: python-omniORB >= 3.0 Requires: omniORB-devel >= 4.1.0 +%endif +%if 0%{?rhel} >= 7 || 0%{?fedora} >= 17 +Requires: python-omniORB > 4.2.2 +Requires: omniORB-devel > 4.2.2 +%endif +Requires: numpy Requires: binutils +Requires: numactl +Requires: sqlite + BuildRequires: libuuid-devel BuildRequires: boost-devel >= 1.41 BuildRequires: autoconf automake libtool BuildRequires: expat-devel - -%if 0%{?rhel} >= 7 || 0%{?fedora} >= 17 -BuildRequires: java-devel >= 1.7 -%else -BuildRequires: java7-devel >= 1.7 -%endif - +BuildRequires: java-1.8.0-openjdk-devel +BuildRequires: python-setuptools BuildRequires: python-devel >= 2.4 BuildRequires: log4cxx-devel >= 0.10 +%if 0%{?fedora} == 16 || 0%{?rhel} == 
6 BuildRequires: omniORB-devel >= 4.1.0 BuildRequires: omniORBpy-devel >= 3.0 +%endif +%if 0%{?rhel} >= 7 || 0%{?fedora} >= 17 +BuildRequires: omniORB-devel > 4.2.2 +BuildRequires: omniORBpy-devel > 4.2.2 +%endif BuildRequires: libomniEvents2-devel BuildRequires: xsd >= 3.3.0 +BuildRequires: cppunit-devel +BuildRequires: numactl-devel +BuildRequires: sqlite-devel %description REDHAWK is a Software Defined Radio framework. @@ -124,35 +140,41 @@ Requires: libuuid-devel Requires: boost-devel >= 1.41 Requires: autoconf automake libtool Requires: log4cxx-devel >= 0.10 +Requires: numactl-devel # omniORB / omniORBpy + +%if 0%{?fedora} == 16 || 0%{?rhel} == 6 Requires: omniORB-devel >= 4.1.0 -Requires: omniORB-doc Requires: omniORBpy-devel >= 3.0 - +%endif +%if 0%{?fedora} == 17 || 0%{?rhel} == 7 +Requires: omniORB-devel > 4.2.2 +Requires: omniORBpy-devel > 4.2.2 +%endif +Requires: omniORB-doc # Languages Requires: gcc-c++ Requires: python-devel >= 2.4 - -%if 0%{?rhel} >= 7 || 0%{?fedora} >= 17 -Requires: java-devel >= 1.7 -%else -Requires: java7-devel >= 1.7 -%endif +Requires: java-1.8.0-openjdk-devel %description devel This package ensures that all requirements for REDHAWK development are installed. It also provides a useful development utilities. - %prep -%setup -q +%if 0%{?_localbuild} +%setup -q -n redhawk +%else +%setup -q +%endif %build # build the core framework cd src ./reconf -%configure --with-sdr=%{_sdrroot} --with-pyscheme=home +%configure --with-sdr=%{_sdrroot} --with-pyscheme=home --without-tests + make %{?_smp_mflags} @@ -163,7 +185,6 @@ rm -rf --preserve-root $RPM_BUILD_ROOT cd src make install DESTDIR=$RPM_BUILD_ROOT - %clean rm -rf --preserve-root $RPM_BUILD_ROOT @@ -172,10 +193,18 @@ rm -rf --preserve-root $RPM_BUILD_ROOT # -r is system account, -f is force (ignore already exists) groupadd -r -f redhawk if ! 
id redhawk &> /dev/null; then - # -M is don't create home dir, -r is system account, -s is shell - # -c is comment, -n is don't create group, -g is group name/id - /usr/sbin/useradd -M -r -s /sbin/nologin \ + # -r is system account, -s is shell, -M is don't create home dir, + # -d is the home directory, -c is comment, -n is don't create group, + # -g is group name/id + /usr/sbin/useradd -r -s /sbin/nologin -M -d /var/redhawk \ -c "REDHAWK System Account" -n -g redhawk redhawk > /dev/null +elif [ `getent passwd redhawk | cut -d: -f6` == "/home/redhawk" ]; then + if [ `ps -u redhawk | wc -l` != '1' ]; then + echo "The redhawk user still has processes running, cannot update user account" + exit 1 + fi + # Reassign the redhawk home directory to something that exists + /usr/sbin/usermod -d /var/redhawk redhawk fi @@ -226,8 +255,11 @@ fi %attr(2775,redhawk,redhawk) %dir %{_sdrroot}/dom/deps %attr(2775,redhawk,redhawk) %dir %{_sdrroot}/dom/domain %attr(2775,redhawk,redhawk) %dir %{_sdrroot}/dom/mgr +%attr(2775,redhawk,redhawk) %dir %{_sdrroot}/dom/mgr/rh %attr(775,redhawk,redhawk) %{_sdrroot}/dom/mgr/DomainManager +%attr(775,redhawk,redhawk) %{_sdrroot}/dom/mgr/rh/ComponentHost %{_sdrroot}/dom/mgr/*.xml +%{_sdrroot}/dom/mgr/rh/ComponentHost/* %attr(2775,redhawk,redhawk) %dir %{_sdrroot}/dom/waveforms %attr(644,root,root) %{_sysconfdir}/profile.d/redhawk-sdrroot.csh %attr(644,root,root) %{_sysconfdir}/profile.d/redhawk-sdrroot.sh @@ -273,19 +305,28 @@ fi %post /sbin/ldconfig + %postun /sbin/ldconfig - %changelog +* Wed Jun 28 2017 Ryan Bauman - 2.1.2-1 +- Update for 2.1.2-rc1 + +* Wed Jun 28 2017 Ryan Bauman - 2.1.1-2 +- Bump for 2.1.1-rc2 + * Sat Nov 26 2016 - 2.0.4 - Added service directory in redhawk-sdrroot-dev-mgr +* Fri Sep 16 2016 - 2.0.3-1 +- Update for dependency on Java 8 + * Wed Sep 9 2015 - 2.0.0-2 - Add qt-tools package - Remove el5 support -* Wed Sep 15 2014 - 1.11.0-1 +* Mon Sep 15 2014 - 1.11.0-1 - Update for dependency on java7 * Wed May 21 2014 - 1.10.0-7 
diff --git a/redhawk/src/releng/yumgroups.xml b/redhawk/src/releng/yumgroups.xml index 16827d7bf..392406250 100644 --- a/redhawk/src/releng/yumgroups.xml +++ b/redhawk/src/releng/yumgroups.xml @@ -66,18 +66,4 @@ with this program. If not, see http://www.gnu.org/licenses/. omniEvents-bootscripts - - redhawk-enterprise-integration - REDHAWK Enterprise Integration - true - REDHAWK Enterprise Integration - true - - redhawk-enterprise-integration-demo-dist - redhawk-enterprise-integration-dist-jetty - redhawk-enterprise-integration-dist-karaf - redhawk-enterprise-integration-dist-shell - redhawk-enterprise-integration-docs - - diff --git a/redhawk/src/testing/.gitignore b/redhawk/src/testing/.gitignore index 77d32acee..438c8974d 100644 --- a/redhawk/src/testing/.gitignore +++ b/redhawk/src/testing/.gitignore @@ -1,12 +1,15 @@ *.jar +*.so build helpers/buildconfig.py _unitTestHelpers/buildconfig.py sdr/cache/ sdr/dev/devices/BasicDevWithExecParam_cpp/BasicDevWithExecParam_cpp_impl1/BasicDevWithExecParam_cpp_impl1 sdr/dev/devices/BasicTestDevice_cpp/BasicTestDevice_cpp_impl1/BasicTestDevice_cpp_impl1 +sdr/dev/devices/base_programmable/cpp/base_programmable sdr/dev/devices/cpp_dev/cpp/cpp_dev sdr/dev/devices/devcpp/cpp/devcpp +sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp sdr/dev/devices/DevC/cpp/DevC sdr/dev/devices/CppTestDevice/cpp/CppTestDevice sdr/dev/devices/dev_kill_devmgr/cpp/dev_kill_devmgr @@ -17,6 +20,7 @@ sdr/dev/devices/issue_111_cpp/cpp/issue_111_cpp sdr/dev/devices/props_test_device/props_test_device_cpp_impl1/props_test_device_cpp_impl1 sdr/dev/devices/base_programmable/cpp/base_programmable sdr/dev/devices/ProgrammableDevice/cpp/ProgrammableDevice +sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp sdr/dev/mgr/DeviceManager* sdr/dev/nodes/test_Concurrent_nodes/ sdr/dom/components/BasicAC/BasicAC_cpp_impl1/BasicAC_cpp_impl1 @@ -42,9 +46,14 @@ sdr/dom/components/TestCppOptionalProps/cpp/TestCppOptionalProps sdr/dom/components/foo/bar/comp/cpp/comp 
sdr/dom/components/C1/cpp/C1 sdr/dom/components/C2/cpp/C2 +sdr/dom/components/huge_msg_cpp/cpp/huge_msg_cpp +sdr/dom/components/msg_through_cpp/cpp/msg_through_cpp sdr/dom/components/prop_trigger_timing/cpp/prop_trigger_timing +sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp +sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp sdr/dom/domain/ sdr/dom/mgr/DomainManager* +sdr/dom/mgr/rh/ sdr/runtests.log .pythonInstallFiles sdr/dev/nodes/test_affinity_node_socket/DeviceManager.dcd.xml @@ -65,4 +74,4 @@ sdr/dom/deps/cpp_dep1/cpp_dep1.spd.xml sdr/dom/deps/cpp_dep2/cpp_dep2.spd.xml sdr/dev/devices/LongDevice/cpp/LongDevice sdr/dev/nodes/LongDeviceCalls/DeviceManager.dcd.xml - +sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp diff --git a/redhawk/src/testing/Makefile.am b/redhawk/src/testing/Makefile.am index e31f3be3c..be48be811 100644 --- a/redhawk/src/testing/Makefile.am +++ b/redhawk/src/testing/Makefile.am @@ -42,14 +42,23 @@ SUBDIRS = sdr/dom/deps/cpp_dep1/cpp \ sdr/dev/devices/base_programmable/cpp \ sdr/dev/devices/base_persona/cpp \ sdr/dev/devices/devcpp/cpp \ + sdr/dev/devices/writeonly_cpp/cpp \ + sdr/dev/devices/log_test_cpp/cpp \ sdr/dev/devices/DevC/cpp \ sdr/dev/devices/dev_kill_devmgr/cpp \ sdr/dev/devices/GPP/cpp \ sdr/dev/devices/LongDevice/cpp \ + sdr/dev/devices/dev_alloc_cpp/cpp \ + sdr/dev/services/BasicService_cpp/cpp \ + sdr/dom/deps/cpp_dep1/cpp \ + sdr/dom/deps/cpp_dep2/cpp \ sdr/dom/components/TestCppProps \ sdr/dom/components/linkedLibraryTest \ sdr/dom/components/TestCppsoftpkgDeps \ sdr/dom/components/BasicAC/BasicAC_cpp_impl1 \ + sdr/dom/components/BasicShared/cpp \ + sdr/dom/components/logger/cpp \ + sdr/dom/components/alloc_shm/cpp \ sdr/dom/components/SimpleComponent/SimpleComponent_cpp_impl1 \ sdr/dom/components/MessageReceiverCpp \ sdr/dom/components/MessageSenderCpp \ @@ -64,6 +73,7 @@ SUBDIRS = sdr/dom/deps/cpp_dep1/cpp \ sdr/dom/components/cpp_comp/cpp \ sdr/dom/components/TestLoggingAPI/cpp \ 
sdr/dom/components/ECM_CPP/cpp \ + sdr/dom/components/EmptyString/cpp \ sdr/dom/components/C1/cpp \ sdr/dom/components/C2/cpp \ sdr/dom/components/PropertyChange_C1/cpp \ @@ -73,10 +83,15 @@ SUBDIRS = sdr/dom/deps/cpp_dep1/cpp \ sdr/dom/components/foo/bar/comp/cpp \ sdr/dom/components/TestCppOptionalProps/cpp \ sdr/dom/components/cpp_with_deps/cpp \ + sdr/dom/components/slow_stop_cpp/cpp \ sdr/dom/components/huge_msg_cpp/cpp \ + sdr/dom/components/msg_through_cpp/cpp \ + sdr/dom/components/timeprop_cpp/cpp \ + sdr/dom/components/time_cp_now/cpp \ sdr/dom/components/zero_length/cpp \ - sdr/dom/components/svc_error_cpp/cpp \ - sdr/dom/components/msg_through_cpp/cpp + sdr/dom/components/msg_through_cpp/cpp \ + sdr/dom/components/busycomp/cpp \ + sdr/dom/components/svc_fn_error_cpp/cpp if HAVE_JAVASUPPORT SUBDIRS += sdr/dom/deps/java_dep1/java \ @@ -84,7 +99,9 @@ SUBDIRS += sdr/dom/deps/java_dep1/java \ sdr/dev/devices/JavaTestDevice/java \ sdr/dev/devices/issue_111_java/java \ sdr/dev/devices/devj/java \ + sdr/dev/devices/writeonly_java/java \ sdr/dev/devices/DevC/java \ + sdr/dev/devices/log_test_java/java \ sdr/dev/devices/java_dev/java \ sdr/dev/services/BasicService_java/java \ sdr/dom/components/BasicAC/basicac_java_impl1 \ @@ -92,8 +109,12 @@ SUBDIRS += sdr/dom/deps/java_dep1/java \ sdr/dom/components/EventReceive/EventReceive_java_impl1 \ sdr/dom/components/javaDep/javaDep \ sdr/dom/components/java_comp/java \ + sdr/dom/components/javaSoftpkgJarDep/java \ sdr/dom/components/huge_msg_java/java \ sdr/dom/components/msg_through_java/java \ + sdr/dom/components/timeprop_java/java \ + sdr/dom/components/logger_java/java \ + sdr/dom/components/time_ja_now/java \ sdr/dom/components/HardLimit/HardLimit_java_impl1 \ sdr/dom/components/PropertyChangeEventsJava/PropertyChangeEventsJava_java_impl1 \ sdr/dom/components/TestJavaProps \ @@ -106,7 +127,14 @@ SUBDIRS += sdr/dom/deps/java_dep1/java \ sdr/dom/components/PropertyChange_J1/java \ 
sdr/dom/components/Property_JAVA/java \ sdr/dom/components/foo/bar/jv/java \ - sdr/dom/components/TestJavaOptionalProps/java + sdr/dom/components/TestJavaOptionalProps/java \ + sdr/dom/components/svc_fn_error_java/java \ + sdr/dom/components/EmptyString/java +endif + +SUBDIRS += cpp +if HAVE_JAVASUPPORT +SUBDIRS += java endif all-local: diff --git a/redhawk/src/testing/_unitTestHelpers/runtestHelpers.py b/redhawk/src/testing/_unitTestHelpers/runtestHelpers.py index 411a070dc..449440660 100644 --- a/redhawk/src/testing/_unitTestHelpers/runtestHelpers.py +++ b/redhawk/src/testing/_unitTestHelpers/runtestHelpers.py @@ -72,3 +72,11 @@ def haveLoggingSupport(filename): if line.split('=')[1] == ' \n': log4cxx_support = False return log4cxx_support + +def haveDefine(filename, variable): + definition = '-D%s=1' % variable + with open(filename, 'r') as fp: + for line in fp: + if definition in line: + return True + return False diff --git a/redhawk/src/testing/_unitTestHelpers/scatest.py b/redhawk/src/testing/_unitTestHelpers/scatest.py index 84d1cee9c..062c83133 100644 --- a/redhawk/src/testing/_unitTestHelpers/scatest.py +++ b/redhawk/src/testing/_unitTestHelpers/scatest.py @@ -129,6 +129,18 @@ def setupDeviceAndDomainMgrPackage(): for xmlFile in glob.glob(os.path.join(domMgrSrc, '*.xml')): updateLink(xmlFile, os.path.join(domMgrDest, os.path.basename(xmlFile))) + # "Install" the ComponentHost softpkg + compHostSrc = os.path.join(sdrSrc, 'ComponentHost') + compHostDest = os.path.join(getSdrPath(), "dom/mgr/rh/ComponentHost") + try: + os.makedirs(compHostDest) + except OSError: + # Assume it failed because the directory already exists + pass + updateLink(os.path.join(compHostSrc, 'ComponentHost'), os.path.join(compHostDest, 'ComponentHost')) + for xmlFile in glob.glob(os.path.join(compHostSrc, '*.xml')): + updateLink(xmlFile, os.path.join(compHostDest, os.path.basename(xmlFile))) + # "Install" the DeviceManager softpkg. 
devMgrSrc = os.path.join(sdrSrc, 'devmgr') devMgrDest = os.path.join(getSdrPath(), "dev", "mgr") @@ -205,13 +217,14 @@ def requirePersistence(obj): GDB_CMD_FILE=None def spawnNodeBooter(dmdFile=None, dcdFile=None, - debug=0, + debug=-1, domainname=None, loggingURI=None, endpoint=None, dbURI=None, execparams="", - nodeBooterPath="../../control/framework/nodeBooter"): + nodeBooterPath="../../control/framework/nodeBooter", + stderr=None): args = [] if dmdFile != None: args.extend(["-D", dmdFile]) @@ -224,15 +237,15 @@ def spawnNodeBooter(dmdFile=None, else: args.extend(["--domainname", domainname]) - if endpoint == None: - args.append("--nopersist") - else: + if endpoint is not None: args.extend(["-ORBendPoint", endpoint]) if dbURI: args.extend(["--dburl", dbURI]) - args.extend(["-debug", str(debug)]) + if debug != -1: + args.extend(["-debug", str(debug)]) + if loggingURI is not None: if loggingURI: args.extend(["-log4cxx", loggingURI]) @@ -249,7 +262,7 @@ def spawnNodeBooter(dmdFile=None, print '\n-------------------------------------------------------------------' print 'Launching nodeBooter', " ".join(args) print '-------------------------------------------------------------------' - nb = ossie.utils.Popen(args, cwd=getSdrPath(), shell=False, preexec_fn=os.setpgrp) + nb = ossie.utils.Popen(args, cwd=getSdrPath(), shell=False, preexec_fn=os.setpgrp, stderr=stderr) if DEBUG_NODEBOOTER: absNodeBooterPath = os.path.abspath("../control/framework/nodeBooter") if GDB_CMD_FILE != None: @@ -273,7 +286,7 @@ def getProcessArgs( pname ): pass return args - + class OssieTestCase(unittest.TestCase): @@ -380,7 +393,7 @@ class CorbaTestCase(OssieTestCase): def __init__(self, methodName='runTest', orbArgs=[]): unittest.TestCase.__init__(self, methodName) args = sys.argv - self.debuglevel = 3 + self.debuglevel = -1 for arg in args: if '--debuglevel' in arg: self.debuglevel = arg.split('=')[-1] @@ -483,7 +496,8 @@ def launchDomainManager(self, dmdFile="", *args, **kwargs): return 
(self._domainBooter, self._domainManager) # If debug level is not given, default to configured level - kwargs.setdefault('debug', self.debuglevel) + if self.debuglevel != -1: + kwargs.setdefault('debug', self.debuglevel) # Launch the nodebooter. self._domainBooter = spawnNodeBooter(dmdFile=dmdFile, execparams=self._execparams, *args, **kwargs) @@ -506,7 +520,8 @@ def launchDeviceManager(self, dcdFile, domainManager=None, wait=True, *args, **k return (None, None) # If debug level is not given, default to configured level - kwargs.setdefault('debug', self.debuglevel) + if self.debuglevel != -1: + kwargs.setdefault('debug', self.debuglevel) # Launch the nodebooter. if domainManager == None: @@ -557,13 +572,40 @@ def waitDeviceManager(self, devBooter, dcdFile, domainManager=None): self._addDeviceManager(devMgr) return devMgr + def waitForDeviceManager(self, node_dir): + dcdPath = getSdrPath()+"/dev/nodes/"+node_dir+"/DeviceManager.dcd.xml" + + dcd = DCDParser.parse(dcdPath) + if dcd.get_partitioning(): + numDevices = len(dcd.get_partitioning().get_componentplacement()) + else: + numDevices = 0 + + dm = self._getDomainManager() + + devMgr = None + while devMgr == None: + devMgr = self._getDeviceManager(dm, dcd.get_id()) + if devMgr: + break + time.sleep(0.1) + + if devMgr: + self._waitRegisteredDevices(devMgr, numDevices) + self._addDeviceManager(devMgr) + return devMgr + + def _waitRegisteredDevices(self, devMgr, numDevices, timeout=5.0, pause=0.1): while timeout > 0.0: - if (len(devMgr._get_registeredDevices())+len(devMgr._get_registeredServices())) == numDevices: - return True - else: - timeout -= pause - time.sleep(pause) + try: # when responding to error conditions during some tests, the Device Manager will trigger a TRANSIENT error + if (len(devMgr._get_registeredDevices())+len(devMgr._get_registeredServices())) == numDevices: + return True + else: + timeout -= pause + time.sleep(pause) + except: + break return False def waitTermination(self, child, timeout=5.0, 
pause=0.1): diff --git a/redhawk/src/testing/cpp/.gitignore b/redhawk/src/testing/cpp/.gitignore new file mode 100644 index 000000000..cf6be6771 --- /dev/null +++ b/redhawk/src/testing/cpp/.gitignore @@ -0,0 +1,3 @@ +*.csv +test_libossiecf +benchmark_bitops diff --git a/redhawk/src/testing/cpp/AnyUtilsTest.cpp b/redhawk/src/testing/cpp/AnyUtilsTest.cpp new file mode 100644 index 000000000..a0e44ed1b --- /dev/null +++ b/redhawk/src/testing/cpp/AnyUtilsTest.cpp @@ -0,0 +1,320 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "AnyUtilsTest.h" + +#include +#include +#include + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(AnyUtilsTest); + +namespace { + + template + class NumericTestImpl + { + public: + typedef T (*conversion_func)(const CORBA::Any&); + + typedef std::numeric_limits limits; + + NumericTestImpl(conversion_func func) : + func(func) + { + } + + void testFromBoolean() + { + CORBA::Any any; + + any <<= true; + CPPUNIT_ASSERT_EQUAL((T) 1, func(any)); + + any <<= false; + CPPUNIT_ASSERT_EQUAL((T) 0, func(any)); + } + + void testFromString() + { + const T mid = (limits::min() / 2) + (limits::max() / 2); + std::ostringstream oss; + oss << mid; + + CORBA::Any any; + any <<= oss.str(); + CPPUNIT_ASSERT_EQUAL(mid, func(any)); + + any <<= "1.27e2"; + CPPUNIT_ASSERT_EQUAL((T) 127, func(any)); + } + + void testFromNumber() + { + CORBA::Any any; + + any <<= CORBA::Any::from_octet(1); + CPPUNIT_ASSERT_EQUAL((T) 1, func(any)); + + any <<= (CORBA::Short) 2; + CPPUNIT_ASSERT_EQUAL((T) 2, func(any)); + + any <<= (CORBA::UShort) 3; + CPPUNIT_ASSERT_EQUAL((T) 3, func(any)); + + any <<= (CORBA::Long) 4; + CPPUNIT_ASSERT_EQUAL((T) 4, func(any)); + + any <<= (CORBA::ULong) 5; + CPPUNIT_ASSERT_EQUAL((T) 5, func(any)); + + any <<= (CORBA::LongLong) 6; + CPPUNIT_ASSERT_EQUAL((T) 6, func(any)); + + any <<= (CORBA::ULongLong) 7; + CPPUNIT_ASSERT_EQUAL((T) 7, func(any)); + + any <<= (CORBA::Float) 8; + CPPUNIT_ASSERT_EQUAL((T) 8, func(any)); + + any <<= (CORBA::Double) 9; + CPPUNIT_ASSERT_EQUAL((T) 9, func(any)); + } + + void testRange() + { + T min = limits::min(); + T max = limits::max(); + testRangeImpl(min, max, min - 1.0, max + 1.0); + } + + private: + void testRangeImpl(T min, T max, double under, double over) + { + CORBA::Any any; + any <<= (double) min; + T result; + CPPUNIT_ASSERT_NO_THROW(result = func(any)); + CPPUNIT_ASSERT_EQUAL(min, result); + + result = max; + CPPUNIT_ASSERT(ossie::any::toNumber(any, result)); + CPPUNIT_ASSERT_EQUAL(min, result); + + any <<= (double) max; 
+ CPPUNIT_ASSERT_NO_THROW(result = func(any)); + CPPUNIT_ASSERT_EQUAL(max, result); + + result = min; + CPPUNIT_ASSERT(ossie::any::toNumber(any, result)); + CPPUNIT_ASSERT_EQUAL(max, result); + + any <<= under; + CPPUNIT_ASSERT_THROW(func(any), std::range_error); + CPPUNIT_ASSERT(!ossie::any::toNumber(any, result)); + + any <<= over; + CPPUNIT_ASSERT_THROW(func(any), std::range_error); + CPPUNIT_ASSERT(!ossie::any::toNumber(any, result)); + } + + conversion_func func; + }; + + template <> + void NumericTestImpl::testRange() + { + // Double has a wider range but less precision (52 bits) than 64-bit + // integers, so it cannot precisely represent the maximum or minimum + // values. To ensure that the minimum and maximum are within the legal + // range of the converters (see boost::numeric::converter), adjust the + // test values towards zero by the effective epsilon (the minimum + // difference representible with a double at a given magnitude). In + // other words, the double values are quantized, so explicitly pick the + // nearest value that is less than the "real" value. + int bits = limits::digits - std::numeric_limits::digits; + double epsilon = (1 << bits); + CORBA::LongLong min = limits::min() + epsilon; + // In essence, mask out the least significant bits of the maximum + CORBA::LongLong max = limits::max() - (epsilon - 1); + // On 32-bit x86, we need to ensure that the difference between the + // double-precision "under" value and the 64-bit signed integer values + // is enough to register as out of range; multiplying the epsilon value + // by 4 (i.e., shift up by 2 bits) solves the problem. 
+ testRangeImpl(min, max, min - (epsilon*4), max + epsilon); + } + + template <> + void NumericTestImpl::testRange() + { + // See above, except only the maximum actually uses the full precision + int bits = limits::digits - std::numeric_limits::digits; + double epsilon = (1 << bits); + CORBA::ULongLong max = limits::max() - (epsilon - 1); + testRangeImpl(0, max, -1.0, max + epsilon); + } + + template <> + void NumericTestImpl::testFromString() + { + CORBA::Any any; + any <<= "0"; + CPPUNIT_ASSERT_EQUAL((CORBA::Octet) 0, ossie::any::toOctet(any)); + + any <<= "255"; + CPPUNIT_ASSERT_EQUAL((CORBA::Octet) 255, ossie::any::toOctet(any)); + + any <<= "256"; + CPPUNIT_ASSERT_THROW(ossie::any::toOctet(any), std::range_error); + } + + template <> + void NumericTestImpl::testRange() + { + CORBA::Float max = limits::max(); + testRangeImpl(-max, max, -2.0 * max, 2.0 * max); + } + + template <> + void NumericTestImpl::testFromString() + { + // Very large value + CORBA::Any any; + any <<= "1.125e+38"; + CPPUNIT_ASSERT_EQUAL(1.125e38f, ossie::any::toFloat(any)); + + // Very small value + any <<= "-1.0002441406250e-32"; + CPPUNIT_ASSERT_EQUAL(-1.0002441406250e-32f, ossie::any::toFloat(any)); + + // Beyond the maximum range of float should throw an exception + any <<= "7e40"; + CPPUNIT_ASSERT_THROW(ossie::any::toFloat(any), std::range_error); + + // Number too small to be represented as a float (but valid for double) + // should get rounded to zero + any <<= "5.03125e-46"; + CPPUNIT_ASSERT_EQUAL(0.0f, ossie::any::toFloat(any)); + } + + template <> + void NumericTestImpl::testRange() + { + // Double has the largest range of the common numeric primitive types, + // so it's not possible to exceed its range with another primitive type + // (this test is here so that double tests can be created using the + // same macros as the other types) + } + + template <> + void NumericTestImpl::testFromString() + { + CORBA::Any any; + + // Simple integer conversion + any <<= "100000"; + 
CPPUNIT_ASSERT_EQUAL(100000.0, ossie::any::toDouble(any)); + + // Use a floating point value that is known to show a decimal point and + // exponent + double value = 1.125e7; + std::ostringstream oss; + oss << value; + any <<= oss.str(); + + // Conversion back to double should be simple + CPPUNIT_ASSERT_EQUAL(value, ossie::any::toDouble(any)); + } +} + +void AnyUtilsTest::setUp() +{ +} + +void AnyUtilsTest::tearDown() +{ +} + +void AnyUtilsTest::testIsNull() +{ + // Default constructor, Any has no type information + CORBA::Any any; + CPPUNIT_ASSERT(ossie::any::isNull(any)); + + // Insert a number, should no longer be null + any <<= (float)1; + CPPUNIT_ASSERT(!ossie::any::isNull(any)); + + // Likewise, a more complex type should not be null + any <<= CF::Properties(); + CPPUNIT_ASSERT(!ossie::any::isNull(any)); +} + +void AnyUtilsTest::testToBoolean() +{ + CORBA::Any any; + bool result; + + // Case-insensitive string literal + any <<= "true"; + CPPUNIT_ASSERT(ossie::any::toBoolean(any)); + CPPUNIT_ASSERT(ossie::any::toNumber(any, result)); + CPPUNIT_ASSERT_EQUAL(true, result); + + // Case-insensitive string literal + any <<= "False"; + CPPUNIT_ASSERT(!ossie::any::toBoolean(any)); + CPPUNIT_ASSERT(ossie::any::toNumber(any, result)); + CPPUNIT_ASSERT_EQUAL(false, result); + + // Integer, converted by C++ rules (zero == false) + any <<= (short)0; + CPPUNIT_ASSERT(!ossie::any::toBoolean(any)); + CPPUNIT_ASSERT(ossie::any::toNumber(any, result)); + CPPUNIT_ASSERT_EQUAL(false, result); + + // Double, converted by C++ rules (non-zero == true) + any <<= 100.5; + CPPUNIT_ASSERT(ossie::any::toBoolean(any)); + CPPUNIT_ASSERT(ossie::any::toNumber(any, result)); + CPPUNIT_ASSERT_EQUAL(true, result); + + // String that can be converted via a number + any <<= "5000"; + CPPUNIT_ASSERT(ossie::any::toBoolean(any)); + CPPUNIT_ASSERT(ossie::any::toNumber(any, result)); + CPPUNIT_ASSERT_EQUAL(true, result); + + // String that cannot be interpreted as a boolean at all + any <<= 
"invalid"; + CPPUNIT_ASSERT_THROW(ossie::any::toBoolean(any), std::bad_cast); + CPPUNIT_ASSERT(!ossie::any::toNumber(any, result)); +} + +#define DEFINE_NUMERIC_TEST(T,NAME) \ + void AnyUtilsTest::testTo##T##NAME() \ + { \ + NumericTestImpl impl(ossie::any::to##T); \ + impl.test##NAME(); \ + } + +FOREACH_TYPE_TEST(DEFINE_NUMERIC_TEST); diff --git a/redhawk/src/testing/cpp/AnyUtilsTest.h b/redhawk/src/testing/cpp/AnyUtilsTest.h new file mode 100644 index 000000000..3968eb868 --- /dev/null +++ b/redhawk/src/testing/cpp/AnyUtilsTest.h @@ -0,0 +1,67 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef ANYUTILSTEST_H +#define ANYUTILSTEST_H + +#include "CFTest.h" + +#include + +#define FOREACH_TEST(X, T) \ + X(T, FromBoolean) \ + X(T, FromNumber) \ + X(T, FromString) \ + X(T, Range) + +#define FOREACH_TYPE_TEST(X) \ + FOREACH_TEST(X, Octet) \ + FOREACH_TEST(X, Short) \ + FOREACH_TEST(X, UShort) \ + FOREACH_TEST(X, Long) \ + FOREACH_TEST(X, ULong) \ + FOREACH_TEST(X, LongLong) \ + FOREACH_TEST(X, ULongLong) \ + FOREACH_TEST(X, Float) \ + FOREACH_TEST(X, Double) + +class AnyUtilsTest : public CppUnit::TestFixture +{ +#define REGISTER_TESTS(T,NAME) CPPUNIT_TEST(testTo##T##NAME); + + CPPUNIT_TEST_SUITE(AnyUtilsTest); + CPPUNIT_TEST(testIsNull); + CPPUNIT_TEST(testToBoolean); + FOREACH_TYPE_TEST(REGISTER_TESTS); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testIsNull(); + + void testToBoolean(); + +#define DECLARE_TESTS(T,NAME) void testTo##T##NAME(); + FOREACH_TYPE_TEST(DECLARE_TESTS); +}; + +#endif // ANYUTILS_TEST_H diff --git a/redhawk/src/testing/cpp/BitBufferTest.cpp b/redhawk/src/testing/cpp/BitBufferTest.cpp new file mode 100644 index 000000000..1ac8ddc22 --- /dev/null +++ b/redhawk/src/testing/cpp/BitBufferTest.cpp @@ -0,0 +1,647 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "BitBufferTest.h" + +#include +#include + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(BitBufferTest); + +void BitBufferTest::testDefaultConstructor() +{ + // Empty const shared bitbuffer + const redhawk::shared_bitbuffer shared; + CPPUNIT_ASSERT(shared.size() == 0); + CPPUNIT_ASSERT(shared.empty()); + + // Empty regular bitbuffer + redhawk::bitbuffer buffer; + CPPUNIT_ASSERT(buffer.size() == 0); + CPPUNIT_ASSERT(buffer.empty()); +} + +void BitBufferTest::testConstructor() +{ + // Test allocating constructor + const size_t NUM_BITS = 16; + redhawk::bitbuffer buffer(NUM_BITS); + CPPUNIT_ASSERT(!buffer.empty()); + CPPUNIT_ASSERT_EQUAL(NUM_BITS, buffer.size()); + + // Test construction of shared buffer from mutable buffer + redhawk::shared_bitbuffer shared(buffer); + CPPUNIT_ASSERT(!shared.empty()); + CPPUNIT_ASSERT_EQUAL(buffer.size(), shared.size()); + CPPUNIT_ASSERT(shared.data() == buffer.data()); +} + +void BitBufferTest::testFromInt() +{ + // Input value is right-aligned (i.e., take lowest 28 bits) + redhawk::bitbuffer buffer = redhawk::bitbuffer::from_int(0xBADC0DE, 28); + // Bit buffer should be left-aligned + CPPUNIT_ASSERT_EQUAL((size_t) 0, buffer.offset()); + const data_type* data = buffer.data(); + CPPUNIT_ASSERT_EQUAL((data_type) 0xBA, data[0]); + CPPUNIT_ASSERT_EQUAL((data_type) 0xDC, data[1]); + CPPUNIT_ASSERT_EQUAL((data_type) 0x0D, data[2]); + CPPUNIT_ASSERT_EQUAL((data_type) 0xE0, (data_type) (data[3] & 0xF0)); +} + +void BitBufferTest::testFromArray() +{ + // Test with a large array and offset of 0 + const data_type array[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x11, 0x22 }; + const size_t bits = sizeof(array) * 8; + redhawk::bitbuffer buffer = redhawk::bitbuffer::from_array(array, bits); + // Bit buffer should be left-aligned, new memory, and equivalent + 
CPPUNIT_ASSERT_EQUAL((size_t) 0, buffer.offset()); + CPPUNIT_ASSERT(buffer.data() != array); + CPPUNIT_ASSERT_ARRAYS_EQUAL(array, buffer.data(), sizeof(array)); + + // Test with a non-zero offset and non-integral number of bytes + buffer = redhawk::bitbuffer::from_array(array, 4, 18); + // Bit buffer should be left-aligned, new memory, and equivalent + CPPUNIT_ASSERT_EQUAL((size_t) 0, buffer.offset()); + CPPUNIT_ASSERT(buffer.data() != array); + int status = redhawk::bitops::compare(buffer.data(), buffer.offset(), array, 4, buffer.size()); + CPPUNIT_ASSERT_MESSAGE("Offset array is not equal", status == 0); +} + +void BitBufferTest::testFromString() +{ + const std::string literal = "0101110101101011010101"; + redhawk::bitbuffer buffer = redhawk::bitbuffer::from_string(literal); + CPPUNIT_ASSERT_EQUAL(literal.size(), buffer.size()); + const data_type* data = buffer.data(); + CPPUNIT_ASSERT_EQUAL((data_type) 0x5D, data[0]); + CPPUNIT_ASSERT_EQUAL((data_type) 0x6B, data[1]); + CPPUNIT_ASSERT_EQUAL((data_type) 0x54, (data_type) (data[2] & 0xFC)); + + // Exceptions + CPPUNIT_ASSERT_THROW(redhawk::bitbuffer::from_string("0101101q"), std::invalid_argument); +} + +void BitBufferTest::testFromUnpacked() +{ + const redhawk::bitops::byte unpacked[] = { 0, 1, 0, 1, 0, 0, 0, 1 , 0, 2, 0xFF, 0x80 }; + const size_t bits = sizeof(unpacked); + redhawk::bitbuffer buffer = redhawk::bitbuffer::from_unpacked(unpacked, bits); + CPPUNIT_ASSERT_EQUAL(bits, buffer.size()); + const data_type* data = buffer.data(); + CPPUNIT_ASSERT_EQUAL((data_type) 0x51, data[0]); + CPPUNIT_ASSERT_EQUAL((data_type) 0x70, (data_type) (data[1] & 0xF0)); +} + +void BitBufferTest::testEquals() +{ + // Fill a bit buffer with a known pattern + const int64_t pattern = 0x194AB70D385; + redhawk::bitbuffer first = redhawk::bitbuffer::from_int(pattern, 41); + CPPUNIT_ASSERT(first == first); + + // Shared buffer aliasing the first should always be equal + const redhawk::shared_bitbuffer shared = first; + 
CPPUNIT_ASSERT(first == shared); + + // Another buffer with different backing memory should still compare + // equal + redhawk::bitbuffer second = redhawk::bitbuffer::from_int(pattern, 41); + CPPUNIT_ASSERT(second.data() != first.data()); + CPPUNIT_ASSERT(first == second); + + // Flip a bit, the comparison should now fail + second[17] = !second[17]; + CPPUNIT_ASSERT(first != second); + + // Create a new buffer with a different size, but the same data (just + // offset by few bits). It should compare unequal as-is; however, it should + // compare equal if taking a slice of the original buffer to re-align them. + redhawk::bitbuffer third = redhawk::bitbuffer::from_int(pattern, 38); + CPPUNIT_ASSERT(third != first); + CPPUNIT_ASSERT(third == shared.slice(3)); +} + +void BitBufferTest::testCopy() +{ + // Create a bit buffer with known data + redhawk::bitbuffer original(127); + for (size_t index = 0; index < original.size(); ++index) { + // Value is true if index is odd + original[index] = index & 1; + } + + // Make a copy, and verify that it's a new underlying buffer + redhawk::bitbuffer copy = original.copy(); + CPPUNIT_ASSERT(copy == original); + CPPUNIT_ASSERT(copy.data() != original.data()); + + // Set an even index to 1; the copy should be unaffected + original[2] = 1; + CPPUNIT_ASSERT_EQUAL(0, (int) copy[2]); +} + +void BitBufferTest::testSwap() +{ + // Create two mutable bit buffers with different contents + redhawk::bitbuffer first(31); + first.fill(1); + redhawk::bitbuffer second(24); + second.fill(0); + + // Swap them and check that the swap worked as expected + first.swap(second); + CPPUNIT_ASSERT_EQUAL((size_t) 24, first.size()); + CPPUNIT_ASSERT_EQUAL(0, (int) first[0]); + CPPUNIT_ASSERT_EQUAL((size_t) 31, second.size()); + CPPUNIT_ASSERT_EQUAL(1, (int) second[0]); + + // Create shared bit buffer aliases for each buffer + redhawk::shared_bitbuffer shared_first = first; + redhawk::shared_bitbuffer shared_second = second; + + // Swap the shared buffers and 
make sure that the underlying data pointers + // are correct + shared_first.swap(shared_second); + CPPUNIT_ASSERT(shared_first.data() == second.data()); + CPPUNIT_ASSERT(shared_second.data() == first.data()); +} + +void BitBufferTest::testResize() +{ + // Fill a bit buffer with known byte data + const data_type expected[] = { 0xB3, 0x47, 0xC0 }; + redhawk::bitbuffer buffer = redhawk::bitbuffer::from_array(expected, 18); + + // Resize the bit buffer, then check that the memory is new and values are + // preserved + const data_type* data = buffer.data(); + buffer.resize(31); + CPPUNIT_ASSERT_EQUAL((size_t) 31, buffer.size()); + CPPUNIT_ASSERT(buffer.data() != data); + data = buffer.data(); + CPPUNIT_ASSERT_EQUAL(expected[0], data[0]); + CPPUNIT_ASSERT_EQUAL(expected[1], data[1]); + data_type mask = 0xC0; + CPPUNIT_ASSERT_EQUAL((data_type) (expected[2] & mask), (data_type) (data[2] & mask)); + + // Resize down (which can be done better with trim, but is still legal) and + // check values + buffer.resize(13); + CPPUNIT_ASSERT_EQUAL((size_t) 13, buffer.size()); + data = buffer.data(); + CPPUNIT_ASSERT_EQUAL(expected[0], data[0]); + mask = 0xF8; + CPPUNIT_ASSERT_EQUAL((data_type) (expected[1] & mask), (data_type) (data[1] & mask)); +} + +void BitBufferTest::testFill() +{ + // Create a new buffer and set the underlying memory to all ones + redhawk::bitbuffer buffer(64); + std::memset(buffer.data(), 0xFF, 8); + + // Fill the entire buffer with all zeros + buffer.fill(0); + + data_type expected[8]; + std::memset(expected, 0, sizeof(expected)); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, buffer.data(), sizeof(expected)); + + // Fill a subset of the buffer with ones + buffer.fill(9, 33, 1); + expected[1] = 0x7F; + expected[2] = expected[3] = 0xFF; + expected[4] = 0x80; + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, buffer.data(), sizeof(expected)); + + // Implicit offset and non-byte-aligned end + buffer.trim(42, 47); + buffer.fill(1); + CPPUNIT_ASSERT_EQUAL((data_type) 0x3E, 
*buffer.data()); +} + +void BitBufferTest::testIndexAccess() +{ + // Bit pattern: 001 0101 0011 1100 + const redhawk::bitbuffer buffer = redhawk::bitbuffer::from_int(0x153C, 15); + CPPUNIT_ASSERT_EQUAL(0, buffer[0]); + CPPUNIT_ASSERT_EQUAL(0, buffer[1]); + CPPUNIT_ASSERT_EQUAL(1, buffer[2]); + CPPUNIT_ASSERT_EQUAL(0, buffer[3]); + CPPUNIT_ASSERT_EQUAL(1, buffer[4]); + + // Create a shared alias and continue checking (these should be the same + // code path, so it's really just checking that syntactically both forms + // are valid) + redhawk::shared_bitbuffer shared = buffer; + CPPUNIT_ASSERT_EQUAL(0, shared[5]); + CPPUNIT_ASSERT_EQUAL(1, shared[6]); + CPPUNIT_ASSERT_EQUAL(0, shared[7]); + CPPUNIT_ASSERT_EQUAL(0, shared[8]); + CPPUNIT_ASSERT_EQUAL(1, shared[9]); + CPPUNIT_ASSERT_EQUAL(1, shared[10]); + + // Use slice to create a new bit buffer with a non-zero offset to test that + // the offset is taken into account + redhawk::shared_bitbuffer slice = shared.slice(11, 15); + CPPUNIT_ASSERT(slice.offset() != 0); + CPPUNIT_ASSERT_EQUAL(1, slice[0]); + CPPUNIT_ASSERT_EQUAL(1, slice[1]); + CPPUNIT_ASSERT_EQUAL(0, slice[2]); + CPPUNIT_ASSERT_EQUAL(0, slice[3]); +} + +void BitBufferTest::testIndexAssignment() +{ + // Start with a zero-filled buffer + redhawk::bitbuffer buffer(48); + buffer.fill(0); + + // Basic bit setting + buffer[3] = 1; + const data_type* data = buffer.data(); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Set bit", (data_type) 0x10, data[0]); + + // Two bits in the same byte + buffer[8] = 1; + buffer[13] = 1; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Set two bits in same byte", (data_type) 0x84, data[1]); + + // Any non-zero integer should be interpreted as a 1 + buffer[18] = 2; + buffer[22] = -5289; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Set non-zero integer", (data_type) 0x22, data[2]); + + // 0 should clear an existing bit + buffer[8] = 0; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Clear bit", (data_type) 0x04, data[1]); + + // Transitive assignment + buffer[24] = buffer[27] = 1; + 
CPPUNIT_ASSERT_EQUAL_MESSAGE("Transitive assignment", (data_type) 0x90, data[3]); + + // Use a slice to test that offsets are accounted for + redhawk::bitbuffer slice = buffer.slice(35, 47); + CPPUNIT_ASSERT(slice.offset() != 0); + slice[1] = 1; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Slice with offset", 1, (int) buffer[36]); +} + +void BitBufferTest::testSharing() +{ + // Create a bitbuffer with a known pattern + redhawk::bitbuffer buffer = redhawk::bitbuffer::from_int(0xE581, 16); + + // Create a const shared bit buffer aliasing the original + const redhawk::shared_bitbuffer shared = buffer; + CPPUNIT_ASSERT(shared == buffer); + + // Invert some bits and ensure that the bit buffers are still equal + buffer[2] = !buffer[2]; + buffer[5] = !buffer[5]; + buffer[11] = !buffer[11]; + CPPUNIT_ASSERT(shared == buffer); +} + +void BitBufferTest::testSlicing() +{ + // Fill a new bit buffer with alternating 0's and 1's + redhawk::bitbuffer buffer(12); + for (size_t index = 0; index < buffer.size(); ++index) { + buffer[index] = index & 1; + } + + // Take a 4-bit slice from the middle and check that it points to the + // same data (offset by the start index) + const redhawk::shared_bitbuffer middle = buffer.slice(4, 8); + CPPUNIT_ASSERT_EQUAL((size_t) 4, middle.size()); + CPPUNIT_ASSERT_EQUAL((size_t) 4, middle.offset()); + + // Take a slice from the midpoint to the end, and check that the bits match + const redhawk::shared_bitbuffer end = buffer.slice(6); + for (size_t index = 0; index < end.size(); ++index) { + CPPUNIT_ASSERT_EQUAL((int) buffer[index + 6], end[index]); + } + + // Compare the overlap between the two slices by taking sub-slices + CPPUNIT_ASSERT(middle.slice(2) == end.slice(0, 2)); + + // Starting at the end index should return an empty bit buffer + redhawk::shared_bitbuffer empty = buffer.slice(buffer.size()); + CPPUNIT_ASSERT(empty.empty()); + + // Exceptions + CPPUNIT_ASSERT_THROW(buffer.slice(buffer.size() + 1), std::out_of_range); + 
CPPUNIT_ASSERT_THROW(buffer.slice(1, 0), std::invalid_argument); +} + +void BitBufferTest::testTrim() +{ + // Tests bit buffer trimming. Only shared_bitbuffer needs to be tested, + // because bitbuffer inherits and does not override this function. + + // Start with known bit pattern: 1000 1010 1100 1101 1100 + redhawk::shared_bitbuffer buffer = redhawk::bitbuffer::from_int(0x8ACDC, 20); + const data_type* data = buffer.data(); + + // Use 1-argument trim to remove the first 4 bits. The data pointer should + // remain the same. + buffer.trim(4); + CPPUNIT_ASSERT_EQUAL(data, buffer.data()); + CPPUNIT_ASSERT_EQUAL((size_t) 4, buffer.offset()); + CPPUNIT_ASSERT_EQUAL((size_t) 16, buffer.size()); + + // 1000 (1010 1100 1101 1100) + redhawk::shared_bitbuffer expected = redhawk::bitbuffer::from_int(0xACDC, 16); + CPPUNIT_ASSERT(expected == buffer); + + // Use 2-argument trim to 9-bit range. The data pointer should advance + // and the offset should wrap around. + buffer.trim(5, 14); + CPPUNIT_ASSERT(buffer.data() != data); + CPPUNIT_ASSERT_EQUAL((size_t) 1, buffer.offset()); + CPPUNIT_ASSERT_EQUAL((size_t) 9, buffer.size()); + + // 1010 1(100 1101 11)00 + expected = redhawk::bitbuffer::from_int(0x137, 9); + CPPUNIT_ASSERT(expected == buffer); + + // Trim starting at the end index should result in empty buffer + redhawk::shared_bitbuffer empty = buffer; + empty.trim(empty.size()); + CPPUNIT_ASSERT(empty.empty()); + + // Exceptions + CPPUNIT_ASSERT_THROW(buffer.trim(buffer.size() + 1), std::out_of_range); + CPPUNIT_ASSERT_THROW(buffer.trim(1, 0), std::invalid_argument); +} + +void BitBufferTest::testReplace() +{ + // Destination is all 0's + redhawk::bitbuffer dest(36); + dest.fill(0); + + // Set known pattern in source + // 10001100|11000110|1101xxxx + redhawk::shared_bitbuffer src = redhawk::bitbuffer::from_int(0x8CC6D, 20); + + // 3-argument version: replace 9 bits at offset 1 + // (1000110 0|1)100 + // 0(1000110|0 1)000000 = 0x4640 + dest.replace(1, 9, (const 
redhawk::shared_bitbuffer&) src); + const data_type* data = dest.data(); + CPPUNIT_ASSERT_EQUAL((data_type) 0x46, data[0]); + CPPUNIT_ASSERT_EQUAL((data_type) 0x40, data[1]); + + // 4-argument version: replace 13 bits at offset 22, starting with the 4th + // bit of the source + // 1000(11 00|110001 10|1)101 + // 000000(11|00 110001|10 1)0xxxx = 0x0331A + dest.replace(22, 13, (const redhawk::shared_bitbuffer&) src, 4); + CPPUNIT_ASSERT_EQUAL((data_type) 0x03, data[2]); + CPPUNIT_ASSERT_EQUAL((data_type) 0x31, data[3]); + CPPUNIT_ASSERT_EQUAL((data_type) 0xA0, (data_type) (data[4] & 0xF0)); +} + +void BitBufferTest::testGetInt() +{ + const data_type pattern[] = { + 0x35, 0x45, 0xE6, 0xA9, 0xA1, 0x1B, 0xAA, 0xE4, 0x9A, 0x3F, 0x3B, 0x38 + }; + const redhawk::shared_bitbuffer buffer = redhawk::bitbuffer::from_array(pattern, sizeof(pattern)*8); + + // Small value + CPPUNIT_ASSERT_EQUAL(3, (int) buffer.getint(0, 4)); + + // Multi-byte with offset + // 0x3545E6A9 = 001(10101|01000101|11100110|10101001) + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x1545E6A9, buffer.getint(3, 29)); + + // Implicit offset (slice) + redhawk::shared_bitbuffer slice = buffer.slice(2, 32); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x1545E6A9, slice.getint(1, 29)); + + // Maximum size (64 bits) + CPPUNIT_ASSERT_EQUAL((uint64_t) 0xA11BAAE49A3F3B38, buffer.getint(32, 64)); + + // Exceptions + CPPUNIT_ASSERT_THROW(buffer.getint(buffer.size(), 1), std::out_of_range); + CPPUNIT_ASSERT_THROW(buffer.getint(0, 65), std::length_error); +} + +void BitBufferTest::testSetInt() +{ + redhawk::bitbuffer buffer(96); + buffer.fill(0); + const data_type* data = buffer.data(); + + // Small value + buffer.setint(0, 0x03, 4); + CPPUNIT_ASSERT_EQUAL((data_type) 0x30, data[0]); + + // Multi-byte with offset + buffer.setint(4, 0x6A8BCD, 24); + CPPUNIT_ASSERT_EQUAL((data_type) 0x36, data[0]); + CPPUNIT_ASSERT_EQUAL((data_type) 0xA8, data[1]); + CPPUNIT_ASSERT_EQUAL((data_type) 0xBC, data[2]); + CPPUNIT_ASSERT_EQUAL((data_type) 0xD0, 
data[3]); + + // Implicit offset (slice) + // 00110110|10101000 + // slice 110110|101 + // set 01001|0 + // = 00101001|00101000 + redhawk::bitbuffer slice = buffer.slice(2, 11); + slice.setint(1, 0x12, 6); + CPPUNIT_ASSERT_EQUAL((data_type) 0x29, data[0]); + CPPUNIT_ASSERT_EQUAL((data_type) 0x28, data[1]); + + // Maximum size (64 bits) + const data_type expected[] = { + 0xA1, 0x1B, 0xAA, 0xE4, 0x9A, 0x3F, 0x3B, 0x38 + }; + buffer.setint(32, 0xA11BAAE49A3F3B38, 64); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, data + 4, sizeof(expected)); + + // Exceptions + CPPUNIT_ASSERT_THROW(buffer.setint(buffer.size(), 0, 1), std::out_of_range); + CPPUNIT_ASSERT_THROW(buffer.setint(0, 0, 65), std::length_error); +} + +void BitBufferTest::testPopcount() +{ + // 10111001000100011101000110111100001 + redhawk::shared_bitbuffer buffer = redhawk::bitbuffer::from_int(0x5C88E8DE1, 35); + CPPUNIT_ASSERT_EQUAL(17, buffer.popcount()); + + // 100011101000110111100 + buffer.trim(11, buffer.size() - 3); + CPPUNIT_ASSERT_EQUAL(11, buffer.popcount()); + + // 1010001 + buffer.trim(6, buffer.size() - 8); + CPPUNIT_ASSERT_EQUAL(3, buffer.popcount()); +} + +void BitBufferTest::testDistance() +{ + // 110100010011110100011001111100111 + redhawk::shared_bitbuffer first = redhawk::bitbuffer::from_int(0x1A27A33E7, 33); + + // 000101011111111010011110100000011 + redhawk::shared_bitbuffer second = redhawk::bitbuffer::from_int(0x02BFD3D03, 33); + + // Distance from self should always be 0 + CPPUNIT_ASSERT_EQUAL(0, first.distance(first)); + + // a 110100010011110100011001111100111 + // b 000101011111111010011110100000011 + // a XOR b = 110001001100001110000111011100100 + CPPUNIT_ASSERT_EQUAL(15, first.distance(second)); +} + +void BitBufferTest::testFind() +{ + // Pick a oddly-sized pattern + redhawk::shared_bitbuffer pattern = redhawk::bitbuffer::from_int(0x2C07BE, 22); + + // Fill a bit buffer with 1's, then copy the pattern into it in a couple of + // places + redhawk::bitbuffer buffer(300); + 
buffer.fill(1); + buffer.replace(37, pattern.size(), pattern); + buffer.replace(200, pattern.size(), pattern); + + // 2-argument find searches from the beginning + CPPUNIT_ASSERT_EQUAL((size_t) 37, buffer.find(pattern, 0)); + + // Use the optional 3rd argument to start after the first occurrence + CPPUNIT_ASSERT_EQUAL((size_t) 200, buffer.find(59, pattern, 0)); + + // Finally, the search should fail when started after both occurrences + CPPUNIT_ASSERT_EQUAL(redhawk::bitbuffer::npos, buffer.find(222, pattern, 0)); + + // Introduce some bit errors + buffer[38] = !buffer[38]; + buffer[48] = !buffer[48]; + buffer[220] = !buffer[220]; + + // Try decreasing tolerances + CPPUNIT_ASSERT_EQUAL((size_t) 37, buffer.find(pattern, 2)); + CPPUNIT_ASSERT_EQUAL((size_t) 200, buffer.find(pattern, 1)); + CPPUNIT_ASSERT_EQUAL(redhawk::bitbuffer::npos, buffer.find(pattern, 0)); + + // Starting the search past the end of the bit buffer should always fail, + // but without an exception (or crash) + CPPUNIT_ASSERT_EQUAL(redhawk::bitbuffer::npos, buffer.find(buffer.size(), pattern, 0)); +} + + +static std::string ascii7toString(const redhawk::shared_bitbuffer& ascii) +{ + std::string result; + result.resize(ascii.size() / 7); + for (size_t bit = 0; bit < ascii.size(); bit += 7) { + result[bit/7] = (char) ascii.getint(bit, 7); + } + return result; +} + +void BitBufferTest::testTakeSkip() +{ + // Use ASCII text, where the high bit is always zero; a start offset is + // required + const std::string msg = "Here is some text"; + redhawk::shared_bitbuffer buffer = redhawk::bitbuffer::from_array((unsigned char*) msg.c_str(), msg.size() * 8); + // Take 7, skip 1, start at 1 + redhawk::bitbuffer ascii = buffer.takeskip(7, 1, 1); + CPPUNIT_ASSERT_EQUAL(msg.size() * 7, ascii.size()); + + // Reconstruct the input text by taking 7 bits at a time + std::string result = ascii7toString(ascii); + CPPUNIT_ASSERT_EQUAL(msg, result); + + // Repeat with a starting and ending offset + // char 5 = bit 40 (+1 to 
skip high bit) + // char 12 = bit 96 (+1 to skip high bit) + ascii = buffer.takeskip(7, 1, 41, 97); + result = ascii7toString(ascii); + CPPUNIT_ASSERT_EQUAL(msg.substr(5, 7), result); + + // Exceptions + // Start index past end of source + CPPUNIT_ASSERT_THROW(buffer.takeskip(7, 1, buffer.size() + 1), std::out_of_range); + // End less than start + CPPUNIT_ASSERT_THROW(buffer.takeskip(7, 1, 19, 18), std::invalid_argument); +} + +void BitBufferTest::testTakeSkipIntoBuffer() +{ + // Use a 28-bit marker and an 8-bit counter + const uint32_t marker = 0x7C3ABA9; + uint8_t counter = 0; + + // Source buffer holds 8 "frames" + redhawk::bitbuffer src(288); + src.fill(0); + for (size_t pos = 0; pos < src.size(); pos += 36) { + src.setint(pos, marker, 28); + src.setint(pos+28, counter, 8); + ++counter; + } + + // Do a take/skip to copy just the markers out of the source + redhawk::bitbuffer dest(28*8); + dest.fill(0); + size_t bits = dest.takeskip(src, 28, 8); + CPPUNIT_ASSERT_EQUAL(dest.size(), bits); + for (size_t pos = 0; pos < dest.size(); pos += 28) { + std::ostringstream oss; + oss << "position " << pos; + CPPUNIT_ASSERT_EQUAL_MESSAGE(oss.str(), (uint64_t) marker, dest.getint(pos, 28)); + } + + // Use a start index to copy the counters, and an end index to limit the + // bits copied + dest = redhawk::bitbuffer(6*8); + dest.fill(0); + bits = dest.takeskip(src, 8, 28, 28, 6*36); + CPPUNIT_ASSERT_EQUAL(dest.size(), bits); + counter = 0; + for (size_t pos = 0; pos < dest.size(); pos += 8) { + std::ostringstream oss; + oss << "position " << pos; + CPPUNIT_ASSERT_EQUAL_MESSAGE(oss.str(), (uint64_t) counter, dest.getint(pos, 8)); + ++counter; + } + + // Exceptions + // Start index past end of source + CPPUNIT_ASSERT_THROW(dest.takeskip(src, 28, 8, src.size() + 1), std::out_of_range); + // End less than start + CPPUNIT_ASSERT_THROW(dest.takeskip(src, 28, 8, 1, 0), std::invalid_argument); + // Destination too small + CPPUNIT_ASSERT_THROW(dest.takeskip(src, 40, 1), 
std::length_error); +} diff --git a/redhawk/src/testing/cpp/BitBufferTest.h b/redhawk/src/testing/cpp/BitBufferTest.h new file mode 100644 index 000000000..428262087 --- /dev/null +++ b/redhawk/src/testing/cpp/BitBufferTest.h @@ -0,0 +1,91 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */
+
+#ifndef BITBUFFERTEST_H
+#define BITBUFFERTEST_H
+
+#include "CFTest.h"
+
+#include <ossie/bitbuffer.h>
+
+class BitBufferTest : public CppUnit::TestFixture
+{
+    CPPUNIT_TEST_SUITE(BitBufferTest);
+    CPPUNIT_TEST(testDefaultConstructor);
+    CPPUNIT_TEST(testConstructor);
+    CPPUNIT_TEST(testFromInt);
+    CPPUNIT_TEST(testFromArray);
+    CPPUNIT_TEST(testFromString);
+    CPPUNIT_TEST(testFromUnpacked);
+    CPPUNIT_TEST(testEquals);
+    CPPUNIT_TEST(testCopy);
+    CPPUNIT_TEST(testSwap);
+    CPPUNIT_TEST(testResize);
+    CPPUNIT_TEST(testFill);
+    CPPUNIT_TEST(testIndexAccess);
+    CPPUNIT_TEST(testIndexAssignment);
+    CPPUNIT_TEST(testSharing);
+    CPPUNIT_TEST(testSlicing);
+    CPPUNIT_TEST(testTrim);
+    CPPUNIT_TEST(testReplace);
+    CPPUNIT_TEST(testGetInt);
+    CPPUNIT_TEST(testSetInt);
+    CPPUNIT_TEST(testPopcount);
+    CPPUNIT_TEST(testDistance);
+    CPPUNIT_TEST(testFind);
+    CPPUNIT_TEST(testTakeSkip);
+    CPPUNIT_TEST(testTakeSkipIntoBuffer);
+    CPPUNIT_TEST_SUITE_END();
+
+    typedef redhawk::shared_bitbuffer::data_type data_type;
+
+public:
+    void testDefaultConstructor();
+    void testConstructor();
+    void testFromInt();
+    void testFromArray();
+    void testFromString();
+    void testFromUnpacked();
+
+    void testEquals();
+    void testCopy();
+    void testSwap();
+    void testResize();
+    void testFill();
+
+    void testIndexAccess();
+    void testIndexAssignment();
+
+    void testSharing();
+    void testSlicing();
+    void testTrim();
+    void testReplace();
+
+    // Bit-specific operations
+    void testGetInt();
+    void testSetInt();
+    void testPopcount();
+    void testDistance();
+    void testFind();
+    void testTakeSkip();
+    void testTakeSkipIntoBuffer();
+};
+
+#endif // BITBUFFERTEST_H
diff --git a/redhawk/src/testing/cpp/BitopsTest.cpp b/redhawk/src/testing/cpp/BitopsTest.cpp
new file mode 100644
index 000000000..0a6652e08
--- /dev/null
+++ b/redhawk/src/testing/cpp/BitopsTest.cpp
@@ -0,0 +1,1176 @@
+/*
+ * This file is protected by Copyright. Please refer to the COPYRIGHT file
+ * distributed with this source distribution.
+ * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "BitopsTest.h" + +#include +#include +#include +#include +#include + +#include + +#include + +void BitopsTest::setUp() +{ +} + +void BitopsTest::tearDown() +{ +} + +void BitopsTest::testGetBit() +{ + const unsigned char data[] = { 0x5a, 0x18, 0xe7 }; + + int expected[] = { + 0, 1, 0, 1, 1, 0, 1, 0, // 0x5a + 0, 0, 0, 1, 1, 0, 0, 0, // 0x18 + 1, 1, 1, 0, 0, 1, 1, 1, // 0xe7 + }; + const size_t bits = sizeof(data) * 8; + + for (size_t pos = 0; pos < bits; ++pos) { + if (expected[pos] != redhawk::bitops::getbit(data, pos)) { + std::ostringstream message; + message << "bit position " << pos << " differs"; + CPPUNIT_FAIL(message.str()); + } + } +} + +void BitopsTest::testSetBit() +{ + unsigned char data[] = { 0x00, 0xff, 0x3a }; + + redhawk::bitops::setbit(data, 1, 1); + redhawk::bitops::setbit(data, 6, 1); + redhawk::bitops::setbit(data, 7, 1); + CPPUNIT_ASSERT_EQUAL(0x43, (int) data[0]); + CPPUNIT_ASSERT_EQUAL(0xff, (int) data[1]); + CPPUNIT_ASSERT_EQUAL(0x3a, (int) data[2]); + + redhawk::bitops::setbit(data, 8, 0); + redhawk::bitops::setbit(data, 11, 0); + redhawk::bitops::setbit(data, 15, 0); + CPPUNIT_ASSERT_EQUAL(0x43, (int) data[0]); + CPPUNIT_ASSERT_EQUAL(0x6e, (int) data[1]); + CPPUNIT_ASSERT_EQUAL(0x3a, (int) data[2]); + + 
redhawk::bitops::setbit(data, 16, 1); + redhawk::bitops::setbit(data, 18, 0); + redhawk::bitops::setbit(data, 20, 0); + redhawk::bitops::setbit(data, 21, 1); + CPPUNIT_ASSERT_EQUAL(0x43, (int) data[0]); + CPPUNIT_ASSERT_EQUAL(0x6e, (int) data[1]); + CPPUNIT_ASSERT_EQUAL(0x96, (int) data[2]); +} + +void BitopsTest::testGetInt() +{ + const unsigned char packed[] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF + }; + + // Read increasing sizes up to full 64-bit value + uint64_t value = 0x0123456789ABCDEF; + for (size_t bits = 64; bits > 0; bits -= 4) { + CPPUNIT_ASSERT_EQUAL(value, redhawk::bitops::getint(packed, 0, bits)); + value >>= 4; + } + + // 4-bit (nibble) aligned reads + value = 0; + for (size_t pos = 0; pos < 16; ++pos) { + CPPUNIT_ASSERT_EQUAL(value, redhawk::bitops::getint(packed, pos*4, 4)); + value += 1; + } + + // Byte-aligned 8-bit reads + value = 0x01; + for (size_t pos = 0; pos < 8; ++pos) { + CPPUNIT_ASSERT_EQUAL(value, redhawk::bitops::getint(packed, pos*8, 8)); + value += 0x22; + } + + // Byte-aligned 16-bit reads + value = 0x0123; + for (size_t pos = 0; pos < 7; ++pos) { + CPPUNIT_ASSERT_EQUAL(value, redhawk::bitops::getint(packed, pos*8, 16)); + value += 0x2222; + } + + // Byte-aligned 32-bit reads + value = 0x01234567; + for (size_t pos = 0; pos < 5; ++pos) { + CPPUNIT_ASSERT_EQUAL(value, redhawk::bitops::getint(packed, pos*8, 32)); + value += 0x22222222; + } + + // More than 64 bits is an error + CPPUNIT_ASSERT_THROW(redhawk::bitops::getint(packed, 0, 65), std::length_error); +} + +void BitopsTest::testGetIntUnaligned() +{ + const unsigned char packed[] = { + 0xE4, 0xBC, 0x4F // 11100100|10111100|01001111 + }; + + // 111(00100|1)0111100 = 001001 + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x09, redhawk::bitops::getint(packed, 3, 6)); + + // 11(100100|10111100) = 100100|10111100 = 0x24BC + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x24BC, redhawk::bitops::getint(packed, 2, 14)); + + // 11100(100|10111100|010011)11 = 1|00101111|00010011 = 0x12F13 + 
CPPUNIT_ASSERT_EQUAL((uint64_t) 0x12F13, redhawk::bitops::getint(packed, 5, 17)); +} + +void BitopsTest::testGetIntUnalignedSmall() +{ + const unsigned char packed = 0xA7; + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x53, redhawk::bitops::getint(&packed, 0, 7)); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x13, redhawk::bitops::getint(&packed, 1, 6)); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x13, redhawk::bitops::getint(&packed, 2, 5)); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x01, redhawk::bitops::getint(&packed, 3, 3)); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x03, redhawk::bitops::getint(&packed, 4, 3)); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x03, redhawk::bitops::getint(&packed, 5, 2)); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x03, redhawk::bitops::getint(&packed, 6, 2)); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x01, redhawk::bitops::getint(&packed, 7, 1)); +} + +void BitopsTest::testSetInt() +{ + unsigned char packed[8]; + std::fill(packed, packed + sizeof(packed), 0); + + const unsigned char expected[] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF + }; + + // 4-bit (nibble) writes + std::fill(packed, packed + sizeof(packed), 0xFF); + redhawk::bitops::setint(packed, 0, 0x0, 4); + CPPUNIT_ASSERT_EQUAL(0x0F, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xFF, (int) packed[1]); + redhawk::bitops::setint(packed, 4, 0x1, 4); + CPPUNIT_ASSERT_EQUAL(0x01, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xFF, (int) packed[1]); + + // 8-bit write + redhawk::bitops::setint(packed, 8, 0x23, 8); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, packed, 2); + CPPUNIT_ASSERT_EQUAL(0xFF, (int) packed[2]); + + // 16-bit write + redhawk::bitops::setint(packed, 16, 0x4567, 16); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, packed, 4); + CPPUNIT_ASSERT_EQUAL(0xFF, (int) packed[4]); + + // 32-bit write + redhawk::bitops::setint(packed, 32, 0x89ABCDEF, 32); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, packed, 8); + + // 64-bit write + std::fill(packed, packed + sizeof(packed), 0xFF); + redhawk::bitops::setint(packed, 0, 0x0123456789ABCDEF, 64); + 
CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, packed, 8); + + // More than 64 bits is an error + CPPUNIT_ASSERT_THROW(redhawk::bitops::setint(packed, 0, 0, 65), std::length_error); +} + +void BitopsTest::testSetIntUnaligned() +{ + unsigned char packed[] = { + 0xE4, 0xBC, 0x4F // 11100100|10111100|01001111 + }; + + // 111(11100|0)0111100 = 0xFC3C + // 111000 = 0x38 + redhawk::bitops::setint(packed, 3, 0x38, 6); + CPPUNIT_ASSERT_EQUAL(0xFC, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0x3C, (int) packed[1]); + CPPUNIT_ASSERT_EQUAL(0x4F, (int) packed[2]); + + // 11(000010|11010011) = 0xC2D3 + // 000010|11010011 = 0x02D3 + redhawk::bitops::setint(packed, 2, 0x02D3, 14); + CPPUNIT_ASSERT_EQUAL(0xC2, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xD3, (int) packed[1]); + CPPUNIT_ASSERT_EQUAL(0x4F, (int) packed[2]); + + // 11000(100|01111001|100000)11 = 0xC47983 + // 1|00011110|01100000 = 0x11E60 + redhawk::bitops::setint(packed, 5, 0x11E60, 17); + CPPUNIT_ASSERT_EQUAL(0xC4, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0x79, (int) packed[1]); + CPPUNIT_ASSERT_EQUAL(0x83, (int) packed[2]); +} + +void BitopsTest::testSetIntUnalignedSmall() +{ + // First byte is initial state, second byte is guard byte + unsigned char packed[] = { 0x11, 0xBB }; + + redhawk::bitops::setint(packed, 0, 0x41, 7); + CPPUNIT_ASSERT_EQUAL(0x83, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xBB, (int) packed[1]); + + redhawk::bitops::setint(packed, 1, 0x1B, 5); + CPPUNIT_ASSERT_EQUAL(0xEF, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xBB, (int) packed[1]); + + redhawk::bitops::setint(packed, 2, 0x02, 3); + CPPUNIT_ASSERT_EQUAL(0xD7, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xBB, (int) packed[1]); + + redhawk::bitops::setint(packed, 3, 0x04, 4); + CPPUNIT_ASSERT_EQUAL(0xC9, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xBB, (int) packed[1]); + + redhawk::bitops::setint(packed, 4, 0x03, 3); + CPPUNIT_ASSERT_EQUAL(0xC7, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xBB, (int) packed[1]); + + redhawk::bitops::setint(packed, 5, 0x00, 2); + 
CPPUNIT_ASSERT_EQUAL(0xC1, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xBB, (int) packed[1]); + + redhawk::bitops::setint(packed, 6, 0x02, 2); + CPPUNIT_ASSERT_EQUAL(0xC2, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xBB, (int) packed[1]); + + redhawk::bitops::setint(packed, 7, 0x01, 1); + CPPUNIT_ASSERT_EQUAL(0xC3, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0xBB, (int) packed[1]); +} + +void BitopsTest::testFill() +{ + // Basic byte-aligned fill + // pre: 00000000|00000000|00000000|00000000|00000000 + // ^^^^^^^^ ^^^^^^^^ ^^^^^^^^ ^^^^^^^^ + // post: 11111111|11111111|11111111|11111111|00000000 + unsigned char expected[] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0x00 + }; + unsigned char buffer[sizeof(expected)]; + memset(buffer, 0, sizeof(buffer)); + redhawk::bitops::fill(buffer, 0, 32, 1); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, buffer, sizeof(expected)); + + // Clear first 13 bits (partial byte on end) + // pre: 11111111|11111111|11111111|11111111|00000000 + // ^^^^^^^^ ^^^^^ + // post: 00000000|00000111|11111111|11111111|00000000 + expected[0] = 0x00; + expected[1] = 0x07; + redhawk::bitops::fill(buffer, 0, 13, 0); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, buffer, sizeof(expected)); + + // Middle 5 bits in first byte + // pre: 00000000|00000111|11111111|11111111|00000000 + // ^^^^^ + // post: 00111110|00000111|11111111|11111111|00000000 + expected[0] = 0x3E; + redhawk::bitops::fill(buffer, 2, 5, 1); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, buffer, sizeof(expected)); + + // Clear range of bits with partial bytes at beginning and end + // pre: 00111110|00000111|11111111|11111111|00000000 + // ^^ ^^^^^^^^ ^^^ + // post: 00111110|00000100|00000000|00011111|00000000 + expected[1] = 0x04; + expected[2] = 0x00; + expected[3] = 0x1F; + redhawk::bitops::fill(buffer, 14, 13, 0); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, buffer, sizeof(expected)); + + // Set right-aligned partial bits in a single byte + // pre: 00111110|00000100|00000000|00011111|00000000 + // ^^^^^^^ + // post: 
00111110|00000100|01111111|00011111|00000000 + expected[2] = 0x7F; + redhawk::bitops::fill(buffer, 17, 7, 1); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, buffer, sizeof(expected)); + + // Set left-aligned partial bits in a single byte + // pre: 00111110|00000100|01111111|00011111|00000000 + // ^^^^ + // post: 00111110|11110100|01111111|00011111|00000000 + expected[1] = 0xF4; + redhawk::bitops::fill(buffer, 8, 4, 1); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, buffer, sizeof(expected)); +} + +void BitopsTest::testPack() +{ + const unsigned char implode[] = { + 1, 0, 1, 0, 0, 1, 1, 0, // 0xa6 + 1, 0, 0, 1, 0, 0, 1, 1, // 0x93 + 0, 1, 0, 1, 1, 0, 1, 1, // 0x5b + }; + const size_t bits = sizeof(implode); + + std::vector packed; + packed.resize((bits + 7) / 8); + std::fill(packed.begin(), packed.end(), 0); + + redhawk::bitops::pack(&packed[0], 0, &implode[0], bits); + + CPPUNIT_ASSERT_EQUAL(0xa6, (int) packed[0]); + CPPUNIT_ASSERT_EQUAL(0x93, (int) packed[1]); + CPPUNIT_ASSERT_EQUAL(0x5b, (int) packed[2]); +} + +void BitopsTest::testPackSmall() +{ + const unsigned char implode[] = { + 1, 0, 1, 1, 0, 1, 1, 1, // 0xb7 + }; + + unsigned char packed = 0; + + redhawk::bitops::pack(&packed, 0, &implode[0], 6); + CPPUNIT_ASSERT_EQUAL(0xb4, (int) packed); +} + +void BitopsTest::testPackUnaligned() +{ + const unsigned char implode[] = { + 0, 0, 0, 0, 0, 1, 0, 1, // 0x05 + 1, 0, 0, 1, 0, 0, 1, 1, // 0x93 + 1, 1, 1, 1, 1, 0, 0, 0, // 0xf8 + }; + const size_t offset = 5; + const size_t bits = sizeof(implode) - offset - 3; + + std::vector packed; + packed.resize(3); + packed[0] = 0x9a; // 10011010 + packed[1] = 0x00; // 00000000 + packed[2] = 0x33; // 00110011 + + redhawk::bitops::pack(&packed[0], offset, &implode[offset], bits); + + // First byte should mix 5 bits from existing value with first 3 bits + // from unpacked value: 10011(010) | (00000)101 = 10011101 + CPPUNIT_ASSERT_EQUAL(0x9d, (int) packed[0]); + + // Second byte should match the middle 8 bits of unpacked value + 
CPPUNIT_ASSERT_EQUAL(0x93, (int) packed[1]); + + // Last byte should mix 3 trailing bits from existing value with last + // 5 bits of unpacked value: (00110)011 | 11111(000) = 11111011 + CPPUNIT_ASSERT_EQUAL(0xfb, (int) packed[2]); +} + +void BitopsTest::testPackUnalignedSmall() +{ + const unsigned char implode[] = { + 1, 0, 0, 1, // 0x9 + }; + + unsigned char packed = 0; + + redhawk::bitops::pack(&packed, 3, &implode[0], 4); + + CPPUNIT_ASSERT_EQUAL(0x12, (int) packed); +} + +void BitopsTest::testUnpack() +{ + const unsigned char packed[] = { 0xa6, 0x93, 0x5b }; + const size_t bits = sizeof(packed) * 8; + + const unsigned char expected[] = { + 1, 0, 1, 0, 0, 1, 1, 0, // 0xa6 + 1, 0, 0, 1, 0, 0, 1, 1, // 0x93 + 0, 1, 0, 1, 1, 0, 1, 1, // 0x5b + }; + + std::vector explode; + explode.resize(bits); + std::fill(explode.begin(), explode.end(), 0); + + redhawk::bitops::unpack(&explode[0], &packed[0], 0, bits); + + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, &explode[0], explode.size()); +} + +void BitopsTest::testUnpackSmall() +{ + const unsigned char packed = 0x9f; // 10011111 + const unsigned char expected[] = { + 1, 0, 0, 1, 1, 1, 1, 1 + }; + const size_t bits = 6; + std::vector explode; + explode.resize(bits + 1); + std::fill(explode.begin(), explode.end(), 0); + + redhawk::bitops::unpack(&explode[0], &packed, 0, bits); + + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, &explode[0], bits); + + // Make sure the last value is untouched + CPPUNIT_ASSERT_EQUAL(0, (int) explode[bits]); +} + +void BitopsTest::testUnpackUnaligned() +{ + const size_t offset = 1; + const size_t remain = 2; + const unsigned char packed[] = { + 0xa6, // 1010 0110 + 0x93, // 1001 0011 + 0x5b, // 0101 1011 + }; + const unsigned char expected[] = { + 1, 0, 1, 0, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 0, 1, 1, + 0, 1, 0, 1, 1, 0, 1, 1 + }; + const size_t bits = sizeof(expected) - (offset + remain); + + std::vector explode; + explode.resize(bits + 1); + std::fill(explode.begin(), explode.end(), 0); + + 
redhawk::bitops::unpack(&explode[0], &packed[0], offset, bits); + + // Check unpacked against the expected bits + CPPUNIT_ASSERT_ARRAYS_EQUAL(&expected[offset], &explode[0], bits); + + // Make sure the last value is untouched + CPPUNIT_ASSERT_EQUAL(0, (int) explode[bits]); +} + +void BitopsTest::testUnpackUnalignedSmall() +{ + const unsigned char packed = 0x18; // 00011000 + const unsigned char expected[] = { + 0, 0, 0, 1, 1, 0, 0, 0 + }; + const size_t offset = 2; + const size_t bits = 4; + + std::vector explode; + explode.resize(bits + 1); + std::fill(explode.begin(), explode.end(), 1); + + redhawk::bitops::unpack(&explode[0], &packed, offset, bits); + + CPPUNIT_ASSERT_ARRAYS_EQUAL(&expected[offset], &explode[0], bits); + + // Make sure the last value is untouched + CPPUNIT_ASSERT_EQUAL(1, (int) explode[bits]); +} + +void BitopsTest::testPopcount() +{ + // Nibble: 0 1 2 3 4 5 6 7 8 9 A B C D E F + // Popcount: 0 1 1 2 1 2 2 3 1 2 2 3 2 3 3 4 + const unsigned char packed[] = { + 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, + 0x1F, 0x21, 0x87, 0x68, 0xBA, 0xDD, 0xC0, 0xDE + }; + + CPPUNIT_ASSERT_EQUAL(1, redhawk::bitops::popcount(packed, 0, 8)); + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::popcount(packed, 8, 8)); + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::popcount(packed, 16, 8)); + CPPUNIT_ASSERT_EQUAL(5, redhawk::bitops::popcount(packed, 24, 8)); + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::popcount(packed, 32, 8)); + CPPUNIT_ASSERT_EQUAL(5, redhawk::bitops::popcount(packed, 40, 8)); + CPPUNIT_ASSERT_EQUAL(5, redhawk::bitops::popcount(packed, 48, 8)); + CPPUNIT_ASSERT_EQUAL(7, redhawk::bitops::popcount(packed, 56, 8)); + + CPPUNIT_ASSERT_EQUAL(4, redhawk::bitops::popcount(packed, 0, 16)); + CPPUNIT_ASSERT_EQUAL(8, redhawk::bitops::popcount(packed, 16, 16)); + CPPUNIT_ASSERT_EQUAL(8, redhawk::bitops::popcount(packed, 32, 16)); + CPPUNIT_ASSERT_EQUAL(12, redhawk::bitops::popcount(packed, 48, 16)); + + CPPUNIT_ASSERT_EQUAL(12, redhawk::bitops::popcount(packed, 0, 
32)); + CPPUNIT_ASSERT_EQUAL(20, redhawk::bitops::popcount(packed, 32, 32)); + + CPPUNIT_ASSERT_EQUAL(32, redhawk::bitops::popcount(packed, 0, 64)); + + CPPUNIT_ASSERT_EQUAL(65, redhawk::bitops::popcount(packed, 0, 128)); +} + +void BitopsTest::testPopcountUnaligned() +{ + unsigned char packed[] = { + 0xB3, 0x9E, 0x6F // 10110011|10011110|01101111 + }; + + // 10(11001)1 + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::popcount(packed, 2, 5)); + + // 10110(011|1001)1110 + CPPUNIT_ASSERT_EQUAL(4, redhawk::bitops::popcount(packed, 5, 7)); + + // 101100(11|10011110)|01101111 + CPPUNIT_ASSERT_EQUAL(7, redhawk::bitops::popcount(packed, 6, 10)); + + // 101(10011|10011110|01101)111 + CPPUNIT_ASSERT_EQUAL(11, redhawk::bitops::popcount(packed, 3, 18)); +} + +void BitopsTest::testToString() +{ + // 10100111|10111001|01000110|11100101 + const unsigned char packed[] = { 0xA7, 0xB9, 0x46, 0xE5 }; + const size_t bits = sizeof(packed) * 8; + + // Test with the full string + std::string dest; + dest.resize(bits); + redhawk::bitops::toString(&dest[0], packed, 0, bits); + CPPUNIT_ASSERT_EQUAL(std::string("10100111101110010100011011100101"), dest); + + // Test with an offset and a non-byte aligned end + // 101(00111|10111001|01000110|1110)0101 + dest.resize(bits - 7); + redhawk::bitops::toString(&dest[0], packed, 3, dest.size()); + CPPUNIT_ASSERT_EQUAL(std::string("0011110111001010001101110"), dest); + + // Test with a small unaligned value + // 10(10011)1 + dest.resize(5); + redhawk::bitops::toString(&dest[0], packed, 2, dest.size()); + CPPUNIT_ASSERT_EQUAL(std::string("10011"), dest); +} + +void BitopsTest::testParseString() +{ + const std::string str("10011010011110001111001000111110"); + + // Test with the full string + unsigned char packed[4]; + int count = redhawk::bitops::parseString(packed, 0, &str[0], str.size()); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Not all characters parsed", (int) str.size(), count); + + // 10011010|01111000|11110010|00111110 + const unsigned char expected1[] = { 
0x9A, 0x78, 0xF2, 0x3E }; + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected1, packed, 4); + + // Test with an offset and a non-byte aligned end + // src: (01111 000|11110 010|001)11 1110 + // dest: 100(11010|011 11000|111 100)10|00111110 + // = 100(01111|000 11110|010 001)10|00111110 + const unsigned char expected2[] = { 0x8F, 0x1E, 0x46, 0x3E }; + count = redhawk::bitops::parseString(packed, 3, &str[8], 19); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Not all characters parsed", 19, count); + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected2, packed, 4); + + // Test with a small unaligned value + // src: 100(11010)0 + // dest: 10(00111)1 + // = 10(11010)1 + count = redhawk::bitops::parseString(packed, 2, &str[3], 5); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Not all characters parsed", 5, count); + CPPUNIT_ASSERT_EQUAL((unsigned char) 0xB5, packed[0]); +} + +void BitopsTest::testParseStringError() +{ + // Invalid string with a letter instead of a number at position 11 + const std::string invalid("01101101001x010101101100"); + + // Parsing should stop when it hits the invalid character (tests that the + // full byte case returns early correctly) + unsigned char packed[] = { 0x00, 0x0A }; + int count = redhawk::bitops::parseString(packed, 0, &invalid[0], invalid.size()); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Returned count does not match invalid character", 11, count); + + // The values up to that point should still be updated + // src: (01101101|001)x0101 + // dest: (00000000|000)01010 + // = (01101101|001)01010 + const unsigned char expected[] = { 0x6D, 0x2A }; + CPPUNIT_ASSERT_ARRAYS_EQUAL(expected, packed, sizeof(expected)); + + // Repeat with a partial byte, no offset + // src: 0110110(1001)x0101 + // ^ ^ + // dest: (0110)1101 + // = (1001)1101 + count = redhawk::bitops::parseString(packed, 0, &invalid[7], 6); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Returned count does not match invalid character", 4, count); + CPPUNIT_ASSERT_EQUAL((unsigned char) 0x9D, packed[0]); + + // Sub-byte with offset + // src: 
0110110100(1)x0101 + // ^ ^ + // dest: 10(0)11101 + // - 10(1)11101 + count = redhawk::bitops::parseString(packed, 2, &invalid[10], 5); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Returned count does not match invalid character", 1, count); + CPPUNIT_ASSERT_EQUAL((unsigned char) 0xBD, packed[0]); +} + +void BitopsTest::testCompare() +{ + const unsigned char first[] = { + // 10100001|10000011|11110000|10110101 + 0xA1, 0x83, 0xF0, 0xB5 + }; + unsigned char second[sizeof(first)]; + std::memcpy(second, first, sizeof(first)); + + // 1: 10100001|10000011|11110000|10110101 + // 2: 10100001|10000011|11110000|10110101 + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 0, 32) == 0); + + // 1: 10100001|10000011|11110000|10110101 + // 2: 10100001|10000011|11110000|10110100 + // ^ + redhawk::bitops::setbit(second, 31, 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 0, 32) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 0, first, 0, 32) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 0, 31) == 0); + + // 1: 10100001|10000011|11110000|10110101 + // 2: 10100001|10000011|10110000|10110100 + // ^ ^ + redhawk::bitops::setbit(second, 17, 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 0, 31) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 0, first, 0, 31) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 0, 17) == 0); + + // 1: 10100001|10000011|11110000|10110101 + // 2: 00100001|10000011|10110000|10110100 + // ^ ^ ^ + redhawk::bitops::setbit(second, 0, 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 0, 17) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 0, first, 0, 17) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 1, 16) == 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 18, second, 18, 13) == 0); + + // 1: 10100001|10000011|11110000|10110101 + // 2: 00100001|10100011|10110000|10110100 + // ^ ^ ^ ^ + redhawk::bitops::setbit(second, 10, 1); + 
CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 1, 10) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 1, first, 1, 10) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 1, 9) == 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 11, second, 11, 4) == 0); + + // 1: 10100001|10000011|11110000|10110101 + // 2: 00100011|10100011|10110000|10110100 + // ^ ^ ^ ^ ^ + redhawk::bitops::setbit(second, 6, 1); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 1, 6) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 1, first, 1, 6) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 1, 5) == 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 7, second, 7, 3) == 0); +} + +void BitopsTest::testCompareUnaligned() +{ + const unsigned char first[] = { + // 01011101|10100011|11101001|01111011 + 0x5D, 0xA3, 0xE9, 0x7B + }; + unsigned char second[] = { + // rotate right 3 + // 01101011|10110100|01111101|00101111 + 0x6B, 0xB4, 0x7D, 0x2F + }; + + // 01011101|10100011|11101001|01111011 + // 01101011|10110100|01111101|00101111 + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 0, 32) < 0); + + // Offset by 3 to re-align + // 01011 101|10100 011|11101 001|01111(011) + // (011)01011|101 10100|011 11101|001 01111 + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 3, 29) == 0); + + // 01011 101|10100 011|11101 001|01111(011) + // (011)01011|101 10100|011 11101|001 01110 + // ^ + redhawk::bitops::setbit(second, 31, 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 3, 29) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 3, first, 0, 29) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 3, 28) == 0); + + // 01011 101|10100 011|11101 001|01111(011) + // (011)01011|101 10100|011 10101|001 01110 + // ^ ^ + redhawk::bitops::setbit(second, 20, 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 3, 28) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 3, first, 0, 
28) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 3, 17) == 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 18, second, 21, 10) == 0); + + // 01011 101|10100 011|11101 001|01111(011) + // (011)11011|101 10100|011 10101|001 01110 + // ^ ^ ^ + redhawk::bitops::setbit(second, 3, 1); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 0, second, 3, 17) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 3, first, 0, 17) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 4, 17) != 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 4, 16) == 0); + + // 01011 101|10100 011|11101 001|01111(011) + // (011)11011|101 10000|011 10101|001 01110 + // ^ ^ ^ ^ + redhawk::bitops::setbit(second, 13, 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 4, 16) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 4, first, 1, 16) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 4, 10) != 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 4, 9) == 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 11, second, 14, 6) == 0); + + // 01011 101|10100 011|11101 001|01111(011) + // (011)11001|101 10000|011 10101|001 01110 + // ^ ^ ^ ^ ^ + redhawk::bitops::setbit(second, 6, 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 4, 9) > 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(second, 4, first, 1, 9) < 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 4, 3) != 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 1, second, 4, 2) == 0); + CPPUNIT_ASSERT(redhawk::bitops::compare(first, 4, second, 7, 6) == 0); +} + +void BitopsTest::testHammingDistance() +{ + const unsigned char first[] = { + 0x71, 0x0A, 0x68, 0xF2, // 01110001|00001010|01101000|11110010 + }; + unsigned char second[sizeof(first)]; + std::memcpy(second, first, sizeof(first)); + + // 01110001|00001010|01101000|11110010 + // 01110001|00001010|01101000|11110010 + CPPUNIT_ASSERT_EQUAL(0, 
redhawk::bitops::hammingDistance(first, 0, second, 0, 32)); + + // 01110001|00001010|01101000|11110010 + // 11110001|00001010|01101000|11110010 + // ^ + // 1 |---------------------------------| + // 2 |--------------------------------| + redhawk::bitops::setbit(second, 0, 1); + CPPUNIT_ASSERT_EQUAL(1, redhawk::bitops::hammingDistance(first, 0, second, 0, 32)); + CPPUNIT_ASSERT_EQUAL(0, redhawk::bitops::hammingDistance(first, 1, second, 1, 31)); + + // 01110001|00001010|01101000|11110010 + // 11110001|01001110|01101000|11110010 + // ^ ^ ^ + // 1 |---------------------------------| + // 2 |--------------------------------| + // 3 |-------| + redhawk::bitops::setbit(second, 9, 1); + redhawk::bitops::setbit(second, 13, 1); + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::hammingDistance(first, 0, second, 0, 32)); + CPPUNIT_ASSERT_EQUAL(2, redhawk::bitops::hammingDistance(first, 1, second, 1, 31)); + CPPUNIT_ASSERT_EQUAL(0, redhawk::bitops::hammingDistance(first, 1, second, 1, 8)); + + // 01110001|00011010|01101000|11110010 + // 11110001|01011110|01001001|11110010 + // ^ ^ ^ ^ ^ ^ + // 1 |---------------------------------| + // 2 |--------------------| + redhawk::bitops::setbit(second, 11, 1); + redhawk::bitops::setbit(second, 18, 0); + redhawk::bitops::setbit(second, 23, 1); + CPPUNIT_ASSERT_EQUAL(6, redhawk::bitops::hammingDistance(first, 0, second, 0, 32)); + CPPUNIT_ASSERT_EQUAL(4, redhawk::bitops::hammingDistance(first, 3, second, 3, 20)); + + // 01110001|00011010|01101000|11110010 + // 11110011|01011110|01001001|00010010 + // ^ ^ ^ ^ ^ ^ ^ ^^^ + // 1 |---------------------------------| + // 2 |--------------------| + // 2 |---| + redhawk::bitops::setbit(second, 6, 1); + redhawk::bitops::setint(second, 24, 0, 3); + CPPUNIT_ASSERT_EQUAL(10, redhawk::bitops::hammingDistance(first, 0, second, 0, 32)); + CPPUNIT_ASSERT_EQUAL(7, redhawk::bitops::hammingDistance(first, 7, second, 7, 19)); + CPPUNIT_ASSERT_EQUAL(4, redhawk::bitops::hammingDistance(first, 23, second, 23, 4)); + 
+ // 01110001|00011010|01101000|11110010 + // 11110011|11100101|01001001|00010010 + // ^ ^ ^^^^^^^^ ^ ^ ^^^ + // 1 |---------------------------------| + // 2 |-------------| + // 3 |------| + second[1] = ~first[1]; + CPPUNIT_ASSERT_EQUAL(15, redhawk::bitops::hammingDistance(first, 0, second, 0, 32)); + CPPUNIT_ASSERT_EQUAL(10, redhawk::bitops::hammingDistance(first, 6, second, 6, 13)); + CPPUNIT_ASSERT_EQUAL(8, redhawk::bitops::hammingDistance(first, 8, second, 8, 8)); + + // Sub-byte tests + // 01110001 + // 11100010 + // ^ ^ ^^ + // 1 |-----| + // 2 |----| + // 3 |-----| + redhawk::bitops::setbit(second, 3, 0); + redhawk::bitops::setbit(second, 7, 0); + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::hammingDistance(first, 0, second, 0, 7)); + CPPUNIT_ASSERT_EQUAL(2, redhawk::bitops::hammingDistance(first, 1, second, 1, 6)); + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::hammingDistance(first, 1, second, 1, 7)); +} + +void BitopsTest::testHammingDistanceUnaligned() +{ + const unsigned char first[] = { + // 01011101|10100011|11101001|01111011 + 0x5D, 0xA3, 0xE9, 0x7B + }; + unsigned char second[] = { + // rotate right 5 + // 11011010|11101101|00011111|01001011 + 0xDA, 0xED, 0x1F, 0x4B + }; + + // 010 11101|101 00011|111 01001|011(11011) + // (11011)010|11101 101|00011 111|01001 011 + CPPUNIT_ASSERT_EQUAL(0, redhawk::bitops::hammingDistance(first, 0, second, 5, 27)); + + // 010 11101|101 00011|111 01001|011(11011) + // (11011)010|11101 101|00011 111|01001 010 + // ^ + // 1 |-------------------------------| + // 2 |------------------------------| + redhawk::bitops::setbit(second, 31, 0); + CPPUNIT_ASSERT_EQUAL(1, redhawk::bitops::hammingDistance(first, 0, second, 5, 27)); + CPPUNIT_ASSERT_EQUAL(0, redhawk::bitops::hammingDistance(first, 0, second, 5, 26)); + + // 010 11101|101 00011|111 01001|011(11011) + // (11011)110|11101 101|00011 111|01001 010 + // ^ ^ + // 1 |-------------------------------| + // 2 |-----------------------------| + redhawk::bitops::setbit(second, 5, 1); 
+ CPPUNIT_ASSERT_EQUAL(2, redhawk::bitops::hammingDistance(first, 0, second, 5, 27)); + CPPUNIT_ASSERT_EQUAL(0, redhawk::bitops::hammingDistance(first, 1, second, 6, 25)); + + // 010 11101|101 00011|111 01001|011(11011) + // (11011)110|11101 111|01011 111|01001 010 + // ^ ^ ^ ^ + // 1 |-------------------------------| + // 2 |-----------------------------| + // 3 |-------------| + // 4 |--------------| + redhawk::bitops::setbit(second, 14, 1); + redhawk::bitops::setbit(second, 17, 1); + CPPUNIT_ASSERT_EQUAL(4, redhawk::bitops::hammingDistance(first, 0, second, 5, 27)); + CPPUNIT_ASSERT_EQUAL(2, redhawk::bitops::hammingDistance(first, 1, second, 6, 25)); + CPPUNIT_ASSERT_EQUAL(2, redhawk::bitops::hammingDistance(first, 0, second, 5, 12)); + CPPUNIT_ASSERT_EQUAL(0, redhawk::bitops::hammingDistance(first, 13, second, 18, 13)); + + // 010 11101|101 00011|111 01001|011(11011) + // (11011)110|00001 111|01011 111|01001 010 + // ^ ^^^ ^ ^ ^ + // 1 |-------------------------------| + // 2 |----| + // 3 |-----| + // 4 |-| + redhawk::bitops::setint(second, 8, 0, 3); + // Full string + CPPUNIT_ASSERT_EQUAL(7, redhawk::bitops::hammingDistance(first, 0, second, 5, 27)); + // Middle bits from 1st byte of first, split 1st/2nd byte of second + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::hammingDistance(first, 2, second, 7, 5)); + // Split 1st/2nd byte of first, middle bits from 2nd byte of second + CPPUNIT_ASSERT_EQUAL(3, redhawk::bitops::hammingDistance(first, 4, second, 9, 6)); + // Left bits from 1st byte of first, right bits from 1st byte of second + CPPUNIT_ASSERT_EQUAL(1, redhawk::bitops::hammingDistance(first, 0, second, 5, 3)); +} + +void BitopsTest::testCopyAligned() +{ + const unsigned char src[] = { + // 00101001|10110111|10010110 + 0x29, 0xB7, 0x96, 0xA8 + }; + unsigned char dest[sizeof(src)]; + + // Single byte copy + dest[0] = 0; + redhawk::bitops::copy(dest, 0, src, 0, 8); + CPPUNIT_ASSERT_EQUAL((int) src[0], (int) dest[0]); + + // Left bits in single byte + // src: 
(00101)001 + // dest: (11100)110 + // = (00101)110 + dest[0] = 0xE6; + redhawk::bitops::copy(dest, 0, src, 0, 5); + CPPUNIT_ASSERT_EQUAL(0x2E,(int) dest[0]); + + // Right bits in single byte + // src: 0010(1001) + // dest: 1110(0110) + // = 1110(1001) + dest[0] = 0xE6; + redhawk::bitops::copy(dest, 4, src, 4, 4); + CPPUNIT_ASSERT_EQUAL(0xE9, (int) dest[0]); + + // Middle bits in single byte + // src: 001(010)01 + // dest: 101(111)10 + // = 101(010)10 + dest[0] = 0xBE; + redhawk::bitops::copy(dest, 3, src, 3, 3); + CPPUNIT_ASSERT_EQUAL(0xAA, (int) dest[0]); + + // Split across 2 bytes: right, full byte + // src: 0010100(1|10110111) + // dest: 1001100(0|10011000) + // = 1001100(1|10110111) + dest[0] = dest[1] = 0x98; + redhawk::bitops::copy(dest, 7, src, 7, 9); + CPPUNIT_ASSERT_EQUAL(0x99, (int) dest[0]); + CPPUNIT_ASSERT_EQUAL((int) src[1], (int) dest[1]); + + // Split across 2 bytes: full byte, left + // src: (00101001|101101)11 + // dest: (10011000|100110)00 + // = (00101001|101101)00 + dest[0] = dest[1] = 0x98; + redhawk::bitops::copy(dest, 0, src, 0, 14); + CPPUNIT_ASSERT_EQUAL((int) src[0], (int) dest[0]); + CPPUNIT_ASSERT_EQUAL(0xB4, (int) dest[1]); + + // Split across 2 bytes: right, left + // src: 00101(001|1)0110111 + // dest: 01011(100|0)1011100 + // = 01011(001|1)1011100 + dest[0] = dest[1] = 0x5C; + redhawk::bitops::copy(dest, 5, src, 5, 4); + CPPUNIT_ASSERT_EQUAL(0x59, (int) dest[0]); + CPPUNIT_ASSERT_EQUAL(0xDC, (int) dest[1]); + + // Split across 3 bytes: right, full byte, left + // src: 00(101001|10110111|100)10110 + // dest: 11(111111|11111111|111)11111 + // = 11(101001|10110111|100)11111 + memset(dest, 0xFF, sizeof(dest)); + redhawk::bitops::copy(dest, 2, src, 2, 17); + CPPUNIT_ASSERT_EQUAL(0xE9, (int) dest[0]); + CPPUNIT_ASSERT_EQUAL((int) src[1], (int) dest[1]); + CPPUNIT_ASSERT_EQUAL(0x9F, (int) dest[2]); +} + +void BitopsTest::testCopyUnaligned() +{ + const unsigned char src[] = { + // 00101001|10110111|00001110 + 0x29, 0xB7, 0x0E + }; + 
unsigned char dest[] = { + // 10001111|01100100|10100101 + 0x8F, 0x64, 0xA5 + }; + + // Middle bits in both + // src: 00(1010)01 + // dest: 1(0001)111 + // = 1(1010)111 + redhawk::bitops::copy(dest, 1, src, 2, 4); + CPPUNIT_ASSERT_EQUAL(0xD7, (int) dest[0]); + CPPUNIT_ASSERT_EQUAL(0x64, (int) dest[1]); + CPPUNIT_ASSERT_EQUAL(0xA5, (int) dest[2]); + + // Middle bits in src, split in dest + // src: 0(010 100)1 + // dest: 11010(111|011)00100 + // = 11010(010|100)00100 + redhawk::bitops::copy(dest, 5, src, 1, 6); + CPPUNIT_ASSERT_EQUAL(0xD2, (int) dest[0]); + CPPUNIT_ASSERT_EQUAL(0x84, (int) dest[1]); + CPPUNIT_ASSERT_EQUAL(0xA5, (int) dest[2]); + + // Split in src, middle bits in dest + // src: 0010(1001|1)0110111 + // dest: 11(0100 1)0 + // = 11(1001 1)0 + redhawk::bitops::copy(dest, 2, src, 4, 5); + CPPUNIT_ASSERT_EQUAL(0xE6, (int) dest[0]); + CPPUNIT_ASSERT_EQUAL(0x84, (int) dest[1]); + CPPUNIT_ASSERT_EQUAL(0xA5, (int) dest[2]); + + // src: 00(1010 01|101101)11 + // dest: 1110(0110|10 000100) + // = 1110(1010|01 101101) + redhawk::bitops::copy(dest, 4, src, 2, 12); + CPPUNIT_ASSERT_EQUAL(0xEA, (int) dest[0]); + CPPUNIT_ASSERT_EQUAL(0x6D, (int) dest[1]); + CPPUNIT_ASSERT_EQUAL(0xA5, (int) dest[2]); + + // src: 001(01 001|10110 111|0)0001110 + // dest: 110110(10|011 01101|101 0)0101 + // = 110110(01|001 10110|111 0)0101 + redhawk::bitops::copy(dest, 6, src, 3, 14); + CPPUNIT_ASSERT_EQUAL(0xE9, (int) dest[0]); + CPPUNIT_ASSERT_EQUAL(0x36, (int) dest[1]); + CPPUNIT_ASSERT_EQUAL(0xE5, (int) dest[2]); +} + +void BitopsTest::testCopyLarge() +{ + // Use a source vector with a predictable bit pattern (01010101...) 
+ std::vector src; + src.resize(253); + std::fill(src.begin(), src.end(), 0x55); + std::vector dest; + dest.resize(src.size()); + const size_t bits = src.size() * 8; + + // Aligned bytewise copy + std::fill(dest.begin(), dest.end(), 0); + redhawk::bitops::copy(&dest[0], 0, &src[0], 0, bits); + CPPUNIT_ASSERT_ARRAYS_EQUAL(&src[0], &dest[0], src.size()); + + // Aligned copy, 4-bit offset + std::fill(dest.begin(), dest.end(), 0xAA); + redhawk::bitops::copy(&dest[0], 4, &src[0], 4, bits-8); + CPPUNIT_ASSERT_EQUAL(0xA5, (int) dest[0]); + CPPUNIT_ASSERT_ARRAYS_EQUAL(&src[1], &dest[1], src.size() - 2); + CPPUNIT_ASSERT_EQUAL(0x5A, (int) dest[dest.size()-1]); + + // Unaligned copy, shifting by 3 bits turns 01010101 into 10101010 + // dest: 01000001 + // first: 01(101010) + // last: (10101)001 + std::fill(dest.begin(), dest.end(), 0x41); + redhawk::bitops::copy(&dest[0], 2, &src[0], 5, bits-5); + CPPUNIT_ASSERT_EQUAL(0x6A, (int) dest[0]); + for (size_t pos = 1; pos < (dest.size() - 1); ++pos) { + CPPUNIT_ASSERT_EQUAL(0xAA, (int) dest[pos]); + } + CPPUNIT_ASSERT_EQUAL(0xA9, (int) dest[dest.size()-1]); +} + +void BitopsTest::testFind() +{ + // String: 10101010... + const size_t string_bits = 253; + std::vector buf; + buf.resize((string_bits + 7) / 8); + std::fill(buf.begin(), buf.end(), 0xAA); + unsigned char* string = &buf[0]; + + // Pattern: 10110011|0000111x + const size_t pattern_bits = 15; + const unsigned char pattern[] = { 0xB3, 0x0E }; + + // Pre-condition: the pattern should not match the string as-is + CPPUNIT_ASSERT_EQUAL(-1, redhawk::bitops::find(string, 0, string_bits, pattern, 0, pattern_bits, 0)); + + // Copy the pattern into the string four times, with different numbers + // of errors; the higher error counts go first, so that stricter max + // distance values will ignore them. 
+ const int three_errors = 16; + redhawk::bitops::copy(string, three_errors, pattern, 0, pattern_bits); + _flipBit(string, three_errors + 2); + _flipBit(string, three_errors + 8); + _flipBit(string, three_errors + 11); + + const int two_errors = 59; + redhawk::bitops::copy(string, two_errors, pattern, 0, pattern_bits); + _flipBit(string, two_errors + 4); + _flipBit(string, two_errors + 5); + + const int one_error = 126; + redhawk::bitops::copy(string, one_error, pattern, 0, pattern_bits); + _flipBit(string, one_error + 14); + + const int exact = 200; + redhawk::bitops::copy(string, exact, pattern, 0, pattern_bits); + + // Increasing tolerances should find the earlier occurrences + CPPUNIT_ASSERT_EQUAL(exact, redhawk::bitops::find(string, 0, string_bits, pattern, 0, pattern_bits, 0)); + CPPUNIT_ASSERT_EQUAL(one_error, redhawk::bitops::find(string, 0, string_bits, pattern, 0, pattern_bits, 1)); + CPPUNIT_ASSERT_EQUAL(two_errors, redhawk::bitops::find(string, 0, string_bits, pattern, 0, pattern_bits, 2)); + CPPUNIT_ASSERT_EQUAL(three_errors, redhawk::bitops::find(string, 0, string_bits, pattern, 0, pattern_bits, 3)); + + // Exclude the exact match at the end + CPPUNIT_ASSERT_EQUAL(-1, redhawk::bitops::find(string, 0, exact, pattern, 0, pattern_bits, 0)); + + // Exclude the three-error match at the beginning + CPPUNIT_ASSERT_EQUAL(two_errors, redhawk::bitops::find(string, three_errors + pattern_bits, string_bits, + pattern, 0, pattern_bits, 3)); +} + +void BitopsTest::testTakeSkip() +{ + // Use a non byte-aligned starting offset and a repeating pattern of an + // irregular length, where the discarded part is disjoint + // 10000100001/1001 = 0x4219 + size_t patt_len = 15; + const size_t src_start = 5; + size_t src_bits = 5 * patt_len; + std::vector src((src_start + src_bits + 7) / 8); + for (size_t pos = src_start; pos < (src_start+src_bits); pos += patt_len) { + redhawk::bitops::setint(&src[0], pos, 0x4219, patt_len); + } + + // Take 11 and skip 4 for the extent of the 
input string, using a different + // non byte-aligned starting offset + const size_t take_len = 11; + const size_t dest_start = 3; + const size_t dest_bits = 5 * take_len; + + // Allocate memory and intialize with alternating 0/1 bit pattern + std::vector dest((dest_start + dest_bits +7) / 8, 0x55); + + // Ensure that the take/skip copies the correct number of bits, and that + // the destination now contains the first 11 bits of the pattern repeated + size_t count = redhawk::bitops::takeskip(&dest[0], dest_start, &src[0], src_start, src_bits, take_len, 4); + CPPUNIT_ASSERT_EQUAL(dest_bits, count); + for (size_t pos = dest_start; pos < (dest_start+dest_bits); pos += take_len) { + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x421, redhawk::bitops::getint(&dest[0], pos, take_len)); + } + + // Make sure it didn't disturb any existing values + // front: 010xxxxx + // back: xx010101 + CPPUNIT_ASSERT_EQUAL((uint64_t) 2, redhawk::bitops::getint(&dest[0], 0, dest_start)); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x15, redhawk::bitops::getint(&dest[0], dest_start+dest_bits, 6)); + + // Use a source length that truncates the last take and verify that only + // bits up to that point are taken + // src = (10000100001100110000)10000 + // (10000xxx00110xxx0000) = 10000001100000 + // dest = (01010101010101)01 + // (10000001100000)xx + // 10000001100000 01 = 0x8181 + std::fill(dest.begin(), dest.end(), 0x55); + count = redhawk::bitops::takeskip(&dest[0], 0, &src[0], src_start, 20, 5, 3); + CPPUNIT_ASSERT_EQUAL((size_t) 14, count); + CPPUNIT_ASSERT_EQUAL((uint64_t) 0x8181, redhawk::bitops::getint(&dest[0], 0, 16)); +} + +void BitopsTest::_flipBit(unsigned char* buffer, size_t offset) +{ + redhawk::bitops::setbit(buffer, offset, !(redhawk::bitops::getbit(buffer, offset))); +} + +CPPUNIT_TEST_SUITE_REGISTRATION(BitopsTest); diff --git a/redhawk/src/testing/cpp/BitopsTest.h b/redhawk/src/testing/cpp/BitopsTest.h new file mode 100644 index 000000000..8d1dffe0e --- /dev/null +++ 
b/redhawk/src/testing/cpp/BitopsTest.h @@ -0,0 +1,113 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef BITOPSTEST_H +#define BITOPSTEST_H + +#include "CFTest.h" + +class BitopsTest : public CppUnit::TestFixture { + + CPPUNIT_TEST_SUITE(BitopsTest); + CPPUNIT_TEST(testGetBit); + CPPUNIT_TEST(testSetBit); + CPPUNIT_TEST(testGetInt); + CPPUNIT_TEST(testGetIntUnaligned); + CPPUNIT_TEST(testGetIntUnalignedSmall); + CPPUNIT_TEST(testSetInt); + CPPUNIT_TEST(testSetIntUnaligned); + CPPUNIT_TEST(testSetIntUnalignedSmall); + CPPUNIT_TEST(testFill); + CPPUNIT_TEST(testPack); + CPPUNIT_TEST(testPackSmall); + CPPUNIT_TEST(testPackUnaligned); + CPPUNIT_TEST(testPackUnalignedSmall); + CPPUNIT_TEST(testUnpack); + CPPUNIT_TEST(testUnpackSmall); + CPPUNIT_TEST(testUnpackUnaligned); + CPPUNIT_TEST(testUnpackUnalignedSmall); + CPPUNIT_TEST(testPopcount); + CPPUNIT_TEST(testPopcountUnaligned); + CPPUNIT_TEST(testToString); + CPPUNIT_TEST(testParseString); + CPPUNIT_TEST(testParseStringError); + CPPUNIT_TEST(testCompare); + CPPUNIT_TEST(testCompareUnaligned); + CPPUNIT_TEST(testHammingDistance); + CPPUNIT_TEST(testHammingDistanceUnaligned); + CPPUNIT_TEST(testCopyAligned); + 
CPPUNIT_TEST(testCopyUnaligned); + CPPUNIT_TEST(testCopyLarge); + CPPUNIT_TEST(testFind); + CPPUNIT_TEST(testTakeSkip); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testGetBit(); + void testSetBit(); + + void testGetInt(); + void testGetIntUnaligned(); + void testGetIntUnalignedSmall(); + + void testSetInt(); + void testSetIntUnaligned(); + void testSetIntUnalignedSmall(); + + void testFill(); + + void testPack(); + void testPackSmall(); + void testPackUnaligned(); + void testPackUnalignedSmall(); + + void testUnpack(); + void testUnpackSmall(); + void testUnpackUnaligned(); + void testUnpackUnalignedSmall(); + + void testPopcount(); + void testPopcountUnaligned(); + + void testToString(); + void testParseString(); + void testParseStringError(); + + void testCompare(); + void testCompareUnaligned(); + + void testHammingDistance(); + void testHammingDistanceUnaligned(); + + void testCopyAligned(); + void testCopyUnaligned(); + void testCopyLarge(); + + void testFind(); + void testTakeSkip(); + +private: + void _flipBit(unsigned char* buffer, size_t offset); +}; + +#endif // BITOPSTEST_H diff --git a/redhawk/src/testing/cpp/BufferManagerTest.cpp b/redhawk/src/testing/cpp/BufferManagerTest.cpp new file mode 100644 index 000000000..d6574d6bb --- /dev/null +++ b/redhawk/src/testing/cpp/BufferManagerTest.cpp @@ -0,0 +1,444 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "BufferManagerTest.h" +#include + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(BufferManagerTest); + +void BufferManagerTest::setUp() +{ + _manager = &redhawk::BufferManager::Instance(); + + // (Re-)enable the buffer manager + _manager->enable(true); + + // Reset cache policies + _manager->setMaxThreadBytes(-1); + _manager->setMaxThreadBlocks(-1); + _manager->setMaxThreadAge(-1); +} + +void BufferManagerTest::tearDown() +{ + // Clean up all allocations from the test + std::for_each(_allocations.begin(), _allocations.end(), &redhawk::BufferManager::Deallocate); + _allocations.clear(); + + // Disable the buffer manager to return all memory to the operating system + _manager->enable(false); +} + +void BufferManagerTest::testBasicAllocate() +{ + // Make sure a simple allocation succeeds, then return it to the cache + const size_t SMALL_BYTES = 1000; + void* buffer = _allocate(SMALL_BYTES); + CPPUNIT_ASSERT(buffer != 0); + _deallocate(buffer); + + // Allocate a different-sized buffer that cannot re-use the prior buffer + const size_t LARGE_BYTES = 128*1024; + void* large_buffer = _allocate(LARGE_BYTES); + CPPUNIT_ASSERT(large_buffer != 0); + CPPUNIT_ASSERT(buffer != large_buffer); + + // Allocate the original size and check that it returned the same buffer + void* buffer2 = _allocate(SMALL_BYTES); + CPPUNIT_ASSERT_EQUAL(buffer, buffer2); + + // Release the large buffer, and allocate another block of the smaller size + _deallocate(large_buffer); + buffer2 = _allocate(SMALL_BYTES); + CPPUNIT_ASSERT(buffer2 != 0); + CPPUNIT_ASSERT(buffer2 != buffer); + 
CPPUNIT_ASSERT(buffer2 != large_buffer); +} + +void BufferManagerTest::testAllocator() +{ + typedef std::vector > FloatVec; + + // Create a 1K-element vector; the allocation had better succeed + FloatVec vec; + vec.resize(1024); + float* buffer = vec.data(); + CPPUNIT_ASSERT(buffer != 0); + + // Resize vector up enough that it gets a different allocation + const size_t ELEMENT_COUNT = 65536; + vec.resize(ELEMENT_COUNT); + float* large_buffer = vec.data(); + CPPUNIT_ASSERT(large_buffer != buffer); + + // Clear the vector's buffer + size_t blocks_pre = _manager->getStatistics().blocks; + { + // Swapping with a new, empty vector is the most reliable way to reset + // the vector's internal buffer; when the temporary gets destroyed at + // the end of the scope, it should deallocate the buffer + FloatVec tmp; + vec.swap(tmp); + } + CPPUNIT_ASSERT(vec.data() != large_buffer); + size_t blocks_post = _manager->getStatistics().blocks; + CPPUNIT_ASSERT_EQUAL(blocks_pre + 1, blocks_post); + + // Resize back up to the last used size and check that we got the same + // buffer back + vec.resize(ELEMENT_COUNT); + CPPUNIT_ASSERT_EQUAL(large_buffer, vec.data()); +} + +void BufferManagerTest::testEnable() +{ + // Start enabled and check that it reports true + _manager->enable(true); + CPPUNIT_ASSERT(_manager->isEnabled()); + + // Disable the buffer manager and check that it reports false + _manager->enable(false); + CPPUNIT_ASSERT_EQUAL(false, _manager->isEnabled()); + + // The cache(s) should be empty + redhawk::BufferManager::Statistics stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL((size_t) 0, stats.bytes); + CPPUNIT_ASSERT_EQUAL((size_t) 0, stats.blocks); + + // Allocate and deallocate some buffers; the cache(s) should still be empty + _fillCache(16, 8192); + stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL((size_t) 0, stats.bytes); + CPPUNIT_ASSERT_EQUAL((size_t) 0, stats.blocks); + + // Re-enable and allocate more buffers + _manager->enable(true); + 
_fillCache(16, 8192); + stats = _manager->getStatistics(); + CPPUNIT_ASSERT(stats.bytes >= (16*8192)); + CPPUNIT_ASSERT_EQUAL((size_t) 16, stats.blocks); + + // Disable the buffer manager again; it should purge the caches + _manager->enable(false); + stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL((size_t) 0, stats.bytes); + CPPUNIT_ASSERT_EQUAL((size_t) 0, stats.blocks); +} + +void BufferManagerTest::testStatistics() +{ + // The cache should be clear (tearDown is supposed to clear it for us) + redhawk::BufferManager::Statistics pre_stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL((size_t) 0, pre_stats.blocks); + CPPUNIT_ASSERT_EQUAL((size_t) 0, pre_stats.bytes); + + // Allocate in increasing sizes; each one should be a cache miss + size_t total_blocks = 8; + size_t total_bytes = 0; + size_t current_size = 1024; + for (size_t ii = 0; ii < total_blocks; ++ii) { + _deallocate(_allocate(current_size)); + total_bytes += current_size; + current_size <<= 1; + } + redhawk::BufferManager::Statistics post_stats = _manager->getStatistics(); + size_t hits = post_stats.hits - pre_stats.hits; + size_t misses = post_stats.misses - pre_stats.misses; + CPPUNIT_ASSERT_EQUAL((size_t) 0, hits); + CPPUNIT_ASSERT_EQUAL((size_t) total_blocks, misses); + CPPUNIT_ASSERT(post_stats.blocks >= total_blocks); + CPPUNIT_ASSERT(post_stats.bytes >= total_bytes); + + // Allocate a few blocks of sizes that should be in the cache; they should + // all be hits + current_size = 2048; + for (size_t ii = 0; ii < 4; ++ii) { + pre_stats = post_stats; + _deallocate(_allocate(current_size)); + current_size <<= 2; + post_stats = _manager->getStatistics(); + hits = post_stats.hits - pre_stats.hits; + misses = post_stats.misses - pre_stats.misses; + CPPUNIT_ASSERT_EQUAL((size_t) 1, hits); + CPPUNIT_ASSERT_EQUAL((size_t) 0, misses); + } + + // Exceed the high water mark + pre_stats = post_stats; + size_t gap = std::min(pre_stats.highBytes - pre_stats.bytes, (size_t) 1024); + size_t count = 
(gap / 1024) + 2; + _fillCache(count, 1024); + post_stats = _manager->getStatistics(); + CPPUNIT_ASSERT(post_stats.highBytes > pre_stats.highBytes); +} + +void BufferManagerTest::testThreading() +{ + // Pre-cache a buffer on the current thread + _fillCache(1, 1024); + redhawk::BufferManager::Statistics pre_stats = _manager->getStatistics(); + + // Allocate a buffer on the executor service's thread + redhawk::ExecutorService service; + service.start(); + boost::packaged_task task(boost::bind(&BufferManagerTest::_allocate, this, 1024)); + boost::unique_future future = task.get_future(); + service.execute(boost::ref(task)); + void* buffer = future.get(); + + // It should be a new buffer, not one from the cache; also, a new cache + // should have been created + redhawk::BufferManager::Statistics post_stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL(pre_stats.caches + 1, post_stats.caches); + CPPUNIT_ASSERT_EQUAL(pre_stats.blocks, post_stats.blocks); + CPPUNIT_ASSERT_EQUAL(pre_stats.bytes, post_stats.bytes); + + // Free the buffers allocated on the other thread; it should go back into + // the executor thread's cache + _deallocate(buffer); + post_stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL(pre_stats.blocks + 1, post_stats.blocks); + size_t delta = post_stats.bytes - pre_stats.bytes; + CPPUNIT_ASSERT(delta >= 1024); + + // Allocate two buffers on the current thread; only one of them should come + // from the cache--the other buffer is in the executor thread's cache + CPPUNIT_ASSERT_EQUAL((size_t) 2, post_stats.blocks); + pre_stats = post_stats; + void* buffer1 = _allocate(1024); + void* buffer2 = _allocate(1024); + post_stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL(pre_stats.blocks - 1, post_stats.blocks); + + // Deallocate both buffers on the executor thread + pre_stats = post_stats; + boost::packaged_task deallocate1(boost::bind(&BufferManagerTest::_deallocate, this, buffer1)); + boost::packaged_task 
deallocate2(boost::bind(&BufferManagerTest::_deallocate, this, buffer2)); + service.execute(boost::ref(deallocate1)); + service.execute(boost::ref(deallocate2)); + deallocate1.get_future().wait(); + deallocate2.get_future().wait(); + + // Make sure that the buffers returned to the cache + post_stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL(pre_stats.blocks + 2, post_stats.blocks); + CPPUNIT_ASSERT(post_stats.bytes > pre_stats.bytes); + + // End the executable service's thread, which should free that cache; only + // the buffer that was *allocated* on the thread should be freed + pre_stats = post_stats; + service.stop(); + post_stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL(pre_stats.caches - 1, post_stats.caches); + CPPUNIT_ASSERT_EQUAL(pre_stats.blocks - 1, post_stats.blocks); + CPPUNIT_ASSERT(pre_stats.bytes > post_stats.bytes); +} + +void BufferManagerTest::testPolicyBytes() +{ + // Fill the cache with more than 64K worth of buffers + const size_t SMALL_BYTES = 8192; + const size_t SMALL_LIMIT = 65536; + _fillCache((SMALL_LIMIT / SMALL_BYTES) + 1, SMALL_BYTES); + CPPUNIT_ASSERT(_manager->getStatistics().bytes > SMALL_LIMIT); + + // Limit the cache to 64K; this should free some buffers + _manager->setMaxThreadBytes(65536); + CPPUNIT_ASSERT_EQUAL((size_t) 65536, _manager->getMaxThreadBytes()); + CPPUNIT_ASSERT(_manager->getStatistics().bytes <= _manager->getMaxThreadBytes()); + + // Allocate a couple of buffers and make sure they came from the cache + size_t blocks = _manager->getStatistics().blocks; + void* buffer1 = _allocate(SMALL_BYTES); + void* buffer2 = _allocate(SMALL_BYTES); + CPPUNIT_ASSERT(_manager->getStatistics().blocks < blocks); + + // Bring the limit down a little more; this shouldn't change the cache + // size + const size_t TEST_LIMIT = SMALL_LIMIT - (SMALL_BYTES/2); + CPPUNIT_ASSERT(_manager->getStatistics().bytes <= TEST_LIMIT); + size_t pre_bytes = _manager->getStatistics().bytes; + _manager->setMaxThreadBytes(TEST_LIMIT); 
+ size_t post_bytes = _manager->getStatistics().bytes; + CPPUNIT_ASSERT_EQUAL(pre_bytes, post_bytes); + + // Return one buffer to the cache; this should succeed and remain below the + // limit + pre_bytes = _manager->getStatistics().bytes; + _deallocate(buffer1); + post_bytes = _manager->getStatistics().bytes; + CPPUNIT_ASSERT(post_bytes > pre_bytes); + CPPUNIT_ASSERT(post_bytes <= _manager->getMaxThreadBytes()); + + // Deallocate the second; this would exceed the limit, so the cache should + // free it + pre_bytes = post_bytes; + _deallocate(buffer2); + post_bytes = _manager->getStatistics().bytes; + CPPUNIT_ASSERT_EQUAL(pre_bytes, post_bytes); + + // Increase the max bytes and try to fill past the limit; the cached bytes + // should not exceed the limit, but the should be within a buffer size + // (otherwise it would have accepted the buffer) + const size_t LARGE_LIMIT = 1024 * 1024; + _manager->setMaxThreadBytes(LARGE_LIMIT); + const size_t LARGE_BYTES = 128*1024; + _fillCache(10, LARGE_BYTES); + redhawk::BufferManager::Statistics stats = _manager->getStatistics(); + CPPUNIT_ASSERT(stats.bytes <= LARGE_LIMIT); + CPPUNIT_ASSERT((LARGE_LIMIT - stats.bytes) < LARGE_BYTES); + + // Turn off the byte policy, retaining all buffers from now on + _manager->setMaxThreadBytes(-1); + size_t pre_blocks = stats.blocks; + _fillCache(10, 65536); + stats = _manager->getStatistics(); + CPPUNIT_ASSERT(stats.bytes > LARGE_LIMIT); + CPPUNIT_ASSERT_EQUAL(pre_blocks + 10, stats.blocks); +} + +void BufferManagerTest::testPolicyBlocks() +{ + // Fill the cache with a set number of buffers + const size_t SMALL_BYTES = 8192; + size_t block_count = 12; + _fillCache(block_count, SMALL_BYTES); + CPPUNIT_ASSERT(_manager->getStatistics().blocks >= block_count); + + // Limit the cache to 8 block; this should free some buffers + block_count = 8; + _manager->setMaxThreadBlocks(block_count); + CPPUNIT_ASSERT_EQUAL(block_count, _manager->getMaxThreadBlocks()); + CPPUNIT_ASSERT_EQUAL(block_count, 
_manager->getStatistics().blocks); + + // Allocate a couple of buffers and make sure they came from the cache + void* buffer1 = _allocate(SMALL_BYTES); + void* buffer2 = _allocate(SMALL_BYTES); + CPPUNIT_ASSERT(_manager->getStatistics().blocks < block_count); + + // Bring the limit down a little more; this shouldn't change the cache + // size + block_count = 7; + size_t pre_blocks = _manager->getStatistics().blocks; + _manager->setMaxThreadBlocks(block_count); + CPPUNIT_ASSERT_EQUAL(pre_blocks, _manager->getStatistics().blocks); + + // Return one buffer to the cache; this should succeed and remain below the + // limit + _deallocate(buffer1); + size_t post_blocks = _manager->getStatistics().blocks; + CPPUNIT_ASSERT(post_blocks > pre_blocks); + CPPUNIT_ASSERT(post_blocks <= _manager->getMaxThreadBlocks()); + + // Deallocate the second; this would exceed the limit, so the cache should + // free it + pre_blocks = post_blocks; + _deallocate(buffer2); + CPPUNIT_ASSERT_EQUAL(pre_blocks, _manager->getStatistics().blocks); + + // Increase the block limit and allocate enough more buffers both hit the + // limit and flush the old buffers + block_count += 5; + _manager->setMaxThreadBlocks(block_count); + const size_t LARGE_BYTES = SMALL_BYTES * 4; + _fillCache(block_count * 2, LARGE_BYTES); + redhawk::BufferManager::Statistics stats = _manager->getStatistics(); + CPPUNIT_ASSERT_EQUAL(block_count, stats.blocks); + CPPUNIT_ASSERT(stats.bytes >= (stats.blocks * LARGE_BYTES)); + + // Turn off the block policy, retaining all buffers from now on + _manager->setMaxThreadBlocks(-1); + pre_blocks = stats.blocks; + _fillCache(10, SMALL_BYTES); + CPPUNIT_ASSERT_EQUAL(pre_blocks + 10, _manager->getStatistics().blocks); +} + +void BufferManagerTest::testPolicyAge() +{ + const size_t SMALL_BYTES = 1024; // 1K + _fillCache(10, SMALL_BYTES); + + // Set a maximum thread age less than the current number of blocks and + // check that some blocks are freed + size_t pre_blocks = 
_manager->getStatistics().blocks; + CPPUNIT_ASSERT_EQUAL((size_t) 10, pre_blocks); + _manager->setMaxThreadAge(8); + CPPUNIT_ASSERT_EQUAL((size_t) 8, _manager->getMaxThreadAge()); + CPPUNIT_ASSERT(_manager->getStatistics().blocks < pre_blocks); + + // Allocate a bunch of different-sized buffers; this should age off all of + // the smaller buffers + const size_t MEDIUM_BYTES = 65536; // 64K + for (int ii = 0; ii < 2; ++ii) { + _fillCache(5, MEDIUM_BYTES); + } + redhawk::BufferManager::Statistics stats = _manager->getStatistics(); + size_t post_blocks = stats.blocks; + CPPUNIT_ASSERT_EQUAL((size_t) 5, post_blocks); + CPPUNIT_ASSERT(stats.bytes >= (post_blocks * MEDIUM_BYTES)); + + // Cycle another larger buffer to the front just enough times that the + // oldest medium buffer is still in the cache + pre_blocks = post_blocks; + const size_t LARGE_BYTES = 1024*1024; // 1M + for (int ii = 0; ii < 4; ++ii) { + _deallocate(_allocate(LARGE_BYTES)); + } + post_blocks = _manager->getStatistics().blocks; + CPPUNIT_ASSERT(post_blocks > pre_blocks); + + // The next allocate/deallocate cycle should age off an old buffer + pre_blocks = _manager->getStatistics().blocks; + _deallocate(_allocate(LARGE_BYTES)); + post_blocks = _manager->getStatistics().blocks; + CPPUNIT_ASSERT(post_blocks < pre_blocks); + + // Turn off the age policy, retaining all buffers from now on + pre_blocks = post_blocks; + _manager->setMaxThreadAge(-1); + for (int ii = 0; ii < 1000; ++ii) { + _deallocate(_allocate(LARGE_BYTES)); + } + CPPUNIT_ASSERT_EQUAL(pre_blocks, _manager->getStatistics().blocks); +} + +void* BufferManagerTest::_allocate(size_t bytes) +{ + void* ptr = redhawk::BufferManager::Allocate(bytes); + _allocations.insert(ptr); + return ptr; +} + +void BufferManagerTest::_deallocate(void* ptr) +{ + CPPUNIT_ASSERT_MESSAGE("Deallocating unknown allocation", _allocations.erase(ptr)); + redhawk::BufferManager::Deallocate(ptr); +} + +void BufferManagerTest::_fillCache(size_t count, size_t bufferSize) 
+{ + BufferList buffers; + for (size_t blocks = 0; blocks < count; ++blocks) { + buffers.insert(_manager->allocate(bufferSize)); + } + std::for_each(buffers.begin(), buffers.end(), &redhawk::BufferManager::Deallocate); +} diff --git a/redhawk/src/testing/cpp/BufferManagerTest.h b/redhawk/src/testing/cpp/BufferManagerTest.h new file mode 100644 index 000000000..ff39508e3 --- /dev/null +++ b/redhawk/src/testing/cpp/BufferManagerTest.h @@ -0,0 +1,73 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef BUFFERMANAGERTEST_H +#define BUFFERMANAGERTEST_H + +#include + +#include "CFTest.h" + +#include + +class BufferManagerTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(BufferManagerTest); + CPPUNIT_TEST(testBasicAllocate); + CPPUNIT_TEST(testAllocator); + CPPUNIT_TEST(testEnable); + CPPUNIT_TEST(testStatistics); + CPPUNIT_TEST(testThreading); + CPPUNIT_TEST(testPolicyBytes); + CPPUNIT_TEST(testPolicyBlocks); + CPPUNIT_TEST(testPolicyAge); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testBasicAllocate(); + void testAllocator(); + + void testEnable(); + + void testStatistics(); + + void testThreading(); + + void testPolicyBytes(); + void testPolicyBlocks(); + void testPolicyAge(); + +private: + typedef std::set BufferList; + + void* _allocate(size_t bytes); + void _deallocate(void* ptr); + + void _fillCache(size_t count, size_t bufferSize); + + redhawk::BufferManager* _manager; + + BufferList _allocations; +}; + +#endif // BUFFERMANAGER_TEST_H diff --git a/redhawk/src/testing/cpp/CFTest.h b/redhawk/src/testing/cpp/CFTest.h new file mode 100644 index 000000000..0ff587b43 --- /dev/null +++ b/redhawk/src/testing/cpp/CFTest.h @@ -0,0 +1,71 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef CFTEST_H +#define CFTEST_H + +#include + +#include +#include +#include + +template +inline void checkArraysEqual(const T* expected, const T* actual, size_t count, + CppUnit::SourceLine sourceLine, const std::string& message) +{ + for (size_t pos = 0; pos < count; ++pos) { + if (expected[pos] != actual[pos]) { + std::ostringstream description; + description << "expected != actual at position " << pos; + std::string expectedStr = CppUnit::assertion_traits::toString(expected[pos]); + std::string actualStr = CppUnit::assertion_traits::toString(actual[pos]); + CppUnit::Asserter::failNotEqual(expectedStr, actualStr, sourceLine, message, description.str()); + } + } +} + +#define CPPUNIT_ASSERT_ARRAYS_EQUAL(expected,actual,length) \ + ( checkArraysEqual((expected), (actual), (length), CPPUNIT_SOURCELINE(), "") ) + +#define CPPUNIT_ASSERT_ARRAYS_EQUAL_MESSAGE(message, expected,actual,length) \ + ( checkArraysEqual((expected), (actual), (length), CPPUNIT_SOURCELINE(), (message) ) ) + +namespace CppUnit { + // Specialize assertion traits for unsigned char to always display values + // in 2-character hex. + template <> + struct assertion_traits + { + static inline bool equal(unsigned char lhs, unsigned char rhs) + { + return lhs == rhs; + } + + static inline std::string toString(unsigned char value) + { + std::ostringstream oss; + oss << "0x" << std::hex << std::uppercase << std::setw(2) << std::setfill('0') << (int)value; + return oss.str(); + } + }; +} + +#endif // CFTEST_H diff --git a/redhawk/src/testing/cpp/CallbackTest.cpp b/redhawk/src/testing/cpp/CallbackTest.cpp new file mode 100644 index 000000000..ba85c0677 --- /dev/null +++ b/redhawk/src/testing/cpp/CallbackTest.cpp @@ -0,0 +1,525 @@ +/* + * This file is protected by Copyright. 
Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "CallbackTest.h" + +#include +#include + +#include + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(CallbackTest); + +namespace { + // Simple global function and counter to test zero-argument functions + static int global_counter = 0; + static int increment() + { + return ++global_counter; + } + + // Convert a C string to uppercase in-place, returning the number of + // characters that were modified + static int upcase(char* str) + { + int count = 0; + for (char* pos = str; *pos != '\0'; ++pos) { + char ch = *pos; + if (!isupper(ch)) { + ++count; + *pos = toupper(ch); + } + } + return count; + } + + // Simple functor that returns a constant value + template + struct Constant { + Constant(T value) : + _value(value) + { + } + + T operator() () const + { + return _value; + } + + void set(T value) + { + _value = value; + } + + bool operator==(const Constant& other) const + { + return (_value == other._value); + } + + private: + T _value; + }; + + // Functor that multiplies argument by a pre-defined scale value + template + class Scale { + public: + Scale(T scale) : + _scale(scale) + { + } + + T operator() (T arg) + { + return arg * _scale; + } + private: + T _scale; 
+ }; + + // Functor that returns the average of two integers + struct Average { + float operator() (int arg1, int arg2) + { + return (arg1 + arg2) / 2.0; + } + }; + + // Object that provides some const and non-const member functions to test + // various modes of member function callbacks + class Object { + public: + Object(const std::string& name) : + _name(name), + _total(0) + { + } + + std::string name() const + { + return _name; + } + + int total() const + { + return _total; + } + + int reset() + { + int result = _total; + _total = 0; + return result; + } + + int add(int count) + { + _total += count; + return _total; + } + + int subtract(int count) + { + _total -= count; + return _total; + } + + int clamp(int min, int max) + { + _total = std::max(min, std::min(max, _total)); + return _total; + } + + private: + const std::string _name; + int _total; + }; + + // Class to help test by-reference arguments and returns + template + class ArgumentTester { + public: + ArgumentTester(R result) : + _result(result), + _arg1(0), + _arg2(0) + { + } + + R& call(A1& arg1, A2& arg2) + { + _arg1 = &arg1; + _arg2 = &arg2; + return _result; + } + + R* result() + { + return &_result; + } + + A1* arg1() + { + return _arg1; + } + + A2* arg2() + { + return _arg2; + } + + private: + R _result; + A1* _arg1; + A2* _arg2; + }; + + // Functor that takes two arguments by reference and modified them: + // - an integer, whose sign gets inverted + // - a string, that gets reversed + struct Mutator { + void operator() (int& number, std::string& message) + { + number *= -1; + std::string out; + std::copy(message.rbegin(), message.rend(), std::back_inserter(out)); + message = out; + } + }; +} + +void CallbackTest::setUp() +{ +} + +void CallbackTest::tearDown() +{ +} + +void CallbackTest::testEmpty() +{ + // Default constructor creates an empty callback + redhawk::callback func; + CPPUNIT_ASSERT(func.empty()); + + // Assign a value; should not be empty + func = &getpid; + 
CPPUNIT_ASSERT(!func.empty()); + + // Clear should reset to empty + func.clear(); + CPPUNIT_ASSERT(func.empty()); +} + +void CallbackTest::testEmptyCall() +{ + // Calling an empty callback should always throw a runtime error + + redhawk::callback func0; + CPPUNIT_ASSERT(func0.empty()); + CPPUNIT_ASSERT_THROW(func0(), std::runtime_error); + + redhawk::callback func1; + CPPUNIT_ASSERT(func1.empty()); + CPPUNIT_ASSERT_THROW(func1("abc"), std::runtime_error); + + redhawk::callback func2; + CPPUNIT_ASSERT(func2.empty()); + CPPUNIT_ASSERT_THROW(func2(0.0, 0), std::runtime_error); +} + +void CallbackTest::testBooleanOperators() +{ + // Empty callback, should evaluate to boolean false + redhawk::callback func; + CPPUNIT_ASSERT(func.empty()); + CPPUNIT_ASSERT(!func); + if (func) { + // Boolean-like conversion has to be tested in the context of an if + // statement to ensure we are testing what we expect (CPPUNIT_ASSERT + // expands to something like "if (!cond) {...}", which we're already + // testing above) + CPPUNIT_FAIL("Boolean-like conversion returned true on empty callback"); + } + + // Empty callback, should evaluate to boolean true + func = &getpid; + CPPUNIT_ASSERT(!func.empty()); + CPPUNIT_ASSERT_EQUAL(false, !func); + if (func) { + // Expected behavior + } else { + CPPUNIT_FAIL("Boolean-like conversion returned false on valid callback"); + } +} + +void CallbackTest::testFunction() +{ + // Zero-argument function: getpid() + redhawk::callback func0(&getpid); + pid_t self = func0(); + CPPUNIT_ASSERT_EQUAL(getpid(), self); + + // One-argument function: complex conjugate + typedef std::complex complex_float; + redhawk::callback func1 = &std::conj; + complex_float conj_result = func1(complex_float(1.0, 1.0)); + CPPUNIT_ASSERT_EQUAL(complex_float(1.0, -1.0), conj_result); + + // Two-argument function: power (with integer exponent) + redhawk::callback func2 = &std::pow; + double pow_result = func2(7.0, 3); + CPPUNIT_ASSERT_EQUAL(343.0, pow_result); +} + +void 
CallbackTest::testFunctionEquals() +{ + // Function pointer callbacks should only be equal if the function pointers + // are exactly the same + redhawk::callback func0 = &getuid; + CPPUNIT_ASSERT(func0 != &getpid); + CPPUNIT_ASSERT(func0 == &getuid); +} + +void CallbackTest::testFunctor() +{ + // Zero argument functor: return a pre-defined value + redhawk::callback func0 = Constant("test"); + std::string message = func0(); + CPPUNIT_ASSERT_EQUAL(std::string("test"), message); + + // One-argument functor: scale argument by a numeric factor + redhawk::callback func1 = Scale(1); + int result = func1(2); + CPPUNIT_ASSERT_EQUAL_MESSAGE("One argument functor returned incorrect result", 2, result); + + // Two-argument functor: average of two numbers + redhawk::callback func2 = Average(); + float avg = func2(7, 10); + CPPUNIT_ASSERT_EQUAL_MESSAGE("Two argument functor returned incorrect result", 8.5f, avg); +} + +void CallbackTest::testFunctorRef() +{ + // Associate a functor by reference; changes to the functor should be + // reflected in subsequent calls + Constant constant(-5); + redhawk::callback func = boost::ref(constant); + CPPUNIT_ASSERT(func == boost::ref(constant)); + short value = func(); + CPPUNIT_ASSERT_EQUAL((short) -5, value); + constant.set(10); + value = func(); + CPPUNIT_ASSERT_EQUAL((short) 10, value); +} + +void CallbackTest::testFunctorEquals() +{ + // Constant has an operator== defined, that compares the constant value + redhawk::callback func0_a = Constant("same"); + redhawk::callback func0_b = Constant("same"); + CPPUNIT_ASSERT(func0_a == func0_b); + CPPUNIT_ASSERT(func0_a != Constant("other")); + + // Scale has no operator== defined, so compare always fails + redhawk::callback func1_a = Scale(2.0); + redhawk::callback func1_b = Scale(2.0); + CPPUNIT_ASSERT(func1_a != func1_b); + + // Average has no operator==, but references compare based on whether the + // references are to the same object + Average avg; + redhawk::callback func2_a = 
boost::ref(avg); + redhawk::callback func2_b = boost::ref(avg); + CPPUNIT_ASSERT(func2_a == func2_b); + Average avg2; + CPPUNIT_ASSERT(func2_a != boost::ref(avg2)); +} + +void CallbackTest::testMemberFunction() +{ + boost::shared_ptr obj = boost::make_shared("test"); + + // Zero-argument callback; use a const pointer alias, and a const-qualified + // member function + const Object* obj_ptr = obj.get(); + redhawk::callback func0(obj_ptr, &Object::name); + CPPUNIT_ASSERT_EQUAL(obj->name(), func0()); + + // One-argument member function; check that the object is modified as + // expected + redhawk::callback func1(obj, &Object::add); + int result = func1(10); + CPPUNIT_ASSERT_EQUAL(10, result); + func1(20); + CPPUNIT_ASSERT_EQUAL(30, obj->total()); + + // Use assign to rebind the callback (using a non-const pointer instead of + // the shared pointer) + func1.assign(obj.get(), &Object::subtract); + func1(15); + CPPUNIT_ASSERT_EQUAL(15, obj->total()); + + // Use an object by value (implicitly making a copy); the function should + // work as expected, without affecting the original object (yes, this is a + // contrived example) + redhawk::callback func2(*obj, &Object::clamp); + result = func2(0, 10); + CPPUNIT_ASSERT_EQUAL(10, result); + CPPUNIT_ASSERT_EQUAL(15, obj->total()); +} + +void CallbackTest::testMemberFunctionEquals() +{ + // Member functions should only be equal if both the target object and + // function are the same + Object obj1("first"); + redhawk::callback func1(&obj1, &Object::add); + redhawk::callback func2(&obj1, &Object::add); + CPPUNIT_ASSERT(func1 == func2); + + // Same object, different function should be unequal + func2.assign(&obj1, &Object::subtract); + CPPUNIT_ASSERT_MESSAGE("Different member functions should not be equal", func1 != func2); + + // Same function, different object should be unequal + Object obj2("second"); + func2.assign(&obj2, &Object::add); + CPPUNIT_ASSERT_MESSAGE("Different target objects should not be equal", func1 != func2); 
+} + +void CallbackTest::testMixedEquals() +{ + redhawk::callback function = &std::abs; + + Object obj("member"); + redhawk::callback member(&obj, &Object::add); + + Scale scale(1); + redhawk::callback functor = scale; + redhawk::callback functor_ref = boost::ref(scale); + + // Test all possible combinations + CPPUNIT_ASSERT(function != member); + CPPUNIT_ASSERT(function != functor); + CPPUNIT_ASSERT(function != functor_ref); + CPPUNIT_ASSERT(member != functor); + CPPUNIT_ASSERT(member != functor_ref); + CPPUNIT_ASSERT(functor != functor_ref); +} + +void CallbackTest::testReferenceArguments() +{ + // Use a functor that modifies the passed-in arguments to check that the + // original arguments are modifed (as opposed to some argument value on the + // stack) + redhawk::callback functor = Mutator(); + int value = 1; + std::string name = "test text"; + functor(value, name); + CPPUNIT_ASSERT_EQUAL(-1, value); + CPPUNIT_ASSERT_EQUAL(std::string("txet tset"), name); + + // Use a member function with const arguments (by reference) to makes sure + // that they are passed unmodified (i.e., no copies) + typedef std::string R; + typedef const int A1; + typedef const Object A2; + typedef ArgumentTester TesterType; + + TesterType tester("references"); + redhawk::callback member(&tester, &TesterType::call); + + int number = 0; + Object obj("argument"); + R& result = member(number, obj); + CPPUNIT_ASSERT(&number == tester.arg1()); + CPPUNIT_ASSERT(&obj == tester.arg2()); + CPPUNIT_ASSERT(&result == tester.result()); +} + +void CallbackTest::testArgumentConversion() +{ + // One-argument member function: abuse constant value functor and its set + // function to implicitly construct a C++ string from a C string + Constant message("Test"); + redhawk::callback func1(&message, &Constant::set); + func1("Updated"); + CPPUNIT_ASSERT_EQUAL(std::string("Updated"), message()); + + // Two-argument functor: implicit conversion of arguments from double to + // int (truncation to -1, 6), plus 
implict conversion of result from float + // to int (2.5 truncates to 2) + redhawk::callback func2 = Average(); + int avg = func2(-1.25, 6.375); + CPPUNIT_ASSERT_EQUAL(2, avg); +} + +void CallbackTest::testVoidReturn() +{ + // Test to ensure that functions which return a value can be assigned to + // callbacks that return void. From a C++ implementation standpoint, there + // are template specializations to ensure that callback invoker functions + // returning void are syntactically correct--a void function can return the + // result of calling another void function, but if that function has a + // return value it is a syntax error. + + // Zero-argument function: ignore the result of increment(), defined above, + // but check that it takes effect + global_counter = 0; + redhawk::callback func0 = &increment; + func0(); + CPPUNIT_ASSERT_EQUAL(1, global_counter); + + // Zero-argument member function: ignore the result of Object::reset() but + // check it it takes effect + Object obj("void"); + obj.add(5); + func0.assign(&obj, &Object::reset); + func0(); + CPPUNIT_ASSERT_EQUAL(0, obj.total()); + + // One-argument function: ignore the result of upcase(), defined above + redhawk::callback func1 = &upcase; + char buf[64]; + sprintf(buf, "message"); + func1(buf); + CPPUNIT_ASSERT(strcmp(buf, "MESSAGE") == 0); + + // One-argument member function + redhawk::callback func1_member(&obj, &Object::add); + func1_member(1.25); + CPPUNIT_ASSERT_EQUAL(1, obj.total()); + + // Two-argument function: ignore the result of strcpy (which is just a char + // * to the destination, anyway), but check that it worked + redhawk::callback func2 = &strcpy; + const char* expected = "expected value"; + func2(buf, expected); + CPPUNIT_ASSERT(strcmp(buf, expected) == 0); + + // Two-argument member function + obj.add(100); + redhawk::callback func2_member(&obj, &Object::clamp); + func2_member(0, 50); + CPPUNIT_ASSERT_EQUAL(50, obj.total()); +} diff --git a/redhawk/src/testing/cpp/CallbackTest.h 
b/redhawk/src/testing/cpp/CallbackTest.h new file mode 100644 index 000000000..127f35e5e --- /dev/null +++ b/redhawk/src/testing/cpp/CallbackTest.h @@ -0,0 +1,70 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef CALLBACKTEST_H +#define CALLBACKTEST_H + +#include "CFTest.h" + +class CallbackTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(CallbackTest); + CPPUNIT_TEST(testEmpty); + CPPUNIT_TEST(testEmptyCall); + CPPUNIT_TEST(testBooleanOperators); + CPPUNIT_TEST(testFunction); + CPPUNIT_TEST(testFunctionEquals); + CPPUNIT_TEST(testFunctor); + CPPUNIT_TEST(testFunctorRef); + CPPUNIT_TEST(testFunctorEquals); + CPPUNIT_TEST(testMemberFunction); + CPPUNIT_TEST(testMemberFunctionEquals); + CPPUNIT_TEST(testMixedEquals); + CPPUNIT_TEST(testReferenceArguments); + CPPUNIT_TEST(testArgumentConversion); + CPPUNIT_TEST(testVoidReturn); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testEmpty(); + void testEmptyCall(); + void testBooleanOperators(); + + void testFunction(); + void testFunctionEquals(); + + void testFunctor(); + void testFunctorRef(); + void testFunctorEquals(); + + void testMemberFunction(); + void testMemberFunctionEquals(); + + void testMixedEquals(); + + void testReferenceArguments(); + void testArgumentConversion(); + void testVoidReturn(); +}; + +#endif // CALLBACKTEST_H diff --git a/redhawk/src/testing/cpp/ExecutorServiceTest.cpp b/redhawk/src/testing/cpp/ExecutorServiceTest.cpp new file mode 100644 index 000000000..f384cec4e --- /dev/null +++ b/redhawk/src/testing/cpp/ExecutorServiceTest.cpp @@ -0,0 +1,222 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "ExecutorServiceTest.h" + +CPPUNIT_TEST_SUITE_REGISTRATION(ExecutorServiceTest); + +namespace { + + class CommandTracker + { + public: + CommandTracker() + { + } + + void run() + { + runCommand(std::string()); + } + + void runCommand(const std::string& command) + { + boost::mutex::scoped_lock lock(_mutex); + _commands.push_back(command); + _cond.notify_all(); + } + + int count() const + { + return _commands.size(); + } + + bool wait(size_t count, boost::system_time when) + { + boost::mutex::scoped_lock lock(_mutex); + while (_commands.size() < count) { + if (!_cond.timed_wait(lock, when)) { + break; + } + } + return _commands.size() >= count; + } + + void reset() + { + _commands.clear(); + } + + static CommandTracker& Global() + { + static CommandTracker tracker; + return tracker; + } + + static void runGlobal() + { + Global().run(); + } + + private: + boost::mutex _mutex; + boost::condition_variable _cond; + + std::vector _commands; + }; +} + +void ExecutorServiceTest::setUp() +{ + CommandTracker::Global().reset(); +} + +void ExecutorServiceTest::tearDown() +{ + _service.stop(); +} + +void ExecutorServiceTest::testExecute() +{ + _service.start(); + + // No-argument (global) function + CPPUNIT_ASSERT_EQUAL(0, CommandTracker::Global().count()); + _service.execute(&CommandTracker::runGlobal); + + CommandTracker tracker; + CPPUNIT_ASSERT_EQUAL(0, tracker.count()); + + // One-argument (member) function + _service.execute(&CommandTracker::run, &tracker); + + // Two-argument (member) function + _service.execute(&CommandTracker::runCommand, &tracker, 
"testExecute"); + + // Wait up to 1000us for the commands to be executed + boost::system_time timeout = boost::get_system_time() + boost::posix_time::microseconds(1000); + CPPUNIT_ASSERT(CommandTracker::Global().wait(1, timeout)); + CPPUNIT_ASSERT_EQUAL(1, CommandTracker::Global().count()); + + CPPUNIT_ASSERT(tracker.wait(2, timeout)); + CPPUNIT_ASSERT_EQUAL(2, tracker.count()); +} + +void ExecutorServiceTest::testSchedule() +{ + _service.start(); + + // No-argument (global) function + CPPUNIT_ASSERT_EQUAL(0, CommandTracker::Global().count()); + boost::system_time first = boost::get_system_time() + boost::posix_time::microseconds(1000); + _service.schedule(first, &CommandTracker::runGlobal); + + CommandTracker tracker; + CPPUNIT_ASSERT_EQUAL(0, tracker.count()); + + // One-argument (member) function + boost::system_time second = first + boost::posix_time::microseconds(1000); + _service.schedule(second, &CommandTracker::run, &tracker); + + // Two-argument (member) function + boost::system_time third = second + boost::posix_time::microseconds(1000); + _service.schedule(third, &CommandTracker::runCommand, &tracker, "testSchedule"); + + // Use maximum wait of 10000us (from now) for the commands to be executed + // (it should take approximately 3000us, but allow some slack in the event + // of scheduler delays) + boost::system_time timeout = boost::get_system_time() + boost::posix_time::microseconds(10000); + + // Wait for the first command, and check enough time has passed + CPPUNIT_ASSERT(CommandTracker::Global().wait(1, timeout)); + CPPUNIT_ASSERT_EQUAL(1, CommandTracker::Global().count()); + CPPUNIT_ASSERT(boost::get_system_time() >= first); + + // Wait for the second and third commands, again checking that enough time + // has passed + CPPUNIT_ASSERT(tracker.wait(1, timeout)); + CPPUNIT_ASSERT(boost::get_system_time() >= second); + CPPUNIT_ASSERT(tracker.wait(2, timeout)); + CPPUNIT_ASSERT(boost::get_system_time() >= third); + CPPUNIT_ASSERT_EQUAL(2, 
tracker.count()); +} + +void ExecutorServiceTest::testStop() +{ + _service.start(); + + CommandTracker tracker; + + // Schedule a task far enough in the future that we can stop the service's + // thread before it is executed + boost::system_time when = boost::get_system_time() + boost::posix_time::microseconds(1000); + _service.schedule(when, &CommandTracker::run, &tracker); + + // Stop the service; check that the scheduled time for the task is still in + // the future, and that it was not executed + _service.stop(); + CPPUNIT_ASSERT_MESSAGE("scheduled time already passed", boost::get_system_time() < when); + CPPUNIT_ASSERT_EQUAL(0, tracker.count()); + CPPUNIT_ASSERT_EQUAL((size_t) 1, _service.pending()); + + // Wait until the scheduled time for the task has passed, plus a small + // fudge factor to account for thread timing + when += boost::posix_time::microseconds(500); + while (boost::get_system_time() < when) { + CPPUNIT_ASSERT(!tracker.wait(1, when)); + } + CPPUNIT_ASSERT_MESSAGE("failed to wait requested time", boost::get_system_time() >= when); + + // The task should still not have executed + CPPUNIT_ASSERT_EQUAL(0, tracker.count()); + CPPUNIT_ASSERT_EQUAL((size_t) 1, _service.pending()); + + // Start the service; it should be able to run the task as soon as the + // thread begins + _service.start(); + + // Wait a little bit to give the service's thread time to execute the task + // and check that it has, in fact, happenend + boost::system_time timeout = boost::get_system_time() + boost::posix_time::microseconds(500); + CPPUNIT_ASSERT(tracker.wait(1, timeout)); + CPPUNIT_ASSERT_EQUAL((size_t) 0, _service.pending()); +} + +void ExecutorServiceTest::testClear() +{ + CommandTracker tracker; + + // Queue a command to be executed now, and one in the future + _service.execute(&CommandTracker::run, &tracker); + boost::system_time when = boost::get_system_time() + boost::posix_time::seconds(1); + _service.schedule(when, &CommandTracker::run, &tracker); + + // Check 
that both tasks are currently pending + CPPUNIT_ASSERT_EQUAL((size_t) 2, _service.pending()); + + // Clear the pending tasks and then start the service--doing it in this + // order makes it deterministic whether any tasks are executed + _service.clear(); + _service.start(); + + // There should be no pending tasks, and nothing should have been executed + CPPUNIT_ASSERT_EQUAL((size_t) 0, _service.pending()); + CPPUNIT_ASSERT_EQUAL(0, tracker.count()); +} diff --git a/redhawk/src/testing/cpp/ExecutorServiceTest.h b/redhawk/src/testing/cpp/ExecutorServiceTest.h new file mode 100644 index 000000000..2d26cc2ac --- /dev/null +++ b/redhawk/src/testing/cpp/ExecutorServiceTest.h @@ -0,0 +1,50 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef EXECUTORSERVICETEST_H +#define EXECUTORSERVICETEST_H + +#include "CFTest.h" + +#include + +class ExecutorServiceTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(ExecutorServiceTest); + CPPUNIT_TEST(testExecute); + CPPUNIT_TEST(testSchedule); + CPPUNIT_TEST(testStop); + CPPUNIT_TEST(testClear); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testExecute(); + void testSchedule(); + void testStop(); + void testClear(); + +private: + redhawk::ExecutorService _service; +}; + +#endif // EXECUTORSERVICETEST_H diff --git a/redhawk/src/testing/cpp/Makefile.am b/redhawk/src/testing/cpp/Makefile.am new file mode 100644 index 000000000..481e1db22 --- /dev/null +++ b/redhawk/src/testing/cpp/Makefile.am @@ -0,0 +1,49 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +TESTS = test_libossiecf + +AM_CPPFLAGS = -I $(top_srcdir)/base/include +AM_LDFLAGS = $(top_builddir)/base/framework/libossiecf.la $(top_builddir)/base/framework/idl/libossieidl.la -no-install + +check_PROGRAMS = $(TESTS) + +test_libossiecf_SOURCES = test_libossiecf.cpp +test_libossiecf_SOURCES += SharedBufferTest.cpp SharedBufferTest.h +test_libossiecf_SOURCES += AnyUtilsTest.cpp AnyUtilsTest.h +test_libossiecf_SOURCES += ValueTest.cpp ValueTest.h +test_libossiecf_SOURCES += ValueSequenceTest.cpp ValueSequenceTest.h +test_libossiecf_SOURCES += PropertyMapTest.cpp PropertyMapTest.h +test_libossiecf_SOURCES += MessagingTest.cpp MessagingTest.h +test_libossiecf_SOURCES += ExecutorServiceTest.cpp ExecutorServiceTest.h +test_libossiecf_SOURCES += BufferManagerTest.cpp BufferManagerTest.h +test_libossiecf_SOURCES += CallbackTest.cpp CallbackTest.h +test_libossiecf_SOURCES += PortManager.cpp PortManager.h +test_libossiecf_SOURCES += BitopsTest.cpp BitopsTest.h +test_libossiecf_SOURCES += BitBufferTest.cpp BitBufferTest.h +test_libossiecf_SOURCES += ServiceInterruptTest.cpp ServiceInterruptTest.h +test_libossiecf_CXXFLAGS = -Wall $(CPPUNIT_CFLAGS) +test_libossiecf_LDFLAGS = $(CPPUNIT_LIBS) $(AM_LDFLAGS) + +# Benchmark program for bit operations +noinst_PROGRAMS = benchmark_bitops + +benchmark_bitops_SOURCES = benchmark_bitops.cpp +benchmark_bitops_CXXFLAGS = -Wall diff --git a/redhawk/src/testing/cpp/MessagingTest.cpp b/redhawk/src/testing/cpp/MessagingTest.cpp new file mode 100644 index 000000000..28f487c6f --- /dev/null +++ b/redhawk/src/testing/cpp/MessagingTest.cpp @@ -0,0 +1,576 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "MessagingTest.h" + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(MessagingTest); + +namespace { + // Internal structs and methods are encapsulated in an anonymous namespace + // to prevent external symbol table pollution and collisions. + + // NB: Because the Any extraction operator for std::string is defined in + // the global namespace (not std), we need to bring it into this namespace + // so that it can be found by the custom struct extraction operators. + using ::operator>>=; + + // Legacy message struct generated with REDHAWK 1.8. This serves two + // purposes: it maintains compatibility with 1.8 code, and forces the + // MessageSupplierPort to use Any serialization instead of direct message + // transfer. 
+ struct legacy_message_struct { + legacy_message_struct () + { + }; + + std::string getId() { + return std::string("legacy_message"); + }; + + CORBA::Long value; + }; + + inline bool operator>>= (const CORBA::Any& a, legacy_message_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + CF::Properties& props = *temp; + for (unsigned int idx = 0; idx < props.length(); idx++) { + if (!strcmp("value", props[idx].id)) { + if (!(props[idx].value >>= s.value)) return false; + } + } + return true; + }; + + inline void operator<<= (CORBA::Any& a, const legacy_message_struct& s) { + CF::Properties props; + props.length(1); + props[0].id = CORBA::string_dup("value"); + props[0].value <<= s.value; + a <<= props; + }; + + // REDHAWK 2.1 message struct, with a getFormat() method that allows it to + // be used with direct in-process messaging to skip Any serialization + struct direct_message_struct { + direct_message_struct () + { + } + + static std::string getId() { + return std::string("direct_message"); + } + + static const char* getFormat() { + return "is"; + } + + CORBA::Long value; + std::string body; + }; + + inline bool operator>>= (const CORBA::Any& a, direct_message_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("value")) { + if (!(props["value"] >>= s.value)) return false; + } + if (props.contains("body")) { + if (!(props["body"] >>= s.body)) return false; + } + return true; + } + + inline void operator<<= (CORBA::Any& a, const direct_message_struct& s) { + redhawk::PropertyMap props; + + props["value"] = s.value; + + props["body"] = s.body; + a <<= props; + } + + inline bool operator== (const direct_message_struct& s1, const direct_message_struct& s2) { + if (s1.value!=s2.value) + return false; + if (s1.body!=s2.body) + return false; + return true; + } + + inline bool operator!= (const direct_message_struct& s1, const 
direct_message_struct& s2) { + return !(s1==s2); + } + + // Utility class for message consumer callbacks + template + class MessageReceiver + { + public: + typedef T message_type; + + void messageReceived(const std::string& messageId, const message_type& msgData) + { + _received.push_back(msgData); + _addresses.push_back(&msgData); + } + + const std::vector& received() const + { + return _received; + } + + const std::vector& addresses() const + { + return _addresses; + } + + private: + std::vector _received; + std::vector _addresses; + }; + + // Utility class for generic message (CORBA::Any) callbacks + class GenericReceiver + { + public: + void messageReceived(const std::string& messageId, const CORBA::Any& msgData) + { + _received.push_back(redhawk::PropertyType(messageId, msgData)); + } + + const redhawk::PropertyMap& received() const + { + return _received; + } + + private: + redhawk::PropertyMap _received; + }; +} + +void MessagingTest::setUp() +{ + _supplier = new MessageSupplierPort("supplier"); + _consumer = new MessageConsumerPort("consumer"); + + _portManager.addPort(_supplier); + _portManager.addPort(_consumer); + + // Connect the supplier and consumer + CORBA::Object_var objref = _consumer->_this(); + _supplier->connectPort(objref, "connection_1"); + + // Simulate component start + _portManager.start(); +} + +void MessagingTest::tearDown() +{ + // Simulate component stop/shutdown + _portManager.stop(); + _portManager.releaseObject(); + + // Consumer and supplier have been deleted by the port manager + _supplier = 0; + _consumer = 0; +} + +void MessagingTest::testConnections() +{ + CORBA::Object_var objref = _consumer->_this(); + + // Verify the connections list + ExtendedCF::UsesConnectionSequence_var connections = _supplier->connections(); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 1, connections->length()); + CPPUNIT_ASSERT_EQUAL(std::string("connection_1"), std::string(connections[0].connectionId)); + 
CPPUNIT_ASSERT(objref->_is_equivalent(connections[0].port));
+
+    // Make two more connections
+    _supplier->connectPort(objref, "connection_2");
+    _supplier->connectPort(objref, "connection_3");
+    connections = _supplier->connections();
+    CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 3, connections->length());
+
+    // Check all the connections; there is no guarantee of ordering in the
+    // connection list, so collect the names in a set
+    std::set<std::string> names;
+    for (CORBA::ULong index = 0; index < connections->length(); ++index) {
+        names.insert(std::string(connections[index].connectionId));
+        CPPUNIT_ASSERT(objref->_is_equivalent(connections[index].port));
+    }
+    CPPUNIT_ASSERT(names.find("connection_1") != names.end());
+    CPPUNIT_ASSERT(names.find("connection_2") != names.end());
+    CPPUNIT_ASSERT(names.find("connection_3") != names.end());
+
+    // Disconnect one of the connections and check again
+    _supplier->disconnectPort("connection_2");
+    connections = _supplier->connections();
+    CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 2, connections->length());
+    names.clear();
+    for (CORBA::ULong index = 0; index < connections->length(); ++index) {
+        names.insert(std::string(connections[index].connectionId));
+        CPPUNIT_ASSERT(objref->_is_equivalent(connections[index].port));
+    }
+    CPPUNIT_ASSERT(names.find("connection_1") != names.end());
+    CPPUNIT_ASSERT(names.find("connection_3") != names.end());
+}
+
+void MessagingTest::testSendMessage()
+{
+    // Set up receiver
+    typedef MessageReceiver<legacy_message_struct> receiver_type;
+    receiver_type receiver;
+    _consumer->registerMessage("legacy_message", &receiver, &receiver_type::messageReceived);
+
+    legacy_message_struct msg;
+    msg.value = 1;
+
+    // Send the message and check that it's received. Currently, the consumer's
+    // message handler is called directly from sendMessage, so this is a
+    // synchronous operation; however, if at some point in the future, threaded
+    // message dispatch is added, this test will need to be revisited.
+    CPPUNIT_ASSERT(receiver.received().empty());
+    _supplier->sendMessage(msg);
+    CPPUNIT_ASSERT_EQUAL((size_t) 1, receiver.received().size());
+
+    // Check the message contents, working around the old code generators' non-
+    // const, non-static getId() method with a const_cast
+    const legacy_message_struct& received = receiver.received().front();
+    CPPUNIT_ASSERT_EQUAL(std::string("legacy_message"), const_cast<legacy_message_struct&>(received).getId());
+    CPPUNIT_ASSERT_EQUAL(msg.value, received.value);
+
+    // This test is designed to use the slower Any serialization path for
+    // message transfer, so check that it actually does
+    CPPUNIT_ASSERT_MESSAGE("unexpected direct message send", receiver.addresses().front() != &msg);
+}
+
+void MessagingTest::testSendMessageDirect()
+{
+    // Set up receiver
+    typedef MessageReceiver<direct_message_struct> receiver_type;
+    receiver_type receiver;
+    _consumer->registerMessage("direct_message", &receiver, &receiver_type::messageReceived);
+
+    direct_message_struct msg;
+    msg.value = 2;
+    msg.body = "text string";
+
+    // Send the message and check that it's received. Currently, the consumer's
+    // message handler is called directly from sendMessage, so this is a
+    // synchronous operation; however, if at some point in the future, threaded
+    // message dispatch is added, this test will need to be revisited.
+    CPPUNIT_ASSERT(receiver.received().empty());
+    _supplier->sendMessage(msg);
+    CPPUNIT_ASSERT_EQUAL((size_t) 1, receiver.received().size());
+
+    // Check the message contents
+    const direct_message_struct& received = receiver.received().front();
+    CPPUNIT_ASSERT_EQUAL(direct_message_struct::getId(), received.getId());
+    CPPUNIT_ASSERT(msg == received);
+
+    // This test is designed to use the direct in-process path for message
+    // transfer (via getFormat()), so check that it actually does
+    CPPUNIT_ASSERT_MESSAGE("direct message transfer not used", receiver.addresses().front() == &msg);
+}
+
+void MessagingTest::testSendMessageConnectionId()
+{
+    // Create and connect a second consumer port
+    MessageConsumerPort* consumer_2 = new MessageConsumerPort("consumer_2");
+    _portManager.addPort(consumer_2);
+    consumer_2->startPort();
+    CORBA::Object_var objref = consumer_2->_this();
+    _supplier->connectPort(objref, "connection_2");
+
+    // Set up 2 receivers to distinguish which connection received a message
+    typedef MessageReceiver<direct_message_struct> receiver_type;
+    receiver_type receiver_1;
+    _consumer->registerMessage("direct_message", &receiver_1, &receiver_type::messageReceived);
+
+    receiver_type receiver_2;
+    consumer_2->registerMessage("direct_message", &receiver_2, &receiver_type::messageReceived);
+
+    direct_message_struct msg;
+    msg.value = 1;
+    msg.body = "connection_1";
+
+    // Target the first connection (see above re: threading)
+    CPPUNIT_ASSERT(receiver_1.received().empty());
+    CPPUNIT_ASSERT(receiver_2.received().empty());
+    _supplier->sendMessage(msg, "connection_1");
+    CPPUNIT_ASSERT_EQUAL((size_t) 1, receiver_1.received().size());
+    CPPUNIT_ASSERT_EQUAL((size_t) 0, receiver_2.received().size());
+
+    // Target the second connection this time
+    msg.value = 2;
+    msg.body = "connection_2";
+    _supplier->sendMessage(msg, "connection_2");
+    CPPUNIT_ASSERT_EQUAL((size_t) 1, receiver_1.received().size());
+    CPPUNIT_ASSERT_EQUAL((size_t) 1, receiver_2.received().size());
+
+    // Target both
connections + msg.value = 3; + msg.body = "all"; + _supplier->sendMessage(msg); + CPPUNIT_ASSERT_EQUAL((size_t) 2, receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL((size_t) 2, receiver_2.received().size()); + + // Target invalid connection + msg.value = 4; + msg.body = "bad_connection"; + CPPUNIT_ASSERT_THROW(_supplier->sendMessage(msg, "bad_connection"), std::invalid_argument); + CPPUNIT_ASSERT_EQUAL((size_t) 2, receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL((size_t) 2, receiver_2.received().size()); +} + +void MessagingTest::testSendMessages() +{ + // Set up receiver + typedef MessageReceiver receiver_type; + receiver_type receiver; + _consumer->registerMessage("legacy_message", &receiver, &receiver_type::messageReceived); + + std::vector messages; + for (size_t index = 0; index < 3; ++index) { + legacy_message_struct msg; + msg.value = index; + messages.push_back(msg); + } + + // Send the messages and check that they are received (see above re: + // threading). + CPPUNIT_ASSERT(receiver.received().empty()); + _supplier->sendMessages(messages); + CPPUNIT_ASSERT_EQUAL(messages.size(), receiver.received().size()); + + // Check the message bodies and make sure that the port used the Any + // serialization path for message transfer + for (size_t index = 0; index < messages.size(); ++index) { + CPPUNIT_ASSERT_EQUAL(messages[index].value, receiver.received()[index].value); + CPPUNIT_ASSERT_MESSAGE("unexpected direct message transfer", &messages[index] != receiver.addresses()[index]); + } +} + +void MessagingTest::testSendMessagesDirect() +{ + // Set up receiver + typedef MessageReceiver receiver_type; + receiver_type receiver; + _consumer->registerMessage("direct_message", &receiver, &receiver_type::messageReceived); + + const char* text[] = { "lorem", "ipsum", "dolor", "sit", "amet", 0 }; + std::vector messages; + for (size_t index = 0; text[index] != 0; ++index) { + direct_message_struct msg; + msg.value = index; + msg.body = text[index]; + 
messages.push_back(msg); + } + + // Send the messages and check that they are received (see above re: + // threading). + CPPUNIT_ASSERT(receiver.received().empty()); + _supplier->sendMessages(messages); + CPPUNIT_ASSERT_EQUAL(messages.size(), receiver.received().size()); + + // Check all the messages at once + CPPUNIT_ASSERT(messages == receiver.received()); + + // Make sure the port used direct transfer + for (size_t index = 0; index < messages.size(); ++index) { + CPPUNIT_ASSERT_MESSAGE("direct message transfer not used", &messages[index] == receiver.addresses()[index]); + } +} + +void MessagingTest::testSendMessagesConnectionId() +{ + // Create and connect a second consumer port + MessageConsumerPort* consumer_2 = new MessageConsumerPort("consumer_2"); + _portManager.addPort(consumer_2); + CORBA::Object_var objref = consumer_2->_this(); + _supplier->connectPort(objref, "connection_2"); + + // Set up 2 receivers to distinguish which connection received a message + typedef MessageReceiver receiver_type; + receiver_type receiver_1; + _consumer->registerMessage("direct_message", &receiver_1, &receiver_type::messageReceived); + + receiver_type receiver_2; + consumer_2->registerMessage("direct_message", &receiver_2, &receiver_type::messageReceived); + + // Build a list of messages based on filler text and target the first + // connection (see above re: threading) + const char* text_1[] = { "lorem", "ipsum", 0 }; + std::vector messages_1; + for (size_t index = 0; text_1[index] != 0; ++index) { + direct_message_struct msg; + msg.value = index; + msg.body = text_1[index]; + messages_1.push_back(msg); + } + CPPUNIT_ASSERT(receiver_1.received().empty()); + CPPUNIT_ASSERT(receiver_2.received().empty()); + _supplier->sendMessages(messages_1, "connection_1"); + CPPUNIT_ASSERT_EQUAL(messages_1.size(), receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL((size_t) 0, receiver_2.received().size()); + + // Target the second connection this time with a different set of messages + 
const char* text_2[] = { "dolor", "sit", "amet", 0 }; + std::vector messages_2; + for (size_t index = 0; text_2[index] != 0; ++index) { + direct_message_struct msg; + msg.value = index + messages_1.size(); + msg.body = text_2[index]; + messages_2.push_back(msg); + } + _supplier->sendMessages(messages_2, "connection_2"); + CPPUNIT_ASSERT_EQUAL(messages_1.size(), receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL(messages_2.size(), receiver_2.received().size()); + + // Target both connections + _supplier->sendMessages(messages_1); + CPPUNIT_ASSERT_EQUAL(messages_1.size() + messages_1.size(), receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL(messages_2.size() + messages_1.size(), receiver_2.received().size()); + + // Target invalid connection + std::vector messages_3; + messages_3.resize(1); + messages_3[0].value = 1000; + messages_3[0].body = "bad_connection"; + CPPUNIT_ASSERT_THROW(_supplier->sendMessages(messages_3, "bad_connection"), std::invalid_argument); + CPPUNIT_ASSERT_EQUAL(messages_1.size() + messages_1.size(), receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL(messages_2.size() + messages_1.size(), receiver_2.received().size()); +} + +void MessagingTest::testGenericCallback() +{ + // Set up receiver + GenericReceiver receiver; + _consumer->registerMessage(&receiver, &GenericReceiver::messageReceived); + + // Send legacy_message and direct_message; with a generic callback, + // everything necessarily goes through Any serialization, so there's no + // distinction made other than there being two different message types + legacy_message_struct legacy; + legacy.value = 50; + _supplier->sendMessage(legacy); + + direct_message_struct direct; + direct.value = 100; + direct.body = "lorem ipsum"; + _supplier->sendMessage(direct); + + // Check that the messages were received (see above re: threading) + const redhawk::PropertyMap& messages = receiver.received(); + CPPUNIT_ASSERT_EQUAL((size_t) 2, messages.size()); + + legacy_message_struct legacy_out; + 
CPPUNIT_ASSERT_EQUAL(std::string("legacy_message"), messages[0].getId()); + CPPUNIT_ASSERT(messages[0].getValue() >>= legacy_out); + CPPUNIT_ASSERT_EQUAL(legacy.value, legacy_out.value); + + direct_message_struct direct_out; + CPPUNIT_ASSERT_EQUAL(direct_message_struct::getId(), messages[1].getId()); + CPPUNIT_ASSERT(messages[1].getValue() >>= direct_out); + CPPUNIT_ASSERT(direct == direct_out); +} + +void MessagingTest::testPush() +{ + // Set up a generic receiver + GenericReceiver receiver; + _consumer->registerMessage(&receiver, &GenericReceiver::messageReceived); + + // Pack the messages ourselves + redhawk::PropertyMap messages_out; + messages_out["first"] = (CORBA::Long) 100; + messages_out["second"] = "some text"; + messages_out["third"] = 0.25; + + CORBA::Any any; + any <<= messages_out; + _supplier->push(any); + + // Check that the messages were received (see above re: threading) + const redhawk::PropertyMap& messages = receiver.received(); + CPPUNIT_ASSERT_EQUAL((size_t) 3, messages.size()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 100, messages[0].getValue().toLong()); + CPPUNIT_ASSERT_EQUAL(std::string("some text"), messages[1].getValue().toString()); + CPPUNIT_ASSERT_EQUAL(0.25, messages[2].getValue().toDouble()); +} + +void MessagingTest::testPushConnectionId() +{ + // Create and connect a second consumer port + MessageConsumerPort* consumer_2 = new MessageConsumerPort("consumer_2"); + _portManager.addPort(consumer_2); + CORBA::Object_var objref = consumer_2->_this(); + _supplier->connectPort(objref, "connection_2"); + + // Set up 2 receivers to distinguish which connection received a message + GenericReceiver receiver_1; + _consumer->registerMessage(&receiver_1, &GenericReceiver::messageReceived); + + GenericReceiver receiver_2; + consumer_2->registerMessage(&receiver_2, &GenericReceiver::messageReceived); + + // Pack the messages ourselves and target the first connection + redhawk::PropertyMap messages_1; + messages_1["first"] = (CORBA::Long) 100; + 
messages_1["second"] = "some text"; + messages_1["third"] = 0.25; + CORBA::Any any; + any <<= messages_1; + _supplier->push(any, "connection_1"); + + CPPUNIT_ASSERT_EQUAL((size_t) 3, receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL((size_t) 0, receiver_2.received().size()); + + // Target the second connection with a different set of messages + redhawk::PropertyMap messages_2; + messages_2["one"] = "abc"; + messages_2["two"] = false; + any <<= messages_2; + _supplier->push(any, "connection_2"); + + CPPUNIT_ASSERT_EQUAL((size_t) 3, receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL((size_t) 2, receiver_2.received().size()); + + // Target both connections with yet another set of messages + redhawk::PropertyMap messages_3; + messages_3["all"] = (CORBA::Long) 3; + any <<= messages_3; + _supplier->push(any); + + CPPUNIT_ASSERT_EQUAL((size_t) 4, receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL((size_t) 3, receiver_2.received().size()); + + // Target invalid connection + redhawk::PropertyMap messages_4; + messages_4["bad"] = "bad_connection"; + any <<= messages_4; + CPPUNIT_ASSERT_THROW(_supplier->push(any, "bad_connection"), std::invalid_argument); + CPPUNIT_ASSERT_EQUAL((size_t) 4, receiver_1.received().size()); + CPPUNIT_ASSERT_EQUAL((size_t) 3, receiver_2.received().size()); +} diff --git a/redhawk/src/testing/cpp/MessagingTest.h b/redhawk/src/testing/cpp/MessagingTest.h new file mode 100644 index 000000000..1eed4e863 --- /dev/null +++ b/redhawk/src/testing/cpp/MessagingTest.h @@ -0,0 +1,71 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef MESSAGINGTEST_H +#define MESSAGINGTEST_H + +#include "CFTest.h" + +#include + +#include "PortManager.h" + +class MessagingTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(MessagingTest); + CPPUNIT_TEST(testConnections); + CPPUNIT_TEST(testSendMessage); + CPPUNIT_TEST(testSendMessageDirect); + CPPUNIT_TEST(testSendMessageConnectionId); + CPPUNIT_TEST(testSendMessages); + CPPUNIT_TEST(testSendMessagesDirect); + CPPUNIT_TEST(testSendMessagesConnectionId); + CPPUNIT_TEST(testGenericCallback); + CPPUNIT_TEST(testPush); + CPPUNIT_TEST(testPushConnectionId); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testConnections(); + + void testSendMessage(); + void testSendMessageDirect(); + void testSendMessageConnectionId(); + + void testSendMessages(); + void testSendMessagesDirect(); + void testSendMessagesConnectionId(); + + void testGenericCallback(); + + void testPush(); + void testPushConnectionId(); + +private: + PortManager _portManager; + + MessageSupplierPort* _supplier; + MessageConsumerPort* _consumer; +}; + +#endif // MESSAGINGTEST_H diff --git a/redhawk/src/testing/cpp/PortManager.cpp b/redhawk/src/testing/cpp/PortManager.cpp new file mode 100644 index 000000000..e18070234 --- /dev/null +++ b/redhawk/src/testing/cpp/PortManager.cpp @@ -0,0 +1,61 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "PortManager.h" + +#include +#include + +PortManager::PortManager() : + PortSupplier_impl() +{ +} + +PortManager::~PortManager() +{ + releaseObject(); +} + +void PortManager::addPort(PortBase* port) +{ + PortSupplier_impl::addPort(port->getName(), port); + + // Take ownership of the port; if the caller requires a longer lifetime for + // the port, they must increment the reference count themselves + _ports.push_back(port); +} + +void PortManager::start() +{ + startPorts(); +} + +void PortManager::stop() +{ + stopPorts(); +} + +void PortManager::releaseObject() +{ + releasePorts(); + + std::for_each(_ports.begin(), _ports.end(), std::mem_fun(&PortBase::_remove_ref)); + _ports.clear(); +} diff --git a/redhawk/src/testing/cpp/PortManager.h b/redhawk/src/testing/cpp/PortManager.h new file mode 100644 index 000000000..2afab0ec5 --- /dev/null +++ b/redhawk/src/testing/cpp/PortManager.h @@ -0,0 +1,48 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef PORTMANAGER_H +#define PORTMANAGER_H + +#include + +/** + * Container class to manage REDHAWK ports in unit tests. + * + * Takes ownership of the ports and handles start, stop and release to maintain + * consistent behavior with a "real" component. + */ +class PortManager : private PortSupplier_impl +{ +public: + PortManager(); + ~PortManager(); + + void addPort(PortBase* port); + + void start(); + void stop(); + void releaseObject(); + +private: + std::vector _ports; +}; + +#endif // PORTMANAGER_H diff --git a/redhawk/src/testing/cpp/PropertyMapTest.cpp b/redhawk/src/testing/cpp/PropertyMapTest.cpp new file mode 100644 index 000000000..e400cc952 --- /dev/null +++ b/redhawk/src/testing/cpp/PropertyMapTest.cpp @@ -0,0 +1,400 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "PropertyMapTest.h" + +#include + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(PropertyMapTest); + +namespace { + redhawk::PropertyMap generate_test_data() + { + redhawk::PropertyMap propmap; + propmap["first"] = (short)123; + propmap["second"] = "abc"; + propmap["third"] = 5.25; + return propmap; + } + + redhawk::PropertyMap generate_test_sequence(size_t count) + { + redhawk::PropertyMap propmap; + for (size_t index = 0; index < count; ++index) { + std::ostringstream key; + key << "prop_" << index; + propmap[key.str()] = (CORBA::Long) index; + } + return propmap; + } +} + +void PropertyMapTest::setUp() +{ +} + +void PropertyMapTest::tearDown() +{ +} + +void PropertyMapTest::testDefaultConstructor() +{ + // Default constructor should create an empty PropertyMap + redhawk::PropertyMap propmap; + CPPUNIT_ASSERT(propmap.empty()); + CPPUNIT_ASSERT_EQUAL((size_t) 0, propmap.size()); +} + +void PropertyMapTest::testPropertiesConstructor() +{ + // Copy constructor from CF::Properties + CF::Properties properties; + properties.length(2); + redhawk::PropertyMap propmap(properties); + CPPUNIT_ASSERT(!propmap.empty()); + CPPUNIT_ASSERT_EQUAL((size_t) properties.length(), propmap.size()); +} + +void PropertyMapTest::testPropertyTypeFromAny() +{ + // Due to the implicit conversion from the templatized constructor for + // Value (the explicit keyword was removed for 2.1), it was necessary to + // add a constructor to PropertyType that takes a CORBA::Any as the value + // argument to prevent accidental nesting of Anys; this test simply ensures + // that this 
works + CORBA::Any any; + any <<= CF::Properties(); + redhawk::PropertyType prop("test", any); + CPPUNIT_ASSERT_EQUAL(redhawk::Value::TYPE_PROPERTIES, prop.getValue().getType()); +} + +void PropertyMapTest::testConstCast() +{ + // Create a known set of CF::Properties + CF::Properties properties; + properties.length(2); + properties[0].id = "first"; + properties[0].value <<= "abc"; + properties[1].id = "second"; + properties[1].value <<= 1.0; + + // Create a const PropertyMap reference alias and check the values + const CF::Properties& const_properties = properties; + const redhawk::PropertyMap& propmap = redhawk::PropertyMap::cast(const_properties); + CPPUNIT_ASSERT(!propmap.empty()); + CPPUNIT_ASSERT_EQUAL((size_t) properties.length(), propmap.size()); + CPPUNIT_ASSERT_EQUAL(std::string("first"), propmap[0].getId()); + CPPUNIT_ASSERT_EQUAL(std::string("abc"), propmap[0].getValue().toString()); + CPPUNIT_ASSERT_EQUAL(std::string("second"), propmap[1].getId()); + CPPUNIT_ASSERT_EQUAL(1.0, propmap[1].getValue().toDouble()); + + // Append to the Properties and check that the change is reflected in the + // PropertyMap + properties.length(3); + properties[2].id = "third"; + properties[2].value <<= false; + CPPUNIT_ASSERT_EQUAL((size_t) properties.length(), propmap.size()); + CPPUNIT_ASSERT_EQUAL(std::string("third"), propmap[2].getId()); + CPPUNIT_ASSERT_EQUAL(false, propmap[2].getValue().toBoolean()); +} + +void PropertyMapTest::testCast() +{ + // Start with an empty CF::Properties + CF::Properties properties; + + // Create a PropertyMap reference alias + redhawk::PropertyMap& propmap = redhawk::PropertyMap::cast(properties); + CPPUNIT_ASSERT(propmap.empty()); + + // Add a boolean PropertyType to the end of the PropertyMap and check that + // the change is reflected in the aliased Properties + propmap["boolean"] = true; + CPPUNIT_ASSERT_EQUAL((size_t) 1, propmap.size()); + CPPUNIT_ASSERT_EQUAL((size_t) properties.length(), propmap.size()); + 
CPPUNIT_ASSERT_EQUAL(std::string(properties[0].id), propmap[0].getId()); + bool result = false; + CPPUNIT_ASSERT(properties[0].value >>= result); + CPPUNIT_ASSERT_EQUAL(true, result); +} + +void PropertyMapTest::testPushBack() +{ + redhawk::PropertyMap propmap; + CPPUNIT_ASSERT(propmap.empty()); + + // Push a raw CF::DataType; it should be the first property + CF::DataType dt; + dt.id = "dt"; + dt.value <<= "one"; + propmap.push_back(dt); + CPPUNIT_ASSERT_EQUAL((size_t) 1, propmap.size()); + CPPUNIT_ASSERT_EQUAL(std::string("dt"), propmap[0].getId()); + CPPUNIT_ASSERT_EQUAL(std::string("one"), propmap[0].getValue().toString()); + + // Push a PropertyType; it should be the second property + propmap.push_back(redhawk::PropertyType("test", (short)0)); + CPPUNIT_ASSERT_EQUAL((size_t) 2, propmap.size()); + CPPUNIT_ASSERT_EQUAL(std::string("test"), propmap[1].getId()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 0, propmap[1].getValue().toLong()); +} + +void PropertyMapTest::testContains() +{ + // Use a const PropertyMap with known test data; contains() is intended to + // be const-friendly + const redhawk::PropertyMap propmap = generate_test_data(); + CPPUNIT_ASSERT(propmap.contains("first")); + CPPUNIT_ASSERT(propmap.contains("third")); + CPPUNIT_ASSERT(!propmap.contains("fourth")); +} + +void PropertyMapTest::testConstIndexing() +{ + // Generate sequential properties (propN=N) and make sure the indexed + // values match up + const redhawk::PropertyMap propmap = generate_test_sequence(8); + CPPUNIT_ASSERT_EQUAL(std::string("prop_4"), propmap[4].getId()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 7, propmap[7].getValue().toLong()); +} + +void PropertyMapTest::testMutableIndexing() +{ + // Create Properties with sequential IDs/values and a PropertyMap reference + // alias--this allows us to use the trusted CORBA sequence operator[] for + // comparison + CF::Properties properties = generate_test_sequence(8); + redhawk::PropertyMap& propmap = redhawk::PropertyMap::cast(properties); + 
+ // Basic indexing + CPPUNIT_ASSERT_EQUAL(std::string("prop_4"), propmap[4].getId()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 7, propmap[7].getValue().toLong()); + + // Overwrite a value by index + propmap[3] = redhawk::PropertyType("overwrite", (CORBA::Long)-128); + CPPUNIT_ASSERT_EQUAL(std::string("overwrite"), std::string(properties[3].id)); + CORBA::Long lval = 0; + CPPUNIT_ASSERT(properties[3].value >>= lval); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) -128, lval); + + // Modify ID of an indexed item + propmap[5].setId("modified"); + CPPUNIT_ASSERT_EQUAL(std::string("modified"), std::string(properties[5].id)); + + // Modify value of an indexed item + propmap[7].setValue((CORBA::Double) 9.75); + CORBA::Double dval = 0.0; + CPPUNIT_ASSERT(properties[7].value >>= dval); + CPPUNIT_ASSERT_EQUAL((CORBA::Double) 9.75, dval); +} + +void PropertyMapTest::testConstMapping() +{ + const redhawk::PropertyMap propmap = generate_test_data(); + + // Check for a known property value + CPPUNIT_ASSERT_EQUAL(std::string("abc"), propmap["second"].toString()); + + // IDs that are not found should throw an exception + CPPUNIT_ASSERT_THROW(propmap["fourth"], std::invalid_argument); +} + +void PropertyMapTest::testMutableMapping() +{ + // Use the standard test data, with the caveat that if for some reason its + // size changes, or the property IDs are different than we expect, this + // test will intentionally fail. 
+ redhawk::PropertyMap propmap = generate_test_data(); + CPPUNIT_ASSERT_EQUAL((size_t) 3, propmap.size()); + + // Set a value for a new key, and check that it adds a new property to the + // end of the map + CPPUNIT_ASSERT(!propmap.contains("fourth")); + propmap["fourth"] = true; + CPPUNIT_ASSERT_EQUAL((size_t) 4, propmap.size()); + CPPUNIT_ASSERT_EQUAL(std::string("fourth"), propmap[3].getId()); + CPPUNIT_ASSERT_EQUAL(true, propmap[3].getValue().toBoolean()); + + // Set a value for an existing key, and check that it overwrote the old + // value + CPPUNIT_ASSERT_EQUAL(std::string("second"), propmap[1].getId()); + propmap["second"] = (short)5000; + CPPUNIT_ASSERT_EQUAL((size_t) 4, propmap.size()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 5000, propmap[1].getValue().toLong()); + + // When the property isn't found, it should create one using the default + // Value constructor + CPPUNIT_ASSERT(!propmap.contains("nil")); + CPPUNIT_ASSERT(propmap["nil"].isNil()); + CPPUNIT_ASSERT_EQUAL((size_t) 5, propmap.size()); +} + +void PropertyMapTest::testConstFind() +{ + const redhawk::PropertyMap propmap = generate_test_data(); + + // If the key is not found, find() should return the end iterator + CPPUNIT_ASSERT_EQUAL(propmap.end(), propmap.find("missing")); + + // With a known good key, make sure that find() returns a valid iterator, + // to the correct property + redhawk::PropertyMap::const_iterator prop = propmap.find("third"); + CPPUNIT_ASSERT(prop != propmap.end()); + CPPUNIT_ASSERT_EQUAL(std::string("third"), prop->getId()); + CPPUNIT_ASSERT_EQUAL((CORBA::Double) 5.25, prop->getValue().toDouble()); +} + +void PropertyMapTest::testMutableFind() +{ + redhawk::PropertyMap propmap = generate_test_data(); + + // If the key is not found, find() should return the end iterator (this is + // essentially identical to the const version) + CPPUNIT_ASSERT_EQUAL(propmap.end(), propmap.find("bogus")); + + // Find a known good key, and use the iterator to overwrite the value + 
redhawk::PropertyMap::iterator prop = propmap.find("second"); + CPPUNIT_ASSERT(prop != propmap.end()); + CPPUNIT_ASSERT_EQUAL(std::string("second"), prop->getId()); + prop->setValue("override"); + CPPUNIT_ASSERT_EQUAL(std::string("override"), propmap["second"].toString()); +} + +void PropertyMapTest::testConstIteration() +{ + const redhawk::PropertyMap empty; + CPPUNIT_ASSERT_EQUAL(empty.begin(), empty.end()); + + const redhawk::PropertyMap propmap = generate_test_sequence(8); + CORBA::Long offset = 0; + for (redhawk::PropertyMap::const_iterator iter = propmap.begin(); iter != propmap.end(); ++iter, ++offset) { + CPPUNIT_ASSERT_EQUAL(offset, iter->getValue().toLong()); + } + CPPUNIT_ASSERT_EQUAL((size_t) offset, propmap.size()); +} + +void PropertyMapTest::testMutableIteration() +{ + redhawk::PropertyMap empty; + CPPUNIT_ASSERT_EQUAL(empty.begin(), empty.end()); + + redhawk::PropertyMap propmap = generate_test_sequence(8); + CORBA::Long offset = 0; + for (redhawk::PropertyMap::iterator iter = propmap.begin(); iter != propmap.end(); ++iter, ++offset) { + if (iter->getId() == "prop_6") { + iter->setValue("override"); + } + } + CPPUNIT_ASSERT_EQUAL((size_t) offset, propmap.size()); + CPPUNIT_ASSERT_EQUAL(std::string("override"), propmap[6].getValue().toString()); +} + +void PropertyMapTest::testUpdate() +{ + // Start with a PropertyMap that partially intersects the standard test + // properties ("second" is in both) + redhawk::PropertyMap propmap; + propmap["second"] = "default"; + propmap["fourth"] = true; + + // Update with the standard test properties + const redhawk::PropertyMap overrides = generate_test_data(); + propmap.update(overrides); + + // Two properties should have been added ("first" and "third") + CPPUNIT_ASSERT_EQUAL((size_t) 4, propmap.size()); + CPPUNIT_ASSERT(propmap.contains("first")); + CPPUNIT_ASSERT_EQUAL(overrides["first"].toLong(), propmap["first"].toLong()); + CPPUNIT_ASSERT(propmap.contains("third")); + + // The common property "second" 
should be updated according to the standard + // test properties + CPPUNIT_ASSERT_EQUAL(overrides["second"].toString(), propmap["second"].toString()); + + // The "fourth" property should remain unchanged + CPPUNIT_ASSERT_EQUAL(true, propmap["fourth"].toBoolean()); +} + +void PropertyMapTest::testErase() +{ + // Use the sequential test data, because we're going to delete several + // entries, and it's easier with predictable property names + redhawk::PropertyMap propmap = generate_test_sequence(10); + + // Erase a single property by ID + CPPUNIT_ASSERT(propmap.contains("prop_4")); + propmap.erase("prop_4"); + CPPUNIT_ASSERT_EQUAL((size_t) 9, propmap.size()); + CPPUNIT_ASSERT(!propmap.contains("prop_4")); + + // Erase a single property by iterator + redhawk::PropertyMap::iterator prop = propmap.find("prop_2"); + CPPUNIT_ASSERT(prop != propmap.end()); + propmap.erase(prop); + CPPUNIT_ASSERT_EQUAL((size_t) 8, propmap.size()); + CPPUNIT_ASSERT(!propmap.contains("prop_2")); + + // Erase a range of properties; this should remove 3, 5 and 6 + redhawk::PropertyMap::iterator first = propmap.find("prop_3"); + redhawk::PropertyMap::iterator last = propmap.find("prop_7"); + propmap.erase(first, last); + CPPUNIT_ASSERT_EQUAL((size_t) 5, propmap.size()); + + // Check that the gap is where we expect it (from 1 to 7) + prop = propmap.find("prop_1"); + prop++; + CPPUNIT_ASSERT_EQUAL(std::string("prop_7"), prop->getId()); +} + +void PropertyMapTest::testGet() +{ + const redhawk::PropertyMap propmap = generate_test_data(); + + // Property exists, default ignored + CPPUNIT_ASSERT_EQUAL(std::string("abc"), propmap.get("second", "fail").toString()); + + // Property exists, default returned + CPPUNIT_ASSERT(!propmap.contains("missing")); + CPPUNIT_ASSERT_EQUAL(std::string("pass"), propmap.get("missing", "pass").toString()); +} + +void PropertyMapTest::testToString() +{ + // Using the standard test properties, create a string representation; this + // test shouldn't necessarily be 
considered canonical, but should at least + // raise a red flag if the format changes inadvertently + const redhawk::PropertyMap propmap = generate_test_data(); + const std::string stringval = propmap.toString(); + + // The string representation should: + // * be non-empty + // * be enclosed in curly braces + // * contain one "key=value" for each property + CPPUNIT_ASSERT(!stringval.empty()); + CPPUNIT_ASSERT_EQUAL('{', *stringval.begin()); + CPPUNIT_ASSERT_EQUAL('}', *(stringval.end()-1)); + CPPUNIT_ASSERT(stringval.find("first") != std::string::npos); + CPPUNIT_ASSERT(stringval.find("second") != std::string::npos); + CPPUNIT_ASSERT(stringval.find("third") != std::string::npos); + size_t item_count = std::count(stringval.begin(), stringval.end(), '='); + CPPUNIT_ASSERT_EQUAL(propmap.size(), item_count); +} diff --git a/redhawk/src/testing/cpp/PropertyMapTest.h b/redhawk/src/testing/cpp/PropertyMapTest.h new file mode 100644 index 000000000..50793ff5d --- /dev/null +++ b/redhawk/src/testing/cpp/PropertyMapTest.h @@ -0,0 +1,84 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef PROPERTYMAPTEST_H +#define PROPERTYMAPTEST_H + +#include "CFTest.h" + +class PropertyMapTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(PropertyMapTest); + CPPUNIT_TEST(testDefaultConstructor); + CPPUNIT_TEST(testPropertiesConstructor); + CPPUNIT_TEST(testPropertyTypeFromAny); + CPPUNIT_TEST(testConstCast); + CPPUNIT_TEST(testCast); + CPPUNIT_TEST(testPushBack); + CPPUNIT_TEST(testConstIndexing); + CPPUNIT_TEST(testMutableIndexing); + CPPUNIT_TEST(testConstMapping); + CPPUNIT_TEST(testMutableMapping); + CPPUNIT_TEST(testConstFind); + CPPUNIT_TEST(testMutableFind); + CPPUNIT_TEST(testConstIteration); + CPPUNIT_TEST(testMutableIteration); + CPPUNIT_TEST(testUpdate); + CPPUNIT_TEST(testErase); + CPPUNIT_TEST(testGet); + CPPUNIT_TEST(testToString); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testDefaultConstructor(); + void testPropertiesConstructor(); + void testPropertyTypeFromAny(); + + void testConstCast(); + void testCast(); + + void testPushBack(); + + void testContains(); + + void testConstIndexing(); + void testMutableIndexing(); + + void testConstMapping(); + void testMutableMapping(); + + void testConstFind(); + void testMutableFind(); + + void testConstIteration(); + void testMutableIteration(); + + void testUpdate(); + void testErase(); + + void testGet(); + + void testToString(); +}; + +#endif // PROPERTYMAPTEST_H diff --git a/redhawk/src/testing/cpp/ServiceInterruptTest.cpp b/redhawk/src/testing/cpp/ServiceInterruptTest.cpp new file mode 100644 index 000000000..94a0012de --- /dev/null +++ b/redhawk/src/testing/cpp/ServiceInterruptTest.cpp @@ -0,0 +1,96 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "ServiceInterruptTest.h" + +#include +#include + +#include + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(ServiceInterruptTest); + +int svc_stuck_cpp_base::serviceFunction() +{ + boost::mutex dataBufferLock; + boost::mutex::scoped_lock lock(dataBufferLock); + boost::condition_variable my_wait; + boost::system_time to_time = boost::get_system_time() + boost::posix_time::seconds(1000); + my_wait.timed_wait(lock, to_time); + return NORMAL; +} + +svc_stuck_cpp_base::svc_stuck_cpp_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ +} + +svc_stuck_cpp_base::~svc_stuck_cpp_base() +{ +} +void svc_stuck_cpp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} +void svc_stuck_cpp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} +void svc_stuck_cpp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + try { + stop(); + } catch (CF::Resource::StopError& ex) { + } + + Component::releaseObject(); +} + +void svc_stuck_cpp_base::loadProperties() +{ +} + +void 
ServiceInterruptTest::setUp() +{ + my_comp = new svc_stuck_cpp_base("hello", "hello"); +} + +void ServiceInterruptTest::tearDown() +{ + if (my_comp!=NULL) { + delete my_comp; + } +} + +void ServiceInterruptTest::testInterruption() +{ + my_comp->start(); + CPPUNIT_ASSERT(my_comp->started()); + usleep(10000); + my_comp->stop(); + CPPUNIT_ASSERT(!my_comp->started()); +} diff --git a/redhawk/src/testing/cpp/ServiceInterruptTest.h b/redhawk/src/testing/cpp/ServiceInterruptTest.h new file mode 100644 index 000000000..b4b646c76 --- /dev/null +++ b/redhawk/src/testing/cpp/ServiceInterruptTest.h @@ -0,0 +1,64 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef SERVICEINTERRUPTTEST_H +#define SERVICEINTERRUPTTEST_H + +#include "CFTest.h" +#include +#include +#include + +class svc_stuck_cpp_base : public Component, protected ThreadedComponent +{ + public: + svc_stuck_cpp_base(const char *uuid, const char *label); + ~svc_stuck_cpp_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + int serviceFunction(); + + protected: + + private: +}; + +class ServiceInterruptTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(ServiceInterruptTest); + CPPUNIT_TEST(testInterruption); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testInterruption(); + + svc_stuck_cpp_base *my_comp; +}; + +#endif // SERVICEINTERRUPTTEST_H diff --git a/redhawk/src/testing/cpp/SharedBufferTest.cpp b/redhawk/src/testing/cpp/SharedBufferTest.cpp new file mode 100644 index 000000000..b73d8af7d --- /dev/null +++ b/redhawk/src/testing/cpp/SharedBufferTest.cpp @@ -0,0 +1,531 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. 
If not, see http://www.gnu.org/licenses/. + */ + +#include "SharedBufferTest.h" + +#include + +#define _RH_SHARED_BUFFER_USE_STD_ALLOC +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(SharedBufferTest); + +namespace { + // Special allocator that wraps an existing char buffer, marking allocated + // data with ones and deallocated data with zeros. + template + struct CustomAllocator : public std::allocator + { + CustomAllocator(char* buffer) : + _buffer(buffer) + { + } + + T* allocate(size_t count, void* ptr=0) + { + size_t bytes = count * sizeof(T); + std::fill(_buffer, _buffer + bytes, 1); + return reinterpret_cast(_buffer); + } + + void deallocate(void* ptr, size_t count) + { + size_t bytes = count * sizeof(T); + std::fill(_buffer, _buffer + bytes, 0); + } + + private: + char* _buffer; + }; + + // Special deleter that writes to a boolean flag pointer to notify when it + // has been called. Using a pointer-to-value instead of its own local flag + // allows it to be passed by value, which is the preferred way of passing a + // deleter to boost::shared_array (and by extension redhawk::shared_buffer) + struct CustomDeleter { + CustomDeleter(bool* deleted) : + _deleted(deleted) + { + } + + void operator() (void* ptr) + { + std::free(ptr); + *_deleted = true; + } + + bool* _deleted; + }; + + struct NullDeleter { + void operator() (void*) + { + } + }; +} + +void SharedBufferTest::setUp() +{ +} + +void SharedBufferTest::tearDown() +{ +} + +void SharedBufferTest::testDefaultConstructor() +{ + // Empty const shared buffer + const redhawk::shared_buffer shared; + CPPUNIT_ASSERT(shared.size() == 0); + CPPUNIT_ASSERT(shared.empty()); + CPPUNIT_ASSERT_EQUAL(shared.begin(), shared.end()); + + // Empty regular buffer + redhawk::buffer buffer; + CPPUNIT_ASSERT(buffer.size() == 0); + CPPUNIT_ASSERT(buffer.empty()); + CPPUNIT_ASSERT_EQUAL(buffer.begin(), buffer.end()); +} + +void SharedBufferTest::testConstructor() +{ + // Test allocating constructor + const size_t BUFFER_SIZE = 16; + 
redhawk::buffer buffer(BUFFER_SIZE); + CPPUNIT_ASSERT(!buffer.empty()); + CPPUNIT_ASSERT_EQUAL(buffer.size(), BUFFER_SIZE); + CPPUNIT_ASSERT(buffer.begin() != buffer.end()); + + // Test construction of shared buffer from mutable buffer + redhawk::shared_buffer shared(buffer); + CPPUNIT_ASSERT(!shared.empty()); + CPPUNIT_ASSERT_EQUAL(shared.size(), buffer.size()); + CPPUNIT_ASSERT(shared.data() == buffer.data()); +} + +void SharedBufferTest::testMakeBuffer() +{ + // Simple wrapper for externally-allocated array + int* array = new int[16]; + redhawk::buffer buffer = redhawk::make_buffer(array, 16); + CPPUNIT_ASSERT_EQUAL(array, buffer.data()); + + // Use a custom deleter that is a no-op, with a stack-based array + double darray[4] = { 1.0, 2.0, 3.0, 4.0 }; + redhawk::buffer dbuffer = redhawk::make_buffer(darray, 4, NullDeleter()); + CPPUNIT_ASSERT_EQUAL(&darray[0], dbuffer.data()); +} + +void SharedBufferTest::testEquals() +{ + // Create a buffer with known data + redhawk::buffer first(6); + std::fill(first.begin(), first.end(), 8); + + // Create a second, identical buffer and check that it is equal + redhawk::buffer second(first.size()); + std::copy(first.begin(), first.end(), second.begin()); + CPPUNIT_ASSERT(first.data() != second.data()); + CPPUNIT_ASSERT(first == second); + + // Modify an element, breaking equality + second[3] = -25; + CPPUNIT_ASSERT(first != second); + + // Re-allocate the second buffer with one extra element + second = redhawk::buffer(first.size() + 1); + std::copy(first.begin(), first.end(), second.begin()); + second[second.size() - 1] = second[0]; + CPPUNIT_ASSERT(first != second); +} + +void SharedBufferTest::testIteration() +{ + // Create a buffer with known data + redhawk::buffer buffer(20); + for (unsigned long index = 0; index < buffer.size(); ++index) { + buffer[index] = index; + } + + // The distance between the begin and end iterators must be the same as the + // size, and iteration should yield the same result as sequential indexing 
+ size_t offset = 0; + for (redhawk::buffer::iterator iter = buffer.begin(); iter != buffer.end(); ++iter, ++offset) { + CPPUNIT_ASSERT_EQUAL(*iter, buffer[offset]); + } + CPPUNIT_ASSERT_EQUAL(buffer.size(), offset); + + // Repeat, via a const shared buffer alias + const redhawk::shared_buffer shared = buffer; + CPPUNIT_ASSERT_EQUAL((ptrdiff_t) shared.size(), std::distance(shared.begin(), shared.end())); + CPPUNIT_ASSERT(std::equal(shared.begin(), shared.end(), buffer.begin())); +} + +void SharedBufferTest::testCopy() +{ + // Create a buffer with known data + redhawk::buffer original(9); + for (size_t index = 0; index < original.size(); ++index) { + // Value is true if index is odd + original[index] = index & ~1; + } + + // Create a const shared buffer alias + const redhawk::shared_buffer buffer = original; + CPPUNIT_ASSERT(buffer.data() == original.data()); + + // Make a copy, and verify that it's a new underlying buffer + redhawk::buffer copy = buffer.copy(); + CPPUNIT_ASSERT(copy == buffer); + CPPUNIT_ASSERT(copy.data() != buffer.data()); + + // Make a second copy, exercising copy() from buffer (instead of + // shared_buffer) + redhawk::buffer second = copy.copy(); + CPPUNIT_ASSERT(second == buffer); + CPPUNIT_ASSERT(second.data() != copy.data()); +} + +void SharedBufferTest::testSwap() +{ + // Create two mutable buffers with different contents + redhawk::buffer first(3); + std::fill(first.begin(), first.end(), 7); + CPPUNIT_ASSERT_EQUAL(first.size(), (size_t) std::count(first.begin(), first.end(), 7)); + redhawk::buffer second(5); + std::fill(second.begin(), second.end(), -2); + CPPUNIT_ASSERT_EQUAL(second.size(), (size_t) std::count(second.begin(), second.end(), -2)); + + // Swap them and check that the swap worked as expected + first.swap(second); + CPPUNIT_ASSERT(first.size() == 5); + CPPUNIT_ASSERT(first[0] == -2); + CPPUNIT_ASSERT(second.size() == 3); + CPPUNIT_ASSERT(second[0] == 7); + + // Create one shared buffer alias for each buffer + 
redhawk::shared_buffer shared_first = first; + CPPUNIT_ASSERT(shared_first.data() == first.data()); + redhawk::shared_buffer shared_second = second; + CPPUNIT_ASSERT(shared_second.data() == second.data()); + + // Swap the shared buffers and make sure that the underlying data pointers + // are correct + shared_first.swap(shared_second); + CPPUNIT_ASSERT(shared_first.data() == second.data()); + CPPUNIT_ASSERT(shared_second.data() == first.data()); +} + +void SharedBufferTest::testResize() +{ + // Fill a new buffer with a ramp (offset by 1) for easy comparison + redhawk::buffer buffer(8); + for (size_t index = 0; index < buffer.size(); ++index) { + buffer[index] = index + 1; + } + + // Resize the buffer and check that the values are preserved + buffer.resize(16); + CPPUNIT_ASSERT_EQUAL((size_t) 16, buffer.size()); + for (size_t index = 0; index < 8; ++index) { + unsigned char expected = index + 1; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Resize did not copy data", expected, buffer[index]); + } + + // Resize down (which can be done better with trim, but is still legal) and + // check values + buffer.resize(4); + CPPUNIT_ASSERT_EQUAL((size_t) 4, buffer.size()); + for (size_t index = 0; index < buffer.size(); ++index) { + unsigned char expected = index + 1; + CPPUNIT_ASSERT_EQUAL_MESSAGE("Resize did not copy data", expected, buffer[index]); + } +} + +void SharedBufferTest::testSharing() +{ + // Fill a new buffer + redhawk::buffer > buffer(8); + for (size_t index = 0; index < buffer.size(); ++index) { + buffer[index] = std::complex(0.5, 0.5) * (double) index; + } + + // Create a const shared buffer aliasing the original + const redhawk::shared_buffer > shared = buffer; + CPPUNIT_ASSERT(shared == buffer); + + // Conjugate values and ensure that the buffers are still equal + std::transform(buffer.begin(), buffer.end(), buffer.begin(), std::conj); + CPPUNIT_ASSERT(shared == buffer); +} + +void SharedBufferTest::testSlicing() +{ + // Fill a new buffer + redhawk::buffer buffer(12); + 
for (size_t index = 0; index < buffer.size(); ++index) { + buffer[index] = index; + } + + // Take a 4-element slice from the middle and check that it points to the + // same data (offset by the start index) + const redhawk::shared_buffer middle = buffer.slice(4, 8); + CPPUNIT_ASSERT_EQUAL(middle.size(), (size_t) 4); + CPPUNIT_ASSERT(middle.data() == buffer.data() + 4); + + // Take a slice from the midpoint to the end, and check that the elements + // match + const redhawk::shared_buffer end = buffer.slice(6); + CPPUNIT_ASSERT_EQUAL(end.size(), buffer.size() - 6); + for (size_t index = 0; index < end.size(); ++index) { + CPPUNIT_ASSERT_EQUAL(end[index], buffer[index + 6]); + } + + // Compare the overlap between the two slices by taking sub-slices + CPPUNIT_ASSERT(middle.slice(2) == end.slice(0, 2)); +} + +template +void SharedBufferTest::testTrimImpl() +{ + redhawk::buffer original(10); + for (size_t index = 0; index < original.size(); ++index) { + original[index] = index; + } + + // Create an alias of the template type, then trim one element off each end + Buffer buffer = original; + buffer.trim(1, original.size() - 1); + CPPUNIT_ASSERT_EQUAL(buffer.size(), original.size() - 2); + CPPUNIT_ASSERT(std::equal(buffer.begin(), buffer.end(), original.begin() + 1)); + + // Trim another element off the beginning + buffer.trim(1); + CPPUNIT_ASSERT_EQUAL(buffer.size(), original.size() - 3); + CPPUNIT_ASSERT(std::equal(buffer.begin(), buffer.end(), original.begin() + 2)); + + // Iterator-based trim: find specific values and trim to [first, last) + typename Buffer::iterator first = std::find(buffer.begin(), buffer.end(), 4); + typename Buffer::iterator last = std::find(buffer.begin(), buffer.end(), 7); + CPPUNIT_ASSERT(first != buffer.end()); + CPPUNIT_ASSERT(last != buffer.end()); + buffer.trim(first, last); + CPPUNIT_ASSERT(buffer.size() == 3); + CPPUNIT_ASSERT_EQUAL(buffer[0], (unsigned short) 4); + + // Use iterator-based trim to take another element off the beginning + 
buffer.trim(buffer.begin() + 1); + CPPUNIT_ASSERT(buffer.size() == 2); + CPPUNIT_ASSERT_EQUAL(buffer[0], (unsigned short) 5); +} + +void SharedBufferTest::testTrim() +{ + testTrimImpl >(); +} + +void SharedBufferTest::testTrimShared() +{ + testTrimImpl >(); +} + +void SharedBufferTest::testReplace() +{ + // Fill destination with a constant value + redhawk::buffer dest(24); + std::fill(dest.begin(), dest.end(), 6789); + + // Create a different pattern for the replacement data + redhawk::buffer pattern(4); + for (size_t ii = 0; ii < pattern.size(); ++ii) { + pattern[ii] = (ii + 1) * 1000; + } + const redhawk::shared_buffer src = pattern; + + // 3-argument version: replace 3 elements at offset 1 + dest.replace(1, 3, src); + CPPUNIT_ASSERT_EQUAL(6789, dest[0]); + CPPUNIT_ASSERT_EQUAL(1000, dest[1]); + CPPUNIT_ASSERT_EQUAL(2000, dest[2]); + CPPUNIT_ASSERT_EQUAL(3000, dest[3]); + CPPUNIT_ASSERT_EQUAL(6789, dest[4]); + + // 4-argument version: replace 2 elements at offset 16, starting at offset + // 2 in the source + dest.replace(16, 2, src, 2); + CPPUNIT_ASSERT_EQUAL(6789, dest[15]); + CPPUNIT_ASSERT_EQUAL(3000, dest[16]); + CPPUNIT_ASSERT_EQUAL(4000, dest[17]); + CPPUNIT_ASSERT_EQUAL(6789, dest[18]); +} + +void SharedBufferTest::testRecast() +{ + // Fill a new buffer with an odd number of elements + redhawk::buffer buffer(13); + for (size_t index = 0; index < buffer.size(); ++index) { + buffer[index] = index * 2.0 * M_PI; + } + + // Recast to the complex equivalent, and check that its size is determined + // using floor division (i.e., the extra real value is excluded) + redhawk::shared_buffer > cxbuffer; + cxbuffer = redhawk::shared_buffer >::recast(buffer); + CPPUNIT_ASSERT_EQUAL(cxbuffer.size(), buffer.size() / 2); + CPPUNIT_ASSERT_EQUAL(cxbuffer.size() * 2, buffer.size() - 1); + + // Check that the complex buffer has the original values interleaved as + // real/imaginary pairs + for (size_t index = 0; index < (cxbuffer.size() * 2); index += 2) { + 
CPPUNIT_ASSERT_EQUAL(cxbuffer[index/2].real(), buffer[index]); + CPPUNIT_ASSERT_EQUAL(cxbuffer[index/2].imag(), buffer[index+1]); + } + + // Recast into short, this time using buffer::recast (such that the result + // is mutable); this is basically nonsensical, but technically valid, so + // only check that the data pointer and size are correct + redhawk::buffer short_buffer; + short_buffer = redhawk::buffer::recast(buffer); + CPPUNIT_ASSERT_EQUAL((void*) short_buffer.data(), (void*) buffer.data()); + CPPUNIT_ASSERT_EQUAL(short_buffer.size(), buffer.size() * 2); +} + +void SharedBufferTest::testAllocator() +{ + typedef std::complex value_type; + typedef CustomAllocator allocator_type; + + // Start with a zero-filled buffer of "raw" memory + std::vector arena; + arena.resize(32); + + // The initial condition of the arena should be all zeros + std::fill(arena.begin(), arena.end(), 0); + CPPUNIT_ASSERT_EQUAL(arena.size(), (size_t) std::count(arena.begin(), arena.end(), 0)); + + // Create a new buffer using a custom allocator; we should see that it's + // marked the allocated space with ones + redhawk::buffer buffer(4, allocator_type(&arena[0])); + const size_t MIN_BYTES = sizeof(value_type) * buffer.size(); + size_t index = 0; + while ((index < arena.size()) && (arena[index] == 1)) { + ++index; + } + CPPUNIT_ASSERT(index >= MIN_BYTES); + + // Release the buffer, which should trigger a deallocation; we should see + // that it's reset the allocated space to zeros + buffer = redhawk::buffer(); + CPPUNIT_ASSERT_EQUAL(arena.size(), (size_t) std::count(arena.begin(), arena.end(), 0)); +} + +void SharedBufferTest::testAllocatorCopy() +{ + typedef CustomAllocator allocator_type; + + // Initialize source buffer + redhawk::buffer buffer(16); + for (size_t index = 0; index < buffer.size(); ++index) { + buffer[index] = index * 2.0 * M_PI / buffer.size(); + } + + // Over-allocate memory for the custom allocator, just to be safe + std::vector arena; + arena.resize(buffer.size() * 
sizeof(double) * 2); + + // Create a copy using the custom allocator, then ensure that the copy was + // allocated into our memory (and the copy is correct) + redhawk::shared_buffer copy = buffer.copy(allocator_type(&arena[0])); + CPPUNIT_ASSERT(reinterpret_cast(copy.data()) == &arena[0]); + CPPUNIT_ASSERT(copy == buffer); +} + +void SharedBufferTest::testCustomDeleter() +{ + // Local flag given to the custom deleter for checking when deletion has + // occurred + bool deleted = false; + + // Allocate a buffer through other means, then wrap it with a buffer using + // a custom deleter + char* data = static_cast(std::malloc(16)); + redhawk::shared_buffer buffer(data, sizeof(data), CustomDeleter(&deleted)); + CPPUNIT_ASSERT(buffer.data() == data); + + // Make a new shared alias and replace the first buffer; there is still one + // outstanding reference, so the deleter should not be called + redhawk::shared_buffer shared = buffer; + buffer = redhawk::shared_buffer(); + CPPUNIT_ASSERT(!deleted); + + // Replace the second buffer, which should be the last reference; the + // deleter should be called + shared = redhawk::shared_buffer(); + CPPUNIT_ASSERT(deleted); +} + +void SharedBufferTest::testTransient() +{ + // Wrap a transient shared buffer around a C-style array + int data[] = { 8, 6, 7, 5, 3, 0, 9 }; + const size_t BUFFER_SIZE = sizeof(data) / sizeof(data[0]); + redhawk::shared_buffer buffer = redhawk::shared_buffer::make_transient(data, BUFFER_SIZE); + CPPUNIT_ASSERT(buffer.transient()); + CPPUNIT_ASSERT(buffer.data() == data); + CPPUNIT_ASSERT(buffer.size() == BUFFER_SIZE); + for (size_t index = 0; index < buffer.size(); ++index) { + CPPUNIT_ASSERT_EQUAL(buffer[index], data[index]); + } + + // Allocate a new buffer and assign it back to our original buffer, making + // sure that it is no longer transient + buffer = redhawk::buffer(1); + CPPUNIT_ASSERT(!buffer.transient()); +} + +void SharedBufferTest::testGetMemory() +{ + // Using externally-acquired memory, check 
that the shared buffer's data() + // and base() values match the original value + const size_t BUFFER_SIZE = 12; + float* data = new float[BUFFER_SIZE]; + redhawk::shared_buffer buffer(data, BUFFER_SIZE); + CPPUNIT_ASSERT_EQUAL((const void*) data, buffer.get_memory().address()); + CPPUNIT_ASSERT_EQUAL((const void*) buffer.data(), buffer.get_memory().address()); + + // Recasting shouldn't affect the base pointer + redhawk::shared_buffer alias = redhawk::shared_buffer::recast(buffer); + CPPUNIT_ASSERT_EQUAL((const void*) data, alias.get_memory().address()); + + // Taking a slice of the original buffer should produce a different data + // pointer but preserve the base pointer + redhawk::shared_buffer slice = buffer.slice(2, 7); + const void* slice_base = slice.get_memory().address(); + CPPUNIT_ASSERT(slice.data() != slice_base); + CPPUNIT_ASSERT_EQUAL((const void*) data, slice_base); + + // Recast the slice, which should still preserve the base pointer + alias = redhawk::shared_buffer::recast(slice); + CPPUNIT_ASSERT_EQUAL((const void*) data, alias.get_memory().address()); + + // Transient buffers should always return null + redhawk::shared_buffer transient = redhawk::shared_buffer::make_transient(data, BUFFER_SIZE); + CPPUNIT_ASSERT_EQUAL((const void*) 0, transient.get_memory().address()); +} diff --git a/redhawk/src/testing/cpp/SharedBufferTest.h b/redhawk/src/testing/cpp/SharedBufferTest.h new file mode 100644 index 000000000..2ecf1d15e --- /dev/null +++ b/redhawk/src/testing/cpp/SharedBufferTest.h @@ -0,0 +1,86 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#ifndef SHAREDBUFFERTEST_H +#define SHAREDBUFFERTEST_H + +#include "CFTest.h" + +class SharedBufferTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(SharedBufferTest); + CPPUNIT_TEST(testDefaultConstructor); + CPPUNIT_TEST(testConstructor); + CPPUNIT_TEST(testMakeBuffer); + CPPUNIT_TEST(testEquals); + CPPUNIT_TEST(testIteration); + CPPUNIT_TEST(testCopy); + CPPUNIT_TEST(testSwap); + CPPUNIT_TEST(testResize); + CPPUNIT_TEST(testSharing); + CPPUNIT_TEST(testSlicing); + CPPUNIT_TEST(testTrim); + CPPUNIT_TEST(testTrimShared); + CPPUNIT_TEST(testReplace); + CPPUNIT_TEST(testRecast); + CPPUNIT_TEST(testAllocator); + CPPUNIT_TEST(testAllocatorCopy); + CPPUNIT_TEST(testCustomDeleter); + CPPUNIT_TEST(testTransient); + CPPUNIT_TEST(testGetMemory); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + // Constructors + void testDefaultConstructor(); + void testConstructor(); + void testMakeBuffer(); + + // Basic container behavior + void testEquals(); + void testIteration(); + void testCopy(); + void testSwap(); + void testResize(); + + // Extended container behavior + void testSharing(); + void testSlicing(); + void testTrim(); + void testTrimShared(); + void testReplace(); + void testRecast(); + + // Advanced features + void testAllocator(); + void testAllocatorCopy(); + void testCustomDeleter(); + void testTransient(); + void testGetMemory(); + +private: + template + void testTrimImpl(); +}; + +#endif // SHARED_BUFFER_TEST_H diff --git a/redhawk/src/testing/cpp/ValueSequenceTest.cpp 
b/redhawk/src/testing/cpp/ValueSequenceTest.cpp new file mode 100644 index 000000000..c1321f108 --- /dev/null +++ b/redhawk/src/testing/cpp/ValueSequenceTest.cpp @@ -0,0 +1,228 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "ValueSequenceTest.h" + +#include +#include + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(ValueSequenceTest); + +void ValueSequenceTest::setUp() +{ +} + +void ValueSequenceTest::tearDown() +{ +} + +void ValueSequenceTest::testDefaultConstructor() +{ + redhawk::ValueSequence values; + CPPUNIT_ASSERT(values.empty()); + CPPUNIT_ASSERT_EQUAL((size_t) 0, values.size()); +} + +void ValueSequenceTest::testAnySeqConstructor() +{ + CORBA::AnySeq anys; + anys.length(1); + redhawk::ValueSequence values(anys); + CPPUNIT_ASSERT(!values.empty()); + CPPUNIT_ASSERT_EQUAL((size_t) anys.length(), values.size()); +} + +void ValueSequenceTest::testConstCast() +{ + CORBA::AnySeq anys; + anys.length(2); + anys[0] <<= "abc"; + anys[1] <<= 1.0; + + // Create a const ValueSequence reference alias and check the values + const CORBA::AnySeq& const_anys = anys; + const redhawk::ValueSequence& values = redhawk::ValueSequence::cast(const_anys); + CPPUNIT_ASSERT(!values.empty()); + CPPUNIT_ASSERT_EQUAL((size_t) anys.length(), values.size()); + CPPUNIT_ASSERT_EQUAL(std::string("abc"), values[0].toString()); + CPPUNIT_ASSERT_EQUAL(1.0, values[1].toDouble()); + + // Modify the AnySeq and check that the change is reflected in the aliased + // ValueSequence + anys.length(3); + anys[2] <<= (CORBA::Long) 20; + CPPUNIT_ASSERT_EQUAL((size_t) anys.length(), values.size()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 20, values[2].toLong()); +} + +void ValueSequenceTest::testCast() +{ + CORBA::AnySeq anys; + + // Create a ValueSequence reference alias + redhawk::ValueSequence& values = redhawk::ValueSequence::cast(anys); + CPPUNIT_ASSERT(values.empty()); + + // Append a boolean to the end of the ValueSequence and check that the + // change is reflected in the aliased AnySeq + values.push_back(true); + CPPUNIT_ASSERT_EQUAL((size_t) 1, values.size()); + CPPUNIT_ASSERT_EQUAL((size_t) anys.length(), values.size()); + bool result = false; + CPPUNIT_ASSERT(anys[0] >>= result); + 
CPPUNIT_ASSERT_EQUAL(true, result); +} + +void ValueSequenceTest::testPushBack() +{ + redhawk::ValueSequence values; + CPPUNIT_ASSERT(values.empty()); + + values.push_back((short)0); + CPPUNIT_ASSERT_EQUAL((size_t) 1, values.size()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 0, values[0].toLong()); + + values.push_back("one"); + CPPUNIT_ASSERT_EQUAL((size_t) 2, values.size()); + CPPUNIT_ASSERT_EQUAL(std::string("one"), values[1].toString()); +} + +void ValueSequenceTest::testConstIndexing() +{ + // Fill an AnySeq such that seq[x] = x + CORBA::AnySeq anys; + anys.length(8); + for (CORBA::ULong index = 0; index < anys.length(); ++index) { + anys[index] <<= index; + } + + // Check that accessing returns the expected values + const redhawk::ValueSequence values(anys); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 3, values[3].toLong()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 5, values[5].toLong()); +} + +void ValueSequenceTest::testIndexing() +{ + // Fill an AnySeq such that seq[x] = x + CORBA::AnySeq anys; + anys.length(8); + for (CORBA::ULong index = 0; index < anys.length(); ++index) { + anys[index] <<= index; + } + + // Check that accessing returns the expected values + redhawk::ValueSequence& values = redhawk::ValueSequence::cast(anys); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 3, values[3].toULong()); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 5, values[5].toULong()); + + // Modify values within the sequence + values[4] = (CORBA::Long) -4; + values[6] = (CORBA::Double) -6.0; + + // Check that the correct values were modified + CORBA::Long lval = 0; + CPPUNIT_ASSERT(anys[4] >>= lval); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) -4, lval); + CORBA::Double dval = 0.0; + CPPUNIT_ASSERT(anys[6] >>= dval); + CPPUNIT_ASSERT_EQUAL((CORBA::Double) -6.0, dval); +} + +void ValueSequenceTest::testConstIteration() +{ + // Create a source AnySequence with predictable values + CORBA::AnySeq anys; + anys.length(16); + for (CORBA::ULong index = 0; index < anys.length(); ++index) { + anys[index] <<= 
(CORBA::Double) index; + } + + // Use copy constructor to create a const ValueSequence + const redhawk::ValueSequence values(anys); + + // The distance between the begin and end iterators must be the same as the + // size, and iteration should yield the same result as sequential indexing + size_t offset = 0; + for (redhawk::ValueSequence::const_iterator iter = values.begin(); iter != values.end(); ++iter, ++offset) { + CPPUNIT_ASSERT_EQUAL(values[offset].toDouble(), iter->toDouble()); + } + CPPUNIT_ASSERT_EQUAL(values.size(), offset); +} + +void ValueSequenceTest::testMutableIteration() +{ + // Start with an empty sequence + redhawk::ValueSequence values; + CPPUNIT_ASSERT_EQUAL(values.begin(), values.end()); + + // Fill the sequence with predictable values + for (size_t index = 0; index < 10; ++index) { + values.push_back((CORBA::Double) index); + } + + // Modify one value via an iterator + for (redhawk::ValueSequence::iterator iter = values.begin(); iter != values.end(); ++iter) { + if (iter->toDouble() == 5.0) { + *iter = (short)-1000; + } + } + + // Check that the expected value was modified + CPPUNIT_ASSERT_EQUAL((CORBA::Long) -1000, values[5].toLong()); +} + +void ValueSequenceTest::testFromConstValue() +{ + // Create a ValueSequence with known values + redhawk::ValueSequence original; + original.push_back("name"); + original.push_back((short)1000); + + // Create a const Value with a copy of the original sequence + const redhawk::Value rvalue(original); + CPPUNIT_ASSERT_EQUAL(redhawk::Value::TYPE_VALUE_SEQUENCE, rvalue.getType()); + + // Create a const ValueSequence alias to the Value and check that it + // matches the original + const redhawk::ValueSequence& values = rvalue.asSequence(); + CPPUNIT_ASSERT_EQUAL(original.size(), values.size()); + CPPUNIT_ASSERT_EQUAL(original[0].toString(), values[0].toString()); +} + +void ValueSequenceTest::testFromMutableValue() +{ + // Create a new Value from an empty ValueSequence + redhawk::Value rvalue = 
redhawk::ValueSequence(); + CPPUNIT_ASSERT_EQUAL(redhawk::Value::TYPE_VALUE_SEQUENCE, rvalue.getType()); + + // Create an alias and insert a value at the back, making sure it modifies + // the Value by extracting the AnySeq + redhawk::ValueSequence& values = rvalue.asSequence(); + values.push_back("test"); + CORBA::AnySeq* anys; + CPPUNIT_ASSERT(rvalue >>= anys); + CPPUNIT_ASSERT_EQUAL(values.size(), (size_t) anys->length()); + std::string result; + CPPUNIT_ASSERT((*anys)[0] >>= result); + CPPUNIT_ASSERT_EQUAL(std::string("test"), result); +} diff --git a/redhawk/src/testing/cpp/ValueSequenceTest.h b/redhawk/src/testing/cpp/ValueSequenceTest.h new file mode 100644 index 000000000..c6f949809 --- /dev/null +++ b/redhawk/src/testing/cpp/ValueSequenceTest.h @@ -0,0 +1,64 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef VALUESEQUENCETEST_H +#define VALUESEQUENCETEST_H + +#include "CFTest.h" + +class ValueSequenceTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(ValueSequenceTest); + CPPUNIT_TEST(testDefaultConstructor); + CPPUNIT_TEST(testAnySeqConstructor); + CPPUNIT_TEST(testConstCast); + CPPUNIT_TEST(testCast); + CPPUNIT_TEST(testPushBack); + CPPUNIT_TEST(testConstIndexing); + CPPUNIT_TEST(testIndexing); + CPPUNIT_TEST(testConstIteration); + CPPUNIT_TEST(testMutableIteration); + CPPUNIT_TEST(testFromConstValue); + CPPUNIT_TEST(testFromMutableValue); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testDefaultConstructor(); + void testAnySeqConstructor(); + + void testConstCast(); + void testCast(); + + void testPushBack(); + + void testConstIndexing(); + void testIndexing(); + + void testConstIteration(); + void testMutableIteration(); + + void testFromConstValue(); + void testFromMutableValue(); +}; + +#endif // VALUESEQUENCETEST_H diff --git a/redhawk/src/testing/cpp/ValueTest.cpp b/redhawk/src/testing/cpp/ValueTest.cpp new file mode 100644 index 000000000..ae1ee2b1d --- /dev/null +++ b/redhawk/src/testing/cpp/ValueTest.cpp @@ -0,0 +1,282 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "ValueTest.h" + +#include +#include + +#include + +CPPUNIT_TEST_SUITE_REGISTRATION(ValueTest); + +void ValueTest::setUp() +{ +} + +void ValueTest::tearDown() +{ +} + +void ValueTest::testConstructor() +{ + // This test is mostly to make sure that the constructors for all supported + // types compile. + + // Start with an empty value + { + redhawk::Value value; + CPPUNIT_ASSERT(value.isNil()); + } + + // NB: The CORBA C++ mapping does not allow directly setting an octet value, + // nor does it have true 8-bit numeric types. Use the helper to insert an + // octet for the purposes of testing. + { + redhawk::Value value(CORBA::Any::from_octet(0)); + CPPUNIT_ASSERT_MESSAGE("Octet constructor", !value.isNil()); + } + + // Numeric and common sequence types +#define ASSERT_CTOR(x, v) \ + { \ + redhawk::Value value((x) v); \ + CPPUNIT_ASSERT_MESSAGE(#x " constructor", !value.isNil()); \ + } + ASSERT_CTOR(CORBA::Short, 0); + ASSERT_CTOR(CORBA::UShort, 0); + ASSERT_CTOR(CORBA::Long, 0); + ASSERT_CTOR(CORBA::ULong, 0); + ASSERT_CTOR(CORBA::LongLong, 0); + ASSERT_CTOR(CORBA::ULongLong, 0); + ASSERT_CTOR(CORBA::Float, 0.0); + ASSERT_CTOR(CORBA::Double, 0.0); + ASSERT_CTOR(std::string, "abc"); + + CORBA::AnySeq anys; + ASSERT_CTOR(CORBA::AnySeq, anys); + + CF::Properties properties; + ASSERT_CTOR(CF::Properties, properties); +} + +void ValueTest::testCopyConstructor() +{ + CORBA::Any any; + CF::Properties properties; + any <<= properties; + CORBA::TypeCode_ptr typecode = any.type(); + redhawk::Value::Type type = redhawk::Value::GetType(typecode); + + // Copy constructor from Any (mostly making sure the type is the same, as + // opposed to nesting another level of Any) + redhawk::Value value(any); + CPPUNIT_ASSERT_EQUAL(type, value.getType()); + + // Copy constructor (again, checking that no accidental nesting 
occurred) + redhawk::Value copy(value); + CPPUNIT_ASSERT_EQUAL(type, copy.getType()); +} + +void ValueTest::testType() +{ + redhawk::Value value; + CPPUNIT_ASSERT(value.getType() == redhawk::Value::TYPE_NONE); + CPPUNIT_ASSERT(!value.isNumeric()); + CPPUNIT_ASSERT(!value.isSequence()); + + value = std::string("test"); + CPPUNIT_ASSERT_EQUAL(value.getType(), redhawk::Value::TYPE_STRING); + CPPUNIT_ASSERT(!value.isNumeric()); + CPPUNIT_ASSERT(!value.isSequence()); + +#define ASSERT_BASIC_NUMERIC(v, t) \ + CPPUNIT_ASSERT_EQUAL(v.getType(), t); \ + CPPUNIT_ASSERT(v.isNumeric()); \ + CPPUNIT_ASSERT(!v.isSequence()); + + value = true; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_BOOLEAN); + + // See testConstructor() for note about octet. + value = CORBA::Any::from_octet(1); + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_OCTET); + + value = (CORBA::Short) -2; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_SHORT); + + value = (CORBA::UShort) 3; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_USHORT); + + value = (CORBA::Long) -4; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_LONG); + + value = (CORBA::ULong) 5; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_ULONG); + + value = (CORBA::LongLong) -6; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_LONGLONG); + + value = (CORBA::ULongLong) 7; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_ULONGLONG); + + value = (CORBA::Float) -8.0; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_FLOAT); + + value = (CORBA::Double) 1e100; + ASSERT_BASIC_NUMERIC(value, redhawk::Value::TYPE_DOUBLE); + + value = CORBA::AnySeq(); + CPPUNIT_ASSERT_EQUAL(value.getType(), redhawk::Value::TYPE_VALUE_SEQUENCE); + CPPUNIT_ASSERT(!value.isNumeric()); + CPPUNIT_ASSERT(value.isSequence()); + CPPUNIT_ASSERT_EQUAL(value.getElementType(), redhawk::Value::TYPE_VALUE); + + value = CF::Properties(); + CPPUNIT_ASSERT_EQUAL(value.getType(), redhawk::Value::TYPE_PROPERTIES); + CPPUNIT_ASSERT(!value.isNumeric()); + 
CPPUNIT_ASSERT(value.isSequence()); + CPPUNIT_ASSERT_EQUAL(value.getElementType(), redhawk::Value::TYPE_DATATYPE); +} + +void ValueTest::testNumericConversion() +{ + // Boolean conversion from case-insensitive string literals and numbers + // (where zero is false and non-zero is true) + CPPUNIT_ASSERT_EQUAL(true, redhawk::Value("True").toBoolean()); + CPPUNIT_ASSERT_EQUAL(false, redhawk::Value("false").toBoolean()); + CPPUNIT_ASSERT_EQUAL(true, redhawk::Value((short)-1).toBoolean()); + CPPUNIT_ASSERT_EQUAL(false, redhawk::Value((short)0).toBoolean()); + + // Octet conversion from string, int and double; range test + CPPUNIT_ASSERT_EQUAL((CORBA::Octet) 222, redhawk::Value("222").toOctet()); + CPPUNIT_ASSERT_EQUAL((CORBA::Octet) 1, redhawk::Value((short)1).toOctet()); + CPPUNIT_ASSERT_EQUAL((CORBA::Octet) 125, redhawk::Value(125.5).toOctet()); + CPPUNIT_ASSERT_THROW(redhawk::Value((short)-1).toOctet(), std::range_error); + CPPUNIT_ASSERT_THROW(redhawk::Value((short)256).toOctet(), std::range_error); + + // Short conversion from string, int and double; range test + CPPUNIT_ASSERT_EQUAL((CORBA::Short) -25000, redhawk::Value("-25000").toShort()); + CPPUNIT_ASSERT_EQUAL((CORBA::Short) 1, redhawk::Value((float)1).toShort()); + CPPUNIT_ASSERT_EQUAL((CORBA::Short) 16000, redhawk::Value(16000.1).toShort()); + CPPUNIT_ASSERT_THROW(redhawk::Value((float)65536).toShort(), std::range_error); + + // UShort conversion from string, int and double; range test + CPPUNIT_ASSERT_EQUAL((CORBA::UShort) 60000, redhawk::Value("60000").toUShort()); + CPPUNIT_ASSERT_EQUAL((CORBA::UShort) 1, redhawk::Value((float)1).toUShort()); + CPPUNIT_ASSERT_EQUAL((CORBA::UShort) 50000, redhawk::Value(50000.999).toUShort()); + CPPUNIT_ASSERT_THROW(redhawk::Value((float)-1).toUShort(), std::range_error); + CPPUNIT_ASSERT_THROW(redhawk::Value((float)65536).toUShort(), std::range_error); + + // Long conversion from string, short and double; range test + CPPUNIT_ASSERT_EQUAL((CORBA::Long) -262144, 
redhawk::Value("-262144").toLong()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 1, redhawk::Value((short) 1).toLong()); + CPPUNIT_ASSERT_EQUAL((CORBA::Long) 100000000, redhawk::Value(1e8).toLong()); + CPPUNIT_ASSERT_THROW(redhawk::Value(1e10).toLong(), std::range_error); + CPPUNIT_ASSERT_THROW(redhawk::Value(-1e10).toLong(), std::range_error); + + // ULong conversion from string, int and double; range test + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 4294967295, redhawk::Value("4294967295").toULong()); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 1, redhawk::Value((short)1).toULong()); + CPPUNIT_ASSERT_EQUAL((CORBA::ULong) 3000000000, redhawk::Value(3e9).toULong()); + CPPUNIT_ASSERT_THROW(redhawk::Value((short)-1).toULong(), std::range_error); + CPPUNIT_ASSERT_THROW(redhawk::Value(4294967296L).toULong(), std::range_error); + + // LongLong conversion from string, int and double; range test + CPPUNIT_ASSERT_EQUAL((CORBA::LongLong) 1099511627776L, redhawk::Value("1099511627776").toLongLong()); + CPPUNIT_ASSERT_EQUAL((CORBA::LongLong) 1, redhawk::Value((short)1).toLongLong()); + CPPUNIT_ASSERT_EQUAL((CORBA::LongLong) 10000000000, redhawk::Value(1e10).toLongLong()); + CPPUNIT_ASSERT_THROW(redhawk::Value(1e19).toLongLong(), std::range_error); + CPPUNIT_ASSERT_THROW(redhawk::Value(-1e19).toLongLong(), std::range_error); + + // ULongLong conversion from string, boolean and double; range test + CPPUNIT_ASSERT_EQUAL((CORBA::ULongLong) 9223372036854775808UL, redhawk::Value("9223372036854775808").toULongLong()); + CPPUNIT_ASSERT_EQUAL((CORBA::ULongLong) 1, redhawk::Value(true).toULongLong()); + CPPUNIT_ASSERT_EQUAL((CORBA::ULongLong) 500000000000, redhawk::Value(5e11).toULongLong()); + CPPUNIT_ASSERT_THROW(redhawk::Value((short)-1).toULongLong(), std::range_error); + CPPUNIT_ASSERT_THROW(redhawk::Value(1e20).toULongLong(), std::range_error); +} + +void ValueTest::testStringConversion() +{ + // Not defined: TYPE_NONE, TYPE_VALUE_SEQUENCE + // Not tested: TYPE_PROPERTIES + + // Boolean + // NB: 
This would probably be more helpful if it were "true" and "false" + CPPUNIT_ASSERT_EQUAL(std::string("1"), redhawk::Value(true).toString()); + CPPUNIT_ASSERT_EQUAL(std::string("0"), redhawk::Value(false).toString()); + + // Octet (we have to use from_octet because of ambiguity in any insertion; + // see above) + CPPUNIT_ASSERT_EQUAL(std::string("255"), redhawk::Value(CORBA::Any::from_octet(255)).toString()); + + // Integer numeric types + CPPUNIT_ASSERT_EQUAL(std::string("-16000"), redhawk::Value((CORBA::Short) -16000).toString()); + CPPUNIT_ASSERT_EQUAL(std::string("65535"), redhawk::Value((CORBA::UShort) 65535).toString()); + CPPUNIT_ASSERT_EQUAL(std::string("-200000"), redhawk::Value((CORBA::Long) -200000).toString()); + CPPUNIT_ASSERT_EQUAL(std::string("4294967295"), redhawk::Value((CORBA::ULong) 4294967295).toString()); + CPPUNIT_ASSERT_EQUAL(std::string("-5000000000"), redhawk::Value((CORBA::LongLong) -5000000000L).toString()); + CPPUNIT_ASSERT_EQUAL(std::string("9223372036854775808"), redhawk::Value((CORBA::ULongLong) 9223372036854775808UL).toString()); + + // Floating point numeric types--this is a little more fragile because it + // depends on the specific output format + CPPUNIT_ASSERT_EQUAL(std::string("5.125"), redhawk::Value((CORBA::Float) 5.125).toString()); + CPPUNIT_ASSERT_EQUAL(std::string("-2.25e+40"), redhawk::Value((CORBA::Double) -2.25e40).toString()); +} + +void ValueTest::testConstCast() +{ + CORBA::Any any; + const double dval = 1.25; + any <<= dval; + + // Create a const Value alias and check that it matches the Any + const CORBA::Any& const_any = any; + const redhawk::Value& value = redhawk::Value::cast(const_any); + CPPUNIT_ASSERT_EQUAL(redhawk::Value::TYPE_DOUBLE, value.getType()); + CPPUNIT_ASSERT_EQUAL(dval, value.toDouble()); + + // Modify the Any and check that the change is reflected in the Value + const std::string stringval = "value"; + any <<= stringval; + CPPUNIT_ASSERT_EQUAL(redhawk::Value::TYPE_STRING, value.getType()); + 
CPPUNIT_ASSERT_EQUAL(stringval, value.toString()); +} + +void ValueTest::testCast() +{ + CORBA::Any any; + const double dval = 1.25; + any <<= dval; + + // Create a Value alias and check that it matches the Any + redhawk::Value& value = redhawk::Value::cast(any); + CPPUNIT_ASSERT_EQUAL(redhawk::Value::TYPE_DOUBLE, value.getType()); + CPPUNIT_ASSERT_EQUAL(dval, value.toDouble()); + + // Set the value to nil and check that the Any was modified (ergo, they + // are both the same object) + CPPUNIT_ASSERT(!ossie::any::isNull(any)); + CPPUNIT_ASSERT(!value.isNil()); + value = redhawk::Value(); + CPPUNIT_ASSERT(value.isNil()); + CPPUNIT_ASSERT(ossie::any::isNull(any)); +} diff --git a/redhawk/src/testing/cpp/ValueTest.h b/redhawk/src/testing/cpp/ValueTest.h new file mode 100644 index 000000000..a2c836812 --- /dev/null +++ b/redhawk/src/testing/cpp/ValueTest.h @@ -0,0 +1,54 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef VALUETEST_H +#define VALUETEST_H + +#include "CFTest.h" + +class ValueTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(ValueTest); + CPPUNIT_TEST(testConstructor); + CPPUNIT_TEST(testCopyConstructor); + CPPUNIT_TEST(testType); + CPPUNIT_TEST(testNumericConversion); + CPPUNIT_TEST(testStringConversion); + CPPUNIT_TEST(testConstCast); + CPPUNIT_TEST(testCast); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void testConstructor(); + void testCopyConstructor(); + + void testType(); + + void testNumericConversion(); + void testStringConversion(); + + void testConstCast(); + void testCast(); +}; + +#endif // VALUETEST_H diff --git a/redhawk/src/testing/cpp/benchmark_bitops.cpp b/redhawk/src/testing/cpp/benchmark_bitops.cpp new file mode 100644 index 000000000..c7a60ec20 --- /dev/null +++ b/redhawk/src/testing/cpp/benchmark_bitops.cpp @@ -0,0 +1,453 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +class scoped_timer +{ +public: + explicit scoped_timer(std::ostream& stream=std::cout) : + _stream(stream) + { + reset(); + } + + void reset() + { + clock_gettime(CLOCK_MONOTONIC, &_start); + } + + double elapsed() + { + struct timespec now; + clock_gettime(CLOCK_MONOTONIC, &now); + return (now.tv_sec - _start.tv_sec) + 1e-9*(now.tv_nsec - _start.tv_nsec); + } + + ~scoped_timer() + { + _stream << ((uint64_t)(elapsed()*1e6)) << std::endl; + } + +private: + std::ostream& _stream; + struct timespec _start; +}; + +void test_getint(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 1, 5, 8, 11, 16, 20, 24, 32, 64 }; + + unsigned char packed[9]; + for (size_t ii = 0; ii < sizeof(packed); ++ii) { + packed[ii] = random(); + } + + stream << "bits,offset,time(usec)" << std::endl; + uint64_t value = 0; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + for (size_t offset = 0; offset < 8; ++ offset) { + stream << bits << "," << offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + value = redhawk::bitops::getint(packed, offset, bits); + } + } + } +} + +void test_setint(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 1, 5, 8, 11, 16, 20, 24, 32, 64 }; + + unsigned char packed[9]; + std::fill(packed, packed + sizeof(packed), 0); + + uint64_t value = random(); + + stream << "bits,offset,time(usec)" << std::endl; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + for (size_t offset = 0; offset < 8; ++ offset) { + stream << bits << "," << offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + redhawk::bitops::setint(packed, offset, value, bits); + } + } + } +} + +void test_fill(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 
7, 8, 9, 14, 16, 18, 251, 256, 261, 1023, 1024 }; + std::vector buffer; + + stream << "bits,offset,time(usec)" << std::endl; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + buffer.resize((bits + 7) / 8); + for (size_t offset = 0; offset < 8; ++offset) { + + stream << bits << "," << offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + redhawk::bitops::fill(&buffer[0], offset, bits, 1); + } + } + } +} + +void test_pack(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 7, 8, 9, 14, 16, 18, 251, 256, 261, 1023, 1024 }; + + std::vector unpacked; + std::vector packed; + + stream << "bits,offset,time(usec)" << std::endl; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + for (size_t offset = 0; offset < 8; ++offset) { + // Generate random data for unpacked byte array + unpacked.resize(bits); + for (size_t index = 0; index < unpacked.size(); ++ index) { + // Assume normal distribution of evens/odds + unpacked[index] = random() & 1; + } + // Clear the bit array + packed.resize((bits + 7) / 8); + std::fill(packed.begin(), packed.end(), 0); + + stream << bits << "," << offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + redhawk::bitops::pack(&packed[0], offset, &unpacked[0], bits); + } + } + } +} + +void test_unpack(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 7, 8, 9, 14, 16, 18, 251, 256, 261, 1023, 1024 }; + + std::vector packed; + std::vector unpacked; + + stream << "bits,offset,time(usec)" << std::endl; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + for (size_t offset = 0; offset < 8; ++offset) { + // Generate random data for packed bit array + packed.resize((bits + 7) / 8); + for (size_t index = 0; index < packed.size(); ++ index) { + // Assume 
normal distribution of evens/odds + packed[index] = random(); + } + // Clear the unpacked byte array + unpacked.resize(bits); + std::fill(unpacked.begin(), unpacked.end(), 0); + + stream << bits << "," << offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + redhawk::bitops::unpack(&unpacked[0], &packed[0], offset, bits); + } + } + } +} + +void test_popcount(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 7, 8, 9, 14, 16, 18, 251, 256, 261, 1023, 1024 }; + + std::vector packed; + + stream << "bits,offset,time(usec)" << std::endl; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + for (size_t offset = 0; offset < 8; ++offset) { + // Generate random bit data + packed.resize((bits + 7) / 8); + for (size_t index = 0; index < packed.size(); ++ index) { + packed[index] = random(); + } + + stream << bits << "," << offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + redhawk::bitops::popcount(&packed[0], offset, bits); + } + } + } +} + +void test_copy(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 7, 8, 9, 14, 16, 18, 251, 256, 261, 1023, 1024 }; + + std::vector source; + std::vector dest; + + stream << "bits,src offset,dest offset,time(usec)" << std::endl; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + + // Generate random bits for the source; round up to a full byte and add + // enough excess to accomodate offset of up to a byte. 
+ source.resize((bits + 15) / 8); + for (size_t index = 0; index < source.size(); ++ index) { + source[index] = random(); + } + + // Allocate an equal amount of space for the destination + dest.resize((bits + 15) / 8); + std::fill(dest.begin(), dest.end(), 0); + + for (size_t src_offset = 0; src_offset < 8; ++src_offset) { + for (size_t dest_offset = 0; dest_offset < 8; ++dest_offset) { + + stream << bits << "," << src_offset << "," << dest_offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + redhawk::bitops::copy(&dest[0], dest_offset, &source[0], src_offset, bits); + } + } + } + } +} + +void test_compare(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 7, 8, 9, 14, 16, 18, 251, 256, 261, 1023, 1024 }; + + std::vector lhs; + std::vector rhs; + + stream << "bits,left offset,right offset,time(usec)" << std::endl; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + + // Generate random bits for the left hand side; round up to a full byte + // and add enough excess to accomodate offset of up to a byte. 
+ lhs.resize((bits + 15) / 8); + for (size_t index = 0; index < lhs.size(); ++ index) { + lhs[index] = random(); + } + rhs.resize(lhs.size()); + + for (size_t lhs_offset = 0; lhs_offset < 8; ++lhs_offset) { + for (size_t rhs_offset = 0; rhs_offset < 8; ++rhs_offset) { + // Copy to the right hand side + redhawk::bitops::copy(&rhs[0], rhs_offset, &lhs[0], lhs_offset, bits); + + stream << bits << "," + << lhs_offset << "," + << rhs_offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + if (redhawk::bitops::compare(&lhs[0], lhs_offset, &rhs[0], rhs_offset, bits)) { + std::cerr << "FAIL" << std::endl; + } + } + } + } + } +} + +void test_compare_large(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 16, 64, 256, 512, 1024, 2048, 4096 }; + + std::vector lhs; + std::vector rhs; + + stream << "bits,time(usec)" << std::endl; + for (size_t ii = 0; ii < (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + + lhs.resize((bits + 7) / 8); + for (size_t index = 0; index < lhs.size(); ++ index) { + lhs[index] = random(); + } + rhs = lhs; + + stream << bits << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + if (redhawk::bitops::compare(&lhs[0], 0, &rhs[0], 0, bits)) { + std::cerr << "FAIL" << std::endl; + } + } + } +} + +void test_hamming_distance(std::ostream& stream, size_t iterations) +{ + size_t bit_counts[] = { 7, 8, 9, 14, 16, 18, 251, 256, 261, 1023, 1024 }; + const size_t max_bits = bit_counts[(sizeof(bit_counts)/sizeof(bit_counts[0])) - 1]; + + // Generate random bits for both strings + std::vector lhs; + lhs.resize((max_bits + 15) / 8); + for (size_t index = 0; index < lhs.size(); ++ index) { + lhs[index] = random(); + } + + std::vector rhs; + rhs.resize(lhs.size()); + for (size_t index = 0; index < rhs.size(); ++ index) { + rhs[index] = random(); + } + + stream << "bits,left offset,right offset,time(usec)" << std::endl; + for (size_t ii = 0; ii 
< (sizeof(bit_counts) / sizeof(bit_counts[0])); ++ii) { + size_t bits = bit_counts[ii]; + + for (size_t lhs_offset = 0; lhs_offset < 8; ++lhs_offset) { + for (size_t rhs_offset = 0; rhs_offset < 8; ++rhs_offset) { + stream << bits << "," + << lhs_offset << "," + << rhs_offset << ","; + scoped_timer timer(stream); + for (size_t jj = 0; jj < iterations; ++jj) { + redhawk::bitops::hammingDistance(&lhs[0], lhs_offset, &rhs[0], rhs_offset, bits); + } + } + } + } +} + +#define ARRAY_ELEMENTS(x) (sizeof(x) / sizeof(x[0])) + +void test_find(std::ostream& stream, size_t iterations) +{ + const size_t string_sizes[] = { 64, 251, 512, 1025 }; + const size_t pattern_sizes[] = { 7, 16, 35 }; + + stream << "string size,pattern size,time(usec)" << std::endl; + + for (size_t ii = 0; ii < ARRAY_ELEMENTS(string_sizes); ++ii) { + const size_t string_length = string_sizes[ii]; + std::vector string; + string.resize((string_length + 7) / 8); + std::fill(string.begin(), string.end(), 0); + + for (size_t jj = 0; jj < ARRAY_ELEMENTS(pattern_sizes); ++jj) { + const size_t pattern_length = pattern_sizes[jj]; + std::vector pattern; + pattern.resize((pattern_length + 7) / 8); + std::fill(pattern.begin(), pattern.end(), 0xFF); + + stream << string_length << ',' + << pattern_length << ','; + scoped_timer timer(stream); + for (size_t kk = 0; kk < iterations; ++kk) { + redhawk::bitops::find(&string[0], 0, string_length, &pattern[0], 0, pattern_length, pattern_length / 2); + } + } + } +} + +typedef void (*benchmark_func)(std::ostream&,size_t); + +void run_benchmark(const std::string& name, const std::string& suffix, benchmark_func func, size_t iterations) +{ + std::string filename = name; + if (!suffix.empty()) { + filename += "-" + suffix; + } + filename += ".csv"; + + std::ofstream file(filename.c_str()); + std::cout << name << std::endl; + func(file, iterations); +} + +int main(int argc, char* argv[]) +{ + size_t iterations = 100000; + + struct option long_options[] = { + { "suffix", 
required_argument, 0, 0 }, + { 0, 0, 0, 0 } + }; + + typedef std::map FuncTable; + FuncTable functions; + functions["pack"] = &test_pack; + functions["fill"] = &test_fill; + functions["unpack"] = &test_unpack; + functions["getint"] = &test_getint; + functions["setint"] = &test_setint; + functions["popcount"] = &test_popcount; + functions["copy"] = &test_copy; + functions["compare"] = &test_compare; + functions["compare-aligned"] = &test_compare_large; + functions["hamming"] = &test_hamming_distance; + functions["find"] = &test_find; + + int option_index; + std::string suffix; + while (true) { + int status = getopt_long(argc, argv, "", long_options, &option_index); + if (status == '?') { + // Invalid option + return -1; + } else if (status == 0) { + if (option_index == 0) { + suffix = optarg; + } + } else { + // End of arguments + break; + } + } + + if (optind < argc) { + for (int arg = optind; arg < argc; ++arg) { + const std::string name = argv[arg]; + FuncTable::iterator func = functions.find(name); + if (func == functions.end()) { + std::cerr << "unknown test '" << name << "'" << std::endl; + } else { + run_benchmark(name, suffix, func->second, iterations); + } + } + + } else { + for (FuncTable::iterator func = functions.begin(); func != functions.end(); ++func) { + run_benchmark(func->first, suffix, func->second, iterations); + } + } + + return 0; +} diff --git a/redhawk/src/testing/cpp/test_libossiecf.cpp b/redhawk/src/testing/cpp/test_libossiecf.cpp new file mode 100644 index 000000000..4d1cfd7b0 --- /dev/null +++ b/redhawk/src/testing/cpp/test_libossiecf.cpp @@ -0,0 +1,128 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +// log4cxx includes need to follow CorbaUtils, otherwise "ossie/debug.h" will +// issue warnings about the logging macros +#include +#include + +int main(int argc, char* argv[]) +{ + const char* short_options = "vx:"; + struct option long_options[] = { + { "xunit-file", required_argument, 0, 'x' }, + { "log-level", required_argument, 0, 'l' }, + { "log-config", required_argument, 0, 'c' }, + { "verbose", no_argument, 0, 'v' }, + { 0, 0, 0, 0 } + }; + + bool verbose = false; + const char* xunit_file = 0; + const char* log_config = 0; + std::string log_level; + int status; + while ((status = getopt_long(argc, argv, short_options, long_options, NULL)) >= 0) { + switch (status) { + case '?': // Invalid option + return -1; + case 'x': + xunit_file = optarg; + break; + case 'l': + log_level = optarg; + break; + case 'c': + log_config = optarg; + break; + case 'v': + verbose = true; + break; + } + } + + // Initialize the CORBA ORB, which is a prerequisite for some uses of the + // Value and PropertyMap classes + ossie::corba::OrbInit(argc, argv, false); + + // If a log4j configuration file was given, read it. 
+ if (log_config) { + log4cxx::PropertyConfigurator::configure(log_config); + } else { + // Set up a simple configuration that logs on the console. + log4cxx::BasicConfigurator::configure(); + } + + // Apply the log level (can override config file). + log4cxx::LevelPtr level = log4cxx::Level::toLevel(log_level, log4cxx::Level::getInfo()); + log4cxx::Logger::getRootLogger()->setLevel(level); + + // Create the test runner. + CppUnit::TextTestRunner runner; + + // Enable verbose output, displaying the name of each test as it runs. + if (verbose) { + runner.eventManager().addListener(new CppUnit::BriefTestProgressListener()); + } + + // Use a compiler outputter instead of the default text one. + runner.setOutputter(new CppUnit::CompilerOutputter(&runner.result(), std::cerr)); + + // Get the top level suite from the registry. + CppUnit::Test* suite = CppUnit::TestFactoryRegistry::getRegistry().makeTest(); + runner.addTest(suite); + + // If an argument was given, assume it was the name of a test or suite. + std::string test_path; + if (optind < argc) { + test_path = argv[optind]; + } + + // Run the tests (don't pause, write output, don't print progress). + bool success = runner.run(test_path, false, true, false); + + // Write XML file, if requested. + if (xunit_file) { + std::ofstream file(xunit_file); + CppUnit::XmlOutputter xml_outputter(&runner.result(), file); + xml_outputter.write(); + } + + // Shut down the CORBA orb, just for cleanliness' sake. + ossie::corba::OrbShutdown(true); + + // Return error code 1 if the one of test failed. + return success ? 
0 : 1; +} diff --git a/redhawk/src/testing/devmgr_config.cfg b/redhawk/src/testing/devmgr_config.cfg new file mode 100644 index 000000000..b2dc5171f --- /dev/null +++ b/redhawk/src/testing/devmgr_config.cfg @@ -0,0 +1,6 @@ +log4j.rootLogger=TRACE,FILE +# Direct log messages to FILE +log4j.appender.FILE=org.apache.log4j.FileAppender +log4j.appender.FILE.File=../foo/bar/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601}: %p:%c - %m [%F:%L]%n diff --git a/redhawk/src/testing/hierarchical_log.cfg b/redhawk/src/testing/hierarchical_log.cfg new file mode 100644 index 000000000..6f09a690e --- /dev/null +++ b/redhawk/src/testing/hierarchical_log.cfg @@ -0,0 +1,15 @@ +log4j.rootLogger=WARN,FILE +# Direct log messages to FILE +log4j.appender.FILE=org.apache.log4j.FileAppender +log4j.appender.FILE.File=foo/bar/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601}: %p:%c - %m [%F:%L]%n + +log4j.appender.logger_test=org.apache.log4j.FileAppender +log4j.appender.logger_test.Append=true +log4j.appender.logger_test.File=logger_test.log +log4j.appender.logger_test.layout=org.apache.log4j.PatternLayout +log4j.appender.logger_test.layout.ConversionPattern=%d{ISO8601}: %p:%c - %m [%F:%L]%n + +log4j.category.logger_1.namespace.lower=TRACE, logger_test +log4j.additivity.logger_1.namespace.lower=false diff --git a/redhawk/src/testing/high_thresh.cfg b/redhawk/src/testing/high_thresh.cfg new file mode 100644 index 000000000..44fc7c684 --- /dev/null +++ b/redhawk/src/testing/high_thresh.cfg @@ -0,0 +1,7 @@ +log4j.rootLogger=WARN,FILE +# Direct log messages to FILE +log4j.appender.FILE=org.apache.log4j.FileAppender +log4j.appender.FILE.File=foo/bar/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout +log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601}: %p:%c{1} - %m [%F:%L]%n + diff --git a/redhawk/src/testing/java/.gitignore 
b/redhawk/src/testing/java/.gitignore new file mode 100644 index 000000000..44d76b347 --- /dev/null +++ b/redhawk/src/testing/java/.gitignore @@ -0,0 +1 @@ +test_ossie diff --git a/redhawk/src/testing/java/AllTests.java b/redhawk/src/testing/java/AllTests.java new file mode 100644 index 000000000..93d26ab8e --- /dev/null +++ b/redhawk/src/testing/java/AllTests.java @@ -0,0 +1,30 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import org.junit.runner.RunWith; +import org.junit.runners.Suite; +import org.junit.runners.Suite.SuiteClasses; + +@RunWith(Suite.class) +@SuiteClasses({ + MessagingTest.class +}) +public class AllTests { +} diff --git a/redhawk/src/testing/java/Main.java b/redhawk/src/testing/java/Main.java new file mode 100644 index 000000000..a026b98c8 --- /dev/null +++ b/redhawk/src/testing/java/Main.java @@ -0,0 +1,150 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +import org.apache.log4j.BasicConfigurator; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.PropertyConfigurator; + +import org.junit.Test; +import org.junit.runner.JUnitCore; +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.Request; + +import utils.ChainFilter; +import utils.TestFilter; +import utils.TextListener; + +public class Main { + + public static Description getTestDescription(String target) throws ClassNotFoundException, NoSuchMethodException + { + // Try to see if it's a class with tests first + try { + return getClassDescription(target); + } catch (ClassNotFoundException exc) { + // The target might be "class.method" + } + + // Split package/class from method name + int pos = target.lastIndexOf('.'); + if (pos < 0) { + // No dots, must be an invalid class + throw new ClassNotFoundException(target); + } + String suite = target.substring(0, pos); + String name = target.substring(pos+1); + + // Class and method lookup may throw exceptions, but it's up to the + // caller to handle them + Class clazz = Class.forName(suite); + 
clazz.getMethod(name); + return Description.createTestDescription(clazz, name); + } + + public static Description getClassDescription(String target) throws ClassNotFoundException + { + Class clazz = Class.forName(target); + + // Create a suite description + Description desc = Description.createSuiteDescription(clazz); + for (Method method : clazz.getMethods()) { + // Find all methods that are annotated as tests + if (method.getAnnotation(Test.class) != null) { + desc.addChild(Description.createTestDescription(clazz, method.getName(), method.getAnnotations())); + } + } + + return desc; + } + + public static void main(String[] args) { + List tests = new ArrayList<>(); + + boolean verbose = false; + Level log_level = null; + String log_config = null; + + Iterator iter = Arrays.asList(args).iterator(); + while (iter.hasNext()) { + String arg = iter.next(); + if (arg.startsWith("-")) { + // Option argument + if (arg.equals("--log-level")) { + log_level = Level.toLevel(iter.next()); + } else if (arg.equals("--log-config")) { + log_config = iter.next(); + } else if (arg.equals("-v") || arg.equals("--verbose")) { + verbose = true; + } else { + System.err.println("Unrecognized option \"" + arg + "\""); + System.exit(1); + } + } else { + // First non-option argument, add remaining arguments to the + // list of tests + tests.add(arg); + while (iter.hasNext()) { + tests.add(iter.next()); + } + } + } + + if (log_config != null) { + PropertyConfigurator.configure(log_config); + } else { + BasicConfigurator.configure(); + if (log_level == null) { + log_level = Level.INFO; + } + } + + if (log_level != null) { + Logger.getRootLogger().setLevel(log_level); + } + + Request request = Request.aClass(AllTests.class); + if (!tests.isEmpty()) { + ChainFilter filter = new ChainFilter(); + for (String test : tests) { + try { + Description desc = getTestDescription(test); + filter.addFilter(new TestFilter(desc)); + } catch (ClassNotFoundException|NoSuchMethodException exc) { + 
System.err.println("ERROR: No test '" + test + "'"); + System.exit(1); + } + } + request = request.filterWith(filter); + } + + JUnitCore runner = new JUnitCore(); + runner.addListener(new TextListener(verbose)); + Result result = runner.run(request); + System.exit(result.wasSuccessful() ? 0 : 1); + } +} diff --git a/redhawk/src/testing/java/Makefile.am b/redhawk/src/testing/java/Makefile.am new file mode 100644 index 000000000..37a589fc1 --- /dev/null +++ b/redhawk/src/testing/java/Makefile.am @@ -0,0 +1,45 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +include $(top_srcdir)/aminclude/jarfile.am + +TESTS = test_ossie +check_SCRIPTS = test_ossie + +noinst_java_JARFILES = ossie-tests.jar + +ossie_tests_jar_SOURCE = Main.java +ossie_tests_jar_SOURCE += AllTests.java +ossie_tests_jar_SOURCE += MessagingTest.java +ossie_tests_jar_SOURCE += PortManager.java +ossie_tests_jar_SOURCE += utils/Assert.java +ossie_tests_jar_SOURCE += utils/ChainFilter.java +ossie_tests_jar_SOURCE += utils/TestFilter.java +ossie_tests_jar_SOURCE += utils/TextListener.java + +ossie_tests_jar_CLASSPATH=$(OSSIE_CLASSPATH):$(JUNIT_CLASSPATH):. 
+ossie_tests_jar_JAVACFLAGS = -g -Xlint + +test_ossie : ossie-tests.jar Makefile + @echo "#!/bin/bash" > $@ + @echo "exec java -cp ossie-tests.jar:$(ossie_tests_jar_CLASSPATH) Main \$$*" >> $@ + @chmod +x $@ + +CLEANFILES = test_ossie diff --git a/redhawk/src/testing/java/MessagingTest.java b/redhawk/src/testing/java/MessagingTest.java new file mode 100644 index 000000000..e6b697bbe --- /dev/null +++ b/redhawk/src/testing/java/MessagingTest.java @@ -0,0 +1,397 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.JUnit4; + +import org.ossie.events.MessageSupplierPort; +import org.ossie.events.MessageConsumerPort; +import org.ossie.events.MessageListener; +import org.ossie.properties.*; + +import utils.Assert; + +@RunWith(JUnit4.class) +public class MessagingTest { + + public static class basic_message_struct extends StructDef { + public final LongProperty value = + new LongProperty( + "basic_message::value", //id + "value", //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + + public basic_message_struct(Integer value) { + this(); + this.value.setValue(value); + } + + public void set_value(Integer value) { + this.value.setValue(value); + } + + public Integer get_value() { + return this.value.getValue(); + } + + public basic_message_struct() { + addElement(this.value); + } + + public String getId() { + return "basic_message"; + } + }; + + public static class test_message_struct extends StructDef { + public final FloatProperty item_float = + new FloatProperty( + "test_message::item_float", //id + "item_float", //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + public final StringProperty item_string = + new StringProperty( + "test_message::item_string", //id + "item_string", //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + + public test_message_struct(Float item_float, String item_string) { + this(); + this.item_float.setValue(item_float); + this.item_string.setValue(item_string); + } + + public void set_item_float(Float item_float) { + this.item_float.setValue(item_float); + } + + public Float get_item_float() { + return this.item_float.getValue(); 
+ } + + public void set_item_string(String item_string) { + this.item_string.setValue(item_string); + } + + public String get_item_string() { + return this.item_string.getValue(); + } + + public test_message_struct() { + addElement(this.item_float); + addElement(this.item_string); + } + + public String getId() { + return "test_message"; + } + }; + + public static class MessageReceiver implements MessageListener + { + public void messageReceived(String msgId, E msgData) + { + messages.add(msgData); + } + + public List messages = new ArrayList<>(); + } + + private static class GenericReceiver implements MessageListener + { + public void messageReceived(String messageId, org.omg.CORBA.Any messageData) + { + messages.add(new CF.DataType(messageId, messageData)); + } + + List messages = new ArrayList<>(); + } + + @Before + public void setUp() throws Exception + { + org.omg.CORBA.ORB orb = org.ossie.corba.utils.Init(new String[0], null); + + _supplier = new MessageSupplierPort("supplier"); + _portManager.addPort(_supplier); + + _consumer = new MessageConsumerPort("consumer"); + org.omg.CORBA.Object objref = _portManager.addPort(_consumer); + + _supplier.connectPort(objref, "connection_1"); + + // Simulate component start + _portManager.start(); + } + + @After + public void tearDown() + { + // Simulate component stop/shutdown + _portManager.stop(); + _portManager.releaseObject(); + } + + @Test + public void testSendMessage() + { + MessageReceiver receiver = new MessageReceiver<>(); + _consumer.registerMessage("basic_message", basic_message_struct.class, receiver); + + basic_message_struct msg = new basic_message_struct(1); + + // Send the message and check that it's received. Currently, the + // consumer's message handler is called directly from sendMessage, so + // this is a synchronous operation; however, if at some point in the + // future, threaded message dispatch is added, this test will need to + // be revisted. 
+ Assert.assertEquals(0, receiver.messages.size()); + _supplier.sendMessage(msg); + Assert.assertEquals(1, receiver.messages.size()); + + Assert.assertEquals("basic_message", receiver.messages.get(0).getId()); + Assert.assertEquals(1, (int) receiver.messages.get(0).get_value()); + } + + @Test + public void testSendMessageConnectionId() throws Exception + { + // Create and connect a second consumer port + MessageConsumerPort consumer_2 = new MessageConsumerPort("consumer_2"); + org.omg.CORBA.Object objref = _portManager.addPort(consumer_2); + _supplier.connectPort(objref, "connection_2"); + + // Set up 2 receivers to distinguish which connection received a + // message + MessageReceiver receiver_1 = new MessageReceiver<>(); + _consumer.registerMessage("basic_message", basic_message_struct.class, receiver_1); + + MessageReceiver receiver_2 = new MessageReceiver<>(); + consumer_2.registerMessage("basic_message", basic_message_struct.class, receiver_2); + + basic_message_struct msg = new basic_message_struct(1); + + // Target the first connection (see above re: threading) + Assert.assertEquals(0, receiver_1.messages.size()); + Assert.assertEquals(0, receiver_2.messages.size()); + _supplier.sendMessage(msg, "connection_1"); + Assert.assertEquals(1, receiver_1.messages.size()); + Assert.assertEquals(0, receiver_2.messages.size()); + + // Target the second connection this time + msg = new basic_message_struct(2); + _supplier.sendMessage(msg, "connection_2"); + Assert.assertEquals(1, receiver_1.messages.size()); + Assert.assertEquals(1, receiver_2.messages.size()); + + // Target both connections + msg = new basic_message_struct(3); + _supplier.sendMessage(msg); + Assert.assertEquals(2, receiver_1.messages.size()); + Assert.assertEquals(2, receiver_2.messages.size()); + + // Target invalid connection + final basic_message_struct msg_4 = new basic_message_struct(4); + Assert.assertThrows(IllegalArgumentException.class, () -> _supplier.sendMessage(msg_4, "bad_connection")); + 
Assert.assertEquals(2, receiver_1.messages.size()); + Assert.assertEquals(2, receiver_2.messages.size()); + } + + @Test + public void testSendMessages() + { + // Use a generic message receiver that can handle any StructDef, and + // register it for both message types + MessageReceiver receiver = new MessageReceiver<>(); + _consumer.registerMessage("basic_message", basic_message_struct.class, receiver); + _consumer.registerMessage("test_message", test_message_struct.class, receiver); + + // Send two different message types in one batch + basic_message_struct msg_1 = new basic_message_struct(1); + test_message_struct msg_2 = new test_message_struct(2.0f, "two"); + basic_message_struct msg_3 = new basic_message_struct(3); + List messages = Arrays.asList(msg_1, msg_2, msg_3); + + Assert.assertEquals(0, receiver.messages.size()); + _supplier.sendMessages(messages); + + // StructDef implements .equals(), so it will catch if the fields are + // different between input and output, and they are serialized through + // Anys so the references are different + Assert.assertEquals(3, receiver.messages.size()); + Assert.assertEquals(msg_1, receiver.messages.get(0)); + Assert.assertEquals(msg_2, receiver.messages.get(1)); + Assert.assertEquals(msg_3, receiver.messages.get(2)); + + } + + @Test + public void testSendMessagesConnectionId() throws Exception + { + // Create and connect a second consumer port + MessageConsumerPort consumer_2 = new MessageConsumerPort("consumer_2"); + org.omg.CORBA.Object objref = _portManager.addPort(consumer_2); + _supplier.connectPort(objref, "connection_2"); + + // Set up 2 receivers to distinguish which connection received a + // message + MessageReceiver receiver_1 = new MessageReceiver<>(); + _consumer.registerMessage("basic_message", basic_message_struct.class, receiver_1); + + MessageReceiver receiver_2 = new MessageReceiver<>(); + consumer_2.registerMessage("basic_message", basic_message_struct.class, receiver_2); + + // Build a list of 
messages and send to the first connection + List messages_1 = Arrays.asList(new basic_message_struct(1), + new basic_message_struct(2)); + Assert.assertEquals(0, receiver_1.messages.size()); + Assert.assertEquals(0, receiver_2.messages.size()); + _supplier.sendMessages(messages_1, "connection_1"); + Assert.assertEquals(2, receiver_1.messages.size()); + Assert.assertEquals(0, receiver_2.messages.size()); + + // Target the second connection this time with a different set of messages + List messages_2 = Arrays.asList(new basic_message_struct(3), + new basic_message_struct(4), + new basic_message_struct(5)); + _supplier.sendMessages(messages_2, "connection_2"); + Assert.assertEquals(2, receiver_1.messages.size()); + Assert.assertEquals(3, receiver_2.messages.size()); + + // Target both connections + _supplier.sendMessages(messages_1); + Assert.assertEquals(4, receiver_1.messages.size()); + Assert.assertEquals(5, receiver_2.messages.size()); + + // Target invalid connection + final List messages_3 = Arrays.asList(new basic_message_struct(6)); + Assert.assertThrows(IllegalArgumentException.class, () -> _supplier.sendMessages(messages_3, "bad_connection")); + Assert.assertEquals(4, receiver_1.messages.size()); + Assert.assertEquals(5, receiver_2.messages.size()); + } + + @Test + public void testPush() + { + GenericReceiver receiver = new GenericReceiver(); + _consumer.registerMessage(receiver); + + // Pack the messages ourselves + CF.DataType[] messages_out = new CF.DataType[3]; + messages_out[0] = new CF.DataType("first", AnyUtils.toAny(100, "long")); + messages_out[1] = new CF.DataType("second", AnyUtils.toAny("some text", "string")); + messages_out[2] = new CF.DataType("third", AnyUtils.toAny(0.25, "double")); + org.omg.CORBA.Any any = org.omg.CORBA.ORB.init().create_any(); + CF.PropertiesHelper.insert(any, messages_out); + _supplier.push(any); + + Assert.assertEquals(3, receiver.messages.size()); + Assert.assertEquals("first", receiver.messages.get(0).id); + 
Assert.assertEquals(100, receiver.messages.get(0).value.extract_long()); + Assert.assertEquals("second", receiver.messages.get(1).id); + Assert.assertEquals("some text", receiver.messages.get(1).value.extract_string()); + Assert.assertEquals("third", receiver.messages.get(2).id); + Assert.assertEquals(0.25, receiver.messages.get(2).value.extract_double(), 0.0); + } + + @Test + public void testPushConnectionId() throws Exception + { + // Create and connect a second consumer port + MessageConsumerPort consumer_2 = new MessageConsumerPort("consumer_2"); + org.omg.CORBA.Object objref = _portManager.addPort(consumer_2); + _supplier.connectPort(objref, "connection_2"); + + // Set up 2 receivers to distinguish which connection received a + // message + GenericReceiver receiver_1 = new GenericReceiver(); + _consumer.registerMessage(receiver_1); + + GenericReceiver receiver_2 = new GenericReceiver(); + consumer_2.registerMessage(receiver_2); + + // Pack the messages ourselves and target the first connection + CF.DataType[] messages_1 = new CF.DataType[3]; + messages_1[0] = new CF.DataType("first", AnyUtils.toAny(100, "long")); + messages_1[1] = new CF.DataType("second", AnyUtils.toAny("some text", "string")); + messages_1[2] = new CF.DataType("third", AnyUtils.toAny(0.25, "double")); + org.omg.CORBA.Any any = org.omg.CORBA.ORB.init().create_any(); + CF.PropertiesHelper.insert(any, messages_1); + _supplier.push(any, "connection_1"); + + Assert.assertEquals(3, receiver_1.messages.size()); + Assert.assertEquals(0, receiver_2.messages.size()); + + // Target the second connection with a different set of messages + CF.DataType[] messages_2 = new CF.DataType[2]; + messages_2[0] = new CF.DataType("one", AnyUtils.toAny("abc", "string")); + messages_2[1] = new CF.DataType("two", AnyUtils.toAny(false, "boolean")); + any = org.omg.CORBA.ORB.init().create_any(); + CF.PropertiesHelper.insert(any, messages_2); + _supplier.push(any, "connection_2"); + + Assert.assertEquals(3, 
receiver_1.messages.size()); + Assert.assertEquals(2, receiver_2.messages.size()); + + // Target both connections with yet another set of messages + CF.DataType[] messages_3 = new CF.DataType[1]; + messages_3[0] = new CF.DataType("all", AnyUtils.toAny(3, "long")); + any = org.omg.CORBA.ORB.init().create_any(); + CF.PropertiesHelper.insert(any, messages_3); + _supplier.push(any); + + Assert.assertEquals(4, receiver_1.messages.size()); + Assert.assertEquals(3, receiver_2.messages.size()); + + // Target invalid connection + CF.DataType[] messages_4 = new CF.DataType[1]; + messages_4[0] = new CF.DataType("bad", AnyUtils.toAny("bad connection", "string")); + final org.omg.CORBA.Any any_4 = org.omg.CORBA.ORB.init().create_any(); + CF.PropertiesHelper.insert(any_4, messages_4); + Assert.assertThrows(IllegalArgumentException.class, () -> _supplier.push(any_4, "bad_connection")); + + Assert.assertEquals(4, receiver_1.messages.size()); + Assert.assertEquals(3, receiver_2.messages.size()); + } + + protected PortManager _portManager = new PortManager(); + + protected MessageSupplierPort _supplier; + protected MessageConsumerPort _consumer; +} diff --git a/redhawk/src/testing/java/PortManager.java b/redhawk/src/testing/java/PortManager.java new file mode 100644 index 000000000..2971baf79 --- /dev/null +++ b/redhawk/src/testing/java/PortManager.java @@ -0,0 +1,109 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +import java.util.ArrayList; +import java.util.List; + +import org.ossie.component.StartablePort; + +/** + * Test helper class to manage a set of ports as they might be used in a + * component, but for use in unit test fixtures. + */ + public class PortManager { + + public org.omg.CORBA.Object addPort(Object port) throws org.omg.CORBA.UserException + { + _ports.add(port); + + org.omg.CORBA.ORB orb = org.ossie.corba.utils.Orb(); + if (port instanceof org.omg.PortableServer.Servant) { + return activatePort((org.omg.PortableServer.Servant) port); + } else if (port instanceof omnijni.Servant) { + return ((omnijni.Servant) port)._this_object(orb); + } else { + return null; + } + } + + private org.omg.CORBA.Object activatePort(org.omg.PortableServer.Servant servant) throws org.omg.CORBA.UserException + { + org.ossie.corba.utils.RootPOA().activate_object(servant); + return servant._this_object(); + } + + public void start() + { + startPorts(); + } + + public void stop() + { + stopPorts(); + } + + public void releaseObject() + { + releasePorts(); + _ports.clear(); + } + + private void startPorts() + { + for (Object port : _ports) { + if (port instanceof StartablePort) { + ((StartablePort) port).startPort(); + } + } + } + + private void stopPorts() + { + for (Object port : _ports) { + if (port instanceof StartablePort) { + ((StartablePort) port).stopPort(); + } + } + } + + private void releasePorts() + { + for (Object port : _ports) { + if (port instanceof org.omg.PortableServer.Servant) { + deactivatePort((org.omg.PortableServer.Servant) port); + } 
else if (port instanceof omnijni.Servant) { + ((omnijni.Servant) port)._deactivate(); + } + } + _ports.clear(); + } + + private void deactivatePort(org.omg.PortableServer.Servant servant) + { + try { + org.omg.PortableServer.POA poa = org.ossie.corba.utils.RootPOA(); + poa.deactivate_object(poa.servant_to_id(servant)); + } catch (Exception exc) { + // Ignore errors + } + } + + private List _ports = new ArrayList<>(); +} diff --git a/redhawk/src/testing/java/utils/Assert.java b/redhawk/src/testing/java/utils/Assert.java new file mode 100644 index 000000000..f267896ac --- /dev/null +++ b/redhawk/src/testing/java/utils/Assert.java @@ -0,0 +1,43 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +/** + * Extended JUnit assertion class that adds an assert for checking that an + * exception is thrown by an expression. 
+ */ +public class Assert extends org.junit.Assert { + private Assert() + { + } + + public static void assertThrows(Class exception, RunnableWithException runnable) + { + try { + runnable.run(); + } catch (Exception exc) { + assertTrue("expected exception:<"+ exception.getName() + + "> but got:<" + exc.getClass().getName() + ">", + exception.isInstance(exc)); + return; + } + fail("exception not raised"); + } +}; diff --git a/redhawk/src/testing/java/utils/ChainFilter.java b/redhawk/src/testing/java/utils/ChainFilter.java new file mode 100644 index 000000000..d9eb8d7d4 --- /dev/null +++ b/redhawk/src/testing/java/utils/ChainFilter.java @@ -0,0 +1,63 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.runner.Description; +import org.junit.runner.manipulation.Filter; + +/** + * JUnit test filter that combines multiple filters, selecting any test that + * satisfies one of the filters. 
+ */ +public class ChainFilter extends Filter { + public void addFilter(Filter filter) + { + filters.add(filter); + } + + @Override + public boolean shouldRun(Description description) + { + for (Filter filter : this.filters) { + if (filter.shouldRun(description)) { + return true; + } + } + return false; + } + + @Override + public String describe() + { + String result = ""; + for (Filter filter : this.filters) { + if (!result.isEmpty()) { + result = result + ", "; + } + result += filter.describe(); + } + return "[" + result + "]"; + } + + private List filters = new ArrayList<>(); +} diff --git a/redhawk/src/testing/java/utils/RunnableWithException.java b/redhawk/src/testing/java/utils/RunnableWithException.java new file mode 100644 index 000000000..7bdb41b8b --- /dev/null +++ b/redhawk/src/testing/java/utils/RunnableWithException.java @@ -0,0 +1,30 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +/** + * Equivalent to java.lang.Runnable, but declared to throw all exceptions. This + * is useful for testing that the correct exception is thrown without going + * through a lot of boilerplate try/catch or JUnit's expected exceptions. 
+ */ +@FunctionalInterface +public interface RunnableWithException { + public void run() throws Exception; +} diff --git a/redhawk/src/testing/java/utils/TestFilter.java b/redhawk/src/testing/java/utils/TestFilter.java new file mode 100644 index 000000000..458167cb5 --- /dev/null +++ b/redhawk/src/testing/java/utils/TestFilter.java @@ -0,0 +1,69 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package utils; + +import org.junit.runner.Description; +import org.junit.runner.manipulation.Filter; + +/** + * JUnit test filter that selects a single test, or a suite of tests from a + * single class. 
+ */ +public class TestFilter extends Filter { + public TestFilter(Description description) + { + test = description; + } + + @Override + public boolean shouldRun(Description description) + { + // Suite-to-suite or test-to-test comparison + if (test.equals(description)) { + return true; + } + if (description.isTest()) { + for (Description child : test.getChildren()) { + if (child.equals(description)) { + return true; + } + } + } else { + for (Description child : description.getChildren()) { + if (shouldRun(child)) { + return true; + } + } + } + return false; + } + + @Override + public String describe() + { + if (test.isTest()) { + return "Method " + test.getDisplayName(); + } else { + return "Class " + test.getDisplayName(); + } + } + + private Description test; +} diff --git a/redhawk/src/testing/java/utils/TextListener.java b/redhawk/src/testing/java/utils/TextListener.java new file mode 100644 index 000000000..533643c97 --- /dev/null +++ b/redhawk/src/testing/java/utils/TextListener.java @@ -0,0 +1,107 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +package utils; + +import java.io.PrintStream; +import java.text.NumberFormat; + +import org.junit.runner.Description; +import org.junit.runner.Result; +import org.junit.runner.notification.Failure; +import org.junit.runner.notification.RunListener; + +/** + * JUnit RunListener to provide similar output to CppUnit and Python: mainly, + * printing the name of each test as it runs with verbose mode enabled. + */ +public class TextListener extends RunListener { + public TextListener(boolean verbose) + { + this.verbose = verbose; + this.stream = System.out; + this.testPassed = false; + } + + public void testRunFinished(Result result) + { + stream.println(); + stream.println("Time: " + elapsedTimeAsString(result.getRunTime())); + + for (Failure failure : result.getFailures()) { + stream.println(failure.getTestHeader()); + stream.println(failure.getTrace()); + } + + if (result.wasSuccessful()) { + stream.println("OK (" + result.getRunCount() + " tests)"); + } else { + stream.println("FAILURES!!!"); + stream.println("Tests run: " + result.getRunCount() + ", Failures: " + result.getFailureCount()); + } + } + + public void testStarted(Description description) + { + if (verbose) { + stream.print(description.getDisplayName() + " : "); + } else { + stream.print("."); + } + testPassed = true; + } + + public void testIgnored(Description description) + { + if (verbose) { + stream.print("IGNORED"); + } else { + stream.print("I"); + } + testPassed = false; + } + + public void testFailure(Failure failure) + { + if (verbose) { + stream.print("FAILED"); + } else { + stream.print("F"); + } + testPassed = false; + } + + public void testFinished(Description description) + { + if (verbose) { + if (testPassed) { + stream.print("OK"); + } + stream.println(); + } + } + + protected String elapsedTimeAsString(long runTime) { + return NumberFormat.getInstance().format((double) runTime / 1000); + } + + private boolean verbose; + private PrintStream stream; + private boolean testPassed; +} 
diff --git a/redhawk/src/testing/loggers/syncappender/.gitignore b/redhawk/src/testing/loggers/syncappender/.gitignore new file mode 100644 index 000000000..bf5638792 --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/.gitignore @@ -0,0 +1,7 @@ +Makefile.in +Makefile +configure +config.* +appender_test +proc_log +cleanmem diff --git a/redhawk/src/testing/loggers/syncappender/Makefile.am b/redhawk/src/testing/loggers/syncappender/Makefile.am new file mode 100644 index 000000000..93e9ff386 --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/Makefile.am @@ -0,0 +1,58 @@ +# Rules for the test code (use `make check` to execute) +ACLOCAL_AMFLAGS = -I m4 -I ../../../acinclude +TESTS = appender_test +noinst_PROGRAMS = cleanmem proc_log +check_PROGRAMS = $(TESTS) +logger_top=../../../base/framework/logging +logger_libsrc=$(logger_top) +#logger_lib_INC=-I$(logger_libsrc) +logger_lib_SRC=../../../base/framework/logging/RH_SyncRollingAppender.cpp +logger_lib_INC=-I../../../base/framework/logging + +appender_test_SOURCES = appender_test.cpp test_suites.cpp $(logger_lib_SRC) + +appender_test_boost_ldadd=$(BOOST_LDFLAGS) $(BOOST_SYSTEM_LIB) -lboost_filesystem +appender_test_log4cxx_ldadd=-llog4cxx +appender_test_LIB = +appender_test_CXXFLAGS = $(CPPUNIT_CFLAGS) -I$(logger_top)/include $(logger_lib_INC) $(logger_idl_INC) -I/usr/include/apr-1 $(BOOST_CPPFLAGS) -DDEBUG_ON -DHAVE_LOG4CXX +appender_test_LDADD = $(appender_test_LIB) $(logger_idl_LIB) $(appender_test_boost_ldadd) $(appender_test_log4cxx_ldadd) +appender_test_LDFLAGS = $(CPPUNIT_LIBS) +appender_test_LDFLAGS += -ldl + + +proc_log_SOURCES = proc_log.cpp $(logger_lib_SRC) +proc_log_boost_ldadd=$(BOOST_LDFLAGS) $(BOOST_SYSTEM_LIB) -lboost_filesystem +proc_log_log4cxx_ldadd=-llog4cxx +proc_log_LIB = +proc_log_CXXFLAGS = $(CPPUNIT_CFLAGS) -I$(logger_top)/include $(logger_lib_INC) $(logger_idl_INC) -I/usr/include/apr-1 $(BOOST_CPPFLAGS) -DDEBUG_ON -DHAVE_LOG4CXX +proc_log_LDADD = $(proc_log_LIB) 
$(logger_idl_LIB) $(proc_log_boost_ldadd) $(proc_log_log4cxx_ldadd) +proc_log_LDFLAGS = $(CPPUNIT_LIBS) +proc_log_LDFLAGS += -ldl + + +cleanmem_SOURCES = cleanmem.cpp +cleanmem_boost_ldadd=$(BOOST_LDFLAGS) $(BOOST_SYSTEM_LIB) -lboost_filesystem +cleanmem_log4cxx_ldadd=-llog4cxx +cleanmem_LIB = +cleanmem_CXXFLAGS = $(CPPUNIT_CFLAGS) -I$(logger_top)/include $(logger_lib_INC) $(logger_idl_INC) -I/usr/include/apr-1 $(BOOST_CPPFLAGS) -DDEBUG_ON -DHAVE_LOG4CXX +cleanmem_LDADD = $(cleanmem_LIB) $(logger_idl_LIB) $(cleanmem_boost_ldadd) $(cleanmem_log4cxx_ldadd) +cleanmem_LDFLAGS = $(CPPUNIT_LIBS) +cleanmem_LDFLAGS += -ldl + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + rm -f COPYING INSTALL *~ + rm -rf MP_RedhawkTest* + + diff --git a/redhawk/src/testing/loggers/syncappender/appender_test.cpp b/redhawk/src/testing/loggers/syncappender/appender_test.cpp new file mode 100644 index 000000000..6a6284c8e --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/appender_test.cpp @@ -0,0 +1,50 @@ +#include +#include +#include +#include "log4cxx/logger.h" +#include "log4cxx/propertyconfigurator.h" +#include "log4cxx/helpers/exception.h" +#include "logtestdebug.h" + + +LOGGER_CFG("LOGGER-CFG-TEST"); + +int main(int argc, char* argv[]) +{ + + std::string cfgname("log4j.stdout"); + std::string testname(""); + if ( argc > 1 ) { + testname = argv[1]; + } + + // Set up a simple configuration that logs on the console. 
+ log4cxx::PropertyConfigurator::configure(cfgname.c_str()); + + // Get the top level suite from the registry + CppUnit::Test *suite; + if ( testname != "" ) { + CppUnit::TestFactoryRegistry &registry = CppUnit::TestFactoryRegistry::getRegistry(testname); + suite = registry.makeTest(); + } + else { + CppUnit::TestFactoryRegistry &registry = CppUnit::TestFactoryRegistry::getRegistry(); + registry.registerFactory( &CppUnit::TestFactoryRegistry::getRegistry("test_three") ); + suite = registry.makeTest(); + } + + // Adds the test to the list of tests to run + CppUnit::TextUi::TestRunner runner; + runner.addTest( suite ); + + // Change the default outputter to a compiler error format outputter + runner.setOutputter( new CppUnit::CompilerOutputter( &runner.result(), + std::cerr ) ); + // Run the tests. + bool wasSucessful = runner.run(); + + LOGGER_END("LOGGER-CFG-TEST"); + + // Return error code 1 if one of the tests failed. + return wasSucessful ? 0 : 1; +} diff --git a/redhawk/src/testing/loggers/syncappender/cleanmem.cpp b/redhawk/src/testing/loggers/syncappender/cleanmem.cpp new file mode 100644 index 000000000..fca7c1747 --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/cleanmem.cpp @@ -0,0 +1,33 @@ +#include +#include +#include +#include +#include + + +std::string clean_fname( const std::string &fname ) { + return boost::replace_all_copy( fname, "/", "-" ); +} + +int main (int argc, char **argv ) +{ + + if ( argc < 2 ) { + printf("usage cleanmem \n"); + return -1; + } + + using namespace boost::interprocess; + try{ + //Erase previous shared memory + std::string fname = clean_fname(argv[1]); + shared_memory_object::remove(fname.c_str()); + + } + catch(interprocess_exception &ex){ + std::cout << ex.what() << std::endl; + return -1; + } + + return 0; +} diff --git a/redhawk/src/testing/loggers/syncappender/configure.ac b/redhawk/src/testing/loggers/syncappender/configure.ac new file mode 100644 index 000000000..af67720af --- /dev/null +++ 
b/redhawk/src/testing/loggers/syncappender/configure.ac @@ -0,0 +1,18 @@ + + +AC_INIT(Logging_Test,0.1) +AC_CONFIG_MACRO_DIR([m4]) +AM_INIT_AUTOMAKE([foreign]) + +AM_PATH_CPPUNIT(1.9.6) +AC_PROG_CXX +AC_PROG_CC +AC_PROG_INSTALL + +AC_SEARCH_LIBS([shm_unlink], [rt]) + +PKG_CHECK_MODULES([LOG4CXX], [liblog4cxx >= 0.10.0]) + +AX_BOOST_BASE([1.41]) +AX_BOOST_SYSTEM +AC_OUTPUT(Makefile) diff --git a/redhawk/src/testing/loggers/syncappender/log4j.appender b/redhawk/src/testing/loggers/syncappender/log4j.appender new file mode 100644 index 000000000..e9782fc52 --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/log4j.appender @@ -0,0 +1,33 @@ + + +#log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n + +log4j.rootLogger=ALL,stdout, mp + +# Direct log messages to stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +# Direct log messages to stdout +log4j.appender.pse=org.ossie.logging.RH_LogEventAppender +log4j.appender.pse.name_context=TEST_APPENDER +log4j.appender.pse.event_channel=TEST_EVT_CH1 +log4j.appender.pse.producer_id=PRODUCER1 +log4j.appender.pse.producer_name=THE BIG CHEESE +log4j.appender.pse.layout=org.apache.log4j.PatternLayout +log4j.appender.pse.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +# Direct log messages to stdout +log4j.appender.mp=org.ossie.logging.RH_SyncRollingAppender +log4j.appender.mp.Retries=2 +log4j.appender.mp.WaitOnLock=30 +log4j.appender.mp.MaxFileSize=5MB +log4j.appender.mp.MaxBackupIndex=10 +log4j.appender.mp.File=MP_RedhawkTest +log4j.appender.mp.Cleanup=False +log4j.appender.mp.layout=org.apache.log4j.PatternLayout +log4j.appender.mp.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + + diff --git 
a/redhawk/src/testing/loggers/syncappender/log4j.appender2 b/redhawk/src/testing/loggers/syncappender/log4j.appender2 new file mode 100644 index 000000000..9c78bb669 --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/log4j.appender2 @@ -0,0 +1,34 @@ + + + +#log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n + +log4j.rootLogger=ALL,stdout, mp + +# Direct log messages to stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +# Direct log messages to stdout +log4j.appender.pse=org.ossie.logging.RH_LogEventAppender +log4j.appender.pse.name_context=TEST_APPENDER +log4j.appender.pse.event_channel=TEST_EVT_CH1 +log4j.appender.pse.producer_id=PRODUCER1 +log4j.appender.pse.producer_name=THE BIG CHEESE +log4j.appender.pse.layout=org.apache.log4j.PatternLayout +log4j.appender.pse.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +# Direct log messages to stdout +log4j.appender.mp=org.ossie.logging.RH_SyncRollingAppender +log4j.appender.mp.Retries=2 +log4j.appender.mp.WaitOnLock=30 +log4j.appender.mp.MaxFileSize=5MB +log4j.appender.mp.MaxBackupIndex=10 +log4j.appender.mp.File=tmp/MP_RedhawkTest +log4j.appender.mp.Cleanup=False +log4j.appender.mp.layout=org.apache.log4j.PatternLayout +log4j.appender.mp.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + + diff --git a/redhawk/src/testing/loggers/syncappender/log4j.stdout b/redhawk/src/testing/loggers/syncappender/log4j.stdout new file mode 100644 index 000000000..a632ff660 --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/log4j.stdout @@ -0,0 +1,9 @@ + + +log4j.rootLogger=INFO,stdout + +# Direct log messages to stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out 
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n diff --git a/redhawk/src/testing/loggers/syncappender/logtestdebug.h b/redhawk/src/testing/loggers/syncappender/logtestdebug.h new file mode 100644 index 000000000..925f928dd --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/logtestdebug.h @@ -0,0 +1,28 @@ +#ifndef _LOGTESTDEBUG_H +#define _LOGTESTDEBUG_H + +// set when compiling library for test mode... OK to use log4 logging.... +#include +#include +extern log4cxx::LoggerPtr _logger_; +#define LTRACE( expression ) LOG4CXX_TRACE( _logger_, expression ) +#define LDEBUG( expression ) LOG4CXX_DEBUG( _logger_ , expression ) +#define LINFO( expression ) LOG4CXX_INFO( _logger_, expression ) +#define LWARN( expression ) LOG4CXX_WARN( _logger_, expression ) +#define LERROR( expression ) LOG4CXX_ERROR( _logger_, expression ) +#define LFATAL( expression ) LOG4CXX_FATAL( _logger_, expression ) + +#define LNTRACE( lname, expression ) LOG4CXX_TRACE( log4cxx::Logger::getLogger(lname), expression ) +#define LNDEBUG( lname, expression ) LOG4CXX_DEBUG( log4cxx::Logger::getLogger(lname), expression ) +#define LNINFO( lname, expression ) LOG4CXX_INFO( log4cxx::Logger::getLogger(lname), expression ) +#define LNWARN( lname, expression ) LOG4CXX_WARN( log4cxx::Logger::getLogger(lname), expression ) +#define LNERROR( lname, expression ) LOG4CXX_ERROR( log4cxx::Logger::getLogger(lname), expression ) +#define LNFATAL( lname, expression ) LOG4CXX_FATAL( log4cxx::Logger::getLogger(lname), expression ) + +#define LOGGER_CFG( name ) \ + log4cxx::LoggerPtr _logger_ = log4cxx::Logger::getLogger(name); + +#define LOGGER_END( name ) \ + log4cxx::LogManager::shutdown(); + +#endif diff --git a/redhawk/src/testing/loggers/syncappender/proc_log.cpp b/redhawk/src/testing/loggers/syncappender/proc_log.cpp new file mode 100644 index 000000000..1cce6d0f4 --- /dev/null +++ 
b/redhawk/src/testing/loggers/syncappender/proc_log.cpp @@ -0,0 +1,52 @@ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +int main( int argc, char ** argv ) { + + // Set up a simple configuration that logs on the console. + log4cxx::PropertyConfigurator::configure("log4j.appender" ); + + log4cxx::helpers::LogLog::setInternalDebugging(true); + + log4cxx::LoggerPtr logger = log4cxx::Logger::getRootLogger(); + + int32_t interval=1000; + int64_t cnt=10; + int pid = getpid(); + + if ( argc > 1 ) cnt=strtoll( argv[1], NULL, 0); + if ( argc > 2 ) interval=strtol( argv[2], NULL, 0); + + + while ( cnt-- ) { + LOG4CXX_INFO(logger, "test_log4j_props_appender MSG 1 - root logger (" << pid << ")" ); + LOG4CXX_INFO(logger, "test_log4j_props_appender MSG 2 - root logger (" << pid << ")" ); + LOG4CXX_INFO(logger, "test_log4j_props_appender MSG 3 - root logger (" << pid << ")" ); + + std::ostringstream os; + os << "LOG-MultiProcRollingFileAppender.Sub" << getpid(); + log4cxx::LoggerPtr lp = log4cxx::Logger::getLogger(os.str()); + LOG4CXX_INFO(lp, "test_log4j_props_appender MSG 1 - SUB logger (" << pid << ")" ); + LOG4CXX_INFO(lp, "test_log4j_props_appender MSG 2 - SUB logger (" << pid << ")" ); + LOG4CXX_INFO(lp, "test_log4j_props_appender MSG 3 - SUB logger (" << pid << ")" ); + + LOG4CXX_INFO(logger, "proc_log -END (" << pid << ")" ); + usleep(interval); + } + + // closes appenders correctly... 
+ log4cxx::LogManager::shutdown(); + + return(0); + +} + diff --git a/redhawk/src/testing/loggers/syncappender/reconf b/redhawk/src/testing/loggers/syncappender/reconf new file mode 100755 index 000000000..db88259ab --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/reconf @@ -0,0 +1,2 @@ +[ -d m4 ] || mkdir m4 +autoreconf -i diff --git a/redhawk/src/testing/loggers/syncappender/runtests b/redhawk/src/testing/loggers/syncappender/runtests new file mode 100755 index 000000000..e88e7e977 --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/runtests @@ -0,0 +1,10 @@ +# +# +logging_top=../.. +logging_libsrc_top=$logging_top/libsrc +export LD_LIBRARY_PATH=$logging_top/idl/.libs:$logging_libsrc_top/.libs:${LD_LIBRARY_PATH} +./reconf +./configure +make check + + diff --git a/redhawk/src/testing/loggers/syncappender/test_suites.cpp b/redhawk/src/testing/loggers/syncappender/test_suites.cpp new file mode 100644 index 000000000..e0a774c41 --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/test_suites.cpp @@ -0,0 +1,238 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "test_suites.h" + +// Registers the fixture into the 'registry' +CPPUNIT_TEST_SUITE_REGISTRATION( test_suite_one ); +CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( test_suite_one, "test_one" ); +CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( test_suite_two, "test_two" ); +CPPUNIT_TEST_SUITE_NAMED_REGISTRATION( test_suite_three, "test_three" ); + +void +test_suite_one::setUp() +{ + + // Set up a simple configuration that logs on the console. 
+ log4cxx::PropertyConfigurator::configure("log4j.stdout" ); + + log4cxx::helpers::LogLog::setInternalDebugging(true); + + logger = log4cxx::Logger::getLogger("LOG-MultiProcRollingFileAppender"); + + LOG4CXX_INFO(logger, "Setup cache directory for saved configuration files"); + boost::filesystem::path dir("./logs"); + boost::filesystem::create_directory(dir); +} + +void +test_suite_one::tearDown() +{ + // Set up a simple configuration that logs on the console. + log4cxx::PropertyConfigurator::configure("log4j.stdout" ); + + boost::filesystem::path dir("./logs"); + boost::filesystem::remove_all(dir); + +} + +void +test_suite_one::test_one() +{ + LOG4CXX_INFO(logger, "RH_SyncRollingAppender - BEGIN "); + + // Set up a simple configuration that logs on the console. + log4cxx::PropertyConfigurator::configure("log4j.appender" ); + int cnt=10; + int pid = getpid(); + + while ( cnt-- ) { + LOG4CXX_INFO(logger, "test_log4j_props_appender MSG 1 - root logger (" << pid << ")" ); + LOG4CXX_INFO(logger, "test_log4j_props_appender MSG 2 - root logger (" << pid << ")" ); + LOG4CXX_INFO(logger, "test_log4j_props_appender MSG 3 - root logger (" << pid << ")" ); + + std::ostringstream os; + os << "LOG-MultiProcRollingFileAppender.Sub" << getpid(); + log4cxx::LoggerPtr lp = log4cxx::Logger::getLogger(os.str()); + LOG4CXX_INFO(lp, "test_log4j_props_appender MSG 1 - SUB logger (" << pid << ")" ); + LOG4CXX_INFO(lp, "test_log4j_props_appender MSG 2 - SUB logger (" << pid << ")" ); + LOG4CXX_INFO(lp, "test_log4j_props_appender MSG 3 - SUB logger (" << pid << ")" ); + + LOG4CXX_INFO(logger, "RH_SyncRollingAppender -END "); + usleep(1000); + } + +} + + +void +test_suite_one::test_two() +{ + LOG4CXX_INFO(logger, "MultiProcess Test - BEGIN "); + + typedef std::vector< int > Chillens; + + Chillens chillens; + int cnt=10; + for( int i=0; i +#include + + +void +test_suite_three::setUp() +{ + + // Set up a simple configuration that logs on the console. 
+ log4cxx::PropertyConfigurator::configure("log4j.stdout" ); + + log4cxx::helpers::LogLog::setInternalDebugging(true); + + logger = log4cxx::Logger::getLogger("LOG-SyncRollingAppender"); + + LOG4CXX_INFO(logger, "Setup cache directory for saved configuration files"); + boost::filesystem::path dir("./logs"); + boost::filesystem::create_directory(dir); +} + +void +test_suite_three::tearDown() +{ + // Set up a simple configuration that logs on the console. + log4cxx::PropertyConfigurator::configure("log4j.stdout" ); + + boost::filesystem::path dir("./logs"); + boost::filesystem::remove_all(dir); + boost::filesystem::path d2("./tmp"); + boost::filesystem::remove_all(d2); + +} + + +void +test_suite_three::test_cleanmem() +{ + LOG4CXX_INFO(logger, "RH_SyncRollingAppender - BEGIN "); + + // Set up a simple configuration that logs on the console. + log4cxx::PropertyConfigurator::configure("log4j.appender" ); + + int ret=system("./cleanmem MP_RedhawkTest"); + + CPPUNIT_ASSERT_EQUAL( ret, 0); + + +} + + +void +test_suite_three::test_cleanmem_missing() +{ + LOG4CXX_INFO(logger, "RH_SyncRollingAppender - BEGIN "); + + // Set up a simple configuration that logs on the console. + log4cxx::PropertyConfigurator::configure("log4j.appender" ); + + int ret=system("./cleanmem MP_RedhawkTest"); + + using namespace boost::interprocess; + + // validate memory is no long there + shared_memory_object obj( open_only, "MP_RedhawkTest", read_only ); + +} + + + +void +test_suite_three::test_cleanmem_path() +{ + LOG4CXX_INFO(logger, "RH_SyncRollingAppender - BEGIN "); + + // Set up a simple configuration that logs on the console. 
+ log4cxx::PropertyConfigurator::configure("log4j.appender2" ); + + int ret=system("./cleanmem tmp/MP_RedhawkTest"); + + CPPUNIT_ASSERT_EQUAL( ret, 0); + + +} diff --git a/redhawk/src/testing/loggers/syncappender/test_suites.h b/redhawk/src/testing/loggers/syncappender/test_suites.h new file mode 100644 index 000000000..6bfe2318b --- /dev/null +++ b/redhawk/src/testing/loggers/syncappender/test_suites.h @@ -0,0 +1,63 @@ +#ifndef TEST_SUITES_H +#define TEST_SUITES_H +#include +#include +#include + +class test_suite_one : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE( test_suite_one ); + CPPUNIT_TEST( test_one ); + CPPUNIT_TEST( test_two ); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void test_one(); + void test_two(); + + log4cxx::LoggerPtr logger; +}; + + +class test_suite_two : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE( test_suite_two ); + CPPUNIT_TEST( test_loop ); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void test_loop(); + + log4cxx::LoggerPtr logger; + +}; + + +class test_suite_three : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE( test_suite_three ); + CPPUNIT_TEST( test_cleanmem ); + CPPUNIT_TEST_EXCEPTION( test_cleanmem_missing, boost::interprocess::interprocess_exception ); + CPPUNIT_TEST( test_cleanmem_path ); + CPPUNIT_TEST_SUITE_END(); + +public: + void setUp(); + void tearDown(); + + void test_cleanmem(); + void test_cleanmem_path(); + void test_cleanmem_missing(); + + log4cxx::LoggerPtr logger; + +}; + + +#endif // TEST_ONE diff --git a/redhawk/src/testing/macro_config.cfg b/redhawk/src/testing/macro_config.cfg new file mode 100644 index 000000000..d7b35075c --- /dev/null +++ b/redhawk/src/testing/macro_config.cfg @@ -0,0 +1,6 @@ +log4j.rootLogger=TRACE,FILE +# Direct log messages to FILE +log4j.appender.FILE=org.apache.log4j.FileAppender +log4j.appender.FILE.File=foo/bar/test.log +log4j.appender.FILE.layout=org.apache.log4j.PatternLayout 
+log4j.appender.FILE.layout.ConversionPattern=|||@@@WAVEFORM.INSTANCE@@@|||@@@DEVICE_MANAGER.NAME@@@|||@@@DEVICE_MANAGER.INSTANCE@@@|||%d{ISO8601}: %p:%c - %m [%F:%L]%n diff --git a/redhawk/src/testing/python/ossie b/redhawk/src/testing/python/ossie new file mode 120000 index 000000000..c1c6577db --- /dev/null +++ b/redhawk/src/testing/python/ossie @@ -0,0 +1 @@ +../../base/framework/python/ossie \ No newline at end of file diff --git a/redhawk/src/testing/python/redhawk b/redhawk/src/testing/python/redhawk new file mode 120000 index 000000000..41c1bbb9e --- /dev/null +++ b/redhawk/src/testing/python/redhawk @@ -0,0 +1 @@ +../../base/framework/python/redhawk \ No newline at end of file diff --git a/redhawk/src/testing/python/runtests.py b/redhawk/src/testing/python/runtests.py new file mode 100755 index 000000000..af774229b --- /dev/null +++ b/redhawk/src/testing/python/runtests.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import unittest +import sys +import getopt + +from omniORB import CORBA + +from ossie.utils.log4py import logging +import ossie.utils.log4py.config + +class MultiTestLoader(unittest.TestLoader): + """ + Extend the default TestLoader to support a list of modules, at least for + the purposes of loadTestsFromName and loadTestsFromNames. + """ + def loadTestsFromName(self, name, modules): + if not isinstance(modules, list): + return unittest.TestLoader.loadTestsFromName(self, name, modules) + else: + # Try all modules in order, returning the first one that has + # matching tests + for mod in modules: + try: + return unittest.TestLoader.loadTestsFromName(self, name, mod) + except AttributeError: + pass + raise AttributeError("test '%s' not found" % (name,)) + +class TestProgram(object): + def __init__(self, modules=None): + if modules is None: + self.modules = [sys.modules['__main__']] + else: + self.modules = modules + self.verbosity = 1 + self.testRunner = None + + self.parseArgs(sys.argv[1:]) + self.createTests() + self.runTests() + + def createTests(self): + # Load tests, filtering by name (if arguments were given). 
+ loader = MultiTestLoader() + if self.testNames: + self.test = loader.loadTestsFromNames(self.testNames, self.modules) + else: + self.test = unittest.TestSuite() + for mod in self.modules: + self.test.addTests(loader.loadTestsFromModule(mod)) + + def parseArgs(self, argv): + import getopt + short_options = 'vx' + long_options = ['xunit', 'log-level=', 'log-config=', 'verbose'] + + xunit = False + log_level = None + log_config = None + options, args = getopt.getopt(argv, short_options, long_options) + for opt, value in options: + if opt in ('-v', '--verbose'): + self.verbosity = 2 + elif opt in ('-x', '--xunit'): + xunit = True + elif opt == '--log-level': + # Map from string names to Python levels (this does not appear to + # be built into Python's logging module) + log_level = ossie.utils.log4py.config._LEVEL_TRANS.get(value.upper(), None) + elif opt == '--log-config': + log_config = value + + + # If requested, use XML output (but the module is non-standard, so it + # may not be available). + if xunit: + try: + import xmlrunner + self.testRunner = xmlrunner.XMLTestRunner(verbosity=self.verbosity) + except ImportError: + print >>sys.stderr, 'WARNING: XML test runner module is not installed' + except TypeError: + # Maybe it didn't like the verbosity argument + self.testRunner = xmlrunner.XMLTestRunner() + + # If a log4j configuration file was given, read it. + if log_config: + ossie.utils.log4py.config.fileConfig(log_config) + else: + # Set up a simple configuration that logs on the console. + logging.basicConfig() + + # Apply the log level (can override config file). + if log_level: + logging.getLogger().setLevel(log_level) + + # Any additional arguments are test names + self.testNames = args + + def runTests(self): + # Many tests require CORBA, so initialize up front + orb = CORBA.ORB_init() + root_poa = orb.resolve_initial_references("RootPOA") + manager = root_poa._get_the_POAManager() + manager.activate() + + # Default: use text output. 
+ if not self.testRunner: + self.testRunner = unittest.TextTestRunner(verbosity=self.verbosity) + + result = self.testRunner.run(self.test) + + orb.shutdown(True) + + sys.exit(not result.wasSuccessful()) + +main = TestProgram + +if __name__ == '__main__': + import os + import glob + import imp + + # Find all Python files in the current directory and import them, adding + # their tests to the overall test suite. + modules = [] + for filename in glob.glob('*.py'): + modname, ext = os.path.splitext(filename) + fd = None + try: + fd, fn, desc = imp.find_module(modname) + mod = imp.load_module(modname, fd, fn, desc) + modules.append(mod) + finally: + if fd: + fd.close() + + main(modules) diff --git a/redhawk/src/testing/python/test_bitbuffer.py b/redhawk/src/testing/python/test_bitbuffer.py new file mode 100644 index 000000000..30f8740ee --- /dev/null +++ b/redhawk/src/testing/python/test_bitbuffer.py @@ -0,0 +1,486 @@ +#!/usr/bin/python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import unittest +import copy + +from redhawk.bitbuffer import bitbuffer + +class BitBufferTest(unittest.TestCase): + def testConstructor(self): + # Empty + buf = bitbuffer() + self.assertEqual(0, len(buf), 'new empty bitbuffer should be zero-length') + self.failIf(bool(buf), 'bitbuffer with zero length should evaluate to False') + + # Allocating + NUM_BITS = 17 + buf = bitbuffer(bits=NUM_BITS) + self.assertEqual(NUM_BITS, len(buf), 'new bitbuffer should have length 17') + self.failUnless(bool(buf), 'bitbuffer with non-zero length should evaluate to True') + + def testFromInt(self): + # Input value is right-aligned (i.e., take lowest 28 bits) + buf = bitbuffer(0xBADC0DE, 28) + self.assertEqual(28, len(buf)) + data = buf.bytes() + self.assertEqual('\xBA\xDC\x0D', data[:3]) + self.assertEqual(0xE0, ord(data[3]) & 0xF0) + + # Ignore some of the most significant bits + # 00(11 0100 0101 0110 0111 1000) + # 3 4 5 6 7 8 + # = + # 1101 0001 0101 1001 1110 00xx + # D 1 5 9 E + buf = bitbuffer(0x12345678, 22) + self.assertEqual(22, len(buf)) + data = buf.bytes() + self.assertEqual('\xD1\x59', data[:2]) + self.assertEqual(0xE0, ord(data[2]) & 0xFC) + + # Since value is right-aligned, any higher bits are 0 + # 1111 0101 1010 1101 + # F 5 A D + # = + # 0000 0001 1110 1011 0101 101x + # 0 1 E B 5 A + buf = bitbuffer(0xF5AD, 23) + self.assertEqual(23, len(buf)) + data = buf.bytes() + self.assertEqual('\x01\xEB', data[:2]) + self.assertEqual(0x5A, ord(data[2]) & 0xFE) + + def testFromArray(self): + # Test with a large array, using the default behavior for number of + # bits (8 bits for each byte) and start offset (0) + array = bytearray('\x01\x23\x45\x67\x89\xAB\xCD\xEF\x11\x22') + buf = bitbuffer(array) + self.assertEqual(len(array)*8, len(buf)) + self.assertEqual(array, buf.bytes()) + + # Test with a non-zero offset and non-integral number of bytes + buf = bitbuffer(array, bits=18, start=4) + self.assertEqual(18, len(buf)) + # Expected bytes are input array shifted left 
one nibble (easy to + # calulate) with the last 6 bits masked off + self.assertEqual(b'\x12\x34\x40', buf.bytes()) + + def testFromGenerator(self): + # Use a generator expresion to populate the bit data (0111, repeating) + buf = bitbuffer(bool(x%4) for x in xrange(48)) + self.assertEqual(48, len(buf)) + expected = '\x77'*6 + self.assertEqual(expected, buf.bytes()) + + def testFromList(self): + # Integers + int_vals = [1, 0, 1, 1, 0, 1, 0, 1, 0] + buf = bitbuffer(int_vals) + self.assertEqual(int_vals, buf) + + # Booleans + bool_vals = [False, False, True, False, False, True, True, True, False, True] + buf = bitbuffer(bool_vals) + self.assertEqual(bool_vals, buf) + + def testFromString(self): + # Basic binary string + str_val = '101101001010101001010' + buf = bitbuffer(str_val) + self.assertEqual(len(str_val), len(buf)) + for index, (ch, bit) in enumerate(zip(str_val, buf)): + self.assertEqual(int(ch), bit, 'wrong value at position %d' % index) + + # Invalid input string + self.assertRaises(ValueError, bitbuffer, '01011002') + + def testBytes(self): + # Empty bitbuffer should still support bytes() + buf = bitbuffer() + data = buf.bytes() + self.assertEqual(0, len(data)) + + # Start with a 32-bit value, with no offset and an exact byte length + buf = bitbuffer(0xB552B4E1, 32) + data = buf.bytes() + self.assertEqual(4, len(data)) + self.assertEqual(b'\xB5\x52\xB4\xE1', data) + + # Offset, not byte-aligned slice; bytes() should create a new copy of + # the byte array + # 101(10101010 1001010)110100 + # 10101010|1001010x = 0xAA94 + data = buf[3:18].bytes() + self.assertEqual(2, len(data)) + self.assertEqual('\xAA', data[0]) + self.assertEqual(0x94, ord(data[1]) & 0xFE) + + def testEquals(self): + # Fill a bit buffer with a known pattern + pattern = '11001010010101011011100001101001110000101' + first = bitbuffer(pattern) + + # A bitbuffer should be rigorously equal to itself + self.failUnless(first == first) + + # Another bitbuffer with different backing memory should 
still compare + # equal + second = bitbuffer(pattern) + self.assertEqual(first, second) + + # Flip a bit, the comparison should now fail + second[17] = not second[17] + self.assertNotEqual(first, second) + + # Create a new buffer with a different size, but the same data (just + # offset by few bits). It should compare unequal as-is; however, it + # should compare equal if taking a slice of the original buffer to + # re-align them. + third = bitbuffer(pattern[3:]) + self.assertNotEqual(first, third) + self.assertEqual(first[3:], third) + + def testEqualsPyObjects(self): + pattern = '1011010101001011' + int_vals = [int(ch) for ch in pattern] + + # Should be equal + buf = bitbuffer(pattern) + self.assertEquals(pattern, buf) + self.assertEquals(int_vals, buf) + + # Different lengths + self.assertNotEquals(pattern[:-2], buf, 'unequal size string compared equal') + self.assertNotEquals(int_vals[:-2], buf, 'unequal size list compared equal') + + # Flipped bit + buf[1] = 1 + self.assertNotEquals(pattern, buf, 'unequal string compared equal') + self.assertNotEquals(int_vals, buf, 'unequal list compared equal') + + # Skip over flipped bit + self.assertEquals(pattern[2:], buf[2:], 'substring compared not equal') + self.assertEquals(int_vals[2:], buf[2:], 'list slice compared not equal') + + def testCopy(self): + # Create a bitbuffer with known data: bit is set if index is odd + original = bitbuffer(x&1 for x in xrange(127)) + + # Make a copy and modify the original; the copy should be unaffected + copied = copy.copy(original) + self.assertEqual(original, copied) + original[2] = 1 + self.assertEqual(0, copied[2]) + + def testFill(self): + # Create a new bitbuffer, with all bits initialized to one + buf = bitbuffer([1] * 64) + + # Fill the entire bitbuffer with all zeros + buf[:] = 0 + self.assertEqual([0] * len(buf), buf) + + # Fill a subset of the bitbuffer with ones + buf[9:33] = 1 + data = buf.bytes() + self.assertEqual(b'\x7F\xFF\xFF\x80', data[1:5]) + + # Implicit offset 
and non-byte-aligned end + buf = buf[42:47] + buf[1:4] = 1 + self.assertEqual([0, 1, 1, 1, 0], buf) + + def testGetItem(self): + buf = bitbuffer('001010100111100') + self.assertEqual(0, buf[0]) + self.assertEqual(0, buf[1]) + self.assertEqual(1, buf[2]) + self.assertEqual(0, buf[3]) + self.assertEqual(1, buf[4]) + self.assertEqual(0, buf[8]) + self.assertEqual(1, buf[9]) + + # Use slice to create a new bit buffer with a non-zero offset to test + # that the offset is taken into account + buf2 = buf[11:15] + self.assertEqual(1, buf2[0]) + self.assertEqual(1, buf2[1]) + self.assertEqual(0, buf2[2]) + self.assertEqual(0, buf2[3]) + + # Exceptions + # Index past end + self.assertRaises(IndexError, buf.__getitem__, len(buf)) + # Negative index past beginning + self.assertRaises(IndexError, buf.__getitem__, -(len(buf) + 1)) + + def testGetItemSlice(self): + # Fill a new bit buffer with alternating 0's and 1's + buf = bitbuffer(x & 1 for x in xrange(12)) + + # Take a 4-bit slice from the middle and check that it has the expected + # bits + middle = buf[4:8] + self.assertEqual(4, len(middle)) + self.assertEqual([0, 1, 0, 1], middle) + + # Take a slice from the midpoint to the end, and check that the bits + # match + end = buf[6:] + self.assertEqual(6, len(end)) + self.assertEqual([0, 1, 0, 1, 0, 1], end) + + # Compare the overlap between the two slices by taking sub-slices + self.assertEqual(middle[2:], end[0:2]) + + # Starting at the end index should return an empty bit buffer + empty = buf[len(buf):] + self.assertEqual(0, len(empty)) + + # Negative indices should be from end + neg = buf[-6:-2] + self.assertEqual(4, len(neg)) + self.assertEqual(buf[6:10], neg) + + def testSetItem(self): + # Start with a zero-filled buffer + buf = bitbuffer(bits=48) + + # Basic bit setting + buf[3] = 1 + data = buf.bytes() + self.assertEqual(0x10, ord(data[0]), 'Set bit') + + # Two bits in the same byte + buf[8] = 1 + buf[13] = 1 + data = buf.bytes() + self.assertEqual(0x84, ord(data[1]), 
'Set two bits in same byte') + + # Any non-zero integer should be interpreted as a 1 + buf[18] = 2 + buf[22] = -5289 + data = buf.bytes() + self.assertEqual(0x22, ord(data[2]), 'Set non-zero integer') + + # 0 should clear an existing bit + buf[8] = 0 + data = buf.bytes() + self.assertEqual(0x04, ord(data[1]), 'Clear bit') + + # Use a slice to test that offsets are accounted for (the slice shares + # the same backing byte array) + buf2 = buf[35:47] + buf2[1] = 1 + self.assertEqual(1, buf[36], 'Slice with offset') + + # Exceptions + # Index past end + self.assertRaises(IndexError, buf.__setitem__, len(buf), 0) + # Negative index past beginning + self.assertRaises(IndexError, buf.__setitem__, -(len(buf) + 1), 0) + + def testSetItemSlice(self): + # Destination is all 0's (allocating ctor zeros byte array) + dest = bitbuffer(bits=36) + + # Set known pattern in source + src = bitbuffer('10001100110001101101') + + # Replace 9 bits at offset 1 + # (1000110 0|1)100 + # 0(1000110|0 1)000000 = 0x4640 + dest[1:10] = src[:9] + data = dest.bytes() + self.assertEqual(b'\x46\x40', data[:2]) + + # Replace 13 bits at offset 22, starting with the 4th bit of the source + # 1000(11 00|110001 10|1)101 + # 000000(11|00 110001|10 1)0xxxx = 0x0331A + dest[22:35] = src[4:17] + data = dest.bytes() + self.assertEqual(b'\x03\x31', data[2:4]) + self.assertEqual(0xA0, ord(data[4]) & 0xF0) + + # Negative indices should be from end; invert first 3 of the last 4 + # bits (prior value above is 1010) + dest[-4:-1] = bitbuffer('010') + data = dest.bytes() + self.assertEqual(0x40, ord(data[4]) & 0xF0) + + def testToInt(self): + # Use a 96-bit long + expected = 0x3545E6A9A11BAAE49A3F3B38 + buf = bitbuffer(expected, 96) + + # Small value + self.assertEqual(3, int(buf[:4])) + + # Multi-byte with offset + # 0x3545E6A9 = 001(10101|01000101|11100110|10101001) + self.assertEqual(0x1545E6A9, int(buf[3:32])) + + # Implicit offset (slice) + buf2 = buf[2:32] + self.assertEqual(0x1545E6A9, int(buf2[1:])) + + # 
Since Python longs are arbitrarily large, try converting the whole + # bitbuffer into an integer + self.assertEqual(expected, int(buf)) + + def testUnpack(self): + # Larger, byte-aligned + expected = [ + 1, 0, 1, 0, 0, 1, 1, 0, + 1, 0, 0, 1, 0, 0, 1, 1, + 0, 1, 0, 1, 1, 0, 1, 1, + ] + buf = bitbuffer(expected) + self.assertEqual(expected, buf.unpack()) + + # Medium, partial byte at end + buf2 = buf[:-3] + self.assertEqual(expected[:-3], buf2.unpack()) + + # Medium, unaligned + buf2 = buf[3:] + self.assertEqual(expected[3:], buf2.unpack()) + + # Small, sub-byte, unaligned + buf = bitbuffer('0101001', bits=6, start=1) + self.assertEqual(6, len(buf)) + self.assertEqual([1, 0, 1, 0, 0, 1], buf.unpack()) + + def testPopcount(self): + buf = bitbuffer('10111001000100011101000110111100001') + self.assertEqual(17, buf.popcount()) + + buf = bitbuffer('100011101000110111100') + self.assertEqual(11, buf.popcount()) + + buf = bitbuffer('1010001') + self.assertEqual(3, buf.popcount()) + + def testDistance(self): + first = bitbuffer('110100010011110100011001111100111') + second = bitbuffer('000101011111111010011110100000011') + + # Distance from self should always be 0 + self.assertEqual(0, first.distance(first)) + + # a 110100010011110100011001111100111 + # b 000101011111111010011110100000011 + # a XOR b = 110001001100001110000111011100100 + self.assertEqual(15, first.distance(second)) + + def testFind(self): + # Pick a oddly-sized pattern (22 bits) + pattern = bitbuffer('1011000000011110111110') + + # Fill a bit buffer with 1's, then copy the pattern into it in a couple + # of places + buf = bitbuffer([1] * 300) + buf[37:37+len(pattern)] = pattern + buf[200:200+len(pattern)] = pattern + + # 1-argument find, looks for exact match from start + self.assertEqual(37, buf.find(pattern)) + + # Start after the first occurrence (using both positive and negative + # indexing) + self.assertEqual(200, buf.find(pattern, start=59)) + self.assertEqual(200, buf.find(pattern, start=-150)) + + # 
The search should fail when started after both occurrences, or + # bounded to a range where no occurences exit + self.assertEqual(-1, buf.find(pattern, start=222)) + self.assertEqual(-1, buf.find(pattern, end=36)) + self.assertEqual(-1, buf.find(pattern, start=60, end=-101)) + + # Test that end index is exclusive + self.assertEqual(-1, buf.find(pattern, end=37)) + self.assertEqual(37, buf.find(pattern, end=38)) + + # Introduce some bit errors + buf[38] = not buf[38] + buf[48] = not buf[48] + buf[220] = not buf[220] + + # Try decreasing tolerances + self.assertEqual(37, buf.find(pattern, maxDistance=2)) + self.assertEqual(200, buf.find(pattern, maxDistance=1)) + self.assertEqual(-1, buf.find(pattern)) + + # Starting the search past the end of the bit buffer should always fail, + # but without an exception + self.assertEqual(-1, buf.find(pattern, start=len(buf))) + + def testTakeSkip(self): + # Use a repeating pattern of an irregular length, where the discarded + # part is disjoint + taken = '10000100001' + skipped = '1001' + buf = bitbuffer((taken+skipped) * 5) + expected = bitbuffer(taken * 5) + self.assertEqual(expected, buf.takeskip(11, 4)) + + # Use ASCII text, where the high bit is always zero; a start offset is + # required + msg = 'Here is some text' + buf = bitbuffer(bytearray(msg)) + ascii = buf.takeskip(7, 1, start=1) + + # Reconstruct the input text by taking 7 bits at a time + result = ''.join(chr(int(ascii[bit:bit+7])) for bit in xrange(0, len(ascii), 7)) + self.assertEqual(msg, result) + + # Repeat with a starting and ending offset + ascii = buf.takeskip(7, 1, start=41, end=97) + result = ''.join(chr(int(ascii[bit:bit+7])) for bit in xrange(0, len(ascii), 7)) + self.assertEqual(msg[5:12], result) + + def testAdd(self): + buf1 = bitbuffer(0xADD, 13) + buf2 = bitbuffer(0xC0DE5, 21) + + # 0xADD (13) = 0101011011101 + # 0xC0DE5 (21) = 011000000110111100101 + # = + # 0101 0110 1110 1011 0000 0011 0111 1001 01xx + # 5 6 E B 0 3 7 9 4 + result = buf1 + buf2 + 
self.assertEqual(34, len(result)) + data = result.bytes() + self.assertEqual('\x56\xEB\x03\x79', data[:4]) + self.assertEqual(0x40, ord(data[4]) & 0xC0) + + def testHex(self): + # Using hex conversion should give same results as equivalent integer + intval = 0x123456789ABCDEF + buf = bitbuffer(intval, 60) + # NB: comparing the result of two hex() expressions avoids potential + # formatting differences in the output of hex() + self.assertEqual(hex(intval), hex(buf)) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/redhawk/src/testing/python/test_messaging.py b/redhawk/src/testing/python/test_messaging.py new file mode 100644 index 000000000..374e6667d --- /dev/null +++ b/redhawk/src/testing/python/test_messaging.py @@ -0,0 +1,397 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK bulkioInterfaces is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK bulkioInterfaces is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import threading +import time +import unittest + +from omniORB.any import to_any, from_any + +from ossie.cf import CF +from ossie.events import MessageSupplierPort, MessageConsumerPort +from ossie.properties import simple_property, simpleseq_property, props_to_any + +class PortManager(object): + def __init__(self): + self._ports = [] + + def addPort(self, port): + self._ports.append(port) + poa = port._default_POA() + poa.activate_object(port) + + def startPorts(self): + for port in self._ports: + if hasattr(port, 'startPort'): + port.startPort() + + def stopPorts(self): + for port in self._ports: + if hasattr(port, 'stopPort'): + port.stopPort() + + def releasePorts(self): + for port in self._ports: + try: + self.deactivatePort(port) + except: + # Ignore CORBA exceptions + pass + self._ports = [] + + def deactivatePort(self, servant): + poa = servant._default_POA() + object_id = poa.servant_to_id(servant) + poa.deactivate_object(object_id) + + def start(self): + self.startPorts() + + def stop(self): + self.stopPorts() + + def releaseObject(self): + self.releasePorts() + + +# REDHAWK 2.1 generated message structs +class BasicMessage(object): + value = simple_property( + id_="basic_message::value", + name="value", + type_="long") + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["value"] = self.value + return str(d) + + @classmethod + def getId(cls): + return "basic_message" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("value",self.value)] + +class TestMessage(object): + item_float = simple_property( + id_="test_message::item_float", + name="item_float", + type_="float") + + item_string = 
simple_property( + id_="test_message::item_string", + name="item_string", + type_="string") + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["item_float"] = self.item_float + d["item_string"] = self.item_string + return str(d) + + @classmethod + def getId(cls): + return "test_message" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("item_float",self.item_float),("item_string",self.item_string)] + + +class MessageReceiver(object): + def __init__(self): + self.messages = [] + self._lock = threading.Lock() + self._cond = threading.Condition(self._lock) + + def messageReceived(self, msgId, msgData): + with self._lock: + self.messages.append(msgData) + self._cond.notify() + + def waitMessages(self, count, timeout): + end = time.time() + timeout + with self._lock: + while True: + if len(self.messages) >= count: + return True + now = time.time() + if now >= end: + return False + self._cond.wait(end - now) + + +class MessagingTest(unittest.TestCase): + def setUp(self): + self._portManager = PortManager() + self._supplier = MessageSupplierPort() + self._consumer = MessageConsumerPort() + + self._portManager.addPort(self._supplier) + self._portManager.addPort(self._consumer) + + # Connect the supplier and consumer + objref = self._consumer._this() + self._supplier.connectPort(objref, "connection_1") + + # Simulate component start + self._portManager.start() + + def tearDown(self): + self._portManager.stop() + self._portManager.releaseObject() + + def testSendMessage(self): + receiver = MessageReceiver() + self._consumer.registerMessage("basic_message", BasicMessage, receiver.messageReceived) + + msg = 
BasicMessage() + msg.value = 1 + + self._supplier.sendMessage(msg) + + # Unlike C++, the Python message consumer is threaded, so we need to + # give it some time to receive the message + self.failUnless(receiver.waitMessages(1, 1.0)) + + self.assertEqual("basic_message", receiver.messages[0].getId()) + self.assertEqual(msg.value, receiver.messages[0].value) + + def testSendMessageConnectionId(self): + # Create and connect a second consumer port + consumer_2 = MessageConsumerPort() + self._portManager.addPort(consumer_2) + objref = consumer_2._this() + self._supplier.connectPort(objref, "connection_2") + + # Set up 2 receivers to distinguish which connection received a message + receiver_1 = MessageReceiver() + self._consumer.registerMessage("basic_message", BasicMessage, receiver_1.messageReceived) + + receiver_2 = MessageReceiver() + consumer_2.registerMessage("basic_message", BasicMessage, receiver_2.messageReceived) + + # Target the first connection + msg = BasicMessage() + msg.value = 1 + self.assertEqual(0, len(receiver_1.messages)) + self.assertEqual(0, len(receiver_2.messages)) + self._supplier.sendMessage(msg, connectionId="connection_1") + + self.failUnless(receiver_1.waitMessages(1, 1.0)) + self.assertEqual("basic_message", receiver_1.messages[0].getId()) + self.assertEqual(1, receiver_1.messages[0].value) + + # Second should not receive it (give it a little time just in case) + self.failIf(receiver_2.waitMessages(1, 0.1)) + + # Target the second connection this time + msg.value = 2 + self._supplier.sendMessage(msg, connectionId="connection_2") + + self.failUnless(receiver_2.waitMessages(1, 1.0)) + self.assertEqual("basic_message", receiver_2.messages[0].getId()) + self.assertEqual(2, receiver_2.messages[0].value) + + # This time, the first should not receive it + self.failIf(receiver_1.waitMessages(2, 0.1)) + + # Target both connections + msg.value = 3 + self._supplier.sendMessage(msg) + + self.failUnless(receiver_1.waitMessages(2, 1.0)) + 
self.failUnless(receiver_2.waitMessages(2, 1.0)) + + # Target invalid connection + msg.value = 4 + self.assertRaises(ValueError, self._supplier.sendMessage, msg, "bad_connection") + + def testSendMessages(self): + receiver = MessageReceiver() + self._consumer.registerMessage("basic_message", BasicMessage, receiver.messageReceived) + self._consumer.registerMessage("test_message", TestMessage, receiver.messageReceived) + + # Send two different message types in one batch + messages = [BasicMessage(value=1), + TestMessage(item_float=2.0, item_string="two"), + BasicMessage(value=3)] + + self.assertEqual(0, len(receiver.messages)) + self._supplier.sendMessages(messages) + self.failUnless(receiver.waitMessages(len(messages), 1.0)) + self.assertEqual('basic_message', receiver.messages[0].getId()) + self.assertEqual(1, receiver.messages[0].value) + self.assertEqual('test_message', receiver.messages[1].getId()) + self.assertEqual(2.0, receiver.messages[1].item_float) + self.assertEqual("two", receiver.messages[1].item_string) + self.assertEqual('basic_message', receiver.messages[2].getId()) + self.assertEqual(3, receiver.messages[2].value) + + def testSendMessagesConnectionId(self): + # Create and connect a second consumer port + consumer_2 = MessageConsumerPort() + self._portManager.addPort(consumer_2) + objref = consumer_2._this() + self._supplier.connectPort(objref, "connection_2") + + # Set up 2 receivers to distinguish which connection received a message + receiver_1 = MessageReceiver() + self._consumer.registerMessage("basic_message", BasicMessage, receiver_1.messageReceived) + + receiver_2 = MessageReceiver() + consumer_2.registerMessage("basic_message", BasicMessage, receiver_2.messageReceived) + + # Target first connection + messages_1 = [BasicMessage(value=x) for x in xrange(2)] + self.assertEqual(0, len(receiver_1.messages)) + self.assertEqual(0, len(receiver_2.messages)) + self._supplier.sendMessages(messages_1, "connection_1") + + # Wait for the first receiver 
to get all messages; the second receiver + # ought to get none (give it some time due to threading) + self.failUnless(receiver_1.waitMessages(2, 1.0)) + self.assertEqual(2, len(receiver_1.messages)) + self.failIf(receiver_2.waitMessages(1, 0.1)) + + # Target the second connection this time with a different set of + # messages + messages_2 = [BasicMessage(value=x) for x in xrange(2,5)] + self._supplier.sendMessages(messages_2, "connection_2") + + # Wait for the second receiver to get all the messages (and check at + # least the first value) + self.failUnless(receiver_2.waitMessages(len(messages_2), 1.0)) + self.assertEqual(3, len(receiver_2.messages)) + self.assertEqual(2, receiver_2.messages[0].value) + self.failIf(receiver_1.waitMessages(3, 0.1)) + + # Target both connections + self._supplier.sendMessages(messages_1) + self.failUnless(receiver_1.waitMessages(4, 1.0)) + self.assertEqual(4, len(receiver_1.messages)) + self.failUnless(receiver_2.waitMessages(5, 1.0)) + self.assertEqual(5, len(receiver_2.messages)) + + # Target invalid connection + messages_3 = [BasicMessage(value=5)] + self.assertRaises(ValueError, self._supplier.sendMessages, messages_3, "bad_connection") + self.failIf(receiver_1.waitMessages(5, 0.1)) + self.failIf(receiver_2.waitMessages(6, 0.1)) + + def testPush(self): + receiver = MessageReceiver() + self._consumer.registerMessage(None, None, receiver.messageReceived) + + # Pack the messages ourselves + messages = [] + messages.append(CF.DataType('first', to_any(100))) + messages.append(CF.DataType('second', to_any('some text'))) + messages.append(CF.DataType('third', to_any(0.25))) + + self._supplier.push(props_to_any(messages)) + + self.failUnless(receiver.waitMessages(3, 1.0)) + self.assertEqual(3, len(receiver.messages)) + self.assertEqual(100, from_any(receiver.messages[0].value)) + self.assertEqual('some text', from_any(receiver.messages[1].value)) + self.assertEqual(0.25, from_any(receiver.messages[2].value)) + + def 
testPushConnectionId(self): + # Create and connect a second consumer port + consumer_2 = MessageConsumerPort() + self._portManager.addPort(consumer_2) + objref = consumer_2._this() + self._supplier.connectPort(objref, 'connection_2') + + # Set up 2 receivers to distinguish which connection received a message + receiver_1 = MessageReceiver() + self._consumer.registerMessage(None, None, receiver_1.messageReceived) + + receiver_2 = MessageReceiver() + consumer_2.registerMessage(None, None, receiver_2.messageReceived) + + # Pack the messages ourselves and target the first connection + messages_1 = [] + messages_1.append(CF.DataType('first', to_any(100))) + messages_1.append(CF.DataType('second', to_any('some text'))) + messages_1.append(CF.DataType('third', to_any(0.25))) + + self._supplier.push(props_to_any(messages_1), 'connection_1') + self.failUnless(receiver_1.waitMessages(3, 1.0)) + self.assertEqual(3, len(receiver_1.messages)) + self.failIf(receiver_2.waitMessages(1, 0.1)) + + # Target the second connection with a different set of messages + messages_2 = [] + messages_2.append(CF.DataType('one', to_any('abc'))) + messages_2.append(CF.DataType('two', to_any(False))) + messages_2 = props_to_any(messages_2) + self._supplier.push(messages_2, "connection_2") + + self.failUnless(receiver_2.waitMessages(2, 1.0)) + self.assertEqual(2, len(receiver_2.messages)) + self.failIf(receiver_1.waitMessages(4, 0.1)) + + # Target both connections with yet another set of messages + messages_3 = props_to_any([CF.DataType('all', to_any(3))]) + self._supplier.push(messages_3) + self.failUnless(receiver_2.waitMessages(3, 1.0)) + self.assertEqual(3, len(receiver_2.messages)) + self.failUnless(receiver_1.waitMessages(4, 1.0)) + self.assertEqual(4, len(receiver_1.messages)) + + # Target invalid connection + messages_4 = props_to_any([CF.DataType('bad', to_any('bad_connection'))]) + self.assertRaises(ValueError, self._supplier.push, messages_4, 'bad_connection') + 
self.failIf(receiver_2.waitMessages(4, 0.1)) + self.failIf(receiver_1.waitMessages(5, 0.1)) + self.assertEqual(3, len(receiver_2.messages)) + self.assertEqual(4, len(receiver_1.messages)) + +if __name__ == '__main__': + import runtests + runtests.main() diff --git a/redhawk/src/testing/runtests.py b/redhawk/src/testing/runtests.py index 32583ca5f..9011904ce 100755 --- a/redhawk/src/testing/runtests.py +++ b/redhawk/src/testing/runtests.py @@ -80,6 +80,7 @@ def configureTestPaths(): from ossie.utils.idllib import IDLLibrary model._idllib = IDLLibrary() model._idllib.addSearchPath(os.path.join(topdir, 'idl')) + model._idllib.addSearchPath(os.path.join(topdir, '../../bulkioInterfaces/idl')) # Set up the system paths (LD_LIBRARY_PATH, PYTHONPATH, CLASSPATH), IDL paths # and SDRROOT to allow testing against an uninstalled framework. diff --git a/redhawk/src/testing/sdr/dev/devices/BadRelease/BadRelease.prf.xml b/redhawk/src/testing/sdr/dev/devices/BadRelease/BadRelease.prf.xml index 81c803b6c..a2a2bc61d 100644 --- a/redhawk/src/testing/sdr/dev/devices/BadRelease/BadRelease.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/BadRelease/BadRelease.prf.xml @@ -189,7 +189,7 @@ with this program. If not, see http://www.gnu.org/licenses/. This property returns the value for the last stack size passed as an option for execute - + This property returns the value for the last priority passed as an option for execute - + The execparms passed to the component. diff --git a/redhawk/src/testing/sdr/dev/devices/BadReleaseBefore/BadReleaseBefore.prf.xml b/redhawk/src/testing/sdr/dev/devices/BadReleaseBefore/BadReleaseBefore.prf.xml index 312000a6b..228793fe2 100644 --- a/redhawk/src/testing/sdr/dev/devices/BadReleaseBefore/BadReleaseBefore.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/BadReleaseBefore/BadReleaseBefore.prf.xml @@ -189,7 +189,7 @@ with this program. If not, see http://www.gnu.org/licenses/. 
This property returns the value for the last stack size passed as an option for execute - + This property returns the value for the last priority passed as an option for execute - + The execparms passed to the component. diff --git a/redhawk/src/testing/sdr/dev/devices/BasicAlwaysBusyDevice/BasicAlwaysBusyDevice.prf.xml b/redhawk/src/testing/sdr/dev/devices/BasicAlwaysBusyDevice/BasicAlwaysBusyDevice.prf.xml index d0d50fd45..769ca67b2 100644 --- a/redhawk/src/testing/sdr/dev/devices/BasicAlwaysBusyDevice/BasicAlwaysBusyDevice.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/BasicAlwaysBusyDevice/BasicAlwaysBusyDevice.prf.xml @@ -200,7 +200,7 @@ with this program. If not, see http://www.gnu.org/licenses/. This property returns the value for the last stack size passed as an option for execute - + This property returns the value for the last priority passed as an option for execute - + The execparms passed to the component. diff --git a/redhawk/src/testing/sdr/dev/devices/BasicTestDevice/BasicTestDevice.prf.xml b/redhawk/src/testing/sdr/dev/devices/BasicTestDevice/BasicTestDevice.prf.xml index ab949050d..39c69c461 100644 --- a/redhawk/src/testing/sdr/dev/devices/BasicTestDevice/BasicTestDevice.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/BasicTestDevice/BasicTestDevice.prf.xml @@ -168,6 +168,7 @@ with this program. If not, see http://www.gnu.org/licenses/. unknown + This property returns the value for the last stack size passed as an option for execute - + This property returns the value for the last priority passed as an option for execute - + The execparms passed to the component. 
diff --git a/redhawk/src/testing/sdr/dev/devices/BasicTestDeviceMatchOverride/BasicTestDevice.prf.xml b/redhawk/src/testing/sdr/dev/devices/BasicTestDeviceMatchOverride/BasicTestDevice.prf.xml index 4605cc6ab..0440c6a54 100644 --- a/redhawk/src/testing/sdr/dev/devices/BasicTestDeviceMatchOverride/BasicTestDevice.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/BasicTestDeviceMatchOverride/BasicTestDevice.prf.xml @@ -226,7 +226,7 @@ with this program. If not, see http://www.gnu.org/licenses/. This property returns the value for the last stack size passed as an option for execute - + This property returns the value for the last priority passed as an option for execute - + The execparms passed to the component. diff --git a/redhawk/src/testing/sdr/dev/devices/BasicTestDevice_java/java/startJava.sh b/redhawk/src/testing/sdr/dev/devices/BasicTestDevice_java/java/startJava.sh index 4ace76682..16d3e1b2f 100755 --- a/redhawk/src/testing/sdr/dev/devices/BasicTestDevice_java/java/startJava.sh +++ b/redhawk/src/testing/sdr/dev/devices/BasicTestDevice_java/java/startJava.sh @@ -22,7 +22,7 @@ myDir=`dirname $0` # Setup the OSSIEHOME Lib jars on the classpath -libDir=../../../../../base/framework/java +libDir=${SDRROOT}/../../base/framework/java libFiles=`ls -1 $libDir/*.jar` for file in $libFiles do diff --git a/redhawk/src/testing/sdr/dev/devices/EventPortTestDevice/EventPortTestDevice.prf.xml b/redhawk/src/testing/sdr/dev/devices/EventPortTestDevice/EventPortTestDevice.prf.xml index 7005550de..2dc5402ba 100644 --- a/redhawk/src/testing/sdr/dev/devices/EventPortTestDevice/EventPortTestDevice.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/EventPortTestDevice/EventPortTestDevice.prf.xml @@ -42,7 +42,7 @@ with this program. If not, see http://www.gnu.org/licenses/. 
SCA required property describing the CPU type - i686 + x86 diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/GPP.prf.xml b/redhawk/src/testing/sdr/dev/devices/GPP/GPP.prf.xml index 45b5ca2cd..27a6a2e53 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/GPP.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/GPP/GPP.prf.xml @@ -29,36 +29,53 @@ with this program. If not, see http://www.gnu.org/licenses/. This specifies the specific device - + REDHAWK GPP + SCA required property describing the CPU type x86_64 - + SCA required property describing the Operating System Name Linux - + SCA required property describing the Operating System Version + Host name on which the device is deployed - + + + If true, GNU screen will be used for the execution of components. + False + + + + + + If provided, all component output will be redirected to this file. The GPP will not delete or rotate these logs. The provided value may contain environment variables or reference component exec-params with @EXEC_PARAM@. For example, this would be a valid value $SDRROOT/logs/@COMPONENT_IDENTIFIER@.log + + + + + + DCE:e4e86070-a121-45d4-a144-00386f2188e3 @@ -181,29 +198,73 @@ Optional - - If true, GNU screen will be used for the execution of components. - False + + + The Multicast NIC interface associated with this GPP (e.g. eth1). If not provided no multicast allocations are permitted. + - - The amount of load capacity remaining to be allocated. - + + Total NIC bandwidth for the interfaces defined in mcastnicInterface. This must be specified in the PRF or DCD because ethtool requires super-user privs. + 0 + Mb/s + + + + + Total NIC bandwidth for the interfaces defined in mcastnicInterface. This must be specified in the PRF or DCD because ethtool requires super-user privs. 
+ 0 + Mb/s + - + Amount of ingress multicast NIC capacity in the GPP not allocated to an application Mb/s + - - Amount of RAM in the GPP not allocated to an application - MiB + + Amount of egress multicast NIC capacity in the GPP not allocated to an application + Mb/s + + + + + Free NIC bandwidth for the interfaces defined in mcastnicInterface. + 0 + Mb/s + + + + + + Free NIC bandwidth for the interfaces defined in mcastnicInterface. + 0 + Mb/s + + + + + + + Percentage of total Multicast NIC this GPP can use for capacity management + 80 + % + + + + + When queired, returns the list of vlans on this host. When used as an allocation, defines the list of VLANS the component requires. + + + + 80.0 @@ -216,6 +277,7 @@ Optional e.* + @@ -232,6 +294,7 @@ Optional + Identifier of component or device that generated this message @@ -266,64 +329,247 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr + + + cycle time between updates of metric capture, calculations and threshold evaluations. + 500 + milliseconds + + + + + + Report reason why the GPP had it's usage state set to BUSY. + + + + + + + Select a cache directory other than the default. + + + + + + + Select a working directory other than the default. + + + + + - The thresholds that cause a failure for pawn allocations + The thresholds that cause a failure for allocations + + false + + + 10 % - + + + + 80 + % + - 100 + 10 MB - 900 MB/s - + + + + The percentage of file handles remaining to the GPP that triggers a threshold condition + 3 + % + + + + + The percentage of threads available to the GPP that triggers a threshold condition + 3 + % + - + + + Amount of RAM in the GPP not in use (measured) + MiB + + + + + + Amount of RAM in the GPP not allocated to an application + MiB + + + + + Equal to "processor_cores" x "loadCapacityPerCore". + + + + The performance ratio of this machine, relative to the benchmark machine. 
1.0 - - - - + deprecated 80 % - - 0.25 + + Equal to loadCapacity + + + + + + The amount of load capacity remaining to be allocated. + + + + + + + The current load average, as reported by /proc/loadavg. Each core on a computer can have a load average between 0.0 and 1.0. This differs greatly from CPU percentage (as reported by top). Load averages differ in two significant ways: 1) they measure the trend of CPU utlization, and 2) they include all demand for the CPU not only how much was active at the time of measurement. Load averages do not include any processes or threads waiting on I/O, networking, databases, or anything else not demanding the CPU. + + + + + + + + + + + + + + + + + + 0.1 + + + + + + + + list of cpu ids that are being monitored for loadavg and idle utilization. + + + + + + + The current number of threads for the GPP + + + The maximum number of threads allowed for the GPP + + + The current number of open file handles for the GPP + + + The maximum number of open file handles allowed for the GPP + + + + + + + The current number of threads running on the system + + + The maximum number of threads allowed to run on the system + + + The current number of open file on the system. + + + The maximum number of open file handles allowed for the system + + + + + + + + + + + + + + + + + + + + + + % + + + MB + + + % + + + + + + + + + + + + + + + The context specification for the exec_directive_class. See numa library manpage for socket(numa node) and cpu list specifications. For cgroup/cpuset option then a pre-existing cgroup name is required. + 0 - The classification of the affinity policy to apply. + socket @@ -331,54 +577,34 @@ THRESHOLD_NOT_EXCEEDED: The measured value no longer exceeds the configured thr - determines if the specified affinity policy (exec_directive_value, exec_directive_class) is inherited by RH resources started from this GPP. false - list of cpu ids to black list when making affinity requests. 
see numa library manpage for cpu list specifications. - If no affinity specification is provide during deployment, then enabling this will deploy resources on next available processor socket. (force_override will ignore this) false - controls if affinity requests are processed by the GPP. true - - - - list of cpu ids that are being monitored for loadavg and idle utilization. - - - - - - cycle time between updates of metric capture, calculations and threshold evaluations. - 500 - milliseconds - - - - diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP.cpp index 965106b18..33cc09988 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP.cpp @@ -25,27 +25,62 @@ the ports can also be done from this class **************************************************************************/ +#include +#include #include +#include #include -#include #include #include +#include +#include +#include +#include +#include +#include #include +#include #include +#include #include #include +#include #include #include -#ifdef HAVE_LIBNUMA +#include +#include +#include +#include +#include +#ifdef HAVE_LIBNUMA #include #endif +#ifdef HAVE_STDLIB_H +#include +#endif + +#ifdef HAVE_SYS_TYPES_H +#include +#endif + +#ifdef HAVE_SYS_WAIT_H +#include +#endif + +#ifdef HAVE_UNISTD_H +#include +#endif + #include "ossie/Events.h" #include "ossie/affinity.h" + #include "GPP.h" #include "utils/affinity.h" #include "utils/SymlinkReader.h" #include "utils/ReferenceWrapper.h" +#include "parsers/PidProcStatParser.h" +#include "states/ProcStat.h" #include "states/ProcMeminfo.h" #include "statistics/CpuUsageStats.h" #include "reports/NicThroughputThresholdMonitor.h" @@ -57,10 +92,160 @@ +class SigChildThread : public ThreadedComponent { + friend class GPP_i; +public: + SigChildThread( GPP_i &p): + parent(p) + {}; + int serviceFunction() { + return parent.sigchld_handler(0); + } 
+private: + GPP_i &parent; +}; -PREPARE_LOGGING(GPP_i) -extern GPP_i *devicePtr; +class RedirectedIO : public ThreadedComponent { + friend class GPP_i; +public: + RedirectedIO( GPP_i &p): + parent(p) + {}; + int serviceFunction() { + return parent.redirected_io_handler(); + } +private: + GPP_i &parent; +}; + + +uint64_t conv_units( const std::string &units ) { + uint64_t unit_m=1024*1024; + if ( units == "Kb" ) unit_m = 1e3; + if ( units == "Mb" ) unit_m = 1e6; + if ( units == "Gb" ) unit_m = 1e9; + if ( units == "Tb" ) unit_m = 1e12; + if ( units == "KB" ) unit_m = 1024; + if ( units == "MB" || units == "MiB" ) unit_m = 1024*1024; + if ( units == "GB" ) unit_m = 1024*1024*1024; + if ( units == "TB" ) unit_m = (uint64_t)1024*1024*1024*1024; + return unit_m; +} + + +const std::string __ExpandEnvVars(const std::string& original) { + + typedef std::list< std::pair > t2StrLst; + + std::string result = original; + const boost::regex envscan("\\$([0-9A-Za-z_]*)"); + const boost::sregex_iterator end; + t2StrLst replacements; + for (boost::sregex_iterator rit(result.begin(), result.end(), envscan); rit != end; ++rit) + replacements.push_back(std::make_pair((*rit)[0],(*rit)[1])); + for (t2StrLst::const_iterator lit = replacements.begin(); lit != replacements.end(); ++lit) { + const char* expanded = std::getenv(lit->second.c_str()); + if (expanded == NULL) + continue; + boost::replace_all(result, lit->first, expanded); + } + + replacements.clear(); + + const boost::regex envscan2("\\$\\{([0-9A-Za-z_]*)\\}"); + for (boost::sregex_iterator rit(result.begin(), result.end(), envscan2); rit != end; ++rit) + replacements.push_back(std::make_pair((*rit)[0],(*rit)[1])); + for (t2StrLst::const_iterator lit = replacements.begin(); lit != replacements.end(); ++lit) + { + const char* expanded = std::getenv(lit->second.c_str()); + if (expanded == NULL) + continue; + boost::replace_all(result, lit->first, expanded); + } + + return result; +} + + +const std::string __ExpandProperties(const 
std::string& original, const CF::Properties &props) { + std::string result = original; + const boost::regex envscan("@([0-9A-Za-z_]*)@"); + const boost::sregex_iterator end; + typedef std::list< std::pair > t2StrLst; + t2StrLst replacements; + for (boost::sregex_iterator rit(result.begin(), result.end(), envscan); rit != end; ++rit) + replacements.push_back(std::make_pair((*rit)[0],(*rit)[1])); + const redhawk::PropertyMap& pmap = redhawk::PropertyMap::cast(props); + for (t2StrLst::const_iterator lit = replacements.begin(); lit != replacements.end(); ++lit) + { + if ( pmap.find(lit->second.c_str()) != pmap.end()) { + std::string expanded = pmap[lit->second.c_str()].toString(); + boost::replace_all(result, lit->first, expanded); + } + } + return result; +} + + + +// +// resolve StdOutLogger symbol for compiler... +// +namespace rh_logger { + class StdOutLogger : public Logger { + public: + static LoggerPtr getRootLogger( ); + }; +}; + + +// +// proc_redirect class and helpers +// +class FindRedirect : public std::binary_function< GPP_i::proc_redirect, int, bool > { + +public: + bool operator() ( const GPP_i::proc_redirect &a, const int &pid ) const { + return a.pid == pid; + }; +}; + + +GPP_i::proc_redirect::proc_redirect( int _pid, int _cout, int _cerr ): + pid(_pid), cout(_cout), cerr(_cerr), fname("") + { + }; + +GPP_i::proc_redirect::proc_redirect( const std::string &_fname, int _pid, int _cout, int _cerr ): + pid(_pid), cout(_cout), cerr(_cerr), fname(_fname) + { + }; + +void GPP_i::proc_redirect::close() { + if ( cout > -1 ) ::close(cout); + if ( cerr > -1 ) ::close(cerr); +} + +// +// component_description class and helpers +// + +class FindPid : public std::binary_function< GPP_i::component_description, int, bool > { + +public: + bool operator() ( const GPP_i::component_description &a, const int &pid ) const { + return a.pid == pid; + } +}; + +class FindApp : public std::binary_function< GPP_i::component_description, std::string, bool > { + +public: + bool 
operator() ( const GPP_i::component_description &a, const std::string &appName ) const { + return a.appName == appName; + } +}; inline bool operator== (const GPP_i::component_description& s1, const GPP_i::component_description& s2) { @@ -72,26 +257,134 @@ inline bool operator== (const GPP_i::component_description& s1, }; +GPP_i::component_description::component_description() : + pid(-1), + appName(""), + identifier(""), + app_started(false), + reservation(-1.0), + terminated(false), + pstat_idx(0) +{ memset(pstat_history, 0, sizeof(pstat_history) ); } + + +GPP_i::component_description::component_description( const std::string &appId) : + pid(-1), + appName(appId), + identifier(""), + app_started(false), + reservation(-1.0), + terminated(false), + pstat_idx(0) +{ memset(pstat_history, 0, sizeof(pstat_history) ); } + + +int64_t GPP_i::component_description::get_process_time() +{ + int64_t retval = 0; + if (parent->grp_children.find(pid) == parent->grp_children.end()) + return retval; + BOOST_FOREACH(const int &_pid, parent->grp_children[pid].pids) { + PidProcStatParser pstat_file(_pid); + if ( pstat_file.parse() < 0 ) { + return -1; + } + retval += pstat_file.get_ticks(); + } + return retval; +} + +void GPP_i::component_description::add_history( int64_t ptime ) { + if ( ptime == -1 ) ptime=0; /// Log error .. + pstat_idx = (pstat_idx + 1)% pstat_history_len; + pstat_history[pstat_idx] = ptime; +} + +void GPP_i::component_description::add_history( ) { + int64_t ptime = get_process_time(); + if ( ptime < 0 ) ptime=0; /// Log error .. 
+ pstat_idx = (pstat_idx + 1)% pstat_history_len; + pstat_history[pstat_idx] = ptime; +} + +int64_t GPP_i::component_description::get_pstat_usage( bool refresh) { + if ( refresh) add_history(); + int64_t retval=0; + int8_t p1_idx = pstat_idx -1; + if ( p1_idx < 0 ) p1_idx = pstat_history_len-1; + uint64_t p1=pstat_history[p1_idx]; + uint64_t p2=pstat_history[pstat_idx]; + retval=p2-p1; + if ( (p2-p1) < 0 )retval=p1-p2; + return retval; +} + +int64_t GPP_i::component_description::get_pstat_usage( uint64_t &p2, uint64_t &p1 ){ + int64_t retval=0; + p2 = pstat_history[pstat_idx]; + int8_t p1_idx = pstat_idx -1; + if ( p1_idx < 0 ) p1_idx = pstat_history_len-1; + p1=pstat_history[p1_idx]; + retval=p2-p1; + if ( (p2-p1) < 0 )retval=p1-p2; + return retval; +} + + +PREPARE_LOGGING(GPP_i) + +extern GPP_i *devicePtr; + +std::string GPP_i::format_up_time(unsigned long secondsUp) +{ + std::stringstream formattedUptime; + int days; + int hours; + int minutes; + int seconds; + + int leftover; + + days = (int) secondsUp / (60 * 60 * 24); + leftover = (int) secondsUp - (days * (60 * 60 * 24) ); + hours = (int) leftover / (60 * 60); + leftover = leftover - (hours * (60 * 60) ); + minutes = (int) leftover / 60; + seconds = leftover - (minutes * 60); + + formattedUptime << days << "d " << hours << "h " << minutes << "m " << seconds << "s"; + + return formattedUptime.str(); +} + GPP_i::GPP_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : - GPP_base(devMgr_ior, id, lbl, sftwrPrfl) + GPP_base(devMgr_ior, id, lbl, sftwrPrfl), + _signalThread( new SigChildThread(*this), 0.1 ), + _redirectedIO( new RedirectedIO(*this), 0.1 ) { _init(); } GPP_i::GPP_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : - GPP_base(devMgr_ior, id, lbl, sftwrPrfl, compDev) + GPP_base(devMgr_ior, id, lbl, sftwrPrfl, compDev), + _signalThread( new SigChildThread(*this), 0.1 ), + _redirectedIO( new RedirectedIO(*this), 0.1 ) { - _init(); + _init(); } GPP_i::GPP_i(char *devMgr_ior, 
char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : - GPP_base(devMgr_ior, id, lbl, sftwrPrfl, capacities) + GPP_base(devMgr_ior, id, lbl, sftwrPrfl, capacities), + _signalThread( new SigChildThread(*this), 0.1 ), + _redirectedIO( new RedirectedIO(*this), 0.1 ) { _init(); } GPP_i::GPP_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : - GPP_base(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev) + GPP_base(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev), + _signalThread( new SigChildThread(*this), 0.1 ), + _redirectedIO( new RedirectedIO(*this), 0.1 ) { _init(); } @@ -103,8 +396,21 @@ GPP_i::~GPP_i() void GPP_i::_init() { + // get the user id + uid_t tmp_user_id = getuid(); + std::ostringstream s; + s << tmp_user_id; + user_id = s.str(); + limit_check_count = 0; + n_reservations =0; sig_fd = -1; + // + // io redirection for child processes + // + _handle_io_redirects = false; + _componentOutputLog =""; + // // add our local set affinity method that performs numa library calls // @@ -138,6 +444,7 @@ void GPP_i::_init() { // default cycle time setting for updating data model, metrics and state threshold_cycle_time = 500; + thresholds.ignore = false; // // Add property change listeners and allocation modifiers @@ -149,11 +456,39 @@ void GPP_i::_init() { // add property change listener addPropertyChangeListener("reserved_capacity_per_component", this, &GPP_i::reservedChanged); + // add property change listener + addPropertyChangeListener("DCE:c80f6c5a-e3ea-4f57-b0aa-46b7efac3176", this, &GPP_i::_component_output_changed); + + // add property change listener + addPropertyChangeListener("DCE:89be90ae-6a83-4399-a87d-5f4ae30ef7b1", this, &GPP_i::mcastnicThreshold_changed); + + // add property change listener thresholds + addPropertyChangeListener("thresholds", this, &GPP_i::thresholds_changed); + + utilization_entry_struct cpu; + cpu.description = "CPU cores"; + cpu.component_load = 0; + 
cpu.system_load = 0; + cpu.subscribed = 0; + cpu.maximum = 0; + utilization.push_back(cpu); + + // shadow property to allow for disabling of values + __thresholds = thresholds; + + setPropertyQueryImpl(this->component_monitor, this, &GPP_i::get_component_monitor); + // tie allocation modifier callbacks to identifiers // nic allocation setAllocationImpl("nic_allocation", this, &GPP_i::allocateCapacity_nic_allocation, &GPP_i::deallocateCapacity_nic_allocation); + // support for older nic ingress/egress allocators + setAllocationImpl("DCE:eb08e43f-11c7-45a0-8750-edff439c8b24", this, &GPP_i::allocate_mcastegress_capacity, &GPP_i::deallocate_mcastegress_capacity); + + // support for older nic ingress/egress allocators + setAllocationImpl("DCE:506102d6-04a9-4532-9420-a323d818ddec", this, &GPP_i::allocate_mcastingress_capacity, &GPP_i::deallocate_mcastingress_capacity); + // load capacity allocations setAllocationImpl("DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056", this, &GPP_i::allocate_loadCapacity, &GPP_i::deallocate_loadCapacity); @@ -161,9 +496,32 @@ void GPP_i::_init() { setAllocationImpl("DCE:8dcef419-b440-4bcf-b893-cab79b6024fb", this, &GPP_i::allocate_memCapacity, &GPP_i::deallocate_memCapacity); //setAllocationImpl("diskCapacity", this, &GPP_i::allocate_diskCapacity, &GPP_i::deallocate_diskCapacity); + + // check reservation allocations + setAllocationImpl(this->redhawk__reservation_request, this, &GPP_i::allocate_reservation_request, &GPP_i::deallocate_reservation_request); } +void GPP_i::constructor() +{ + if (this->workingDirectory.empty() or this->cacheDirectory.empty()) { + char* tmp; + std::string path; + tmp = getcwd(NULL, 200); + if (tmp != NULL) { + path = std::string(tmp); + free(tmp); + } + if (this->workingDirectory.empty()) { + this->workingDirectory = path; + } + if (this->cacheDirectory.empty()) { + this->cacheDirectory = path; + } + } +} + + void GPP_i::postConstruction (std::string &profile, std::string ®istrar_ior, const std::string 
&idm_channel_ior, @@ -181,31 +539,204 @@ void GPP_i::postConstruction (std::string &profile, throw std::runtime_error("unable configure signal handler"); } + _signalThread.start(); + +} + +void GPP_i::update_grp_child_pids() { + glob_t globbuf; + std::vector pids_now; + glob("/proc/[0-9]*", GLOB_NOSORT, NULL, &globbuf); + for (unsigned int i = 0; i < globbuf.gl_pathc; i++) { + std::string stat_filename(globbuf.gl_pathv[globbuf.gl_pathc - i - 1]); + std::string proc_id(stat_filename.substr(stat_filename.rfind("/")+1)); + try { + pids_now.push_back(boost::lexical_cast(proc_id)); + } + catch(...){ + std::stringstream errstr; + errstr << "Unable to process id: "<(*i) * getpagesize() / (1024*1024); + continue; + } + if ( fcnt == 19 ) { // threads + tmp.num_threads = boost::lexical_cast(*i); + continue; + } + if ( fcnt == 4 ) { // process group id + tmp.pgrpid = boost::lexical_cast(*i); + continue; + } + if ( fcnt == 0 ) { // pid + pid = boost::lexical_cast(*i); + continue; + } + } + + } catch ( ... ) { + std::stringstream errstr; + errstr << "Invalid line format in stat file, pid :" << _pid << " field number " << fcnt << " line " << line ; + LOG_WARN(GPP_i, __FUNCTION__ << ": " << errstr.str() ); + continue; + } + + } catch ( ... 
) { + std::stringstream errstr; + errstr << "Unable to read "</stat: "<=37 received=" << fcnt << ")"; + LOG_DEBUG(GPP_i, __FUNCTION__ << ": " << errstr.str() ); + continue; + } + parsed_stat[pid] = tmp; + if (grp_children.find(tmp.pgrpid) == grp_children.end()) { + grp_children[tmp.pgrpid].num_processes = 1; + grp_children[tmp.pgrpid].mem_rss = tmp.mem_rss; + grp_children[tmp.pgrpid].num_threads = tmp.num_threads; + grp_children[tmp.pgrpid].pgrpid = tmp.pgrpid; + grp_children[tmp.pgrpid].pids.push_back(pid); + } else { + grp_children[tmp.pgrpid].num_processes += 1; + grp_children[tmp.pgrpid].mem_rss += tmp.mem_rss; + grp_children[tmp.pgrpid].num_threads += tmp.num_threads; + grp_children[tmp.pgrpid].pids.push_back(pid); + } + } + } + std::vector parsed_stat_to_erase; + for(std::map::iterator _it = parsed_stat.begin(); _it != parsed_stat.end(); _it++) { + if (std::find(pids_now.begin(), pids_now.end(), _it->first) == pids_now.end()) { // it is not on the current process list + if (grp_children.find(parsed_stat[_it->first].pgrpid) != grp_children.end()) { + std::vector::iterator it = std::find(grp_children[parsed_stat[_it->first].pgrpid].pids.begin(), grp_children[parsed_stat[_it->first].pgrpid].pids.end(), _it->first); + if (it != grp_children[parsed_stat[_it->first].pgrpid].pids.end()) + grp_children[parsed_stat[_it->first].pgrpid].pids.erase(it); + } + parsed_stat_to_erase.push_back(_it->first); + } + } + BOOST_FOREACH(const int &_pid, parsed_stat_to_erase) { + parsed_stat.erase(_pid); + } } +std::vector GPP_i::get_component_monitor() { + ReadLock rlock(pidLock); + std::vector retval; + struct sysinfo info; + sysinfo(&info); + BOOST_FOREACH(const component_description &_pid, pids) { + if ( !_pid.terminated ) { + if ((grp_children.find(_pid.pid) == grp_children.end()) or (parsed_stat.find(_pid.pid) == parsed_stat.end())) { + std::stringstream errstr; + errstr << "Could not find /proc/"<<_pid.pid<<"/stat. 
The process corresponding to component "<<_pid.identifier<<" is no longer there"; + LOG_WARN(GPP_i, __FUNCTION__ << ": " << errstr.str() ); + continue; + } + component_monitor_struct tmp; + tmp.waveform_id = _pid.appName; + tmp.pid = _pid.pid; + tmp.component_id = _pid.identifier; + tmp.num_processes = grp_children[_pid.pid].num_processes; + tmp.cores = _pid.core_usage; + + tmp.mem_rss = grp_children[_pid.pid].mem_rss; + tmp.mem_percent = (double) grp_children[_pid.pid].mem_rss * (1024*1024) / ((double)info.totalram * info.mem_unit) * 100; + tmp.num_threads = grp_children[_pid.pid].num_threads; + + tmp.num_files = 0; + BOOST_FOREACH(const int &actual_pid, grp_children[_pid.pid].pids) { + std::stringstream fd_dirname; + DIR * dirp; + struct dirent * entry; + fd_dirname <<"/proc/"<d_type != DT_DIR) { // If the entry is not a directory + tmp.num_files++; + } + } + closedir (dirp); + } + } + + retval.push_back(tmp); + } + } + + return retval; +} void GPP_i::process_ODM(const CORBA::Any &data) { - boost::mutex::scoped_lock lock(pidLock); const ExtendedEvent::ResourceStateChangeEventType* app_state_change; if (data >>= app_state_change) { - std::string appName = ossie::corba::returnString(app_state_change->sourceName); + std::string appId = ossie::corba::returnString(app_state_change->sourceId); if (app_state_change->stateChangeTo == ExtendedEvent::STARTED) { - RH_NL_TRACE("GPP", "ODM CHANNEL EVENT --> APP STARTED app: " << appName ); - for (std::vector::iterator it=reservations.begin();it!=reservations.end();it++) { - if ((*it).appName == appName) { - tableReservation(*it); - break; - } - } + RH_NL_TRACE("GPP", "ODM CHANNEL EVENT --> APP STARTED app: " << appId ); + ReadLock rlock(pidLock); + ProcessList::iterator i = pids.begin(); + // set app_started ... turns off reservation + while ( i != pids.end() ) { + i=std::find_if( i, pids.end(), std::bind2nd( FindApp(), appId ) ); + if ( i != pids.end() ) { + i->app_started = true; + LOG_TRACE(GPP_i, "Monitor_Processes.. 
APP STARTED:" << i->pid << " app: " << i->appName ); + i++; + } + } } else if (app_state_change->stateChangeTo == ExtendedEvent::STOPPED) { - RH_NL_TRACE("GPP", "ODM CHANNEL EVENT --> APP STOPPED app: " << appName ); - for (std::vector::iterator it=tabled_reservations.begin();it!=tabled_reservations.end();it++) { - if ((*it).appName == appName) { - restoreReservation(*it); - break; - } - } - } + RH_NL_TRACE("GPP", "ODM CHANNEL EVENT --> APP STOPPED app: " << appId ); + ReadLock rlock(pidLock); + ProcessList::iterator i = pids.begin(); + // set app_started ... turns on reservation + while ( i != pids.end() ) { + i=std::find_if( i, pids.end(), std::bind2nd( FindApp(), appId ) ); + if ( i != pids.end() ) { + i->app_started = false; + LOG_TRACE(GPP_i, "Monitor_Processes.. APP STOPPED :" << i->pid << " app: " << i->appName ); + i++; + } + } + } + } + const StandardEvent::DomainManagementObjectRemovedEventType* app_removed; + if (data >>= app_removed) { + if (app_removed->sourceCategory == StandardEvent::APPLICATION) { + WriteLock rlock(pidLock); + std::string producerId(app_removed->producerId); + for (ApplicationReservationMap::iterator app_it=applicationReservations.begin(); app_it!=applicationReservations.end(); app_it++) { + if (app_it->first == producerId) { + applicationReservations.erase(app_it); + break; + } + } + } } } @@ -213,7 +744,6 @@ int GPP_i::_setupExecPartitions( const CpuList &bl_cpus ) { #if HAVE_LIBNUMA // fill in the exec partitions for each numa node identified on the system - CpuUsageStats::CpuList cpus; std::string nodestr("all"); struct bitmask *node_mask = numa_parse_nodestring((char *)nodestr.c_str()); @@ -222,14 +752,12 @@ int GPP_i::_setupExecPartitions( const CpuList &bl_cpus ) { // for each node bit set in the mask then get cpu list int nbytes = numa_bitmask_nbytes(node_mask); for (int i=0; i < nbytes*8; i++ ){ - exec_socket soc; - cpus.clear(); if ( numa_bitmask_isbitset( node_mask, i ) ) { - soc.id = i; numa_node_to_cpus( i, cpu_mask ); 
// foreach cpu identified add to list int nb = numa_bitmask_nbytes(cpu_mask); + CpuUsageStats::CpuList cpus; for (int j=0; j < nb*8; j++ ){ int count = std::count( bl_cpus.begin(), bl_cpus.end(), j ); if ( numa_bitmask_isbitset( cpu_mask, j ) && count == 0 ) { @@ -237,9 +765,11 @@ int GPP_i::_setupExecPartitions( const CpuList &bl_cpus ) { } } CpuUsageStats cpu_usage(cpus); + exec_socket soc; + soc.id = i; soc.cpus = cpus; soc.stats = cpu_usage; - soc.idle_threshold = thresholds.cpu_idle; + soc.idle_threshold = __thresholds.cpu_idle; soc.load_capacity.max = cpus.size() * 1.0; soc.load_capacity.measured = 0.0; soc.load_capacity.allocated = 0.0; @@ -252,7 +782,7 @@ int GPP_i::_setupExecPartitions( const CpuList &bl_cpus ) { if ( execPartitions.size() ) { ExecPartitionList::iterator iter = execPartitions.begin(); std::ostringstream ss; - ss << boost::format("%-6s %-4s %-7s %-7s %-7s ") % "SOCKET" % "CPUS" % "USER" % "SYSTEM" % "IDLE" << std::endl; + ss << boost::format("%-6s %-4s %-7s %-7s %-7s ") % "SOCKET" % "CPUS" % "USER" % "SYSTEM" % "IDLE" ; LOG_INFO(GPP_i, ss.str() ); ss.clear(); ss.str(""); @@ -269,14 +799,6 @@ int GPP_i::_setupExecPartitions( const CpuList &bl_cpus ) { } - -void GPP_i::initializeMemoryMonitor() -{ - // add available memory monitor, mem_free defaults to MB - addThresholdMonitor( new FreeMemoryThresholdMonitor(_identifier, MakeCref(thresholds.mem_free), - ConversionWrapper(memCapacity, 1048576, std::divides() ) ) ); -} - void GPP_i::initializeNetworkMonitor() { @@ -290,31 +812,37 @@ GPP_i::initializeNetworkMonitor() data_model.push_back( nic_facade ); std::vector nic_devices( nic_facade->get_devices() ); + std::vector filtered_devices( nic_facade->get_filtered_devices() ); for( size_t i=0; i(modified_thresholds.nic_usage), - boost::bind(&NicFacade::get_throughput_by_device, nic_facade, nic_devices[i]) ) ); + NicMonitorPtr nic_m = NicMonitorPtr( new NicThroughputThresholdMonitor(_identifier, + nic_devices[i], + 
MakeCref(modified_thresholds.nic_usage), + boost::bind(&NicFacade::get_throughput_by_device, nic_facade, nic_devices[i]) ) ); + + // monitors that affect busy state... + for ( size_t ii=0; ii < filtered_devices.size(); ii++ ) { + if ( nic_devices[i] == filtered_devices[ii] ) { + nic_monitors.push_back(nic_m); + break; + } + } + addThresholdMonitor(nic_m); } } void -GPP_i::initializeCpuMonitor() +GPP_i::initializeResourceMonitors() { - // add memory state reader - ProcMeminfoPtr mem_state( new ProcMeminfo() ); // add cpu utilization calculator RH_NL_INFO("GPP", " initialize CPU Montior --- wl size " << wl_cpus.size()); - CpuUsageStatsPtr cpu_usage_stats( new CpuUsageStats( wl_cpus ) ); - // provide required system metrics to this GPP - system_monitor.reset( new SystemMonitor( cpu_usage_stats, - mem_state ) ); - // seed system monitor + // request a system monitor for this GPP + system_monitor.reset( new SystemMonitor( wl_cpus ) ); + + // seed system monitor history for ( int i=0; i<5; i++ ) { system_monitor->report(); boost::this_thread::sleep( boost::posix_time::milliseconds( 200 ) ); @@ -322,18 +850,37 @@ GPP_i::initializeCpuMonitor() data_model.push_back( system_monitor ); + // add system limits reader + process_limits.reset( new ProcessLimits( getpid() ) ); + + data_model.push_back( process_limits ); + // observer to monitor when cpu idle pass threshold value - addThresholdMonitor( new CpuThresholdMonitor(_identifier, &modified_thresholds.cpu_idle, *cpu_usage_stats, false ) ); + addThresholdMonitor( ThresholdMonitorPtr( new CpuThresholdMonitor(_identifier, &modified_thresholds.cpu_idle, + *(system_monitor->getCpuStats()), false ))); + + // add available memory monitor, mem_free defaults to MB + addThresholdMonitor( ThresholdMonitorPtr( new FreeMemoryThresholdMonitor(_identifier, + MakeCref(modified_thresholds.mem_free), + ConversionWrapper(memCapacity, mem_cap_units, std::multiplies() ) ))); } void -GPP_i::addThresholdMonitor( ThresholdMonitor* threshold_monitor 
) +GPP_i::addThresholdMonitor( ThresholdMonitorPtr t ) { - boost::shared_ptr t( threshold_monitor ); - t->attach_listener( boost::bind(&GPP_i::send_threshold_event, this, _1) ); - threshold_monitors.push_back( t ); + t->attach_listener( boost::bind(&GPP_i::send_threshold_event, this, _1) ); + threshold_monitors.push_back( t ); } +void +GPP_i::setShadowThresholds( const thresholds_struct &nv ) { + if ( nv.cpu_idle >= 0.0 ) __thresholds.cpu_idle = nv.cpu_idle; + if ( nv.load_avg >= 0.0 ) __thresholds.load_avg = nv.load_avg; + if ( nv.mem_free >= 0 ) __thresholds.mem_free = nv.mem_free; + if ( nv.nic_usage >= 0 ) __thresholds.nic_usage = nv.nic_usage; + if ( nv.files_available >= 0.0 ) __thresholds.files_available = nv.files_available; + if ( nv.threads >= 0.0 ) __thresholds.threads = nv.threads; +} // // Device LifeCycle API @@ -341,10 +888,9 @@ GPP_i::addThresholdMonitor( ThresholdMonitor* threshold_monitor ) void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemException) { - RH_NL_TRACE("GPP", "initialize()"); - + RH_NL_INFO("GPP", "initialize()"); // - // subscribe to ODM_Channel for receiving state changes from Application object + // subscribe to ODM_Channel for receiving state changes from Application objects // try { mymgr = redhawk::events::Manager::GetManager(this); @@ -354,7 +900,18 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc LOG_WARN(GPP_i, "Unable to register with EventChannelManager, disabling domain event notification."); } - + // + // check if componentOutputLog is set.. if so enable redirected io operations + // + if ( componentOutputLog != "" ) { + _componentOutputLog =__ExpandEnvVars(componentOutputLog); + _handle_io_redirects = true; + LOG_INFO(GPP_i, "Turning on Component Output Redirection file: " << _componentOutputLog ); + } + else { + LOG_INFO(GPP_i, "Component Output Redirection is DISABLED." 
<< componentOutputLog ); + } + // // Setup affinity settings context // @@ -364,24 +921,94 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc // setup execution partitions for performing socket based deployments, we need to know the current black list // _setupExecPartitions( bl_cpus ); + + // + // Get units for conversion operations + // + std::string munit("MB"); + PropertyInterface *p = getPropertyFromId("thresholds::mem_free"); + if (p) { + munit = p->units; + } + thresh_mem_free_units=conv_units(munit); + + munit="MB"; + p = getPropertyFromId("memFree"); + if (p) { + munit = p->units; + } + mem_free_units=conv_units(munit); + + munit="MB"; + p = getPropertyFromId("memCapacity"); + if (p) { + munit = p->units; + } + mem_cap_units=conv_units(munit); // // setup the data model for the GPP // threshold_monitors.clear(); - initializeCpuMonitor(); - initializeMemoryMonitor(); + initializeResourceMonitors(); initializeNetworkMonitor(); std::for_each( data_model.begin(), data_model.end(), boost::bind( &Updateable::update, _1 ) ); std::for_each( execPartitions.begin(), execPartitions.end(), boost::bind( &Updateable::update, _1 ) ); // - // System wide metrics + // get monitored system values... 
+ // + const SystemMonitor::Report &rpt = system_monitor->getReport(); + + // thresholds can be individually disabled, use shadow thresholds for actual calculations and conditions + setShadowThresholds( thresholds ); + + // + // load average attributes // - loadCapacity_counter = 0; + loadTotal = loadCapacityPerCore * (float)processor_cores; + loadCapacity = loadTotal * ((double)__thresholds.load_avg / 100.0); + loadFree = loadCapacity; idle_capacity_modifier = 100.0 * reserved_capacity_per_component/((float)processor_cores); + + // + // memory capacity tracking attributes + // + memInitVirtFree=rpt.virtual_memory_free; // assume current state to be total available + int64_t init_mem_free = (int64_t) memInitVirtFree; + memInitCapacityPercent = (double)( (int64_t)init_mem_free - (int64_t)(__thresholds.mem_free*thresh_mem_free_units) )/ (double)init_mem_free; + if ( memInitCapacityPercent < 0.0 ) memInitCapacityPercent = 100.0; + memFree = init_mem_free / mem_free_units; + memCapacity = ((int64_t)( init_mem_free * memInitCapacityPercent)) / mem_cap_units ; + memCapacityThreshold = memCapacity; + + // + // set initial modified thresholds + // modified_thresholds = thresholds; + modified_thresholds.mem_free = __thresholds.mem_free*thresh_mem_free_units; + modified_thresholds.load_avg = loadTotal * ( (double)__thresholds.load_avg / 100.0); + modified_thresholds.cpu_idle = __thresholds.cpu_idle; + + loadAverage.onemin = rpt.load.one_min; + loadAverage.fivemin = rpt.load.five_min; + loadAverage.fifteenmin = rpt.load.fifteen_min; + + // + // transfer limits to properties + // + const Limits::Contents &sys_rpt = rpt.sys_limits; + sys_limits.current_threads = sys_rpt.threads; + sys_limits.max_threads = sys_rpt.threads_limit; + sys_limits.current_open_files = sys_rpt.files; + sys_limits.max_open_files = sys_rpt.files_limit; + + const Limits::Contents &pid_rpt = process_limits->get(); + gpp_limits.current_threads = pid_rpt.threads; + gpp_limits.max_threads = 
pid_rpt.threads_limit; + gpp_limits.current_open_files = pid_rpt.files; + gpp_limits.max_open_files = pid_rpt.files_limit; // enable monitors to push out state change events.. MonitorSequence::iterator iter=threshold_monitors.begin(); @@ -389,17 +1016,79 @@ void GPP_i::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemExc if ( *iter ) (*iter)->enable_dispatch(); } + // + // setup mcast interface allocations, used by older systems -- need to deprecate + // + mcastnicIngressThresholdValue = mcastnicIngressTotal * ( mcastnicThreshold / 100.0) ; + mcastnicIngressCapacity = mcastnicIngressThresholdValue; + mcastnicIngressFree = mcastnicIngressCapacity; + mcastnicEgressThresholdValue = mcastnicEgressTotal * ( mcastnicThreshold / 100.0) ; + mcastnicEgressCapacity = mcastnicEgressThresholdValue; + mcastnicEgressFree = mcastnicEgressCapacity; + + // grab nic_metrics for the specified interface + _set_vlan_property(); + // use by service function to mark update time for monitors, states, and stats time_mark = boost::posix_time::microsec_clock::local_time(); + // start capturing IO redirections + _redirectedIO.start(); + GPP_base::start(); GPP_base::initialize(); } + +void GPP_i::thresholds_changed(const thresholds_struct *ov, const thresholds_struct *nv) { + + if ( !(nv->mem_free < 0 ) && ov->mem_free != nv->mem_free ) { + LOG_DEBUG(GPP_i, __FUNCTION__ << " THRESHOLDS.MEM_FREE CHANGED old/new " << ov->mem_free << "/" << nv->mem_free ); + WriteLock wlock(pidLock); + int64_t init_mem_free = (int64_t) memInitVirtFree; + // type cast required for correct calc on 32bit os + memInitCapacityPercent = (double)( (int64_t)init_mem_free - (int64_t)(nv->mem_free*thresh_mem_free_units) )/ (double) init_mem_free; + if ( memInitCapacityPercent < 0.0 ) memInitCapacityPercent = 100.0; + memCapacity = ((int64_t)( init_mem_free * memInitCapacityPercent) ) / mem_cap_units ; + memCapacityThreshold = memCapacity; + modified_thresholds.mem_free = nv->mem_free*thresh_mem_free_units; 
+ } + + + if ( !(nv->load_avg < 0.0) && !(fabs(ov->load_avg - nv->load_avg ) < std::numeric_limits::epsilon()) ) { + LOG_DEBUG(GPP_i, __FUNCTION__ << " THRESHOLDS.LOAD_AVG CHANGED old/new " << ov->load_avg << "/" << nv->load_avg ); + WriteLock wlock(pidLock); + loadCapacity = loadTotal * ((double)nv->load_avg / 100.0); + loadFree = loadCapacity; + modified_thresholds.load_avg = loadTotal * ( (double)nv->load_avg / 100.0); + } + + if ( !(nv->cpu_idle < 0.0) && !(fabs(ov->cpu_idle - nv->cpu_idle ) < std::numeric_limits::epsilon())) { + LOG_DEBUG(GPP_i, __FUNCTION__ << " THRESHOLDS.CPU_IDLE CHANGED old/new " << ov->cpu_idle << "/" << nv->cpu_idle ); + WriteLock wlock(pidLock); + modified_thresholds.cpu_idle = nv->cpu_idle; + } + + + if ( !(nv->nic_usage < 0) && !(fabs(ov->nic_usage - nv->nic_usage ) < std::numeric_limits::epsilon())) { + LOG_DEBUG(GPP_i, __FUNCTION__ << " THRESHOLDS.NIC_USAGE CHANGED old/new " << ov->nic_usage << "/" << nv->nic_usage ); + WriteLock wlock(monitorLock); + modified_thresholds.nic_usage = nv->nic_usage; + } + + setShadowThresholds( *nv ); + +} + void GPP_i::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) { - if ( odm_consumer ) odm_consumer.reset(); - GPP_base::releaseObject(); + _signalThread.stop(); + _signalThread.release(); + _handle_io_redirects = false; + _redirectedIO.stop(); + _redirectedIO.release(); + if ( odm_consumer ) odm_consumer.reset(); + GPP_base::releaseObject(); } @@ -424,10 +1113,27 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::execute (const char* name, const CF: std::vector prepend_args; std::string naming_context_ior; - const redhawk::PropertyMap& tmp_params = redhawk::PropertyMap::cast(parameters); + CF::Properties variable_parameters; + variable_parameters = parameters; + redhawk::PropertyMap& tmp_params = redhawk::PropertyMap::cast(variable_parameters); + float reservation_value = -1; + if (tmp_params.find("RH::GPP::MODIFIED_CPU_RESERVATION_VALUE") != tmp_params.end()) { + double 
reservation_value_d; + if (!tmp_params["RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"].getValue(reservation_value)) { + if (tmp_params["RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"].getValue(reservation_value_d)) { + reservation_value = reservation_value_d; + } else { + reservation_value = -1; + } + } + tmp_params.erase("RH::GPP::MODIFIED_CPU_RESERVATION_VALUE"); + } naming_context_ior = tmp_params["NAMING_CONTEXT_IOR"].toString(); std::string app_id; std::string component_id = tmp_params["COMPONENT_IDENTIFIER"].toString(); + if (applicationReservations.find(component_id) != applicationReservations.end()) { + applicationReservations.erase(component_id); + } std::string name_binding = tmp_params["NAME_BINDING"].toString(); CF::Application_var _app = CF::Application::_nil(); CORBA::Object_var obj = ossie::corba::Orb()->string_to_object(naming_context_ior.c_str()); @@ -441,11 +1147,11 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::execute (const char* name, const CF: } else { _app = _appRegistrar->app(); if (not CORBA::is_nil(_app)) { - app_id = ossie::corba::returnString(_app->name()); + app_id = ossie::corba::returnString(_app->identifier()); } } } - if (this->useScreen) { + if (useScreen) { std::string ld_lib_path(getenv("LD_LIBRARY_PATH")); setenv("GPP_LD_LIBRARY_PATH",ld_lib_path.c_str(),1); @@ -496,70 +1202,568 @@ CF::ExecutableDevice::ProcessID_Type GPP_i::execute (const char* name, const CF: } CF::ExecutableDevice::ProcessID_Type ret_pid; try { - ret_pid = ExecutableDevice_impl::do_execute(name, options, parameters, prepend_args); - this->addPid(ret_pid, app_id, component_id); - this->addReservation( getComponentDescription(ret_pid) ); + ret_pid = do_execute(name, options, tmp_params, prepend_args); + addProcess(ret_pid, app_id, component_id, reservation_value); } catch ( ... 
) { throw; } return ret_pid; } -void GPP_i::terminate (CF::ExecutableDevice::ProcessID_Type processId) throw (CORBA::SystemException, CF::ExecutableDevice::InvalidProcess, CF::Device::InvalidState) + + + +/* execute ***************************************************************** + - executes a process on the device +************************************************************************* */ +CF::ExecutableDevice::ProcessID_Type GPP_i::do_execute (const char* name, const CF::Properties& options, const CF::Properties& parameters, const std::vector prepend_args) throw (CORBA::SystemException, CF::Device::InvalidState, CF::ExecutableDevice::InvalidFunction, CF::ExecutableDevice::InvalidParameters, CF::ExecutableDevice::InvalidOptions, CF::InvalidFileName, CF::ExecutableDevice::ExecuteFail) { - boost::recursive_mutex::scoped_lock lock(load_execute_lock); - try { - ExecutableDevice_impl::terminate(processId); - this->removeReservation( getComponentDescription(processId)) ; + CF::Properties invalidOptions; + std::string path; + char* tmp; + + // throw and error if name does not begin with a / + if (strncmp(name, "/", 1) != 0) + throw CF::InvalidFileName(CF::CF_EINVAL, "Filename must be absolute"); + if (isLocked()) + throw CF::Device::InvalidState("System is locked down"); + if (isDisabled()) + throw CF::Device::InvalidState("System is disabled"); + + //process options and throw InvalidOptions errors if they are not ULong + for (CORBA::ULong i = 0; i < options.length(); ++i) { + if (options[i].id == CF::ExecutableDevice::PRIORITY_ID) { + CORBA::TypeCode_var atype = options[i].value.type(); + if (atype->kind() != CORBA::tk_ulong) { + invalidOptions.length(invalidOptions.length() + 1); + invalidOptions[invalidOptions.length() - 1].id = options[i].id; + invalidOptions[invalidOptions.length() - 1].value + = options[i].value; + } else + LOG_WARN(GPP_i, "Received a PRIORITY_ID execute option...ignoring.") + } + if (options[i].id == CF::ExecutableDevice::STACK_SIZE_ID) { + 
CORBA::TypeCode_var atype = options[i].value.type(); + if (atype->kind() != CORBA::tk_ulong) { + invalidOptions.length(invalidOptions.length() + 1); + invalidOptions[invalidOptions.length() - 1].id = options[i].id; + invalidOptions[invalidOptions.length() - 1].value + = options[i].value; + } else + LOG_WARN(GPP_i, "Received a STACK_SIZE_ID execute option...ignoring.") + } } - catch(...){ + + if (invalidOptions.length() > 0) { + throw CF::ExecutableDevice::InvalidOptions(invalidOptions); } - this->removePid(processId); -} + // retrieve current working directory + if (this->cacheDirectory.empty()) { + tmp = getcwd(NULL, 200); + if (tmp != NULL) { + path = std::string(tmp); + free(tmp); + } + } else { + path = this->cacheDirectory; + if (!path.compare(path.length()-1, 1, "/")) { + path = path.erase(path.length()-1); + } + } -// -// -// Executable/Device method overrides... -// -// + // append relative path of the executable + path.append(name); -void GPP_i::updateUsageState() -{ - if (system_monitor->get_idle_percent() < modified_thresholds.cpu_idle) { - if ( system_monitor->get_idle_average() < modified_thresholds.cpu_idle) - setUsageState(CF::Device::BUSY); - } - else if (system_monitor->get_mem_free() < (unsigned long)modified_thresholds.mem_free) - setUsageState(CF::Device::BUSY); - else if (this->getPids().size() == 0) - setUsageState(CF::Device::IDLE); - else - setUsageState(CF::Device::ACTIVE); -} + // check file existence + if (access(path.c_str(), F_OK) == -1) { + std::string errMsg = "File could not be found " + path; + throw CF::InvalidFileName(CF::CF_EINVAL, + CORBA::string_dup(errMsg.c_str())); + } + // change permissions to 7-- + if (chmod(path.c_str(), S_IRWXU) != 0) { + LOG_ERROR(GPP_i, "Unable to change permission on executable"); + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EACCES, + "Unable to change permission on executable"); + } -/** - override ExecutableDevice::set_resource_affinity to handle localized settings. 
+ // assemble argument list + std::vector args = prepend_args; + if (getenv("VALGRIND")) { + char* valgrind = getenv("VALGRIND"); + if (strlen(valgrind) == 0) { + // Assume that valgrind is somewhere on the path + args.push_back("valgrind"); + } else { + // Environment variable is path to valgrind executable + args.push_back(valgrind); + } + // Put the log file in the cache next to the component entrypoint; + // include the pid to avoid clobbering existing files + std::string logFile = "--log-file="; + char* name_temp = strdup(path.c_str()); + logFile += dirname(name_temp); + free(name_temp); + logFile += "/valgrind.%p.log"; + args.push_back(logFile); + } + args.push_back(path); + + LOG_DEBUG(GPP_i, "Building param list for process " << path); + for (CORBA::ULong i = 0; i < parameters.length(); ++i) { + LOG_DEBUG(GPP_i, "id=" << ossie::corba::returnString(parameters[i].id) << " value=" << ossie::any_to_string(parameters[i].value)); + CORBA::TypeCode_var atype = parameters[i].value.type(); + args.push_back(ossie::corba::returnString(parameters[i].id)); + args.push_back(ossie::any_to_string(parameters[i].value)); + } + + LOG_DEBUG(GPP_i, "Forking process " << path); + + std::vector argv(args.size() + 1, NULL); + for (std::size_t i = 0; i < args.size(); ++i) { + // const_cast because execv does not modify values in argv[]. + // See: http://pubs.opengroup.org/onlinepubs/9699919799/functions/exec.html + argv[i] = const_cast (args[i].c_str()); + } + + rh_logger::LevelPtr lvl = GPP_i::__logger->getLevel(); + + // setup to capture stdout and stderr from children. 
+ int comp_fd[2]; + if ( _handle_io_redirects ) { + if ( pipe( comp_fd ) == -1 ) { + LOG_ERROR(GPP_i, "Failure to create redirected IO for:" << path); + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EPERM, "Failure to create redirected IO for component"); + } + + if ( fcntl( comp_fd[0], F_SETFD, FD_CLOEXEC ) == -1 ) { + LOG_ERROR(GPP_i, "Failure to support redirected IO for:" << path); + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EPERM, "Failure to support redirected IO for component"); + } + + } + + // fork child process + int pid = fork(); + + if (pid == 0) { + + int num_retries = 5; + int returnval = 0; + + // + // log4cxx will cause dead locks between fork and execv, use the stdout logger object + // for all logging + // + rh_logger::LoggerPtr __logger = rh_logger::StdOutLogger::getRootLogger(); + __logger->setLevel(lvl); + + // set affinity logger method so we do not use log4cxx during affinity processing routine + redhawk::affinity::set_affinity_logger( __logger ) ; + RH_DEBUG(__logger, " Calling set resource affinity....exec:" << name << " options=" << options.length()); + + // set affinity preference before exec + try { + RH_DEBUG(__logger, " Calling set resource affinity....exec:" << name << " options=" << options.length()); + set_resource_affinity( options, getpid(), name ); + } + catch( redhawk::affinity::AffinityFailed &ex ) { + RH_WARN(__logger, "Unable to satisfy affinity request for: " << name << " Reason: " << ex.what() ); + errno=EPERM<<2; + returnval=-1; + ossie::corba::OrbShutdown(true); + exit(returnval); + } + catch( ... ) { + RH_WARN(__logger, "Unhandled exception during affinity processing for resource: " << name ); + ossie::corba::OrbShutdown(true); + exit(returnval); + } + + // reset mutex in child... 
+ pthread_mutex_init(load_execute_lock.native_handle(),0); + + // set the forked component as the process group leader + setpgid(getpid(), 0); + + // apply io redirection for stdout and stderr + if ( _handle_io_redirects ) { + + if( dup2(comp_fd[1],STDERR_FILENO) ==-1 ) { + RH_ERROR(__logger, "Failure to dup2 stderr for resource: " << name ); + ossie::corba::OrbShutdown(true); + exit(-1); + } + + if( dup2(comp_fd[1],STDOUT_FILENO) ==-1 ) { + RH_ERROR(__logger, "Failure to dup2 stdout for resource: " << name ); + ossie::corba::OrbShutdown(true); + exit(-1); + } + + close(comp_fd[0]); + close(comp_fd[1]); + } + + + // Run executable + while(true) + { + if (strcmp(argv[0], "valgrind") == 0) { + // Find valgrind in the path + returnval = execvp(argv[0], &argv[0]); + } else { + returnval = execv(argv[0], &argv[0]); + } + + num_retries--; + if( num_retries <= 0 || errno!=ETXTBSY) + break; + + // Only retry on "text file busy" error + RH_WARN(__logger, "execv() failed, retrying... (cmd=" << path << " msg=\"" << strerror(errno) << "\" retries=" << num_retries << ")"); + usleep(100000); + } + + if( returnval ) { + RH_ERROR(__logger, "Error when calling execv() (cmd=" << path << " errno=" << errno << " msg=\"" << strerror(errno) << "\")"); + ossie::corba::OrbShutdown(true); + } + + RH_ERROR(__logger, "Exiting FAILED subprocess:" << returnval ); + exit(returnval); + } + else if (pid < 0 ){ + LOG_ERROR(GPP_i, "Error forking child process (errno: " << errno << " msg=\"" << strerror(errno) << "\")" ); + switch (errno) { + case E2BIG: + throw CF::ExecutableDevice::ExecuteFail(CF::CF_E2BIG, + "Argument list too long"); + case EACCES: + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EACCES, + "Permission denied"); + case ENAMETOOLONG: + throw CF::ExecutableDevice::ExecuteFail(CF::CF_ENAMETOOLONG, + "File name too long"); + case ENOENT: + throw CF::ExecutableDevice::ExecuteFail(CF::CF_ENOENT, + "No such file or directory"); + case ENOEXEC: + throw 
CF::ExecutableDevice::ExecuteFail(CF::CF_ENOEXEC, + "Exec format error"); + case ENOMEM: + throw CF::ExecutableDevice::ExecuteFail(CF::CF_ENOMEM, + "Out of memory"); + case ENOTDIR: + throw CF::ExecutableDevice::ExecuteFail(CF::CF_ENOTDIR, + "Not a directory"); + case EPERM: + throw CF::ExecutableDevice::ExecuteFail(CF::CF_EPERM, + "Operation not permitted"); + default: + throw CF::ExecutableDevice::ExecuteFail(CF::CF_NOTSET, + "ERROR ON FORK"); + } + } + + if ( _handle_io_redirects ) { + close(comp_fd[1]); + LOG_TRACE(GPP_i, "Adding Task for IO Redirection PID:" << pid << " : stdout "<< comp_fd[0] ); + WriteLock wlock(fdsLock); + // trans form file name if contains env or exec param expansion + std::string rfname=__ExpandEnvVars(componentOutputLog); + rfname=__ExpandProperties(rfname, parameters ); + redirectedFds.push_front( proc_redirect( rfname, pid, comp_fd[0] ) ); + } + + + LOG_DEBUG(GPP_i, "Execute success: name:" << name << " : "<< path); + + return pid; +} + + +void GPP_i::terminate (CF::ExecutableDevice::ProcessID_Type processId) throw (CORBA::SystemException, CF::ExecutableDevice::InvalidProcess, CF::Device::InvalidState) +{ + LOG_TRACE(GPP_i, " Terminate request, processID: " << processId); + try { + markPidTerminated( processId ); + ExecutableDevice_impl::terminate(processId); + } + catch(...){ + } + removeProcess(processId); +} + +bool GPP_i::_component_cleanup(const int pid, const int status) +{ + component_description comp; + try { + comp = getComponentDescription(pid); + } catch (...) { + // pass.. could be a pid from and popen or system commands.. 
+ return false; + } + + if (!comp.terminated) { + // release of component can exit process before terminate is called + if (WIFEXITED(status) && (WEXITSTATUS(status) != 0)) { + LOG_ERROR(GPP_i, "Unexpected component exit with non-zero status " << WEXITSTATUS(status) + << ", App/Identifier/Process: " << comp.appName << "/" << comp.identifier << "/" << pid); + sendChildNotification(comp.identifier, comp.appName); + } else if (WIFSIGNALED(status)) { + LOG_ERROR(GPP_i, "Unexepected component termination with signal " << WTERMSIG(status) + << ", App/Identifier/Process: " << comp.appName << "/" << comp.identifier << "/" << pid); + sendChildNotification(comp.identifier, comp.appName); + } + } + + removeProcess(pid); + return true; +} + + +bool GPP_i::_check_nic_thresholds() +{ + uint64_t threshold=0; + double actual=0; + size_t nic_exceeded=0; + bool retval=false; + // + ReadLock rlock(monitorLock); + NicMonitorSequence::iterator iter=nic_monitors.begin(); + for( ; iter != nic_monitors.end(); iter++ ) { + NicMonitorPtr monitor=*iter; + threshold += monitor->get_threshold_value(); + actual += monitor->get_measured_value(); + + LOG_TRACE(GPP_i, __FUNCTION__ << ": NicThreshold: " << monitor->get_resource_id() << " exceeded " << monitor->is_threshold_exceeded() << " threshold=" << monitor->get_threshold() << " measured=" << monitor->get_measured()); + if ( monitor->is_threshold_exceeded() ) nic_exceeded++; + } + + if ( nic_monitors.size() != 0 && nic_monitors.size() == nic_exceeded ) { + std::ostringstream oss; + oss << "Threshold (cumulative) : " << threshold << " Actual (cumulative) : " << actual; + _setReason( "NIC USAGE ", oss.str() ); + retval = true; + } + + return retval; +} + +bool GPP_i::_check_limits( const thresholds_struct &thresholds) +{ + limit_check_count = (limit_check_count + 1) % 10; + if ( limit_check_count ) { + return false; + } + + float _tthreshold = 1 - __thresholds.threads * .01; + + if (gpp_limits.max_threads != -1) { + // + // check current process 
limits + // + LOG_TRACE(GPP_i, "_gpp_check_limits threads (cur/max): " << gpp_limits.current_threads << "/" << gpp_limits.max_threads ); + if (gpp_limits.current_threads>(gpp_limits.max_threads*_tthreshold)) { + LOG_WARN(GPP_i, "GPP process thread limit threshold exceeded, count/threshold: " << gpp_limits.current_threads << "/" << (gpp_limits.max_threads*_tthreshold) ); + return true; + } + } + + if ( sys_limits.max_threads != -1 ) { + // + // check current system limits + // + LOG_TRACE(GPP_i, "_sys_check_limits threads (cur/max): " << sys_limits.current_threads << "/" << sys_limits.max_threads ); + if (sys_limits.current_threads>( sys_limits.max_threads *_tthreshold)) { + LOG_WARN(GPP_i, "SYSTEM thread limit threshold exceeded, count/threshold: " << sys_limits.current_threads << "/" << (sys_limits.max_threads*_tthreshold) ); + return true; + } + } + + return false; +} + + +// +// +// Executable/Device method overrides... +// +// + +void GPP_i::updateUsageState() +{ + // allow for global ignore of thresholds + if ( thresholds.ignore == true ) { + LOG_TRACE(GPP_i, "Ignoring threshold checks "); + if (getPids().size() == 0) { + LOG_TRACE(GPP_i, "Usage State IDLE (trigger) pids === 0... "); + _resetReason(); + setUsageState(CF::Device::IDLE); + } + else { + LOG_TRACE(GPP_i, "Usage State ACTIVE..... 
"); + _resetReason(); + setUsageState(CF::Device::ACTIVE); + } + return; + } + + double sys_idle = system_monitor->get_idle_percent(); + double sys_idle_avg = system_monitor->get_idle_average(); + double sys_load = system_monitor->get_loadavg(); + int64_t mem_free = system_monitor->get_mem_free(); + + // get reservation state + double max_allowable_load = utilization[0].maximum; + double subscribed = utilization[0].subscribed; + + + { + std::stringstream oss; + ReadLock rlock(monitorLock); + NicMonitorSequence::iterator iter=nic_monitors.begin(); + for( ; iter != nic_monitors.end(); iter++ ) { + NicMonitorPtr m = *iter; + oss << " Nic: " << m->get_resource_id() << " exceeded " << m->is_threshold_exceeded() << " threshold=" << m->get_threshold() << " measured=" << m->get_measured() << std::endl; + } + + LOG_TRACE(GPP_i, "USAGE STATE: " << std::endl << + " CPU: threshold " << modified_thresholds.cpu_idle << " Actual: " << sys_idle << " Avg: " << sys_idle_avg << std::endl << + " MEM: threshold " << modified_thresholds.mem_free << " Actual: " << mem_free << std::endl << + " LOAD: threshold " << modified_thresholds.load_avg << " Actual: " << sys_load << std::endl << + " RESRV: threshold " << max_allowable_load << " Actual: " << subscribed << std::endl << + " Ingress threshold: " << mcastnicIngressThresholdValue << " capacity: " << mcastnicIngressCapacity << std::endl << + " Egress threshold: " << mcastnicEgressThresholdValue << " capacity: " << mcastnicEgressCapacity << std::endl << + " Threads threshold: " << gpp_limits.max_threads << " Actual: " << gpp_limits.current_threads << std::endl << + " NIC: " << std::endl << oss.str() + ); + } + + if ( !(thresholds.cpu_idle < 0) ) { + if (sys_idle < modified_thresholds.cpu_idle) { + if ( sys_idle_avg < modified_thresholds.cpu_idle) { + std::ostringstream oss; + oss << "Threshold: " << modified_thresholds.cpu_idle << " Actual/Average: " << sys_idle << "/" << sys_idle_avg ; + _setReason( "CPU IDLE", oss.str() ); + 
setUsageState(CF::Device::BUSY); + return; + } + } + } + + if ( !(thresholds.mem_free < 0) && (mem_free < modified_thresholds.mem_free)) { + std::ostringstream oss; + oss << "Threshold: " << modified_thresholds.mem_free << " Actual: " << mem_free; + _setReason( "FREE MEMORY", oss.str() ); + setUsageState(CF::Device::BUSY); + } + + else if ( !(thresholds.load_avg < 0) && ( sys_load > modified_thresholds.load_avg )) { + std::ostringstream oss; + oss << "Threshold: " << modified_thresholds.load_avg << " Actual: " << sys_load; + _setReason( "LOAD AVG", oss.str() ); + setUsageState(CF::Device::BUSY); + } + else if ( reserved_capacity_per_component != 0 && (subscribed > max_allowable_load) ) { + std::ostringstream oss; + oss << "Threshold: " << max_allowable_load << " Actual(subscribed) " << subscribed; + _setReason( "RESERVATION CAPACITY", oss.str() ); + setUsageState(CF::Device::BUSY); + } + else if ( !(thresholds.nic_usage < 0) && _check_nic_thresholds() ) { + setUsageState(CF::Device::BUSY); + } + else if (_check_limits(thresholds)) { + std::ostringstream oss; + oss << "Threshold: " << gpp_limits.max_threads << " Actual: " << gpp_limits.current_threads; + _setReason( "ULIMIT (MAX_THREADS)", oss.str() ); + setUsageState(CF::Device::BUSY); + } + else if (getPids().size() == 0) { + LOG_TRACE(GPP_i, "Usage State IDLE (trigger) pids === 0... "); + _resetReason(); + setUsageState(CF::Device::IDLE); + } + else { + LOG_TRACE(GPP_i, "Usage State ACTIVE..... 
"); + _resetReason(); + setUsageState(CF::Device::ACTIVE); + } +} + + +void GPP_i::_resetReason() { + _setReason("",""); +} + +void GPP_i::_setReason( const std::string &reason, const std::string &event, const bool enable_timestamp ) { + + if ( reason != "" ) { + if ( reason != _busy_reason ) { + LOG_INFO(GPP_i, "GPP BUSY, REASON: " << reason << " " << event ); + _busy_timestamp = boost::posix_time::microsec_clock::local_time(); + _busy_mark = boost::posix_time::microsec_clock::local_time(); + _busy_reason = reason; + std::ostringstream oss; + oss << "(time: " << _busy_timestamp << ") REASON: " << _busy_reason << " EXCEEDED " << event; + busy_reason = oss.str(); + } + else if ( reason == _busy_reason ) { + boost::posix_time::ptime now = boost::posix_time::microsec_clock::local_time(); + boost::posix_time::time_duration dur = now - _busy_timestamp; + boost::posix_time::time_duration last_msg = now - _busy_mark; + std::ostringstream oss; + oss << "(first/duration: " << _busy_timestamp << "/" << dur << ") REASON: " << _busy_reason << " EXCEEDED " << event; + busy_reason = oss.str(); + if ( last_msg.total_seconds() > 2 ) { + _busy_mark = now; + LOG_INFO(GPP_i, "GPP BUSY, " << oss.str() ); + } + } + } + else { + _busy_timestamp = boost::posix_time::microsec_clock::local_time(); + _busy_mark = _busy_timestamp; + busy_reason = reason; + _busy_reason = reason; + } +} + +/** + override ExecutableDevice::set_resource_affinity to handle localized settings. + + NOTE: the get_affinity_logger method is required to get the rh_logger object used after the "fork" method is + called. ExecutableDevice will provide the logger to use.... + + log4cxx will lock in the child process before execv is call for high frequency component deployments. 
*/ void GPP_i::set_resource_affinity( const CF::Properties& options, const pid_t rsc_pid, const char *rsc_name, const std::vector &bl ) { + + RH_DEBUG( redhawk::affinity::get_affinity_logger(), "Affinity Options....GPP/Resource: " << label() << "/" << rsc_name << " options" << options.length() ); + boost::recursive_mutex::scoped_lock(load_execute_lock); + + // check if we override incoming affinity requests... if ( affinity.force_override ) { if ( redhawk::affinity::is_disabled() == false ) { - LOG_WARN(GPP_i, "Enforcing GPP affinity property settings to resource, GPP/pid: " << label() << "/" << rsc_pid ); + RH_WARN(redhawk::affinity::get_affinity_logger(), "Enforcing GPP affinity property settings to resource, GPP/pid: " << label() << "/" << rsc_pid ); if ( _apply_affinity( affinity, rsc_pid, rsc_name ) < 0 ) { throw redhawk::affinity::AffinityFailed("Failed to apply GPP affinity settings to resource"); } } else { - LOG_WARN(GPP_i, "Affinity processing disabled, unable to apply GPP affinity settings to resource, GPP/rsc/pid: " << label() << "/" << rsc_name << "/" << rsc_pid ); + RH_WARN(redhawk::affinity::get_affinity_logger(), "Affinity processing disabled, unable to apply GPP affinity settings to resource, GPP/rsc/pid: " << label() << "/" << rsc_name << "/" << rsc_pid ); } } else if ( affinity.deploy_per_socket && redhawk::affinity::has_nic_affinity(options) == false ) { if ( execPartitions.size() == 0 ) { - LOG_WARN(GPP_i, "Skipping deploy_per_socket request. Reason: No execute partitions found, check numa settings. GPP/resource: " << label() << "/" << rsc_name ); + RH_WARN(redhawk::affinity::get_affinity_logger(), "Skipping deploy_per_socket request. Reason: No execute partitions found, check numa settings. 
GPP/resource: " << label() << "/" << rsc_name ); return; } @@ -569,7 +1773,7 @@ void GPP_i::set_resource_affinity( const CF::Properties& options, const pid_t rs if ( psoc < 0 ) { throw redhawk::affinity::AffinityFailed("Failed to apply PROCESSOR SOCKET affinity settings to resource"); } - LOG_DEBUG(GPP_i, "Enforcing PROCESSOR SOCKET affinity settings to resource, GPP/pid/socket: " << label() << "/" << rsc_pid << "/" << psoc ); + RH_DEBUG(redhawk::affinity::get_affinity_logger(), "Enforcing PROCESSOR SOCKET affinity settings to resource, GPP/pid/socket: " << label() << "/" << rsc_pid << "/" << psoc ); std::ostringstream os; os << psoc; if ( _apply_affinity( rsc_pid, rsc_name, "socket", os.str(), bl_cpus ) < 0 ) { @@ -579,76 +1783,43 @@ void GPP_i::set_resource_affinity( const CF::Properties& options, const pid_t rs } else { + // create black list cpus redhawk::affinity::CpuList blcpus; - try { - //blcpus = gpp::affinity::get_cpu_list( "cpu", affinity.blacklist_cpus ); - blcpus.resize(bl_cpus.size()); - std::copy( bl_cpus.begin(), bl_cpus.end(), blcpus.begin() ); - } - catch( redhawk::affinity::AffinityFailed &ex) { - LOG_ERROR(GPP_i, "Unable to process blacklist cpu specification reason:" << ex.what() ); - throw; - } + blcpus.resize(bl_cpus.size()); + std::copy( bl_cpus.begin(), bl_cpus.end(), blcpus.begin() ); // // Same flow as ExecutableDevice::set_resource_affinity // try { if ( redhawk::affinity::has_affinity( options ) ) { - LOG_DEBUG(GPP_i, "Has Affinity Options....GPP/Resource:" << label() << "/" << rsc_name); + RH_DEBUG(redhawk::affinity::get_affinity_logger(), "Has Affinity Options....GPP/Resource:" << label() << "/" << rsc_name); if ( redhawk::affinity::is_disabled() ) { - LOG_WARN(GPP_i, "Resource has affinity directives but processing disabled, ExecDevice/Resource:" << - label() << "/" << rsc_name); + RH_WARN(redhawk::affinity::get_affinity_logger(), "Resource has affinity directives but processing disabled, ExecDevice/Resource:" << + label() << "/" << 
rsc_name); } else { - LOG_DEBUG(GPP_i, "Calling set resource affinity.... GPP/Resource:" << - label() << "/" << rsc_name); - redhawk::affinity::set_affinity( options, rsc_pid, blcpus ); + RH_DEBUG(redhawk::affinity::get_affinity_logger(), "Calling set resource affinity.... GPP/Resource: " << label() << "/" << rsc_name); + redhawk::affinity::AffinityDirectives spec = redhawk::affinity::convert_properties( options ); + gpp::affinity::set_affinity( spec, rsc_pid, blcpus ); } } else { - LOG_TRACE(GPP_i, "No Affinity Options Found....GPP/Resource:" << label() << "/" << rsc_name); + RH_TRACE(redhawk::affinity::get_affinity_logger(), "No Affinity Options Found....GPP/Resource: " << label() << "/" << rsc_name); } + } catch( redhawk::affinity::AffinityFailed &e) { - LOG_WARN(GPP_i, "AFFINITY REQUEST FAILED: " << e.what() ); + RH_WARN(redhawk::affinity::get_affinity_logger(), "AFFINITY REQUEST FAILED: " << e.what() ); throw; } } - } - int GPP_i::serviceFunction() { - - // Check if any children died.... - fd_set readfds; - FD_ZERO(&readfds); - FD_SET(sig_fd, &readfds); - struct timeval tv = {0, 50}; - struct signalfd_siginfo si; - ssize_t s; - - if ( sig_fd > -1 ) { - // don't care about writefds and exceptfds: - select(sig_fd+1, &readfds, NULL, NULL, &tv); - if (FD_ISSET(sig_fd, &readfds)) { - LOG_TRACE(GPP_i, "Checking for signals from SIGNALFD......" << sig_fd); - s = read(sig_fd, &si, sizeof(struct signalfd_siginfo)); - if (s != sizeof(struct signalfd_siginfo)){ - LOG_ERROR(GPP_i, "SIGCHLD handling error ..."); - } - - if ( si.ssi_signo == SIGCHLD) { - LOG_TRACE(GPP_i, "Child died , pid .................................." 
<< si.ssi_pid); - sigchld_handler(si.ssi_signo); - } - } - } - boost::posix_time::ptime now = boost::posix_time::microsec_clock::local_time(); boost::posix_time::time_duration dur = now -time_mark; if ( dur.total_milliseconds() < threshold_cycle_time ) { @@ -657,68 +1828,108 @@ int GPP_i::serviceFunction() time_mark = now; - // - // update any threshold limits that are based on the current system state - // - establishModifiedThresholds(); - // update data model for the GPP try { std::for_each( data_model.begin(), data_model.end(), boost::bind( &Updateable::update, _1 ) ); std::for_each( execPartitions.begin(), execPartitions.end(), boost::bind( &exec_socket::update, _1 ) ); - calculateSystemMemoryLoading(); } catch( const boost::thread_resource_error& e ){ std::stringstream errstr; errstr << "Error acquiring lock (errno=" << e.native_error() << " msg=\"" << e.what() << "\")"; LOG_ERROR(GPP_i, __FUNCTION__ << ": " << errstr.str() ); - return NOOP; } + // + // update state that affect device usage state + // + update(); + if ( execPartitions.size() ) { // dump execute partition status... 
ExecPartitionList::iterator iter = execPartitions.begin(); std::ostringstream ss; ss << boost::format("%-6s %-4s %-7s %-7s %-7s ") % "SOCKET" % "CPUS" % "USER" % "SYSTEM" % "IDLE"; - LOG_DEBUG(GPP_i, ss.str() ); + LOG_TRACE(GPP_i, ss.str() ); ss.clear(); ss.str(""); for ( ; iter != execPartitions.end(); iter++ ) { ss << boost::format("%-6d %-4d %-7.2f %-7.2f %-7.2f ") % iter->id % iter->stats.get_ncpus() % iter->stats.get_user_percent() % iter->stats.get_system_percent() % iter->stats.get_idle_percent() ; - LOG_DEBUG(GPP_i, ss.str() ); + LOG_TRACE(GPP_i, ss.str() ); ss.clear(); ss.str(""); } } + // update monitors to see if thresholds are exceeded + std::for_each( threshold_monitors.begin(), threshold_monitors.end(), boost::bind( &Updateable::update, _1 ) ); + for( size_t i=0; iupdate(); LOG_TRACE(GPP_i, __FUNCTION__ << ": resource_id=" << threshold_monitors[i]->get_resource_id() << " threshold=" << threshold_monitors[i]->get_threshold() << " measured=" << threshold_monitors[i]->get_measured()); } - // update monitors to see if thresholds are exceeded - std::for_each( threshold_monitors.begin(), threshold_monitors.end(), boost::bind( &Updateable::update, _1 ) ); - // update device usages state for the GPP updateUsageState(); - + return NORMAL; } +void GPP_i::_set_vlan_property() +{ + mcastnicVLANs.clear(); + // grab nic_metrics for the specified interface + if ( mcastnicInterface != "" ) { + std::vector::iterator nic = nic_metrics.begin(); + for ( ; nic != nic_metrics.end(); nic++ ) { + // found match + if ( nic->interface == mcastnicInterface ) { + std::vector values; + boost::split( values, nic->vlans, boost::is_any_of(std::string(",")), boost::algorithm::token_compress_on ); + for ( size_t i=0; i< values.size(); i++){ + mcastnicVLANs.push_back( atoi(values[i].c_str()) ); + } + break; + } + } + } + + +} + // // // Property Callback Methods // // + +void GPP_i::_component_output_changed(const std::string *oldValue, const std::string *newValue) +{ + if( newValue ) 
{ + if ( *newValue == "" ) { + _componentOutputLog=""; + _handle_io_redirects = false; + return; + } + + _componentOutputLog =__ExpandEnvVars(componentOutputLog); + _handle_io_redirects = true; + } + else { + _componentOutputLog=""; + _handle_io_redirects = false; + } + +} + + void GPP_i::reservedChanged(const float *oldValue, const float *newValue) { if( newValue ) { reserved_capacity_per_component = *newValue; - idle_capacity_modifier = 100.0 * reserved_capacity_per_component/((float)this->processor_cores); + idle_capacity_modifier = 100.0 * reserved_capacity_per_component/((float)processor_cores); ExecPartitionList::iterator iter = execPartitions.begin(); for( ; iter != execPartitions.end(); iter++ ) { @@ -728,6 +1939,43 @@ void GPP_i::reservedChanged(const float *oldValue, const float *newValue) } +void GPP_i::mcastnicThreshold_changed(const CORBA::Long *oldvalue, const CORBA::Long *newvalue) { + + if( newvalue ) { + int threshold = *newvalue; + if ( threshold >= 0 && threshold <= 100 ) { + double origIngressThreshold = mcastnicIngressThresholdValue; + double origEgressThreshold = mcastnicIngressThresholdValue; + mcastnicThreshold = threshold; + mcastnicIngressThresholdValue = mcastnicIngressTotal * ( mcastnicThreshold / 100.0) ; + mcastnicEgressThresholdValue = mcastnicEgressTotal * ( mcastnicThreshold / 100.0) ; + + if ( mcastnicIngressThresholdValue > origIngressThreshold ) { + // add extra to capacity + mcastnicIngressCapacity += mcastnicIngressThresholdValue -origIngressThreshold; + mcastnicIngressFree = mcastnicIngressCapacity; + } + else if ( mcastnicIngressThresholdValue < mcastnicIngressCapacity ){ + mcastnicIngressCapacity = mcastnicIngressThresholdValue; + mcastnicIngressFree = mcastnicIngressCapacity; + } + + if ( mcastnicEgressThresholdValue > origEgressThreshold ) { + // add extra to capacity + mcastnicEgressCapacity += mcastnicEgressThresholdValue-origEgressThreshold; + mcastnicEgressFree = mcastnicEgressCapacity; + } + else if ( 
mcastnicEgressThresholdValue < mcastnicEgressCapacity ){ + mcastnicEgressCapacity = mcastnicEgressThresholdValue; + mcastnicEgressFree = mcastnicEgressCapacity; + } + + } + } + +} + + void GPP_i::_affinity_changed( const affinity_struct *ovp, const affinity_struct *nvp ) { if ( ovp && nvp && *ovp == *nvp ) return; @@ -800,16 +2048,16 @@ void GPP_i::_affinity_changed( const affinity_struct *ovp, const affinity_struct if ( std::count( bl_cpus.begin(), bl_cpus.end(), cpus[i] ) == 0 ) wl_cpus.push_back( cpus[i] ); } + // apply changes to member variable + affinity = nv; RH_NL_DEBUG("GPP", "Affinity Force Override, force_override=" << nv.force_override); RH_NL_INFO("GPP", "Affinity Disable State, disabled=" << nv.disabled); - if ( nv.disabled ) { + if ( nv.disabled || gpp::affinity::check_numa() == false ) { RH_NL_INFO("GPP", "Disabling affinity processing requests."); - redhawk::affinity::set_affinity_state( nv.disabled ); + affinity.disabled=true; + redhawk::affinity::set_affinity_state(affinity.disabled); } - // apply changes to member variable - affinity = nv; - _set_processor_monitor_list( wl_cpus ); // reset idle idle capcity monitor... idle threshold gets calculated during each loop of svc func. 
@@ -832,10 +2080,157 @@ void GPP_i::_affinity_changed( const affinity_struct *ovp, const affinity_struct // Allocation Callback Methods // // +bool GPP_i::allocate_mcastegress_capacity(const CORBA::Long &value) +{ + boost::mutex::scoped_lock lock(propertySetAccess); + std::string except_msg("Invalid allocation"); + bool retval=false; + LOG_DEBUG(GPP_i, __FUNCTION__ << ": Allocating mcastegress allocation " << value); + + if ( mcastnicInterface == "" ) { + std::string msg = "mcastnicEgressCapacity request failed because no mcastnicInterface has been configured"; + LOG_DEBUG(GPP_i, __FUNCTION__ << msg ); + throw CF::Device::InvalidState(msg.c_str()); + return retval; + } + + // see if calculated capacity and measured capacity is avaliable + if ( value > mcastnicEgressCapacity ) { + std::ostringstream os; + os << "mcastnicEgressCapacity request: " << value << " failed because of insufficent capacity available, current: " << mcastnicEgressCapacity; + std::string msg = os.str(); + LOG_DEBUG(GPP_i, __FUNCTION__ << msg ); + CF::Properties errprops; + errprops.length(1); + errprops[0].id = "mcastnicEgressCapacity"; + errprops[0].value <<= (CORBA::Long)value; + throw CF::Device::InvalidCapacity(msg.c_str(), errprops); + } + + // adjust property + retval= true; + mcastnicEgressCapacity = mcastnicEgressCapacity - value; + mcastnicEgressFree = mcastnicEgressCapacity; + return retval; +} + + +void GPP_i::deallocate_mcastegress_capacity(const CORBA::Long &value) +{ + boost::mutex::scoped_lock lock(propertySetAccess); + LOG_DEBUG(GPP_i, __FUNCTION__ << ": Deallocating mcastegress allocation " << value); + + mcastnicEgressCapacity = value + mcastnicEgressCapacity; + if ( mcastnicEgressCapacity > mcastnicEgressThresholdValue ) { + mcastnicEgressCapacity = mcastnicEgressThresholdValue; + } + + mcastnicEgressFree = mcastnicEgressCapacity; +} + +bool GPP_i::allocate_reservation_request(const redhawk__reservation_request_struct &value) +{ + if (isBusy()) { + return false; + } + 
LOG_DEBUG(GPP_i, __FUNCTION__ << ": allocating reservation_request allocation "); + { + WriteLock rlock(pidLock); + if (applicationReservations.find(value.obj_id) != applicationReservations.end()){ + LOG_INFO(GPP_i, __FUNCTION__ << ": Cannot make multiple reservations against the same application: "<first == value.obj_id) { + applicationReservations.erase(app_it); + break; + } + } +} + + +bool GPP_i::allocate_mcastingress_capacity(const CORBA::Long &value) +{ + boost::mutex::scoped_lock lock(propertySetAccess); + std::string except_msg("Invalid allocation"); + bool retval=false; + LOG_DEBUG(GPP_i, __FUNCTION__ << ": Allocating mcastingress allocation " << value); + + if ( mcastnicInterface == "" ) { + std::string msg = "mcastnicIngressCapacity request failed because no mcastnicInterface has been configured" ; + LOG_DEBUG(GPP_i, __FUNCTION__ << msg ); + throw CF::Device::InvalidState(msg.c_str()); + } + + // see if calculated capacity and measured capacity is avaliable + if ( value > mcastnicIngressCapacity ) { + std::ostringstream os; + os << "mcastnicIngressCapacity request: " << value << " failed because of insufficent capacity available, current: " << mcastnicIngressCapacity; + std::string msg = os.str(); + LOG_DEBUG(GPP_i, __FUNCTION__ << msg ); + CF::Properties errprops; + errprops.length(1); + errprops[0].id = "mcastnicIngressCapacity"; + errprops[0].value <<= (CORBA::Long)value; + throw CF::Device::InvalidCapacity(msg.c_str(), errprops); + } + + // adjust property + retval=true; + mcastnicIngressCapacity = mcastnicIngressCapacity - value; + mcastnicIngressFree = mcastnicIngressCapacity; + + return retval; +} + + +void GPP_i::deallocate_mcastingress_capacity(const CORBA::Long &value) +{ + boost::mutex::scoped_lock lock(propertySetAccess); + LOG_DEBUG(GPP_i, __FUNCTION__ << ": Deallocating mcastingress deallocation " << value); + + mcastnicIngressCapacity = value + mcastnicIngressCapacity; + if ( mcastnicIngressCapacity > mcastnicIngressThresholdValue ) { + 
mcastnicIngressCapacity = mcastnicIngressThresholdValue; + } + + mcastnicIngressFree = mcastnicIngressCapacity; +} + + bool GPP_i::allocateCapacity_nic_allocation(const nic_allocation_struct &alloc) { - boost::mutex::scoped_lock lock(pidLock); + WriteLock wlock(nicLock); std::string except_msg("Invalid allocation"); bool success=false; LOG_TRACE(GPP_i, __FUNCTION__ << ": Allocating nic_allocation (identifier=" << alloc.identifier << ")"); @@ -889,7 +2284,7 @@ bool GPP_i::allocateCapacity_nic_allocation(const nic_allocation_struct &alloc) void GPP_i::deallocateCapacity_nic_allocation(const nic_allocation_struct &alloc) { - boost::mutex::scoped_lock lock(pidLock); + WriteLock wlock(nicLock); LOG_TRACE(GPP_i, __FUNCTION__ << ": Deallocating nic_allocation (identifier=" << alloc.identifier << ")"); try { LOG_DEBUG(GPP_i, __FUNCTION__ << ": { identifier: \"" << alloc.identifier << "\", data_rate: " << alloc.data_rate << ", data_size: " << alloc.data_size << ", multicast_support: \"" << alloc.multicast_support << "\", ip_addressable: \"" << alloc.ip_addressable << "\", interface: \"" << alloc.interface << "\" }"); @@ -929,20 +2324,29 @@ void GPP_i::deallocate_diskCapacity(const double &value) { return; } -bool GPP_i::allocate_memCapacity(const int64_t &value) { +bool GPP_i::allocate_memCapacity(const CORBA::LongLong &value) { if (isBusy()) { return false; } - - // get current available free memory from system - if ( system_monitor->get_mem_free() < (uint64_t)value ) + LOG_DEBUG(GPP_i, "allocate memory (REQUEST) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); + if ( value > memCapacity or value > memCapacityThreshold ) return false; + memCapacity -= value; + LOG_DEBUG(GPP_i, "allocate memory (SUCCESS) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); return true; } -void GPP_i::deallocate_memCapacity(const int64_t &value) { +void GPP_i::deallocate_memCapacity(const CORBA::LongLong &value) { + 
LOG_DEBUG(GPP_i, "deallocate memory (REQUEST) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); + memCapacity += value; + LOG_DEBUG(GPP_i, "deallocate memory (SUCCESS) value: " << value << " memCapacity: " << memCapacity << " memFree:" << memFree ); + if ( memCapacity > memCapacityThreshold ) { + memCapacity = memCapacityThreshold; + } + updateThresholdMonitors(); + updateUsageState(); return; } @@ -953,17 +2357,58 @@ bool GPP_i::allocate_loadCapacity(const double &value) { if (isBusy()) { return false; } + + // get current system load and calculated reservation load + if ( reserved_capacity_per_component == 0.0 ) { + + LOG_DEBUG(GPP_i, "allocate load capacity, (REQUEST) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + // get system monitor report... + double load_threshold = modified_thresholds.load_avg; + double sys_load = system_monitor->get_loadavg(); + if ( sys_load + value > load_threshold ) { + LOG_WARN(GPP_i, "Allocate load capacity would exceed measured system load, current loadavg: " << sys_load << " requested: " << value << " threshold: " << load_threshold ); + } - if ( (loadCapacity_counter - value ) < 0.0 ) - return false; - - loadCapacity_counter -= value; + // perform classic load capacity + if ( value > loadCapacity ) { + std::ostringstream os; + os << " Allocate load capacity failed due to insufficient capacity, available capacity:" << loadCapacity << " requested capacity: " << value; + std::string msg = os.str(); + LOG_DEBUG(GPP_i, msg ); + CF::Properties errprops; + errprops.length(1); + errprops[0].id = "DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056 (loadCapacity)"; + errprops[0].value <<= (CORBA::Long)value; + throw CF::Device::InsufficientCapacity(errprops, msg.c_str()); + } + + loadCapacity -= value; + LOG_DEBUG(GPP_i, "allocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + } + else { + // manage load capacity 
handled via reservation + LOG_WARN(GPP_i, "Allocate load capacity allowed, GPP using component reservations for managing load capacity." ); + + loadCapacity -= value; + if ( loadCapacity < 0.0 ) { + loadCapacity = 0.0; + } + + LOG_DEBUG(GPP_i, "allocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + + } + updateUsageState(); return true; } void GPP_i::deallocate_loadCapacity(const double &value) { - loadCapacity_counter += value; + LOG_DEBUG(GPP_i, "deallocate load capacity, (REQUEST) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); + loadCapacity += value; + if ( loadCapacity > loadFree ) { + loadCapacity = loadFree; + } + LOG_DEBUG(GPP_i, "deallocate load capacity, (SUCCESS) value: " << value << " loadCapacity: " << loadCapacity << " loadFree:" << loadFree ); updateThresholdMonitors(); updateUsageState(); return; @@ -995,7 +2440,7 @@ void GPP_i::sendChildNotification(const std::string &comp_id, const std::string void GPP_i::updateThresholdMonitors() { - boost::mutex::scoped_lock lock(pidLock); + WriteLock wlock(monitorLock); MonitorSequence::iterator iter=threshold_monitors.begin(); for( ; iter != threshold_monitors.end(); iter++ ) { ThresholdMonitorPtr monitor=*iter; @@ -1004,135 +2449,473 @@ void GPP_i::updateThresholdMonitors() } } - -void GPP_i::establishModifiedThresholds() +void GPP_i::update() { - boost::mutex::scoped_lock lock(pidLock); - this->modified_thresholds.cpu_idle = this->thresholds.cpu_idle + (this->idle_capacity_modifier * this->reservations.size()) + this->loadCapacity_counter; - LOG_TRACE(GPP_i, __FUNCTION__ << "ModifyThreshold : " << std::endl << - " modified_threshold=" << modified_thresholds.cpu_idle << std::endl << + // establish what the actual load is per floor_reservation + // if the actual load -per is less than the reservation, compute the different and add the difference to the cpu_idle + // read the clock from the system (start) + 
+ int64_t user=0, system=0; + ProcStat::GetTicks( system, user); + int64_t f_start_total = system; + int64_t f_use_start_total = user; + float reservation_set = 0; + size_t nres=0; + int64_t usage=0; + + { + WriteLock rlock(pidLock); + + this->update_grp_child_pids(); + + ProcessList::iterator i=this->pids.begin(); + for ( ; i!=pids.end(); i++) { + + if ( !i->terminated ) { + + // update pstat usage for each process + usage = i->get_pstat_usage(); + + if ( !i->app_started ) { + if ( applicationReservations.find(i->appName) != applicationReservations.end()) { + if (applicationReservations[i->appName].reservation.find("cpucores") != applicationReservations[i->appName].reservation.end()) { + continue; + } + } + nres++; + if ( i->reservation == -1) { + reservation_set += idle_capacity_modifier; + } else { + reservation_set += 100.0 * i->reservation/((float)processor_cores); + } + } + } + } + } + LOG_TRACE(GPP_i, __FUNCTION__ << " Completed first pass, record pstats for nproc: " << nres << " res_set " << reservation_set ); + + // set number reservations that are not started + n_reservations = nres; + + // wait a little bit + usleep(500000); + + + user=0, system=0; + ProcStat::GetTicks( system, user); + int64_t f_end_total = system; + int64_t f_use_end_total = user; + float f_total = (float)(f_end_total-f_start_total); + if ( f_total <= 0.0 ) { + LOG_TRACE(GPP_i, __FUNCTION__ << std::endl<< " System Ticks end/start " << f_end_total << "/" << f_start_total << std::endl ); + f_total=1.0; + } + float inverse_load_per_core = ((float)processor_cores)/(f_total); + float aggregate_usage = 0; + float non_specialized_aggregate_usage = 0; + double percent_core; + + ReadLock rlock(pidLock); + ProcessList::iterator i=this->pids.begin(); + int usage_out=0; + for (ApplicationReservationMap::iterator app_it=applicationReservations.begin(); app_it!=applicationReservations.end(); app_it++) { + app_it->second.usage = 0; + } + for ( ; i!=pids.end(); i++, usage_out++) { + + usage = 0; + 
percent_core =0; + if ( !i->terminated ) { + + // get delta from last pstat + usage = i->get_pstat_usage(); + + percent_core = (double)usage * inverse_load_per_core; + i->core_usage = percent_core; + double res = i->reservation; + +#if 0 + // debug assist + if ( !(usage_out % 500) || usage < 0 || percent_core < 0.0 ) { + uint64_t u, p2, p1; + u = i->get_pstat_usage(p2,p1); + LOG_INFO(GPP_i, __FUNCTION__ << std::endl<< "PROC SPEC PID: " << i->pid << std::endl << + " usage " << usage << std::endl << + " u " << usage << std::endl << + " p2 " << p2 << std::endl << + " p1 " << p1 << std::endl << + " percent_core: " << percent_core << std::endl << + " reservation: " << i->reservation << std::endl ); + } +#endif + + if ( applicationReservations.find(i->appName) != applicationReservations.end()) { + if (applicationReservations[i->appName].reservation.find("cpucores") != applicationReservations[i->appName].reservation.end()) { + applicationReservations[i->appName].usage += percent_core; + } + } + + if ( i->app_started ) { + + // if component is not using enough the add difference between minimum and current load + if ( percent_core < res ) { + reservation_set += 100.00 * ( res - percent_core)/((double)processor_cores); + } + // for components with non specific + if ( res == -1.0 ) { + non_specialized_aggregate_usage += percent_core / inverse_load_per_core; + } + else { + aggregate_usage += percent_core / inverse_load_per_core; + } + } + } + } + + for (ApplicationReservationMap::iterator app_it=applicationReservations.begin(); app_it!=applicationReservations.end(); app_it++) { + if (app_it->second.reservation.find("cpucores") != app_it->second.reservation.end()) { + bool found_app = false; + for ( ProcessList::iterator _pid_it=this->pids.begin();_pid_it!=pids.end(); _pid_it++) { + if (applicationReservations.find(_pid_it->appName) != applicationReservations.end()) { + found_app = true; + break; + } + } + if (not found_app) { + if (app_it->second.reservation["cpucores"] == 
-1) { + reservation_set += idle_capacity_modifier; + } else { + reservation_set += 100.0 * app_it->second.reservation["cpucores"]/((float)processor_cores); + } + } else { + if (app_it->second.usage < app_it->second.reservation["cpucores"]) { + reservation_set += 100.00 * ( app_it->second.reservation["cpucores"] - app_it->second.usage)/((double)processor_cores); + } + } + } + } + + LOG_TRACE(GPP_i, __FUNCTION__ << " Completed SECOND pass, record pstats for processes" ); + + aggregate_usage *= inverse_load_per_core; + non_specialized_aggregate_usage *= inverse_load_per_core; + modified_thresholds.cpu_idle = __thresholds.cpu_idle + reservation_set; + utilization[0].component_load = aggregate_usage + non_specialized_aggregate_usage; + float estimate_total = (f_use_end_total-f_use_start_total) * inverse_load_per_core; + utilization[0].system_load = (utilization[0].component_load > estimate_total) ? utilization[0].component_load : estimate_total; // for very light loads, sometimes there is a measurement mismatch because of timing + utilization[0].subscribed = (reservation_set * (float)processor_cores) / 100.0 + utilization[0].component_load; + utilization[0].maximum = processor_cores-(__thresholds.cpu_idle/100.0) * processor_cores; + + LOG_DEBUG(GPP_i, __FUNCTION__ << " LOAD and IDLE : " << std::endl << + " modified_threshold(req+res)=" << modified_thresholds.cpu_idle << std::endl << " system: idle: " << system_monitor->get_idle_percent() << std::endl << " idle avg: " << system_monitor->get_idle_average() << std::endl << - " threshold: " << thresholds.cpu_idle << std::endl << - " modifier: " << idle_capacity_modifier << std::endl << - " reservations: " << reservations.size() << std::endl << - " loadCapacity_counter: " << loadCapacity_counter ); -} + " threshold(req): " << __thresholds.cpu_idle << std::endl << + " idle modifier: " << idle_capacity_modifier << std::endl << + " reserved_cap_per_component: " << reserved_capacity_per_component << std::endl << + " number of 
reservations: " << n_reservations << std::endl << + " processes: " << pids.size() << std::endl << + " loadCapacity: " << loadCapacity << std::endl << + " loadTotal: " << loadTotal << std::endl << + " loadFree(Modified): " << loadFree <getReport(); + LOG_TRACE(GPP_i, __FUNCTION__ << " SysInfo Load : " << std::endl << + " one: " << rpt.load.one_min << std::endl << + " five: " << rpt.load.five_min << std::endl << + " fifteen: " << rpt.load.fifteen_min << std::endl ); + + loadAverage.onemin = rpt.load.one_min; + loadAverage.fivemin = rpt.load.five_min; + loadAverage.fifteenmin = rpt.load.fifteen_min; + + memFree = rpt.virtual_memory_free / mem_free_units; + LOG_TRACE(GPP_i, __FUNCTION__ << "Memory : " << std::endl << + " sys_monitor.vit_total: " << rpt.virtual_memory_total << std::endl << + " sys_monitor.vit_free: " << rpt.virtual_memory_free << std::endl << + " sys_monitor.mem_total: " << rpt.physical_memory_total << std::endl << + " sys_monitor.mem_free: " << rpt.physical_memory_free << std::endl << + " memFree: " << memFree << std::endl << + " memCapacity: " << memCapacity << std::endl << + " memCapacityThreshold: " << memCapacityThreshold << std::endl << + " memInitCapacityPercent: " << memInitCapacityPercent << std::endl ); -void GPP_i::calculateSystemMemoryLoading() { - LOG_TRACE(GPP_i, __FUNCTION__ << ": memCapacity=" << memCapacity << " sys_monitor.get_mem_free=" << system_monitor->get_mem_free() ); - memCapacity = system_monitor->get_mem_free(); + // + // transfer limits to properties + // + const Limits::Contents &sys_rpt =rpt.sys_limits; + sys_limits.current_threads = sys_rpt.threads; + sys_limits.max_threads = sys_rpt.threads_limit; + sys_limits.current_open_files = sys_rpt.files; + sys_limits.max_open_files = sys_rpt.files_limit; + process_limits->update_state(); + const Limits::Contents &pid_rpt = process_limits->get(); + gpp_limits.current_threads = pid_rpt.threads; + gpp_limits.max_threads = pid_rpt.threads_limit; + gpp_limits.current_open_files = 
pid_rpt.files; + gpp_limits.max_open_files = pid_rpt.files_limit; } -void GPP_i::sigchld_handler(int sig) +int GPP_i::sigchld_handler(int sig) { - int status; - pid_t child_pid; - - while( (child_pid = waitpid(-1, &status, WNOHANG)) > 0 ) - { - try { - component_description retval; - if ( devicePtr) { - retval = devicePtr->getComponentDescription(child_pid); - sendChildNotification(retval.identifier, retval.appName); + // Check if any children died.... + fd_set readfds; + FD_ZERO(&readfds); + FD_SET(sig_fd, &readfds); + struct timeval tv = {0, 50}; + struct signalfd_siginfo si; + ssize_t s; + uint32_t cnt=1; + + if ( sig_fd > -1 ) { + // don't care about writefds and exceptfds: + while (true) { + FD_ZERO(&readfds); + FD_SET(sig_fd, &readfds); + select(sig_fd+1, &readfds, NULL, NULL, &tv); + if (FD_ISSET(sig_fd, &readfds)) { + LOG_TRACE(GPP_i, " Checking for signals from SIGNALFD(" << sig_fd << ") cnt:" << cnt++ ); + s = read(sig_fd, &si, sizeof(struct signalfd_siginfo)); + LOG_TRACE(GPP_i, " RETURN from SIGNALFD(" << sig_fd << ") cnt/ret:" << cnt << "/" << s ); + if (s != sizeof(struct signalfd_siginfo)){ + LOG_ERROR(GPP_i, "SIGCHLD handling error ..."); + break; } + + // + // for sigchld and signalfd + // if there are many SIGCHLD events that occur at that same time, the kernel + // can compress the event into a single process id...there for we need to + // try and waitpid for any process that have died. The process id + // reported might not require waitpid, so try and clean up outside the waitpid loop + // + // If a child dies, try to clean up tracking state for the resource. The _component_cleanup + // will issue a notification event message for non domain terminated resources.. ie. segfaults.. + // + if ( si.ssi_signo == SIGCHLD) { + LOG_TRACE(GPP_i, "Child died , pid .................................." 
<< si.ssi_pid); + int status; + pid_t child_pid; + bool reap=false; + while( (child_pid = waitpid(-1, &status, WNOHANG)) > 0 ) { + LOG_TRACE(GPP_i, "WAITPID died , pid .................................." << child_pid); + if ( (uint)child_pid == si.ssi_pid ) reap=true; + _component_cleanup( child_pid, status ); + } + if ( !reap ) { + _component_cleanup( si.ssi_pid, status ); + } + } + else { + LOG_TRACE(GPP_i, "read from signalfd --> signo:" << si.ssi_signo); + } + } + else { break; - } catch ( ... ) { - try { - sendChildNotification("Unknown", "Unknown"); - } catch ( ... ) { + } + } + } + + //LOG_TRACE(GPP_i, "sigchld_handler RETURN.........loop cnt:" << cnt); + return NOOP; +} + + +int GPP_i::redirected_io_handler() +{ + // check if we should be handling io redirects + if ( !_handle_io_redirects || redirectedFds.size() == 0 ) { + return NOOP; + } + + // check we have a log file + if ( _componentOutputLog == "" ) { + LOG_DEBUG(GPP_i, " Component IO redirect ON but no file specified. "); + return NOOP; + } + + LOG_DEBUG(GPP_i, " Locking For Redirect Processing............. 
"); + ReadLock lock(fdsLock); + + int redirect_file = open(_componentOutputLog.c_str(), O_RDWR | O_CREAT , S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ); + if ( redirect_file != -1 ) { + if ( lseek(redirect_file, 0, SEEK_END) == -1 ) { + LOG_DEBUG(GPP_i, " Unable to SEEK To file end, file: " << _componentOutputLog); + } + } + else { + LOG_TRACE(GPP_i, " Unable to open up componentOutputLog, fallback to /dev/null tried log: " << _componentOutputLog); + redirect_file = open("/dev/null", O_RDWR, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ); + } + + size_t size = 0; + uint64_t cnt = 0; + uint64_t fopens = 0; + uint64_t fcloses = 0; + uint64_t nbytes = 0; + size_t result=0; + int rd_fd =0; + ProcessFds::iterator fd = redirectedFds.begin(); + for ( ; fd != redirectedFds.end() && _handle_io_redirects ; fd++ ) { + + // set default redirect to be master + rd_fd=redirect_file; + + // check if our pid is vaid + if ( fd->pid > 0 and fd->cout > -1 ) { + + // open up a specific redirect file + if ( fd->fname != "" && fd->fname != _componentOutputLog ) { + LOG_TRACE(GPP_i, " OPEN FILE - PID: " << fd->pid << " fname " << fd->fname); + rd_fd = open(fd->fname.c_str(), O_RDWR | O_CREAT , S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH ); + if ( rd_fd == -1 ) { + LOG_ERROR(GPP_i, " Unable to open component output log: " << fd->fname); + rd_fd = redirect_file; + } + else { + fopens++; + if ( lseek(rd_fd, 0, SEEK_END) == -1 ) { + LOG_DEBUG(GPP_i, " Unable to SEEK To file end, file: " << fd->fname); + } + } + } + + fd_set readfds; + FD_ZERO(&readfds); + FD_SET(fd->cout, &readfds); + struct timeval tv = {0, 50}; + select(fd->cout+1, &readfds, NULL, NULL, &tv); + if (FD_ISSET(fd->cout, &readfds)) { + + result=0; + size = 0; + if (ioctl (fd->cout, FIONREAD, &size) == -1) { + LOG_ERROR(GPP_i, "(redirected IO) Error requesting how much to read, PID: " << fd->pid << " FD:" << fd->cout ); + close(fd->cout); + fd->cout = -1; + } + if ( fd->cout != -1 && 
rd_fd != -1 ) { + LOG_TRACE(GPP_i, " SPLICE DATA From Child to Output SIZE " << size << "...... PID: " << fd->pid << " FD:" << fd->cout ); + result = splice( fd->cout, NULL, rd_fd, NULL, size,0 ); + LOG_TRACE(GPP_i, " SPLICE DATA From Child to Output RES:" << result << "... PID: " << fd->pid << " FD:" << fd->cout ); + } + if ( (int64_t)result == -1 ) { + LOG_ERROR(GPP_i, "(redirected IO) Error during transfer to redirected file, PID: " << fd->pid << " FD:" << fd->cout ); + close(fd->cout); + fd->cout = -1; + } + else { + nbytes += result; + cnt++; } } + } - - if( child_pid == -1 && errno != ECHILD ) - { - // Error - perror("waitpid"); + + /// close our per component redirected io file if we opened one + if ( rd_fd != -1 && rd_fd != redirect_file ) { + fcloses++; + close(rd_fd); } + } + + // close file while we wait + if ( redirect_file ) close(redirect_file); + LOG_DEBUG(GPP_i, " IO REDIRECT, NPROCS: "<< redirectedFds.size() << " OPEN/CLOSE " << fopens << "/" << fcloses <<" PROCESSED PROCS/Bytes " << cnt << "/" << nbytes ); + return NOOP; } + std::vector GPP_i::getPids() { - boost::mutex::scoped_lock lock(pidLock); + ReadLock lock(pidLock); std::vector keys; - for (ProcessMap::iterator it=pids.begin();it!=pids.end();it++) { - keys.push_back(it->first); + for (ProcessList::iterator it=pids.begin();it!=pids.end();it++) { + keys.push_back(it->pid); } return keys; } -void GPP_i::addPid(int pid, std::string appName, std::string identifier) +void GPP_i::addProcess(int pid, const std::string &appName, const std::string &identifier, const float req_reservation=1.0) { - boost::mutex::scoped_lock lock(pidLock); - if (pids.find(pid) == pids.end()) { - component_description tmp; - tmp.appName = appName; - tmp.identifier = identifier; - pids[pid] = tmp; - } + WriteLock lock(pidLock); + ProcessList:: iterator result = std::find_if( pids.begin(), pids.end(), std::bind2nd( FindPid(), pid ) ); + if ( result != pids.end() ) return; + + LOG_DEBUG(GPP_i, "START Adding Process/RES: " 
<< pid << "/" << req_reservation << " APP:" << appName ); + component_description tmp; + tmp.appName = appName; + tmp.pid = pid; + tmp.identifier = identifier; + tmp.reservation = req_reservation; + tmp.core_usage = 0; + tmp.parent = this; + pids.push_front( tmp ); + if (applicationReservations.find(appName) != applicationReservations.end()) { + applicationReservations[appName].component_pids.push_back(pid); + } + + LOG_DEBUG(GPP_i, "END Adding Process/RES: " << pid << "/" << req_reservation << " APP:" << appName ); } GPP_i::component_description GPP_i::getComponentDescription(int pid) { - boost::mutex::scoped_lock lock(pidLock); - ProcessMap::iterator it=pids.find(pid); + ReadLock lock(pidLock); + ProcessList:: iterator it = std::find_if( pids.begin(), pids.end(), std::bind2nd( FindPid(), pid ) ); if (it == pids.end()) throw std::invalid_argument("pid not found"); - return it->second; + return *it; } -void GPP_i::removePid(int pid) +void GPP_i::markPidTerminated( const int pid) { - boost::mutex::scoped_lock lock(pidLock); - ProcessMap::iterator it=pids.find(pid); - if (it == pids.end()) - return; - pids.erase(it); + ReadLock lock(pidLock); + ProcessList:: iterator it = std::find_if( pids.begin(), pids.end(), std::bind2nd( FindPid(), pid ) ); + if (it == pids.end()) return; + LOG_DEBUG(GPP_i, " Mark For Termination: " << it->pid << " APP:" << it->appName ); + it->app_started= false; + it->terminated = true; } -void GPP_i::addReservation( const component_description &component) +void GPP_i::removeProcess(int pid) { - boost::mutex::scoped_lock lock(pidLock); - this->reservations.push_back(component); -} -void GPP_i::removeReservation( const component_description &component) -{ - boost::mutex::scoped_lock lock(pidLock); - ProcessList::iterator it = std::find(this->reservations.begin(), this->reservations.end(), component); - if (it != this->reservations.end()) { - this->reservations.erase(it); - } - it = std::find(this->tabled_reservations.begin(), 
this->tabled_reservations.end(), component); - if (it != this->tabled_reservations.end()) { - this->tabled_reservations.erase(it); - } -} - -void GPP_i::tableReservation( const component_description &component) -{ - ProcessList::iterator it = std::find(this->reservations.begin(), this->reservations.end(), component); - if (it != this->reservations.end()) { - this->tabled_reservations.push_back(*it); - this->reservations.erase(it); + { + WriteLock wlock(pidLock); + ProcessList:: iterator result = std::find_if( pids.begin(), pids.end(), std::bind2nd( FindPid(), pid ) ); + if ( result != pids.end() ) { + LOG_DEBUG(GPP_i, "Monitor Process: REMOVE Process: " << result->pid << " app: " << result->appName ); + pids.erase(result); } -} + } -void GPP_i::restoreReservation( const component_description &component) -{ - ProcessList::iterator it = std::find(this->tabled_reservations.begin(), this->tabled_reservations.end(), component); - if (it != this->tabled_reservations.end()) { - this->reservations.push_back(*it); - this->tabled_reservations.erase(it); + { + WriteLock wlock(fdsLock); + ProcessFds::iterator i=std::find_if( redirectedFds.begin(), redirectedFds.end(), std::bind2nd( FindRedirect(), pid ) ); + if ( i != redirectedFds.end() ) { + i->close(); + LOG_DEBUG(GPP_i, "Redirectio IO ..REMOVE Redirected pid:" << pid ); + redirectedFds.erase(i); } + } + } - int GPP_i::_apply_affinity( const affinity_struct &nv, const pid_t rsc_pid, const char *rsc_name ) { @@ -1170,7 +2953,7 @@ int GPP_i::_apply_affinity( const pid_t rsc_pid, // apply affinity changes to the process try { - if ( redhawk::affinity::set_affinity( pol, rsc_pid, blcpus) != 0 ) { + if ( gpp::affinity::set_affinity( pol, rsc_pid, blcpus) != 0 ) { RH_NL_WARN("GPP", "Unable to set affinity for process, pid/name: " << rsc_pid << "/" << rsc_name ); } } @@ -1216,8 +2999,8 @@ bool GPP_i::_check_exec_partition( const std::string &iface ){ if ( (uint32_t)soc < execPartitions.size() ) { const exec_socket &ep = 
execPartitions[soc]; // get modified idle threshold value - double m_idle_thresh = ep.idle_threshold + ( ep.idle_cap_mod * reservations.size()) + - (float)loadCapacity_counter/(float)ep.cpus.size(); + double m_idle_thresh = ep.idle_threshold + ( ep.idle_cap_mod * n_reservations) + + (float)loadCapacity/(float)ep.cpus.size(); RH_NL_DEBUG("GPP", " Checking Execution Partition for an NIC interface iface/socket " << iface << "/" << soc << ") IDLE: actual/avg/threshold limit/modified " << ep.get_idle_percent() << "/" << ep.get_idle_average() << "/" << ep.idle_threshold << "/" << m_idle_thresh ); if ( ep.get_idle_percent() > m_idle_thresh ) { @@ -1239,8 +3022,8 @@ int GPP_i::_get_deploy_on_partition() { ExecPartitionList::iterator iter = execPartitions.begin(); for( ; iter != execPartitions.end(); iter++ ) { // get modified idle threshold value - double m_idle_thresh = iter->idle_threshold + ( iter->idle_cap_mod * reservations.size()) + - (float)loadCapacity_counter/(float)iter->cpus.size(); + double m_idle_thresh = iter->idle_threshold + ( iter->idle_cap_mod * n_reservations) + + (float)loadCapacity/(float)iter->cpus.size(); RH_NL_DEBUG("GPP", " Looking for execute partition (processor socket:" << iter->id << ") IDLE: actual/avg/threshold limit/modified " << iter->get_idle_percent() << "/" << iter->get_idle_average() << "/" << iter->idle_threshold << "/" << m_idle_thresh ); if ( iter->get_idle_percent() > m_idle_thresh ) { diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP.h index bbc688d8c..943d71de8 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP.h @@ -22,6 +22,8 @@ #include "GPP_base.h" #include +#include +#include #include "utils/Updateable.h" #include "reports/ThresholdMonitor.h" @@ -30,6 +32,7 @@ #include "statistics/CpuUsageStats.h" #include "reports/SystemMonitorReporting.h" #include "reports/CpuThresholdMonitor.h" +#include 
"reports/NicThroughputThresholdMonitor.h" #include "NicFacade.h" #include "ossie/Events.h" @@ -37,10 +40,27 @@ class ThresholdMonitor; class NicFacade; +#if BOOST_FILESYSTEM_VERSION < 3 +#define BOOST_PATH_STRING(x) (x) +#else +#define BOOST_PATH_STRING(x) (x).string() +#endif + +typedef boost::shared_mutex Lock; +typedef boost::unique_lock< Lock > WriteLock; +typedef boost::shared_lock< Lock > ReadLock; + + + class GPP_i : public GPP_base { - ENABLE_LOGGING + ENABLE_LOGGING; + + public: + static std::string format_up_time(unsigned long secondsUp); + + public: GPP_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); GPP_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); @@ -50,9 +70,7 @@ class GPP_i : public GPP_base int serviceFunction(); void initializeNetworkMonitor(); - void initializeMemoryMonitor(); - void initializeCpuMonitor(); - void addThresholdMonitor( ThresholdMonitor* threshold_monitor ); + void initializeResourceMonitors(); void send_threshold_event(const threshold_event_struct& message); void initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemException); @@ -60,182 +78,324 @@ class GPP_i : public GPP_base void deallocate_loadCapacity(const double &value); bool allocate_diskCapacity(const double &value); void deallocate_diskCapacity(const double &value); - bool allocate_memCapacity(const int64_t &value); - void deallocate_memCapacity(const int64_t &value); + bool allocate_memCapacity(const CORBA::LongLong &value); + void deallocate_memCapacity(const CORBA::LongLong &value); + bool allocate_reservation_request(const redhawk__reservation_request_struct &value); + void deallocate_reservation_request(const redhawk__reservation_request_struct &value); + bool allocate_mcastegress_capacity(const CORBA::Long &value); + void deallocate_mcastegress_capacity(const CORBA::Long &value); + bool allocate_mcastingress_capacity(const CORBA::Long &value); + void deallocate_mcastingress_capacity(const CORBA::Long &value); + + 
CF::ExecutableDevice::ProcessID_Type execute ( const char* name, + const CF::Properties& options, + const CF::Properties& parameters) + throw (CORBA::SystemException, CF::Device::InvalidState, CF::ExecutableDevice::InvalidFunction, + CF::ExecutableDevice::InvalidParameters, CF::ExecutableDevice::InvalidOptions, + CF::InvalidFileName, CF::ExecutableDevice::ExecuteFail); + + CF::ExecutableDevice::ProcessID_Type do_execute (const char* name, const CF::Properties& options, + const CF::Properties& parameters, + const std::vector prepend_args) + throw (CF::ExecutableDevice::ExecuteFail, + CF::InvalidFileName, CF::ExecutableDevice::InvalidOptions, + CF::ExecutableDevice::InvalidParameters, + CF::ExecutableDevice::InvalidFunction, CF::Device::InvalidState, + CORBA::SystemException); + - CF::ExecutableDevice::ProcessID_Type execute (const char* name, const CF::Properties& options, const CF::Properties& parameters) - throw (CORBA::SystemException, CF::Device::InvalidState, CF::ExecutableDevice::InvalidFunction, - CF::ExecutableDevice::InvalidParameters, CF::ExecutableDevice::InvalidOptions, - CF::InvalidFileName, CF::ExecutableDevice::ExecuteFail); void terminate (CF::ExecutableDevice::ProcessID_Type processId) throw (CORBA::SystemException, CF::ExecutableDevice::InvalidProcess, CF::Device::InvalidState); void updateThresholdMonitors(); - void calculateSystemMemoryLoading(); void sendChildNotification(const std::string &comp_id, const std::string &app_id); bool allocateCapacity_nic_allocation(const nic_allocation_struct &value); void deallocateCapacity_nic_allocation(const nic_allocation_struct &value); void deallocateCapacity (const CF::Properties& capacities) throw (CF::Device::InvalidState, CF::Device::InvalidCapacity, CORBA::SystemException); CORBA::Boolean allocateCapacity (const CF::Properties& capacities) throw (CF::Device::InvalidState, CF::Device::InvalidCapacity, CF::Device::InsufficientCapacity, CORBA::SystemException); void releaseObject() throw 
(CORBA::SystemException, CF::LifeCycle::ReleaseError); + + void postConstruction( std::string &softwareProfile, std::string ®istrar_ior, const std::string &idm_channel_ior="", const std::string &nic="", const int sigfd=-1 ); + int sigchld_handler( int sig ); - protected: - struct component_description { - std::string appName; - std::string identifier; - }; - - struct LoadCapacity { - float max; - float measured; - float allocated; + int redirected_io_handler( ); + + std::vector get_component_monitor(); + + struct proc_values { + float mem_rss; + CORBA::ULong num_threads; + int pgrpid; }; - - // - // base execution unit for partitioning a host system - // - struct exec_socket : public Updateable { - int id; - CpuUsageStats::CpuList cpus; - CpuUsageStats stats; - LoadCapacity load_capacity; - double idle_threshold; - double idle_cap_mod; - - exec_socket() : idle_threshold(0.0), idle_cap_mod(0.0) {}; - - void update() { - stats.compute_statistics(); - }; - - double get_idle_percent() const { - return stats.get_idle_percent(); - } - - double get_idle_average() const { - return stats.get_idle_average(); - } - - bool is_available() const { - return stats.get_idle_percent() > idle_threshold ; - }; + + struct grp_values : proc_values { + int num_processes; + std::vector pids; + }; + + void update_grp_child_pids(); + std::map parsed_stat; + std::map grp_children; + + struct proc_redirect { + int pid; + int cout; + int cerr; + std::string fname; + proc_redirect( int _pid, int _cout, int _cerr=-1 ); + proc_redirect( const std::string &fname, int _pid, int _cout, int _cerr=-1 ); + void close(); }; - typedef std::vector< exec_socket > ExecPartitionList; - - - friend bool operator==( const component_description &, - const component_description & ); - - std::vector getPids(); - component_description getComponentDescription(int pid); - - - void set_resource_affinity( const CF::Properties& options, - const pid_t rsc_pid, - const char *rsc_name, - const std::vector &bl); + struct 
component_description { + static const int pstat_history_len=5; + int pid; + std::string appName; + std::string identifier; + bool app_started; + float reservation; + float core_usage; + bool terminated; + uint64_t pstat_history[pstat_history_len]; + uint8_t pstat_idx; + std::vector pids; + GPP_i *parent; + + component_description(); + component_description( const std::string &appId); + void add_history( int64_t ptime ); + void add_history(); + int64_t get_pstat_usage( const bool refresh=true ); + int64_t get_pstat_usage( uint64_t &p2, uint64_t &p1 ); + int64_t get_process_time(); + }; + struct application_reservation { + std::vector component_pids; + std::map reservation; + float usage; + }; - void process_ODM(const CORBA::Any &data); - void updateUsageState(); + void constructor(); - typedef boost::shared_ptr ThresholdMonitorPtr; - typedef std::vector< uint32_t > CpuList; - typedef std::vector< boost::shared_ptr > UpdateableSequence; - typedef std::vector > StateSequence; - typedef std::vector > StatisticsSequence; - typedef std::vector > ReportingSequence; - typedef std::vector< ThresholdMonitorPtr > MonitorSequence; - typedef boost::shared_ptr SystemMonitorPtr; - typedef std::map ProcessMap; - typedef std::vector< component_description > ProcessList; + protected: - void addPid(int pid, std::string appName, std::string identifier); - void removePid(int pid); - void addReservation(const component_description &component); - void removeReservation(const component_description &component); - void tableReservation(const component_description &component); - void restoreReservation(const component_description &component); - void reservedChanged(const float *oldValue, const float *newValue); - void establishModifiedThresholds(); - void sigchld_handler( int sig ); + + struct LoadCapacity { + float max; + float measured; + float allocated; + }; - ProcessList reservations; - ProcessList tabled_reservations; - ProcessMap pids; - boost::mutex pidLock; + // + // base execution 
unit for partitioning a host system + // + struct exec_socket : public Updateable { + int id; + CpuUsageStats::CpuList cpus; + CpuUsageStats stats; + LoadCapacity load_capacity; // future + double idle_threshold; + double idle_cap_mod; + + exec_socket() : idle_threshold(0.0), idle_cap_mod(0.0) {}; + + void update() { + stats.compute_statistics(); + }; + + double get_idle_percent() const { + return stats.get_idle_percent(); + } + + double get_idle_average() const { + return stats.get_idle_average(); + } + + bool is_available() const { + return stats.get_idle_percent() > idle_threshold ; + }; + }; - NicFacadePtr nic_facade; - MonitorSequence threshold_monitors; - SystemMonitorPtr system_monitor; - ExecPartitionList execPartitions; - double loadCapacity_counter; + typedef std::vector< exec_socket > ExecPartitionList; + + + friend bool operator==( const component_description &, + const component_description & ); + + std::vector getPids(); + component_description getComponentDescription(int pid); + void markPidTerminated(const int pid ); + + + void set_resource_affinity( const CF::Properties& options, + const pid_t rsc_pid, + const char *rsc_name, + const std::vector &bl= std::vector(0) ); + + + void process_ODM(const CORBA::Any &data); + + void updateUsageState(); + void setShadowThresholds(const thresholds_struct &newVals ); + + typedef boost::shared_ptr NicMonitorPtr; + typedef boost::shared_ptr ThresholdMonitorPtr; + typedef std::vector< uint32_t > CpuList; + typedef std::vector< boost::shared_ptr > UpdateableSequence; + typedef std::vector > StateSequence; + typedef std::vector > StatisticsSequence; + typedef std::vector > ReportingSequence; + typedef std::vector< ThresholdMonitorPtr > MonitorSequence; + typedef std::vector< NicMonitorPtr > NicMonitorSequence; + typedef boost::shared_ptr SystemMonitorPtr; + typedef std::map ProcessMap; + typedef std::deque< component_description > ProcessList; + typedef std::deque< proc_redirect > ProcessFds; + typedef std::map 
ApplicationReservationMap; + + void addProcess(int pid, + const std::string &appName, + const std::string &identifier, + const float req_reservation ); + void removeProcess(int pid ); + void addThresholdMonitor( ThresholdMonitorPtr threshold_monitor ); + void reservedChanged(const float *oldValue, const float *newValue); + void mcastnicThreshold_changed(const CORBA::Long *oldValue, const CORBA::Long *newValue); + void thresholds_changed(const thresholds_struct *oldValue, const thresholds_struct *newValue); + void update(); + + ProcessList pids; + size_t n_reservations; + Lock pidLock; + Lock fdsLock; + ProcessFds redirectedFds; + bool _handle_io_redirects; + std::string _componentOutputLog; + + Lock nicLock; + NicFacadePtr nic_facade; + MonitorSequence threshold_monitors; + NicMonitorSequence nic_monitors; + SystemMonitorPtr system_monitor; + ProcessLimitsPtr process_limits; + ExecPartitionList execPartitions; + ApplicationReservationMap applicationReservations; - UpdateableSequence data_model; - thresholds_struct modified_thresholds; - float idle_capacity_modifier; - CpuList wl_cpus; // list of allowable cpus to run on .... empty == all, derived from affnity blacklist property and host machine - CpuList bl_cpus; // list of blacklist cpus to avoid - - std::string binary_location; // path to this program. + Lock monitorLock; + UpdateableSequence data_model; + thresholds_struct __thresholds; + thresholds_struct modified_thresholds; + uint64_t thresh_mem_free_units; + uint64_t mem_free_units; + uint64_t mem_cap_units; + int64_t memCapacityThreshold; + double memInitCapacityPercent; + uint64_t memInitVirtFree; + float idle_capacity_modifier; + CpuList wl_cpus; // list of allowable cpus to run on .... empty == all, derived from affnity blacklist property and host machine + CpuList bl_cpus; // list of blacklist cpus to avoid + double mcastnicIngressThresholdValue; + double mcastnicEgressThresholdValue; + + std::string binary_location; // path to this program. 
- boost::posix_time::ptime time_mark; // time marker for update - redhawk::events::SubscriberPtr odm_consumer; // interface that receives ODM_Channel events - redhawk::events::ManagerPtr mymgr; // interface to manage event channel access - - - private: - - // - // setup execution partitions for launching components - // - int _setupExecPartitions( const CpuList &blacklist ); - - // - // apply specific affinity settings to a pid - // - int _apply_affinity( const pid_t rsc_pid, - const char *rsc_name, - const std::string &affinity_class, - const std::string &affinity_value, - const CpuList &bl_cpus ); + boost::posix_time::ptime time_mark; // time marker for update + redhawk::events::SubscriberPtr odm_consumer; // interface that receives ODM_Channel events + redhawk::events::ManagerPtr mymgr; // interface to manage event channel access + + std::string _busy_reason; + boost::posix_time::ptime _busy_timestamp; // time when busy reason was initially set + boost::posix_time::ptime _busy_mark; // track message output + + private: + + // + // set the busy reason property for the GPP.. 
+ // + void _resetReason(); + void _setReason( const std::string &reason, const std::string &event, const bool enable_timestamp = true ); + + bool _component_cleanup( const int pid, const int exit_status ); + + // + // setup execution partitions for launching components + // + int _setupExecPartitions( const CpuList &blacklist ); + + // + // apply specific affinity settings to a pid + // + int _apply_affinity( const pid_t rsc_pid, + const char *rsc_name, + const std::string &affinity_class, + const std::string &affinity_value, + const CpuList &bl_cpus ); - // - // apply affinity settings for affinity struct property - // - int _apply_affinity( const affinity_struct &affinity, const pid_t rsc_pid, const char *rsc_name ); - - // - // get the next available partition to use for luanching resources - // - int _get_deploy_on_partition(); - - // - // Check if execution partition for a NIC interface has enough processing capacity - // - bool _check_exec_partition( const std::string &iface ); - - // - // Callback when affinity processing structure is changed - // - void _affinity_changed(const affinity_struct *ov, const affinity_struct *nv ); - - // - // Determine list of CPUs that are monitored - // - void _set_processor_monitor_list( const CpuList &cl ); - - // - // Common method called by all CTORs - // - void _init(); - -}; + // + // apply affinity settings for affinity struct property + // + int _apply_affinity( const affinity_struct &affinity, const pid_t rsc_pid, const char *rsc_name ); + + // + // get the next available partition to use for luanching resources + // + int _get_deploy_on_partition(); + + // + // Check if execution partition for a NIC interface has enough processing capacity + // + bool _check_exec_partition( const std::string &iface ); + + // + // Callback when affinity processing structure is changed + // + void _affinity_changed(const affinity_struct *ov, const affinity_struct *nv ); + + // + // Callback when componentOutputLog is changed + // + 
void _component_output_changed(const std::string *ov, const std::string *nv ); + + // + // Set vlan list attribute + // + void _set_vlan_property(); + + // + // Determine list of CPUs that are monitored + // + void _set_processor_monitor_list( const CpuList &cl ); + + // + // expand special characters in consoleOutputLog + // + std::string _expand_parameters( const std::string &path ); + + // + // Common method called by all CTORs + // + void _init(); + + // + // check file and thread limits for the process and system + // + bool _check_limits( const thresholds_struct &threshold); + // + // check threshold limits for nic interfaces to determine busy state + // + bool _check_nic_thresholds(); + + std::string user_id; + int limit_check_count; + + ossie::ProcessThread _signalThread; + ossie::ProcessThread _redirectedIO; + }; #endif // GPP_IMPL_H diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP_base.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP_base.cpp index 78a94f830..bdf55a177 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP_base.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP_base.cpp @@ -33,36 +33,40 @@ GPP_base::GPP_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : ExecutableDevice_impl(devMgr_ior, id, lbl, sftwrPrfl), ThreadedComponent() { - construct(); + construct(); } GPP_base::GPP_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : ExecutableDevice_impl(devMgr_ior, id, lbl, sftwrPrfl, compDev), ThreadedComponent() { - construct(); + construct(); } GPP_base::GPP_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : ExecutableDevice_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities), ThreadedComponent() { - construct(); + construct(); } GPP_base::GPP_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : ExecutableDevice_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev), ThreadedComponent() 
{ - construct(); + construct(); } GPP_base::~GPP_base() { - delete propEvent; - propEvent = 0; - delete MessageEvent_out; - MessageEvent_out = 0; + if (propEvent) { + delete propEvent; + propEvent = 0; + } + if ( MessageEvent_out ) { + delete MessageEvent_out; + MessageEvent_out = 0; + } } void GPP_base::construct() @@ -118,15 +122,15 @@ void GPP_base::loadProperties() "readonly", "", "eq", - "allocation,configure"); + "property,allocation,configure"); addProperty(device_model, "DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", "device_model", "readonly", - "", + "REDHAWK GPP", "eq", - "allocation,configure"); + "property,allocation,configure"); addProperty(processor_name, "DCE:fefb9c66-d14a-438d-ad59-2cfd1adb272b", @@ -134,7 +138,7 @@ void GPP_base::loadProperties() "readonly", "", "eq", - "allocation,configure"); + "property,allocation,configure"); addProperty(os_name, "DCE:4a23ad60-0b25-4121-a630-68803a498f75", @@ -142,7 +146,7 @@ void GPP_base::loadProperties() "readonly", "", "eq", - "allocation,configure"); + "property,allocation,configure"); addProperty(os_version, "DCE:0f3a9a37-a342-43d8-9b7f-78dc6da74192", @@ -150,7 +154,7 @@ void GPP_base::loadProperties() "readonly", "", "eq", - "allocation,configure"); + "property,allocation,configure"); addProperty(hostName, "DCE:9190eb70-bd1e-4556-87ee-5a259dcfee39", @@ -158,7 +162,7 @@ void GPP_base::loadProperties() "readonly", "", "external", - "configure,event"); + "property,configure,event"); addProperty(useScreen, false, @@ -169,65 +173,94 @@ void GPP_base::loadProperties() "external", "execparam"); - addProperty(loadCapacity, - "DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056", - "loadCapacity", + addProperty(componentOutputLog, + "DCE:c80f6c5a-e3ea-4f57-b0aa-46b7efac3176", + "componentOutputLog", "readwrite", "", "external", - "allocation"); + "property"); - addProperty(mcastnicIngressCapacity, - "DCE:506102d6-04a9-4532-9420-a323d818ddec", - "mcastnicIngressCapacity", + addProperty(mcastnicInterface, + "", + 
"DCE:4e416acc-3144-47eb-9e38-97f1d24f7700", + "mcastnicInterface", + "readwrite", + "", + "external", + "execparam"); + + addProperty(mcastnicIngressTotal, + 0, + "DCE:5a41c2d3-5b68-4530-b0c4-ae98c26c77ec", + "mcastnicIngressTotal", "readwrite", "Mb/s", "external", - "allocation"); + "execparam"); - addProperty(memCapacity, - "DCE:8dcef419-b440-4bcf-b893-cab79b6024fb", - "memCapacity", + addProperty(mcastnicEgressTotal, + 0, + "DCE:442d5014-2284-4f46-86ae-ce17e0749da0", + "mcastnicEgressTotal", "readwrite", - "MiB", + "Mb/s", "external", - "allocation"); + "execparam"); - addProperty(loadCapacityPerCore, - 1.0, - "DCE:3bf07b37-0c00-4e2a-8275-52bd4e391f07", - "loadCapacityPerCore", + addProperty(mcastnicIngressCapacity, + 0, + "DCE:506102d6-04a9-4532-9420-a323d818ddec", + "mcastnicIngressCapacity", "readwrite", - "", - "gt", - "allocation,execparam"); + "Mb/s", + "external", + "allocation,event"); - addProperty(reserved_capacity_per_component, - 0.25, - "reserved_capacity_per_component", - "", + addProperty(mcastnicEgressCapacity, + 0, + "DCE:eb08e43f-11c7-45a0-8750-edff439c8b24", + "mcastnicEgressCapacity", "readwrite", - "", + "Mb/s", "external", - "configure"); + "allocation,event"); - addProperty(processor_cores, - "processor_cores", - "", + addProperty(mcastnicIngressFree, + 0, + "DCE:0b57a27a-8fa2-412b-b0ae-010618b8f40e", + "mcastnicIngressFree", "readonly", - "", + "Mb/s", "external", - "configure"); + "configure,event"); - addProperty(loadThreshold, + addProperty(mcastnicEgressFree, + 0, + "DCE:9b5bbdcb-1894-4b95-847c-787f121c05ae", + "mcastnicEgressFree", + "readonly", + "Mb/s", + "external", + "configure,event"); + + addProperty(mcastnicThreshold, 80, - "DCE:22a60339-b66e-4309-91ae-e9bfed6f0490", - "loadThreshold", + "DCE:89be90ae-6a83-4399-a87d-5f4ae30ef7b1", + "mcastnicThreshold", "readwrite", "%", "external", "configure,event"); + addProperty(mcastnicVLANs, + "DCE:65544aad-4c73-451f-93de-d4d76984025a", + "mcastnicVLANs", + "readwrite", + "", + 
"external", + "allocation"); + // Set the sequence with its initial values nic_interfaces.push_back("e.*"); addProperty(nic_interfaces, @@ -237,7 +270,7 @@ void GPP_base::loadProperties() "readwrite", "", "external", - "configure"); + "configure,property"); addProperty(available_nic_interfaces, "available_nic_interfaces", @@ -265,6 +298,47 @@ void GPP_base::loadProperties() "external", "configure"); + addProperty(nic_allocation_status, + "nic_allocation_status", + "", + "readonly", + "", + "external", + "configure"); + + addProperty(nic_metrics, + "nic_metrics", + "", + "readonly", + "", + "external", + "configure"); + + addProperty(networkMonitor, + "networkMonitor", + "", + "readonly", + "", + "external", + "configure"); + + addProperty(component_monitor, + "component_monitor", + "", + "readonly", + "", + "external", + "property"); + + addProperty(affinity, + affinity_struct(), + "affinity", + "", + "readwrite", + "", + "external", + "property"); + addProperty(threshold_event, threshold_event_struct(), "threshold_event", @@ -274,6 +348,30 @@ void GPP_base::loadProperties() "external", "message"); + addProperty(busy_reason, + "busy_reason", + "", + "readonly", + "", + "external", + "property"); + + addProperty(cacheDirectory, + "cacheDirectory", + "", + "readonly", + "", + "external", + "property"); + + addProperty(workingDirectory, + "workingDirectory", + "", + "readonly", + "", + "external", + "property"); + addProperty(thresholds, thresholds_struct(), "thresholds", @@ -281,26 +379,45 @@ void GPP_base::loadProperties() "readwrite", "", "external", - "property,configure"); + "property"); - addProperty(nic_allocation_status, - "nic_allocation_status", + addProperty(threshold_cycle_time, + 500, + "threshold_cycle_time", + "threshold_cycle_time", + "readwrite", + "milliseconds", + "external", + "property"); + + addProperty(gpp_limits, + ulimit_struct(), + "gpp_limits", "", "readonly", "", "external", - "configure"); + "property"); - addProperty(nic_metrics, - 
"nic_metrics", + addProperty(sys_limits, + sys_limits_struct(), + "sys_limits", "", "readonly", "", "external", - "configure"); + "property"); - addProperty(networkMonitor, - "networkMonitor", + addProperty(utilization, + "utilization", + "", + "readonly", + "", + "external", + "property"); + + addProperty(processor_cores, + "processor_cores", "", "readonly", "", @@ -315,24 +432,90 @@ void GPP_base::loadProperties() "external", "configure"); - addProperty(affinity, - affinity_struct(), - "affinity", + addProperty(memFree, + "DCE:6565bffd-cb09-4927-9385-2ecac68035c7", + "memFree", + "readonly", + "MiB", + "external", + "configure,event"); + + addProperty(memCapacity, + "DCE:8dcef419-b440-4bcf-b893-cab79b6024fb", + "memCapacity", + "readwrite", + "MiB", + "external", + "allocation,event"); + + addProperty(reserved_capacity_per_component, + 0.1, + "reserved_capacity_per_component", + "reserved_capacity_per_component", + "readwrite", + "", + "external", + "configure"); + + addProperty(loadTotal, + "DCE:28b23bc8-e4c0-421b-9c52-415a24715209", + "loadTotal", + "readonly", + "", + "external", + "configure"); + + addProperty(loadThreshold, + 80, + "DCE:22a60339-b66e-4309-91ae-e9bfed6f0490", + "loadThreshold", + "readwrite", + "%", + "external", + "configure,event"); + + addProperty(loadCapacityPerCore, + 1.0, + "DCE:3bf07b37-0c00-4e2a-8275-52bd4e391f07", + "loadCapacityPerCore", + "readwrite", + "", + "gt", + "allocation,execparam"); + + addProperty(loadFree, + "DCE:6c000787-6fea-4765-8686-2e051e6c24b0", + "loadFree", + "readonly", "", + "external", + "configure,event"); + + addProperty(loadCapacity, + "DCE:72c1c4a9-2bcf-49c5-bafd-ae2c1d567056", + "loadCapacity", "readwrite", "", "external", - "configure,property"); + "allocation,event"); + addProperty(loadAverage, + loadAverage_struct(), + "DCE:9da85ebc-6503-48e7-af36-b77c7ad0c2b4", + "loadAverage", + "readonly", + "", + "external", + "property"); - addProperty(threshold_cycle_time, - 500, - "threshold_cycle_time", - 
"threshold_cycle_time", + addProperty(redhawk__reservation_request, + redhawk__reservation_request_struct(), + "redhawk::reservation_request", + "", "readwrite", - "milliseconds", + "", "external", - "property,configure"); + "allocation"); } diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP_base.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP_base.h index 40e4bce27..3ffa5910d 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP_base.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/GPP_base.h @@ -53,30 +53,74 @@ class GPP_base : public ExecutableDevice_impl, protected ThreadedComponent std::string os_name; std::string os_version; std::string hostName; + std::string componentOutputLog; bool useScreen; - double loadCapacity; - CORBA::Long mcastnicIngressCapacity; - CORBA::LongLong memCapacity; - double loadCapacityPerCore; - float reserved_capacity_per_component; - short processor_cores; - CORBA::Long loadThreshold; + advanced_struct advanced; + std::vector nic_interfaces; std::vector available_nic_interfaces; nic_allocation_struct nic_allocation; - advanced_struct advanced; - threshold_event_struct threshold_event; - thresholds_struct thresholds; + std::string mcastnicInterface; + CORBA::Long mcastnicIngressTotal; + CORBA::Long mcastnicEgressTotal; + CORBA::Long mcastnicIngressCapacity; + CORBA::Long mcastnicEgressCapacity; + CORBA::Long mcastnicIngressFree; + CORBA::Long mcastnicEgressFree; + CORBA::Long mcastnicThreshold; + std::vector mcastnicVLANs; std::vector nic_allocation_status; std::vector nic_metrics; std::vector networkMonitor; + std::vector component_monitor; + + // reporting struct when a threshold is broke + threshold_event_struct threshold_event; + // threshold items to watch + thresholds_struct thresholds; + /// Property to annotate why the system is busy + std::string busy_reason; + /// Property to select a cache directory other than the default + std::string cacheDirectory; + /// Property to select a working directory other 
than the default + std::string workingDirectory; + // time between cycles to refresh threshold metrics + CORBA::ULong threshold_cycle_time; + // ulimits for the GPP process + ulimit_struct gpp_limits; + // ulimits for the system as a whole + sys_limits_struct sys_limits; + /// Property: memFree + CORBA::LongLong memFree; + /// Property: memCapacity + CORBA::LongLong memCapacity; + /// Property: loadTotal + double loadTotal; + /// Property: loadThreshold + CORBA::Long loadThreshold; + /// Property: loadCapacityPerCore + double loadCapacityPerCore; + /// Property: loadFree + double loadFree; + /// Property: loadCapacity + double loadCapacity; + /// Property: loadAverage + loadAverage_struct loadAverage; + /// Property: reserved capacity per core for reservation schema + float reserved_capacity_per_component; + /// Property processor_cores - number of cores the machine supports + short processor_cores; + /// Property: redhawk__reservation_request + redhawk__reservation_request_struct redhawk__reservation_request; + /// Property processor_monitor_list - list of the cores we are watching.. 
std::string processor_monitor_list; + // Property affinity - controls affinity processing for the GPP affinity_struct affinity; - CORBA::ULong threshold_cycle_time; // Ports PropertyEventSupplier *propEvent; MessageSupplierPort *MessageEvent_out; + std::vector utilization; private: void construct(); diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/Makefile.am b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/Makefile.am index 5573f79be..fdf619c16 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/Makefile.am @@ -42,6 +42,8 @@ reports/SystemMonitorReporting.cpp \ reports/SystemMonitorReporting.h \ reports/CpuThresholdMonitor.cpp \ reports/CpuThresholdMonitor.h \ +parsers/PidProcStatParser.cpp \ +parsers/PidProcStatParser.h \ parsers/ProcStatFileParser.cpp \ parsers/ProcStatFileParser.h \ parsers/ProcStatParser.cpp \ @@ -50,6 +52,8 @@ parsers/ProcMeminfoParser.cpp \ parsers/ProcMeminfoParser.h \ states/NicState.cpp \ states/NicState.h \ +states/Limits.cpp \ +states/Limits.h \ states/State.h \ states/CpuState.cpp \ states/CpuState.h \ @@ -65,6 +69,8 @@ statistics/NicAccumulator.cpp \ statistics/NicAccumulator.h \ statistics/Statistics.h \ struct_props.h \ +utils/popen.cpp \ +utils/popen.h \ utils/affinity.cpp \ utils/affinity.h \ utils/CmdlineExecutor.cpp \ @@ -80,4 +86,4 @@ utils/ReferenceWrapper.h \ utils/SymlinkReader.cpp \ utils/SymlinkReader.h GPP_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) -I$(CFDIR)/include -I$(CFDIR)/include/ossie -GPP_LDADD = $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_FILESYSTEM_LIB) $(BOOST_THREAD_LIB) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la +GPP_LDADD = $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_FILESYSTEM_LIB) $(BOOST_THREAD_LIB) -lboost_iostreams $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la diff --git 
a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/NicFacade.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/NicFacade.cpp index c82c10457..ec81374b0 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/NicFacade.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/NicFacade.cpp @@ -28,6 +28,7 @@ #include #include +#include #if BOOST_FILESYSTEM_VERSION < 3 #define BOOST_PATH_STRING(x) (x) @@ -97,6 +98,12 @@ NicFacade::poll_nic_interfaces() const tmp << BOOST_PATH_STRING(iter->path()); boost::filesystem::path test_file( tmp.str() + "/statistics/rx_bytes" ); + std::string operstate = tmp.str()+"/operstate"; + std::ifstream fp(operstate.c_str()); + std::string _state; + std::getline(fp, _state); + if (_state==std::string("down")) continue; + if(boost::filesystem::is_regular_file(test_file)) { interfaces.push_back( BOOST_PATH_STRING(iter->path().filename()) ); @@ -320,6 +327,18 @@ NicFacade::get_devices() const return devices; } +std::vector +NicFacade::get_filtered_devices() const +{ + std::vector devices; + NicStates::const_iterator i; + for( i=filtered_nic_states_.begin(); i!=filtered_nic_states_.end(); ++i ) + { + devices.push_back(i->first); + } + return devices; +} + float NicFacade::get_throughput_by_device( const std::string& device ) const { diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/NicFacade.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/NicFacade.h index 30adb77b2..6c6ce4ca2 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/NicFacade.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/NicFacade.h @@ -58,6 +58,7 @@ class NicFacade : public Reporting void report(); std::vector get_devices() const; + std::vector get_filtered_devices() const; float get_throughput_by_device( const std::string& device ) const; double get_throughput_by_device_bps( const std::string& device ) const; diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/affinity_struct.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/affinity_struct.h index 
33b5f6ef4..8278b0006 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/affinity_struct.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/affinity_struct.h @@ -2,14 +2,14 @@ * This file is protected by Copyright. Please refer to the COPYRIGHT file * distributed with this source distribution. * - * This file is part of REDHAWK core. + * This file is part of REDHAWK GPP. * - * REDHAWK core is free software: you can redistribute it and/or modify it + * REDHAWK GPP is free software: you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your * option) any later version. * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License * for more details. diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/main.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/main.cpp index 5b8eb3450..daa55b31d 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/main.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/main.cpp @@ -2,14 +2,14 @@ * This file is protected by Copyright. Please refer to the COPYRIGHT file * distributed with this source distribution. * - * This file is part of REDHAWK core. + * This file is part of REDHAWK GPP. * - * REDHAWK core is free software: you can redistribute it and/or modify it + * REDHAWK GPP is free software: you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your * option) any later version. 
* - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License * for more details. @@ -36,11 +36,11 @@ void signal_catcher(int sig) } int main(int argc, char* argv[]) { - // // Install signal handler for processing SIGCHLD through // signal file descriptor to avoid whitelist/blacklist function calls // + // add command line arg, to setup signalfd in start_device std::vector gpp_argv(argv, argv+argc); gpp_argv.push_back("USESIGFD"); @@ -50,6 +50,7 @@ int main(int argc, char* argv[]) } struct sigaction sa; + sigemptyset(&sa.sa_mask); sa.sa_handler = signal_catcher; sa.sa_flags = 0; devicePtr = 0; diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/PidProcStatParser.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/PidProcStatParser.cpp new file mode 100644 index 000000000..9c6da5cb7 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/PidProcStatParser.cpp @@ -0,0 +1,90 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK GPP. + * + * REDHAWK GPP is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. 
If not, see http://www.gnu.org/licenses/. + */ +#include +#include +#include +#include +#include +#include + +#include "PidProcStatParser.h" +#include "ParserExceptions.h" + +#ifdef DEBUG_ON +#define DEBUG(x) x +#else +#define DEBUG(x) +#endif + + +PidProcStatParser::PidProcStatParser( const int pid) : + _pid(pid) +{ + } + +PidProcStatParser::~PidProcStatParser() +{ +} + +void PidProcStatParser::readone(FILE *input, int64_t *x) { fscanf(input, "%lld ", (long long *) x); } +void PidProcStatParser::readunsigned(FILE *input, uint64_t *x) { fscanf(input, "%llu ",(unsigned long long *) x); } +void PidProcStatParser::readstr(FILE *input, char *x) { fscanf(input, "%s ", x);} +void PidProcStatParser::readchar(FILE *input, char *x) { fscanf(input, "%c ", x);} + +const PidProcStatParser::Contents & PidProcStatParser::get() { return _data; }; + +int PidProcStatParser::parse( Contents & data ) +{ + char tcomm[PATH_MAX]; + char state; + int retval=-1; + std::stringstream ss; + ss<<"/proc/"<<_pid<<"/stat"; + FILE *input=fopen(ss.str().c_str(), "r"); + if( !input ) return retval; + readone(input,&data.pid); + readstr(input,tcomm); + data.comm = tcomm; + readchar(input,&state); + data.state = state; + readone(input,&data.ppid); + readone(input,&data.pgrp); + readone(input,&data.session); + readone(input,&data.tty_nr); + readone(input,&data.tty_pgrp); + readone(input,&data.flags); + readone(input,&data.min_flt); + readone(input,&data.cmin_flt); + readone(input,&data.maj_flt); + readone(input,&data.cmaj_flt); + readone(input,&data.utime); + readone(input,&data.stime); + readone(input,&data.cutime); + readone(input,&data.cstime); + fclose(input); + + return 0; +} + + +int PidProcStatParser::parse() { + return parse(_data); +} + diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/PidProcStatParser.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/PidProcStatParser.h new file mode 100644 index 000000000..3eec53a1e --- /dev/null +++ 
b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/PidProcStatParser.h @@ -0,0 +1,79 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK GPP. + * + * REDHAWK GPP is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef _PIDPROCSTATPARSER_H_ +#define _PIDPROCSTATPARSER_H_ +#include +#include +#include +#include + +class PidProcStatParser { + + public: + + struct Contents { + int64_t pid; + std::string comm; + char state; + int64_t ppid; + int64_t pgrp; + int64_t session; + int64_t tty_nr; + int64_t tty_pgrp; + int64_t flags; + int64_t min_flt; + int64_t cmin_flt; + int64_t maj_flt; + int64_t cmaj_flt; + int64_t utime; + int64_t stime; + int64_t cutime; + int64_t cstime; + }; + +public: + + PidProcStatParser(); + + PidProcStatParser( const int pid ); + + virtual ~PidProcStatParser(); + + int parse(); + int parse( Contents &data ); + const Contents &get(); + inline int64_t get_ticks() { + return _data.utime + _data.stime + _data.cutime + _data.cstime; + } + +private: + + inline void readone(FILE *input, int64_t *x); + inline void readunsigned(FILE *input, uint64_t *x); + inline void readstr(FILE *input, char *x); + inline void readchar(FILE *input, char *x); + + int _pid; + Contents _data; +}; + + + +#endif diff --git 
a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/ProcMeminfoParser.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/ProcMeminfoParser.cpp index 36abbf2f3..781fedf12 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/ProcMeminfoParser.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/ProcMeminfoParser.cpp @@ -89,14 +89,13 @@ void ProcMeminfoParser::parse( ProcMeminfo::Contents & data ) if ( values.size() >= 3 ) { std::string units(values[2]); boost::to_upper(units); - if ( units == "KB" ) unit_m = 1e3; - if ( units == "MB" ) unit_m = 1e6; - if ( units == "GB" ) unit_m = 1e9; - if ( units == "TB" ) unit_m = 1e12; + if ( units == "KB" ) unit_m = 1024; + if ( units == "MB" ) unit_m = 1024*1024; + if ( units == "GB" ) unit_m = 1024*1024*1024; + if ( units == "TB" ) unit_m = (uint64_t)1024*1024*1024*1024; } metric = metric * unit_m; - data[key]=metric; } diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/ProcStatParser.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/ProcStatParser.cpp index 5a439750c..5a03062c9 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/ProcStatParser.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/parsers/ProcStatParser.cpp @@ -27,7 +27,6 @@ #include "ProcStatParser.h" #include "ParserExceptions.h" - #ifdef DEBUG_ON #define DEBUG(x) x #else diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/FreeMemoryThresholdMonitor.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/FreeMemoryThresholdMonitor.cpp index f025dc5f3..e2cff7770 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/FreeMemoryThresholdMonitor.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/FreeMemoryThresholdMonitor.cpp @@ -2,14 +2,14 @@ * This file is protected by Copyright. Please refer to the COPYRIGHT file * distributed with this source distribution. * - * This file is part of REDHAWK core. + * This file is part of REDHAWK GPP. 
* - * REDHAWK core is free software: you can redistribute it and/or modify it + * REDHAWK GPP is free software: you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your * option) any later version. * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License * for more details. @@ -21,7 +21,7 @@ #include "utils/ReferenceWrapper.h" FreeMemoryThresholdMonitor::FreeMemoryThresholdMonitor( const std::string& source_id, QueryFunction threshold, QueryFunction measured ): -GenericThresholdMonitor(source_id, GetResourceId(), GetMessageClass(), threshold, measured ) +GenericThresholdMonitor(source_id, GetResourceId(), GetMessageClass(), threshold, measured ) { } diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/FreeMemoryThresholdMonitor.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/FreeMemoryThresholdMonitor.h index 9af3eef0e..46498ee9e 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/FreeMemoryThresholdMonitor.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/FreeMemoryThresholdMonitor.h @@ -2,14 +2,14 @@ * This file is protected by Copyright. Please refer to the COPYRIGHT file * distributed with this source distribution. * - * This file is part of REDHAWK core. + * This file is part of REDHAWK GPP. * - * REDHAWK core is free software: you can redistribute it and/or modify it + * REDHAWK GPP is free software: you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your * option) any later version. 
* - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License * for more details. @@ -19,42 +19,16 @@ */ #ifndef FREE_MEMORY_THRESHOLD_MONITOR_H_ #define FREE_MEMORY_THRESHOLD_MONITOR_H_ -#include #include "ThresholdMonitor.h" -template< class REFERENCE_TYPE, class RETURN_TYPE=REFERENCE_TYPE, class ctype=uint64_t, class CFUNC=std::divides< RETURN_TYPE > > -class ConversionWrapper - { - public: - typedef REFERENCE_TYPE type; - typedef RETURN_TYPE result_type; - typedef void argument_type; - typedef CFUNC opfunc; - - explicit ConversionWrapper( type& ref, ctype cf=1048576, const opfunc &func=std::divides< result_type >() ): - ref_(ref), func_(func), unit_conversion_(cf) - {}; - - result_type operator()() const { - return func_( static_cast(ref_), (result_type)unit_conversion_ ); - }; - - type& get() const { return ref_; }; - - private: - type &ref_; - opfunc func_; - ctype unit_conversion_; - - }; - -class FreeMemoryThresholdMonitor : public GenericThresholdMonitor +class FreeMemoryThresholdMonitor : public GenericThresholdMonitor { public: - FreeMemoryThresholdMonitor( const std::string& source_id, QueryFunction threshold, QueryFunction measured ) ; - static std::string GetResourceId(){ return "physical_ram"; } - static std::string GetMessageClass(){ return "MEMORY_FREE"; } + FreeMemoryThresholdMonitor( const std::string& source_id, QueryFunction threshold, QueryFunction measured ) ; + + static std::string GetResourceId(){ return "physical_ram"; } + static std::string GetMessageClass(){ return "MEMORY_FREE"; } }; diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/NicThroughputThresholdMonitor.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/NicThroughputThresholdMonitor.cpp index 
4c8203f20..4b297dc67 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/NicThroughputThresholdMonitor.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/NicThroughputThresholdMonitor.cpp @@ -21,6 +21,6 @@ #include "../utils/ReferenceWrapper.h" NicThroughputThresholdMonitor::NicThroughputThresholdMonitor( const std::string& source_id, const std::string& resource_id, NicThroughputThresholdMonitor::QueryFunction threshold, NicThroughputThresholdMonitor::QueryFunction measured ): -GenericThresholdMonitor >(source_id, resource_id, GetMessageClass(), threshold, measured ) +GenericThresholdMonitor >(source_id, resource_id, GetMessageClass(), threshold, measured ) { } diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/NicThroughputThresholdMonitor.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/NicThroughputThresholdMonitor.h index c6a209e25..30f92dfb6 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/NicThroughputThresholdMonitor.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/NicThroughputThresholdMonitor.h @@ -22,12 +22,24 @@ #include "ThresholdMonitor.h" -class NicThroughputThresholdMonitor : public GenericThresholdMonitor > +class NicThroughputThresholdMonitor : public GenericThresholdMonitor > { public: NicThroughputThresholdMonitor( const std::string& source_id, const std::string& resource_id, QueryFunction threshold, QueryFunction measured ); static std::string GetMessageClass(){ return "NIC_THROUGHPUT"; } + + bool is_threshold_exceeded() const + { + if (get_threshold_value() < 0 ) return false; + return this->GenericThresholdMonitor< float,std::greater_equal >::is_threshold_exceeded(); + } + + void update() + { + if (get_threshold_value() < 0 ) return; + this->GenericThresholdMonitor< float,std::greater_equal >::update(); + } }; #endif diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/SystemMonitorReporting.cpp 
b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/SystemMonitorReporting.cpp index f8aba55d0..c17aea267 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/SystemMonitorReporting.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/SystemMonitorReporting.cpp @@ -27,10 +27,20 @@ static const size_t BYTES_PER_MEGABYTE = 1024*1024; +SystemMonitor::SystemMonitor( const CpuList & cpu_list, const int nhistory ): + cpu_usage_stats_( new CpuUsageStats(cpu_list,nhistory) ), + mem_usage_state_(new ProcMeminfo()), + sys_limit_state_(new SysLimits()) +{ + report(); +} + SystemMonitor::SystemMonitor( const CpuStatsPtr & cpu_usage_stats, - const MemInfoPtr &mem_usage_state ) : + const MemInfoPtr &mem_usage_state, + const SysLimitsPtr &sys_limit ) : cpu_usage_stats_(cpu_usage_stats), - mem_usage_state_(mem_usage_state) + mem_usage_state_(mem_usage_state), + sys_limit_state_(sys_limit) { report(); } @@ -44,66 +54,72 @@ double SystemMonitor::get_idle_average() const { } uint64_t SystemMonitor::get_mem_free() const { - return report_.physical_memory_free; + return report_.virtual_memory_free; +} + +uint64_t SystemMonitor::get_phys_free() const { + return report_.physical_memory_free; +} + +uint64_t SystemMonitor::get_all_usage() const { + return report_.all_usage; +} + +uint64_t SystemMonitor::get_user_usage() const { + return report_.user_usage; +} + +double SystemMonitor::get_loadavg() const { + return report_.load.one_min; } const SystemMonitor::Report &SystemMonitor::getReport() const { return report_; } +const SystemMonitor::CpuStatsPtr SystemMonitor::getCpuStats() const { + return cpu_usage_stats_; +} + void SystemMonitor::report() { - cpu_usage_stats_->update(); - mem_usage_state_->update(); + struct sysinfo info; + sysinfo(&info); + try { - ProcMeminfo::Counter total_memory = mem_usage_state_->getMetric("CommitLimit"); - ProcMeminfo::Counter committed_memory = mem_usage_state_->getMetric("Committed_AS"); - report_.physical_memory_free = 
(double)(total_memory - committed_memory); + cpu_usage_stats_->update(); + mem_usage_state_->update(); + sys_limit_state_->update(); + const ProcMeminfo::Contents &mem_stats = mem_usage_state_->get(); + report_.virtual_memory_total = mem_stats.at("MemTotal")+ mem_stats.at("SwapTotal"); + report_.virtual_memory_free = mem_stats.at("MemFree") + mem_stats.at("SwapFree"); + report_.physical_memory_total = mem_stats.at("MemTotal"); + report_.physical_memory_free = mem_stats.at("MemFree"); } - catch (...) { - struct sysinfo info; - sysinfo(&info); - //report_.physical_memory_free = info.freeram / BYTES_PER_MEGABYTE * info.mem_unit; - report_.physical_memory_free = info.freeram / BYTES_PER_MEGABYTE * info.mem_unit; + catch(...){ + report_.virtual_memory_total = (info.totalram+info.totalswap) * info.mem_unit; + report_.virtual_memory_free = (info.freeram+info.freeswap) * info.mem_unit; + report_.physical_memory_total = info.totalram * info.mem_unit; + report_.physical_memory_free = info.freeram * info.mem_unit; } - //reporting_data_.virtual_memory_total = (info.totalram+info.totalswap) / BYTES_PER_MEGABYTE * info.mem_unit; - //reporting_data_.virtual_memory_free = (info.freeram+info.freeswap) / BYTES_PER_MEGABYTE * info.mem_unit; - //reporting_data_.virtual_memory_used = reporting_data_.virtual_memory_total-reporting_data_.virtual_memory_free; - //reporting_data_.virtual_memory_percent = (double)reporting_data_.virtual_memory_used / (double)reporting_data_.virtual_memory_total * 100.; - //reporting_data_.physical_memory_total = info.totalram / BYTES_PER_MEGABYTE * info.mem_unit; - //reporting_data_.physical_memory_free = info.freeram / BYTES_PER_MEGABYTE * info.mem_unit; - //reporting_data_.physical_memory_used = reporting_data_.physical_memory_total-reporting_data_.physical_memory_free; - //reporting_data_.physical_memory_percent = (double)reporting_data_.physical_memory_used / (double)reporting_data_.physical_memory_total * 100.; - //reporting_data_.user_cpu_percent = 
cpu_usage_accumulator_->get_user_percent(); - //reporting_data_.system_cpu_percent = cpu_usage_accumulator_->get_system_percent(); - report_.idle_cpu_percent = cpu_usage_stats_->get_idle_percent(); - //reporting_data_.cpu_percent = 100.0 - reporting_data_.idle_cpu_percent; - //reporting_data_.up_time = info.uptime; - //reporting_data_.up_time_string = format_up_time(reporting_data_.up_time); - //reporting_data_.last_update_time = time(NULL); -} - -std::string -SystemMonitor::format_up_time(unsigned long secondsUp) const -{ - std::stringstream formattedUptime; - int days; - int hours; - int minutes; - int seconds; - - int leftover; - - days = (int) secondsUp / (60 * 60 * 24); - leftover = (int) secondsUp - (days * (60 * 60 * 24) ); - hours = (int) leftover / (60 * 60); - leftover = leftover - (hours * (60 * 60) ); - minutes = (int) leftover / 60; - seconds = leftover - (minutes * 60); - - formattedUptime << days << "d " << hours << "h " << minutes << "m " << seconds << "s"; - - return formattedUptime.str(); + report_.virtual_memory_used = report_.virtual_memory_total-report_.virtual_memory_free; + report_.physical_memory_used = report_.physical_memory_total-report_.physical_memory_free; + report_.virtual_memory_percent = (double)report_.virtual_memory_used / (double)report_.virtual_memory_total * 100.; + report_.physical_memory_percent = (double)report_.physical_memory_used / (double)report_.physical_memory_total * 100.; + report_.user_cpu_percent = cpu_usage_stats_->get_user_percent(); + report_.system_cpu_percent = cpu_usage_stats_->get_system_percent(); + report_.idle_cpu_percent = cpu_usage_stats_->get_idle_percent(); + report_.cpu_percent = 100.0 - report_.idle_cpu_percent; + report_.up_time = info.uptime; + report_.last_update_time = time(NULL); + report_.idle_cpu_percent = cpu_usage_stats_->get_idle_percent(); + report_.all_usage = cpu_usage_stats_->get_all_usage(); + report_.user_usage = cpu_usage_stats_->get_user_usage(); + report_.sys_limits = 
sys_limit_state_->get(); + report_.load.one_min = info.loads[0] * 1.0/(1< #include #include "Reporting.h" -#include "statistics/Statistics.h" +#include "statistics/CpuUsageStats.h" #include "states/ProcMeminfo.h" - +#include "states/Limits.h" class SystemMonitor : public Reporting { public: - typedef boost::shared_ptr< CpuStatistics > CpuStatsPtr; + typedef boost::shared_ptr< CpuUsageStats > CpuStatsPtr; typedef boost::shared_ptr< ProcMeminfo > MemInfoPtr; + typedef CpuUsageStats::CpuList CpuList; + + struct loadavg { + double one_min; + double five_min; + double fifteen_min; + }; struct Report { - uint64_t physical_memory_free; - double idle_cpu_percent; + uint64_t virtual_memory_total; + uint64_t virtual_memory_used; + uint64_t virtual_memory_free; + double virtual_memory_percent; + uint64_t physical_memory_total; + uint64_t physical_memory_used; + uint64_t physical_memory_free; + double physical_memory_percent; + uint64_t all_usage; + uint64_t user_usage; + double cpu_percent; + double user_cpu_percent; + double system_cpu_percent; + double idle_cpu_percent; + double up_time; + Limits::Contents sys_limits; + double last_update_time; + loadavg load; }; + +public: + SystemMonitor( const CpuList &cpu_list, const int nhistory=5 ); -public: SystemMonitor( const CpuStatsPtr & cpu_usage_stats, - const MemInfoPtr & mem_usage_state ); + const MemInfoPtr & mem_usage_state, + const SysLimitsPtr& sys_limit ); double get_idle_percent() const; double get_idle_average() const; uint64_t get_mem_free() const; + uint64_t get_phys_free() const; + uint64_t get_all_usage() const; + uint64_t get_user_usage() const; + double get_loadavg() const; const Report &getReport() const; void report(); + const CpuStatsPtr getCpuStats() const; private: - std::string format_up_time(unsigned long secondsUp) const; - -private: - CpuStatsPtr cpu_usage_stats_; - MemInfoPtr mem_usage_state_; - Report report_; + CpuStatsPtr cpu_usage_stats_; + MemInfoPtr mem_usage_state_; + SysLimitsPtr 
sys_limit_state_; + Report report_; }; diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/ThresholdMonitor.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/ThresholdMonitor.h index caf75e76b..422ba8fb9 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/ThresholdMonitor.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/reports/ThresholdMonitor.h @@ -28,9 +28,9 @@ #include "utils/Updateable.h" #include "utils/EventDispatcher.h" #include "Reporting.h" +#include "utils/ConversionWrapper.h" #include "struct_props.h" -//class ThresholdMonitor : public Reporting, public EventDispatcherMixin class ThresholdMonitor : public Updateable, public EventDispatcherMixin { public: diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/Limits.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/Limits.cpp new file mode 100644 index 000000000..e420d3d24 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/Limits.cpp @@ -0,0 +1,231 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK GPP. + * + * REDHAWK GPP is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "Limits.h" +#include "utils/popen.h" + +#if BOOST_FILESYSTEM_VERSION < 3 +#define BOOST_PATH_STRING(x) (x) +#else +#define BOOST_PATH_STRING(x) (x).string() +#endif + +#ifdef DEBUG_ON +#define DEBUG(x) std::cout << x << std::endl +#else +#define DEBUG(x) +#endif + + +const Limits::Contents& +Limits::get() const +{ + return contents; +} + +Limits::Limits() +{ +} + +Limits::~Limits() +{ +} + + + +SysLimits::SysLimits() +{ +} + +SysLimits::~SysLimits() +{ +} + +void SysLimits::update_state() +{ + Contents tmp; + + // grab current file handles + std::string fname; + try{ + fname = "/proc/sys/fs/file-nr"; + std::ifstream file_nr(fname.c_str(), std::ifstream::in); + if ( !file_nr.good()) throw std::ifstream::failure("unable to open " + fname ); + std::string line; + while ( std::getline( file_nr, line ) ) { + std::vector values; + boost::split( values, line, boost::is_any_of(std::string(" \t")), boost::algorithm::token_compress_on ); + DEBUG(" values: " << values.size() << " file-nr line: " << line ); + + if ( values.size() > 2 ) { + try { + tmp.files = boost::lexical_cast( values[0] ); + tmp.files_limit = boost::lexical_cast( values[2] ); + } + catch( boost::bad_lexical_cast ){ + } + } + } + } + catch( ... ) { + } + + try{ + fname = "/proc/sys/kernel/threads-max"; + std::ifstream sys_threads_max(fname.c_str(), std::ifstream::in); + if ( !sys_threads_max.good()) throw std::ifstream::failure("unable to open " + fname ); + std::string line; + while ( std::getline( sys_threads_max, line ) ) { + std::vector values; + boost::split( values, line, boost::is_any_of(std::string(" \t")), boost::algorithm::token_compress_on ); + DEBUG( " sys-kernel-threads-max line: " << line ); + + if ( values.size() > 0 ) { + try { + tmp.threads_limit = boost::lexical_cast( values[0] ); + } + catch( boost::bad_lexical_cast ){ + } + } + } + } + catch( ... 
) { + } + + try { + std::string line = utils::popen("ps -eo nlwp | tail -n +2 | awk '{ num_threads += $1 } END { print num_threads }' ", true); + if ( line != "ERROR" ) { + std::vector values; + boost::split(values, line, boost::is_any_of(std::string(" \t")), boost::algorithm::token_compress_on ); + DEBUG(" system active threads: " << line); + if ( values.size() > 0 ) { + try { + tmp.threads = boost::lexical_cast( values[0] ); + } + catch( boost::bad_lexical_cast ){ + } + } + } + + } + catch( ... ) { + } + + DEBUG( " SYSTEM: threads/max " << tmp.threads << "/" << tmp.threads_limit ); + DEBUG( " SYSTEM: files/max " << tmp.files << "/" << tmp.files_limit ); + contents = tmp; +} + + +ProcessLimits::ProcessLimits( const int in_pid) : + pid(in_pid) +{ + if (in_pid<0) pid=getpid(); +} + +ProcessLimits::~ProcessLimits() +{ +} + +void ProcessLimits::update_state() +{ + Contents tmp; + + if ( pid < 0 ) pid = getpid(); + + struct rlimit limit; + if (getrlimit(RLIMIT_NPROC, &limit) == 0) { + tmp.threads_limit = limit.rlim_cur; + } + if (getrlimit(RLIMIT_NOFILE, &limit) == 0) { + tmp.files_limit = limit.rlim_cur; + } + + // + std::ostringstream ppath; + ppath << "/proc/"< values; + boost::split( values, line, boost::is_any_of(std::string(" ")), boost::algorithm::token_compress_on ); + DEBUG( " line: " << line ); + + if ( values.size() > 1 && boost::starts_with( values[0], "Threads:" ) ) { + try { + tmp.threads = boost::lexical_cast( values[1] ); + } + catch( boost::bad_lexical_cast ){ + } + } + } + } + } + + if ( !tmp.threads ) { + std::stringstream subpath; + subpath<< BOOST_PATH_STRING(pid_dir)<<"/task/"; + boost::filesystem::path subPath(subpath.str()); + if (boost::filesystem::exists(subPath)) { + for (boost::filesystem::directory_iterator sub_dir_iter(subPath); + sub_dir_iter!= boost::filesystem::directory_iterator();++sub_dir_iter) { + tmp.threads++; + } + } + } + + std::stringstream subfilepath; + subfilepath<< BOOST_PATH_STRING(pid_dir)<<"/fd/"; + 
boost::filesystem::path subFilePath(subfilepath.str()); + if (boost::filesystem::exists(subFilePath)) { + for (boost::filesystem::directory_iterator sub_dir_iter(subFilePath); + sub_dir_iter!= boost::filesystem::directory_iterator();++sub_dir_iter) { + tmp.files++; + } + } + + DEBUG( " Process: threads/max " << tmp.threads << "/" << tmp.threads_limit ); + DEBUG( " Process: files/max " << tmp.files << "/" << tmp.files_limit ); + + contents = tmp; +} + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/Limits.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/Limits.h new file mode 100644 index 000000000..dcc5cfc84 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/Limits.h @@ -0,0 +1,105 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK GPP. + * + * REDHAWK GPP is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef _LIMIT_H_ +#define _LIMIT_H_ +#include +#include +#include +#include +#include +#include "states/State.h" + +class Limits; +typedef boost::shared_ptr LimitsPtr; + +class SysLimits; +typedef boost::shared_ptr SysLimitsPtr; + +class ProcessLimits; +typedef boost::shared_ptr ProcessLimitsPtr; + + +class Limits : public State +{ + + public: + struct Contents { + Contents() : threads(0), threads_limit(-1), files(0), files_limit(-1) {}; + int64_t threads; + int64_t threads_limit; + int64_t files; + int64_t files_limit; + }; + + + // init file and read in baseline stats + Limits(); + + virtual ~Limits(); + + //virtual void update_state(); + + // return contents of file + const Contents &get() const; + + protected: + + Contents contents; + + private: + +}; + + + +class SysLimits : public Limits +{ + + public: + // init file and read in baseline stats + SysLimits(); + + virtual ~SysLimits(); + + void update_state(); + +}; + + + +class ProcessLimits : public Limits +{ + + public: + // init file and read in baseline stats + ProcessLimits(const int pid=-1); + + virtual ~ProcessLimits(); + + void update_state(); + + protected: + + int pid; + +}; + + +#endif // __SYSLIMIT_H__ diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcMeminfo.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcMeminfo.h index 9f56e1788..1712fa53b 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcMeminfo.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcMeminfo.h @@ -42,7 +42,7 @@ class ProcMeminfo : public State virtual ~ProcMeminfo(); - // update content state by processing /proc/stat + // update content state by processing /proc/meminfo void update_state(); // return contents of file diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcStat.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcStat.cpp index db1441d44..1a754277d 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcStat.cpp +++ 
b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcStat.cpp @@ -17,6 +17,7 @@ * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see http://www.gnu.org/licenses/. */ +#include #include "ProcStat.h" #include "parsers/ProcStatParser.h" @@ -44,4 +45,26 @@ ProcStat::get() const } +static void __readone(FILE *input, int64_t *x) { fscanf(input, "%lld ",(long long *) x); } +static void __readstr(FILE *input, char *x) { fscanf(input, "%s ", x);} +int ProcStat::GetTicks( int64_t &r_sys, int64_t &r_user ) { + + char cpu[512]; + int retval=-1; + int64_t user, nice, sys, idle, iowait, irq, softirq; + FILE *input=fopen("/proc/stat", "r"); + if( !input ) return retval; + __readstr(input,cpu); + __readone(input,&user); + __readone(input,&nice); + __readone(input,&sys); + __readone(input,&idle); + __readone(input,&iowait); + __readone(input,&irq); + __readone(input,&softirq); + fclose(input); + r_sys = user+nice+sys+idle; + r_user = user+nice+sys; + return 0; +} diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcStat.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcStat.h index 0e3657662..1d13631c8 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcStat.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/states/ProcStat.h @@ -79,6 +79,7 @@ class ProcStat : public State }; + static int GetTicks( int64_t &sys, int64_t &user ); // init file and read in baseline stats ProcStat(); diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/statistics/CpuUsageStats.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/statistics/CpuUsageStats.cpp index 27931537e..fae14b0ab 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/statistics/CpuUsageStats.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/statistics/CpuUsageStats.cpp @@ -56,6 +56,18 @@ CpuUsageStats::CpuUsageStats(const CpuList &cpus, const int nhistory ): _update_stats(); } + +CpuUsageStats::CpuUsageStats(const 
CpuUsageStats &src ) +{ + prev_cpus_stat_ = src.prev_cpus_stat_; + current_cpus_stat_ = src.current_cpus_stat_; + proc_stat_ = src.proc_stat_; + cpus_ = src.cpus_; + metrics_ = src.metrics_; + average_ = src.average_; + history_db_ = src.history_db_; +} + double CpuUsageStats::get_user_percent() const { return metrics_[ ProcStat::CPU_JIFFIES_USER ]; @@ -87,6 +99,17 @@ double CpuUsageStats::get_idle_average() const return average_[ ProcStat::CPU_JIFFIES_IDLE ]; } +uint64_t CpuUsageStats::get_all_usage() const +{ + return cpus_all_itv; +} + +uint64_t CpuUsageStats::get_user_usage() const +{ + return cpus_user_itv; + return 0; +} + void CpuUsageStats::compute_statistics() @@ -96,6 +119,8 @@ void CpuUsageStats::compute_statistics() // sum up all jiffies for all required cpus or all Accumulator cpus_itv = _get_interval_total(); + cpus_all_itv = cpus_itv; + cpus_user_itv =_get_user_total(); std::fill( metrics_.begin(),metrics_.end(), 0 ); std::fill( average_.begin(),average_.end(), 0 ); @@ -134,6 +159,20 @@ CpuUsageStats::Accumulator CpuUsageStats::_get_interval_total() const return 0; } +CpuUsageStats::Accumulator CpuUsageStats::_get_user_total() const +{ + if( !prev_cpus_stat_.empty() ) { + uint64_t scc; + uint64_t scp; + scc = _sum_jiffie_list( current_cpus_stat_, ProcStat::CPU_JIFFIES_SYSTEM ); + scp = _sum_jiffie_list( prev_cpus_stat_, ProcStat::CPU_JIFFIES_SYSTEM ); + DEBUG(std::cout << " _get_user_total scc/scp/diff " << scc << "/" << scp << "/" << scc - scp << std::endl); + return scc - scp; + } + else + return 0; +} + void CpuUsageStats::_update_stats() { proc_stat_.update_state(); @@ -163,7 +202,7 @@ CpuUsageStats::Accumulator CpuUsageStats::_sum_jiffies(const ProcStat::CpuStats { uint64_t accum=0; - // filter out cpus that were identified... if list == 0 then do not filer any + // filter out cpus that were identified... 
if list == 0 then do not filter any if ( cpus_.size() == 0 ) { ProcStat::CpuStats::const_iterator iter = cpu_stats.begin(); for ( int i=0; iter != cpu_stats.end(); i++, iter++ ) { @@ -185,6 +224,33 @@ CpuUsageStats::Accumulator CpuUsageStats::_sum_jiffies(const ProcStat::CpuStats return accum; } +CpuUsageStats::Accumulator CpuUsageStats::_sum_jiffie_list(const ProcStat::CpuStats& cpu_stats, + const ProcStat::CpuJiffiesField &jiffie_max ) const +{ + + uint64_t accum=0; + // filter out cpus that were identified... if list == 0 then do not filter any + if ( cpus_.size() == 0 ) { + ProcStat::CpuStats::const_iterator iter = cpu_stats.begin(); + for ( int i=0; iter != cpu_stats.end(); i++, iter++ ) { + accum += std::accumulate( (*iter).jiffies.begin(), (*iter).jiffies.begin()+jiffie_max, 0 ); // skip guest counters... + DEBUG(std::cout << " _sum_jiffies cpu/accum: " << i << "/" << accum << std::endl); + } + } + else { + // filter out cpus that were identified... + for( uint32_t i=0; i < cpus_.size(); i++ ) { + if ( cpus_[i] < cpu_stats.size() ) { + int cpu_idx = cpus_[i]; + accum += std::accumulate( cpu_stats[cpu_idx].jiffies.begin(), cpu_stats[cpu_idx].jiffies.begin()+jiffie_max, 0); // skip guest counters... 
+ } + } + } + + DEBUG(std::cout << " _sum_jiffies accum: " << accum << std::endl); + return accum; +} + CpuUsageStats::Accumulator CpuUsageStats::_sum_jiffie_field( const ProcStat::CpuStats& cpu_stats, const ProcStat::CpuJiffiesField &jiffie ) const diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/statistics/CpuUsageStats.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/statistics/CpuUsageStats.h index adedec966..8ebf99164 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/statistics/CpuUsageStats.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/statistics/CpuUsageStats.h @@ -48,6 +48,7 @@ class CpuUsageStats : public CpuStatistics //CpuUsageStats( ); CpuUsageStats( const int nhistory=5 ); CpuUsageStats( const CpuList &cpus, const int nhistory=5 ); + CpuUsageStats( const CpuUsageStats &src ); virtual ~CpuUsageStats() {} @@ -60,15 +61,20 @@ class CpuUsageStats : public CpuStatistics virtual double get_user_average() const; virtual double get_system_average() const; virtual double get_idle_average() const; + uint64_t get_all_usage() const; + uint64_t get_user_usage() const; protected: typedef ProcStat::Jiffie Accumulator; virtual Accumulator _get_interval_total() const; + virtual Accumulator _get_user_total() const; virtual double _calc_metric( const ProcStat::CpuJiffiesField & jiffie, const Accumulator itv ) const; virtual double _calc_average( const ProcStat::CpuJiffiesField & jiffie ) const; virtual Accumulator _sum_jiffies( const ProcStat::CpuStats & cpu_stats ) const; + virtual Accumulator _sum_jiffie_list( const ProcStat::CpuStats & cpu_stats, + const ProcStat::CpuJiffiesField & jiffie_max ) const; virtual Accumulator _sum_jiffie_field( const ProcStat::CpuStats& cpu_stats, const ProcStat::CpuJiffiesField & jiffie ) const; bool _accum_cpu( const uint32_t cpu_id ) const; @@ -82,6 +88,8 @@ class CpuUsageStats : public CpuStatistics MetricsList metrics_; MetricsList average_; MetricsHistory history_db_; + Accumulator cpus_all_itv; + Accumulator 
cpus_user_itv; }; diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/struct_props.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/struct_props.h index d56e6a456..b0e1c3f85 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/struct_props.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/struct_props.h @@ -145,6 +145,65 @@ inline bool operator!= (const nic_allocation_struct& s1, const nic_allocation_st return !(s1==s2); }; +struct redhawk__reservation_request_struct { + redhawk__reservation_request_struct () + { + } + + static std::string getId() { + return std::string("redhawk::reservation_request"); + } + + static const char* getFormat() { + return "s[s][s]"; + } + + std::string obj_id; + std::vector kinds; + std::vector values; +}; + +inline bool operator>>= (const CORBA::Any& a, redhawk__reservation_request_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("redhawk::reservation_request::obj_id")) { + if (!(props["redhawk::reservation_request::obj_id"] >>= s.obj_id)) return false; + } + if (props.contains("redhawk::reservation_request::kinds")) { + if (!(props["redhawk::reservation_request::kinds"] >>= s.kinds)) return false; + } + if (props.contains("redhawk::reservation_request::values")) { + if (!(props["redhawk::reservation_request::values"] >>= s.values)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const redhawk__reservation_request_struct& s) { + redhawk::PropertyMap props; + + props["redhawk::reservation_request::obj_id"] = s.obj_id; + + props["redhawk::reservation_request::kinds"] = s.kinds; + + props["redhawk::reservation_request::values"] = s.values; + a <<= props; +} + +inline bool operator== (const redhawk__reservation_request_struct& s1, const redhawk__reservation_request_struct& s2) { + if (s1.obj_id!=s2.obj_id) + return false; + if (s1.kinds!=s2.kinds) + return false; + if 
(s1.values!=s2.values) + return false; + return true; +} + +inline bool operator!= (const redhawk__reservation_request_struct& s1, const redhawk__reservation_request_struct& s2) { + return !(s1==s2); +} + struct advanced_struct { advanced_struct () { @@ -331,81 +390,100 @@ inline bool operator!= (const threshold_event_struct& s1, const threshold_event_ return !(s1==s2); }; + struct thresholds_struct { thresholds_struct () { + ignore=false; cpu_idle = 10; - mem_free = 100; + load_avg = 80; + mem_free = 100LL; nic_usage = 900; + files_available = 3; + threads = 3; }; static std::string getId() { return std::string("thresholds"); }; + bool ignore; float cpu_idle; + float load_avg; CORBA::LongLong mem_free; CORBA::Long nic_usage; + float files_available; + float threads; }; inline bool operator>>= (const CORBA::Any& a, thresholds_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("cpu_idle", props[idx].id)) { - if (!(props[idx].value >>= s.cpu_idle)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("mem_free", props[idx].id)) { - if (!(props[idx].value >>= s.mem_free)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } - else if (!strcmp("nic_usage", props[idx].id)) { - if (!(props[idx].value >>= s.nic_usage)) { - CORBA::TypeCode_var typecode = props[idx].value.type(); - if (typecode->kind() != CORBA::tk_null) { - return false; - } - } - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("ignore")) { + if (!(props["ignore"] >>= s.ignore)) return false; + } + if (props.contains("cpu_idle")) { + if (!(props["cpu_idle"] >>= s.cpu_idle)) return false; + } + if (props.contains("load_avg")) { + if (!(props["load_avg"] >>= 
s.load_avg)) return false; + } + if (props.contains("mem_free")) { + if (!(props["mem_free"] >>= s.mem_free)) return false; + } + if (props.contains("nic_usage")) { + if (!(props["nic_usage"] >>= s.nic_usage)) return false; + } + if (props.contains("files_available")) { + if (!(props["files_available"] >>= s.files_available)) return false; + } + if (props.contains("threads")) { + if (!(props["threads"] >>= s.threads)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const thresholds_struct& s) { - CF::Properties props; - props.length(3); - props[0].id = CORBA::string_dup("cpu_idle"); - props[0].value <<= s.cpu_idle; - props[1].id = CORBA::string_dup("mem_free"); - props[1].value <<= s.mem_free; - props[2].id = CORBA::string_dup("nic_usage"); - props[2].value <<= s.nic_usage; + redhawk::PropertyMap props; + + props["ignore"] = s.ignore; + + props["cpu_idle"] = s.cpu_idle; + + props["load_avg"] = s.load_avg; + + props["mem_free"] = s.mem_free; + + props["nic_usage"] = s.nic_usage; + + props["files_available"] = s.files_available; + + props["threads"] = s.threads; a <<= props; -}; +} inline bool operator== (const thresholds_struct& s1, const thresholds_struct& s2) { + if (s1.ignore!=s2.ignore) + return false; if (s1.cpu_idle!=s2.cpu_idle) return false; + if (s1.load_avg!=s2.load_avg) + return false; if (s1.mem_free!=s2.mem_free) return false; if (s1.nic_usage!=s2.nic_usage) return false; + if (s1.files_available!=s2.files_available) + return false; + if (s1.threads!=s2.threads) + return false; return true; -}; +} inline bool operator!= (const thresholds_struct& s1, const thresholds_struct& s2) { return !(s1==s2); -}; +} struct nic_allocation_status_struct_struct { nic_allocation_status_struct_struct () @@ -1034,4 +1112,357 @@ inline bool operator!= (const interfaces_struct& s1, const interfaces_struct& s2 return !(s1==s2); }; +struct ulimit_struct { + ulimit_struct () + { + }; + + static std::string getId() { + return std::string("ulimit"); 
+ }; + + CORBA::Long current_threads; + CORBA::Long max_threads; + CORBA::Long current_open_files; + CORBA::Long max_open_files; +}; + +inline bool operator>>= (const CORBA::Any& a, ulimit_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("current_threads")) { + if (!(props["current_threads"] >>= s.current_threads)) return false; + } + if (props.contains("max_threads")) { + if (!(props["max_threads"] >>= s.max_threads)) return false; + } + if (props.contains("current_open_files")) { + if (!(props["current_open_files"] >>= s.current_open_files)) return false; + } + if (props.contains("max_open_files")) { + if (!(props["max_open_files"] >>= s.max_open_files)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const ulimit_struct& s) { + redhawk::PropertyMap props; + + props["current_threads"] = s.current_threads; + + props["max_threads"] = s.max_threads; + + props["current_open_files"] = s.current_open_files; + + props["max_open_files"] = s.max_open_files; + a <<= props; +} + +inline bool operator== (const ulimit_struct& s1, const ulimit_struct& s2) { + if (s1.current_threads!=s2.current_threads) + return false; + if (s1.max_threads!=s2.max_threads) + return false; + if (s1.current_open_files!=s2.current_open_files) + return false; + if (s1.max_open_files!=s2.max_open_files) + return false; + return true; +} + +inline bool operator!= (const ulimit_struct& s1, const ulimit_struct& s2) { + return !(s1==s2); +} + +struct utilization_entry_struct { + utilization_entry_struct () + { + }; + + static std::string getId() { + return std::string("utilization_entry"); + }; + + std::string description; + float component_load; + float system_load; + float subscribed; + float maximum; +}; + +inline bool operator>>= (const CORBA::Any& a, utilization_entry_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const 
redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("description")) { + if (!(props["description"] >>= s.description)) return false; + } + if (props.contains("component_load")) { + if (!(props["component_load"] >>= s.component_load)) return false; + } + if (props.contains("system_load")) { + if (!(props["system_load"] >>= s.system_load)) return false; + } + if (props.contains("subscribed")) { + if (!(props["subscribed"] >>= s.subscribed)) return false; + } + if (props.contains("maximum")) { + if (!(props["maximum"] >>= s.maximum)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const utilization_entry_struct& s) { + redhawk::PropertyMap props; + + props["description"] = s.description; + + props["component_load"] = s.component_load; + + props["system_load"] = s.system_load; + + props["subscribed"] = s.subscribed; + + props["maximum"] = s.maximum; + a <<= props; +} + +inline bool operator== (const utilization_entry_struct& s1, const utilization_entry_struct& s2) { + if (s1.description!=s2.description) + return false; + if (s1.component_load!=s2.component_load) + return false; + if (s1.system_load!=s2.system_load) + return false; + if (s1.subscribed!=s2.subscribed) + return false; + if (s1.maximum!=s2.maximum) + return false; + return true; +} + +inline bool operator!= (const utilization_entry_struct& s1, const utilization_entry_struct& s2) { + return !(s1==s2); +} +struct loadAverage_struct { + loadAverage_struct () + { + }; + + static std::string getId() { + return std::string("DCE:9da85ebc-6503-48e7-af36-b77c7ad0c2b4"); + }; + + double onemin; + double fivemin; + double fifteenmin; +}; + +inline bool operator>>= (const CORBA::Any& a, loadAverage_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("onemin")) { + if (!(props["onemin"] >>= s.onemin)) return false; + } + if 
(props.contains("fivemin")) { + if (!(props["fivemin"] >>= s.fivemin)) return false; + } + if (props.contains("fifteenmin")) { + if (!(props["fifteenmin"] >>= s.fifteenmin)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const loadAverage_struct& s) { + redhawk::PropertyMap props; + + props["onemin"] = s.onemin; + + props["fivemin"] = s.fivemin; + + props["fifteenmin"] = s.fifteenmin; + a <<= props; +} + +inline bool operator== (const loadAverage_struct& s1, const loadAverage_struct& s2) { + if (s1.onemin!=s2.onemin) + return false; + if (s1.fivemin!=s2.fivemin) + return false; + if (s1.fifteenmin!=s2.fifteenmin) + return false; + return true; +} + +inline bool operator!= (const loadAverage_struct& s1, const loadAverage_struct& s2) { + return !(s1==s2); +} + +struct component_monitor_struct { + component_monitor_struct () + { + }; + + static std::string getId() { + return std::string("component_monitor::component_monitor"); + }; + + std::string component_id; + std::string waveform_id; + unsigned short pid; + float cores; + float mem_rss; + float mem_percent; + CORBA::ULong num_processes; + CORBA::ULong num_threads; + CORBA::ULong num_files; +}; + +inline bool operator>>= (const CORBA::Any& a, component_monitor_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("component_monitor::component_monitor::component_id")) { + if (!(props["component_monitor::component_monitor::component_id"] >>= s.component_id)) return false; + } + if (props.contains("component_monitor::component_monitor::waveform_id")) { + if (!(props["component_monitor::component_monitor::waveform_id"] >>= s.waveform_id)) return false; + } + if (props.contains("component_monitor::component_monitor::pid")) { + if (!(props["component_monitor::component_monitor::pid"] >>= s.pid)) return false; + } + if 
(props.contains("component_monitor::component_monitor::cores")) { + if (!(props["component_monitor::component_monitor::cores"] >>= s.cores)) return false; + } + if (props.contains("component_monitor::component_monitor::mem_rss")) { + if (!(props["component_monitor::component_monitor::mem_rss"] >>= s.mem_rss)) return false; + } + if (props.contains("component_monitor::component_monitor::mem_percent")) { + if (!(props["component_monitor::component_monitor::mem_percent"] >>= s.mem_percent)) return false; + } + if (props.contains("component_monitor::component_monitor::num_processes")) { + if (!(props["component_monitor::component_monitor::num_processes"] >>= s.num_processes)) return false; + } + if (props.contains("component_monitor::component_monitor::num_threads")) { + if (!(props["component_monitor::component_monitor::num_threads"] >>= s.num_threads)) return false; + } + if (props.contains("component_monitor::component_monitor::num_files")) { + if (!(props["component_monitor::component_monitor::num_files"] >>= s.num_files)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const component_monitor_struct& s) { + redhawk::PropertyMap props; + + props["component_monitor::component_monitor::component_id"] = s.component_id; + + props["component_monitor::component_monitor::waveform_id"] = s.waveform_id; + + props["component_monitor::component_monitor::pid"] = s.pid; + + props["component_monitor::component_monitor::cores"] = s.cores; + + props["component_monitor::component_monitor::mem_rss"] = s.mem_rss; + + props["component_monitor::component_monitor::mem_percent"] = s.mem_percent; + + props["component_monitor::component_monitor::num_processes"] = s.num_processes; + + props["component_monitor::component_monitor::num_threads"] = s.num_threads; + + props["component_monitor::component_monitor::num_files"] = s.num_files; + a <<= props; +} + +inline bool operator== (const component_monitor_struct& s1, const component_monitor_struct& s2) { + if 
(s1.component_id!=s2.component_id) + return false; + if (s1.waveform_id!=s2.waveform_id) + return false; + if (s1.pid!=s2.pid) + return false; + if (s1.cores!=s2.cores) + return false; + if (s1.mem_rss!=s2.mem_rss) + return false; + if (s1.mem_percent!=s2.mem_percent) + return false; + if (s1.num_processes!=s2.num_processes) + return false; + if (s1.num_threads!=s2.num_threads) + return false; + if (s1.num_files!=s2.num_files) + return false; + return true; +} + +inline bool operator!= (const component_monitor_struct& s1, const component_monitor_struct& s2) { + return !(s1==s2); +} +struct sys_limits_struct { + sys_limits_struct () + { + }; + + static std::string getId() { + return std::string("sys_limits"); + }; + + CORBA::Long current_threads; + CORBA::Long max_threads; + CORBA::Long current_open_files; + CORBA::Long max_open_files; +}; + +inline bool operator>>= (const CORBA::Any& a, sys_limits_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("sys_limits::current_threads")) { + if (!(props["sys_limits::current_threads"] >>= s.current_threads)) return false; + } + if (props.contains("sys_limits::max_threads")) { + if (!(props["sys_limits::max_threads"] >>= s.max_threads)) return false; + } + if (props.contains("sys_limits::current_open_files")) { + if (!(props["sys_limits::current_open_files"] >>= s.current_open_files)) return false; + } + if (props.contains("sys_limits::max_open_files")) { + if (!(props["sys_limits::max_open_files"] >>= s.max_open_files)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const sys_limits_struct& s) { + redhawk::PropertyMap props; + + props["sys_limits::current_threads"] = s.current_threads; + + props["sys_limits::max_threads"] = s.max_threads; + + props["sys_limits::current_open_files"] = s.current_open_files; + + props["sys_limits::max_open_files"] = s.max_open_files; + a <<= props; 
+} + +inline bool operator== (const sys_limits_struct& s1, const sys_limits_struct& s2) { + if (s1.current_threads!=s2.current_threads) + return false; + if (s1.max_threads!=s2.max_threads) + return false; + if (s1.current_open_files!=s2.current_open_files) + return false; + if (s1.max_open_files!=s2.max_open_files) + return false; + return true; +} + +inline bool operator!= (const sys_limits_struct& s1, const sys_limits_struct& s2) { + return !(s1==s2); +} + #endif // STRUCTPROPS_H diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/ConversionWrapper.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/ConversionWrapper.h new file mode 100644 index 000000000..32524181c --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/ConversionWrapper.h @@ -0,0 +1,58 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK GPP. + * + * REDHAWK GPP is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef CONV_WRAPPER_H_ +#define CONV_WRAPPER_H_ +#include <functional> +#include <iostream> +template< class REFERENCE_TYPE, class RETURN_TYPE=REFERENCE_TYPE, class ctype=uint64_t, class CFUNC=std::multiplies< RETURN_TYPE > > +class ConversionWrapper + { + public: + typedef REFERENCE_TYPE type; + typedef RETURN_TYPE result_type; + typedef void argument_type; + typedef CFUNC opfunc; + + explicit ConversionWrapper( type& ref, ctype cf=1048576, const opfunc &func=std::multiplies< result_type >() ): + ref_(ref), func_(func), unit_conversion_(cf) + {}; + + result_type operator()() const { + return func_( static_cast<result_type>(ref_), (result_type)unit_conversion_ ); + // debug +#if 0 + result_type ret; + ret = func_( static_cast<result_type>(ref_), (result_type)unit_conversion_ ); + std::cout << " ConversionWrapper: value/cf/result " << ref_ << "/" << unit_conversion_ << "/" << ret << std::endl; + return ret; +#endif + }; + + type& get() const { return ref_; }; + + private: + type &ref_; + opfunc func_; + ctype unit_conversion_; + + }; + +#endif diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/affinity.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/affinity.cpp index 280b9b68c..c5709bc31 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/affinity.cpp +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/affinity.cpp @@ -2,14 +2,14 @@ * This file is protected by Copyright. Please refer to the COPYRIGHT file * distributed with this source distribution. * - * This file is part of REDHAWK core. + * This file is part of REDHAWK GPP. * - * REDHAWK core is free software: you can redistribute it and/or modify it + * REDHAWK GPP is free software: you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your * option) any later version. 
 * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License * for more details. @@ -24,6 +24,7 @@ #include #include #include +#include <boost/lexical_cast.hpp> #ifdef HAVE_LIBNUMA #include <numa.h> #endif @@ -41,7 +42,7 @@ namespace gpp { namespace affinity { - bool check_numa() { + bool check_numa() { #ifdef HAVE_LIBNUMA return (numa_available() != -1); #else @@ -49,6 +50,7 @@ namespace gpp { #endif } + const std::string get_cgroup_root() { return redhawk::affinity::get_cgroup_root(); } @@ -58,7 +60,11 @@ namespace gpp { } bool is_disabled() { - return redhawk::affinity::is_disabled(); + return redhawk::affinity::is_disabled() || ( check_numa() == false ) ; + } + + rh_logger::LoggerPtr get_affinity_logger() { + return redhawk::affinity::get_affinity_logger(); } void set_nic_promotion( const bool onoff ) { @@ -83,14 +89,14 @@ namespace gpp { std::string pintr("/proc/interrupts"); std::ifstream in(pintr.c_str(), std::ifstream::in ); if ( in.fail() ) { - RH_NL_ERROR("gpp::affinity", "Unable to access /proc/interrupts"); + RH_ERROR(get_affinity_logger(), "Unable to access /proc/interrupts"); return cpus; } std::string line; while( std::getline( in, line ) ) { // check if the device is our interface - RH_NL_TRACE("gpp::affinity", "Processing /proc/interrupts.... line:" << line); + RH_TRACE(get_affinity_logger(), "Processing /proc/interrupts.... 
 line:" << line); if ( line.rfind(iface) != std::string::npos ) { std::istringstream iss(line); int parts=0; @@ -99,13 +105,18 @@ namespace gpp { iss>>tok; // skip interrupt number and iface if ( parts > 0 and tok != iface ) { - std::istringstream iss(tok); - int icnt; - iss >> icnt; - if ( icnt > 0 ) { - RH_NL_TRACE("gpp::affinity", "identify cpus: Adding CPU : " << parts-1); - cpus.push_back(parts-1); - } + int icnt=0; + try { + icnt=boost::lexical_cast<int>(tok); + RH_TRACE(get_affinity_logger(), "identify cpus: CPU : " << parts-1 << " nic interrupts:" << icnt); + if ( icnt > 0 ) { + RH_TRACE(get_affinity_logger(), "identify cpus: Adding CPU : " << parts-1); + cpus.push_back(parts-1); + } + } + catch(...){ + RH_TRACE(get_affinity_logger(), "Invalid Token: tok:" << tok); + } } parts++; }while(iss); @@ -115,7 +126,7 @@ namespace gpp { redhawk::affinity::CpuList::iterator citer=cpus.begin(); for (; citer != cpus.end(); citer++) { - RH_NL_DEBUG("gpp::affinity", "identified CPUS iface/cpu ...:" << iface << "/" << *citer); + RH_DEBUG(get_affinity_logger(), "identified CPUS iface/cpu ...:" << iface << "/" << *citer); } return cpus; @@ -134,11 +145,11 @@ namespace gpp { #ifdef HAVE_LIBNUMA int soc=-1; for( int i=0; i < (int)cpulist.size();i++ ) { - RH_NL_DEBUG("gpp::affinity", "Finding (processor socket) for NIC:" << iface << " socket :" << numa_node_of_cpu(cpulist[i]) ); + RH_DEBUG(get_affinity_logger(), "Finding (processor socket) for NIC:" << iface << " socket :" << numa_node_of_cpu(cpulist[i]) ); if ( std::count( bl.begin(), bl.end(), cpulist[i] ) != 0 ) continue; soc = numa_node_of_cpu(cpulist[i]); if ( soc != psoc && psoc != -1 && !findFirst ) { - RH_NL_WARN("gpp::affinity", "More than 1 socket servicing NIC:" << iface); + RH_WARN(get_affinity_logger(), "More than 1 socket servicing NIC:" << iface); psoc=-1; break; } @@ -156,6 +167,7 @@ namespace gpp { return retval; + } @@ -168,10 +180,6 @@ namespace gpp { } #ifdef HAVE_LIBNUMA - if ( numa_available() == -1 ) { - 
return cpu_list; - } - if ( list_type == "socket" || list_type == "node" ) { std::string nodestr = context; struct bitmask *node_mask = numa_parse_nodestring((char *)nodestr.c_str()); @@ -231,222 +239,218 @@ namespace gpp { redhawk::affinity::CpuList::const_iterator citer=blacklist.begin(); for (; citer != blacklist.end(); citer++) { - RH_NL_DEBUG("gpp::affinity", "BlackList ...:" << *citer); + RH_DEBUG(get_affinity_logger(), "BlackList ...:" << *citer); } redhawk::affinity::AffinityDirectives::const_iterator piter = spec.begin(); for ( int cnt=0; piter != spec.end(); piter++, cnt++ ) { redhawk::affinity::AffinityDirective affinity_spec = *piter; - RH_NL_DEBUG("gpp::affinity", " cnt:" << cnt << " Processing Affinity pid: " << pid << " " << affinity_spec.first << ":" << affinity_spec.second ); + RH_DEBUG(get_affinity_logger(), " cnt:" << cnt << " Processing Affinity pid: " << pid << " " << affinity_spec.first << ":" << affinity_spec.second ); #ifdef HAVE_LIBNUMA - if ( numa_available() == -1 ) { - RH_NL_WARN("gpp::affinity", "Missing affinity support from Redhawk libraries, ... ignoring numa affinity based requests "); - } - else { - // nic -- Determine cpu list by interrupts assigned for the specified NIC - if ( affinity_spec.first == "nic" ) { - std::string iface = affinity_spec.second; - // Determine cpu list by interrupts assigned for the specified NIC - redhawk::affinity::CpuList cpulist = identify_cpus(iface); + // nic -- Determine cpu list by interrupts assigned for the specified NIC + if ( affinity_spec.first == "nic" ) { + std::string iface = affinity_spec.second; + // Determine cpu list by interrupts assigned for the specified NIC + redhawk::affinity::CpuList cpulist = identify_cpus(iface); - // if no cpus identified then issue warning - if ( cpulist.size() > 0 ) { - - // check if black list is specified... 
if not then use numa node based affinity - if ( blacklist.size() == 0 && getpid() == pid ) { // are we the same process, then use node binding method - bitmask *node_mask = numa_allocate_nodemask(); - if ( !node_mask ) { - throw redhawk::affinity::AffinityFailed("Unable to allocate node mask"); - } - - for( int i=0; i < (int)cpulist.size();i++ ) { - RH_NL_DEBUG("gpp::affinity", "Setting NIC (processor socket select) available sockets :" << numa_node_of_cpu(cpulist[i]) ); - numa_bitmask_setbit(node_mask, numa_node_of_cpu(cpulist[i]) ); - } - - RH_NL_DEBUG("gpp::affinity", "Setting NIC (processor socket select) affinity constraint: :" << iface ); - numa_bind(node_mask); - numa_bitmask_free(node_mask); - } - else { - - int cpus=0; - for( int i=0; i < (int)cpulist.size();i++ ) { - // check if cpu id is not in blacklist - if ( std::count( blacklist.begin(), blacklist.end(), cpulist[i] ) == 0 ) cpus++; - } - - // - // For nic based affinity and blacklisted cpus... - // - // Find all the cpus that service interrupts for the specified interfaces - // - // if interface is serviced by a single cpu and promote to socket flag is on - // get list of cpus for the processor socket servicing the interface... - // apply blacklist - // specify affinity with remaining cpu list - // - // if interface is serviced by single cpu and blacklisted, and promote to socket flag is on - // get list of cpus for the processor socket servicing the interface... - // apply blacklist - // specify affinity with remaining cpu list - // - // otherwise ... 
- // from the list of cpus for the interface, apply blacklist then apply as affinity - // - if ( (cpulist.size() == 1 && get_nic_promotion() ) || ( cpus == 0 && get_nic_promotion() ) ) { - int cpuid = cpulist[0]; - std::ostringstream os; - os << numa_node_of_cpu( cpuid ); - redhawk::affinity::CpuList tlist = get_cpu_list( "socket", os.str() ); - RH_NL_INFO("gpp::affinity", "Promoting NIC affinity to PID:" << pid << " SOCKET:" << os.str() ); - cpulist.clear(); - for( int i=0; i < (int)tlist.size();i++ ) { - if ( tlist[i] == cpuid ) continue; - cpulist.push_back( tlist[i] ); - } - } + // if no cpus identified then issue warning + if ( cpulist.size() > 0 ) { + + // check if black list is specified... if not then use numa node based affinity + if ( blacklist.size() == 0 && getpid() == pid ) { // are we the same process, then use node binding method + bitmask *node_mask = numa_allocate_nodemask(); + if ( !node_mask ) { + throw redhawk::affinity::AffinityFailed("Unable to allocate node mask"); + } + + for( int i=0; i < (int)cpulist.size();i++ ) { + RH_DEBUG(get_affinity_logger(), "Setting NIC (processor socket select) available sockets :" << numa_node_of_cpu(cpulist[i]) ); + numa_bitmask_setbit(node_mask, numa_node_of_cpu(cpulist[i]) ); + } + + RH_DEBUG(get_affinity_logger(), "Setting NIC (processor socket select) affinity constraint: :" << iface ); + numa_bind(node_mask); + numa_bitmask_free(node_mask); + } + else { + + int cpus=0; + for( int i=0; i < (int)cpulist.size();i++ ) { + // check if cpu id is not in blacklist + if ( std::count( blacklist.begin(), blacklist.end(), cpulist[i] ) == 0 ) cpus++; + } + + // + // For nic based affinity and blacklisted cpus... + // + // Find all the cpus that service interrupts for the specified interfaces + // + // if interface is serviced by a single cpu and promote to socket flag is on + // get list of cpus for the processor socket servicing the interface... 
+ // apply blacklist + // specify affinity with remaining cpu list + // + // if interface is serviced by single cpu and blacklisted, and promote to socket flag is on + // get list of cpus for the processor socket servicing the interface... + // apply blacklist + // specify affinity with remaining cpu list + // + // otherwise ... + // from the list of cpus for the interface, apply blacklist then apply as affinity + // + if ( (cpulist.size() == 1 && get_nic_promotion() ) || ( cpus == 0 && get_nic_promotion() ) ) { + int cpuid = cpulist[0]; + std::ostringstream os; + os << numa_node_of_cpu( cpuid ); + redhawk::affinity::CpuList tlist = get_cpu_list( "socket", os.str() ); + RH_INFO(get_affinity_logger(), "Promoting NIC affinity to PID:" << pid << " SOCKET:" << os.str() ); + cpulist.clear(); + for( int i=0; i < (int)tlist.size();i++ ) { + if ( tlist[i] == cpuid ) continue; + cpulist.push_back( tlist[i] ); + } + } - cpus=0; - for( int i=0; i < (int)cpulist.size();i++ ) { - // check if cpu id is not in blacklist - if ( std::count( blacklist.begin(), blacklist.end(), cpulist[i] ) == 0 ) cpus++; - } + cpus=0; + for( int i=0; i < (int)cpulist.size();i++ ) { + // check if cpu id is not in blacklist + if ( std::count( blacklist.begin(), blacklist.end(), cpulist[i] ) == 0 ) cpus++; + } - if ( cpus > 0 ) { // use cpulist to bind process - bitmask *cpu_mask = numa_allocate_cpumask(); - if ( !cpu_mask ) { - throw redhawk::affinity::AffinityFailed("Unable to allocate node mask"); - } - - for( int i=0; i < (int)cpulist.size();i++ ) { - // check if cpu id is blacklisted - if ( std::count( blacklist.begin(), blacklist.end(), cpulist[i] ) == 0 ) { - RH_NL_DEBUG("gpp::affinity", "Setting NIC (cpu select) available :" << cpulist[i] ); - numa_bitmask_setbit(cpu_mask, cpulist[i]); - } - } - - RH_NL_DEBUG("gpp::affinity", "Setting NIC (cpu select) affinity constraint: :" << iface ); - if ( numa_sched_setaffinity( pid, cpu_mask) ) { - std::ostringstream e; - e << "Binding to NIC with cpu 
affinity, nic=" << iface; - throw redhawk::affinity::AffinityFailed(e.str()); - } - numa_bitmask_free(cpu_mask); - } - else { - RH_NL_WARN("gpp::affinity", "Setting NIC (cpu select), no cpu available all blacklisted :" << iface ); - std::ostringstream e; - e << "Binding to NIC, no cpus available all blacklisted :" << iface; - throw redhawk::affinity::AffinityFailed(e.str()); - } - } + if ( cpus > 0 ) { // use cpulist to bind process + bitmask *cpu_mask = numa_allocate_cpumask(); + if ( !cpu_mask ) { + throw redhawk::affinity::AffinityFailed("Unable to allocate node mask"); } - else { - RH_NL_WARN("gpp::affinity", "Setting NIC, unable to set directive:" << iface ); - std::ostringstream e; - e << "Binding to NIC, unable to set directive, cannot determine processor socket or cpu list from interrupt mapping, directive:" << iface; - throw redhawk::affinity::AffinityFailed(e.str()); + + for( int i=0; i < (int)cpulist.size();i++ ) { + // check if cpu id is blacklisted + if ( std::count( blacklist.begin(), blacklist.end(), cpulist[i] ) == 0 ) { + RH_DEBUG(get_affinity_logger(), "Setting NIC (cpu select) available :" << cpulist[i] ); + numa_bitmask_setbit(cpu_mask, cpulist[i]); + } } - } - // socket -- assign via processor socket - if ( affinity_spec.first == "socket" ) { - std::string nodestr = affinity_spec.second; - struct bitmask *node_mask = numa_parse_nodestring((char *)nodestr.c_str()); - if ( !node_mask ) { - throw redhawk::affinity::AffinityFailed("Processor socket affinity failed, unable to parse: " + nodestr); + RH_DEBUG(get_affinity_logger(), "Setting NIC (cpu select) affinity constraint: :" << iface ); + if ( numa_sched_setaffinity( pid, cpu_mask) ) { + std::ostringstream e; + e << "Binding to NIC with cpu affinity, nic=" << iface; + throw redhawk::affinity::AffinityFailed(e.str()); } + numa_bitmask_free(cpu_mask); + } + else { + RH_WARN(get_affinity_logger(), "Setting NIC (cpu select), no cpu available all blacklisted :" << iface ); + std::ostringstream e; + e 
<< "Binding to NIC, no cpus available all blacklisted :" << iface; + throw redhawk::affinity::AffinityFailed(e.str()); + } + } + } + else { + RH_WARN(get_affinity_logger(), "Setting NIC, unable to set directive:" << iface ); + std::ostringstream e; + e << "Binding to NIC, unable to set directive, cannot determine processor socket or cpu list from interrupt mapping, directive:" << iface; + throw redhawk::affinity::AffinityFailed(e.str()); + } + } + + // socket -- assign via processor socket + if ( affinity_spec.first == "socket" ) { + std::string nodestr = affinity_spec.second; + struct bitmask *node_mask = numa_parse_nodestring((char *)nodestr.c_str()); + if ( !node_mask ) { + throw redhawk::affinity::AffinityFailed("Processor socket affinity failed, unable to parse: " + nodestr); + } - // plain node binding if no cpus are listed. - if ( blacklist.size() == 0 ) { - // bind to node... let system scheduler do its magic - RH_NL_DEBUG("gpp::affinity", "Setting PROCESSOR SOCKET affinity to constraint :" << nodestr ); - numa_bind( node_mask ); - } - else { // remove blacklisted cpus from node binding - bitmask *cpu_mask = numa_allocate_cpumask(); - if ( !cpu_mask ) { - throw redhawk::affinity::AffinityFailed("Unable to allocate cpu mask"); - } - - // check if node is active, if so then get a cpu id - int nbytes = numa_bitmask_nbytes(node_mask); - for (int i=0; i < nbytes*8; i++ ){ - if ( numa_bitmask_isbitset( node_mask, i ) ) { - numa_node_to_cpus( i, cpu_mask ); - } - } - - // check if cpu id is blacklisted - redhawk::affinity::CpuList::const_iterator biter = blacklist.begin(); - for ( ; biter != blacklist.end() ; biter++ ) { - RH_NL_DEBUG("gpp::affinity", "Setting PROCESSOR SOCKET (cpu select) blacklist :" << *biter ); - numa_bitmask_clearbit(cpu_mask, *biter); - } + // plain node binding if no cpus are listed. + if ( blacklist.size() == 0 ) { + // bind to node... 
let system scheduler do its magic + RH_DEBUG(get_affinity_logger(), "Setting PROCESSOR SOCKET affinity to constraint :" << nodestr ); + numa_bind( node_mask ); + } + else { // remove blacklisted cpus from node binding + bitmask *cpu_mask = numa_allocate_cpumask(); + if ( !cpu_mask ) { + throw redhawk::affinity::AffinityFailed("Unable to allocate cpu mask"); + } + + // check if node is active, if so then get a cpu id + int nbytes = numa_bitmask_nbytes(node_mask); + for (int i=0; i < nbytes*8; i++ ){ + if ( numa_bitmask_isbitset( node_mask, i ) ) { + numa_node_to_cpus( i, cpu_mask ); + } + } + + // check if cpu id is blacklisted + redhawk::affinity::CpuList::const_iterator biter = blacklist.begin(); + for ( ; biter != blacklist.end() ; biter++ ) { + RH_DEBUG(get_affinity_logger(), "Setting PROCESSOR SOCKET (cpu select) blacklist :" << *biter ); + numa_bitmask_clearbit(cpu_mask, *biter); + } #if 0 - { - // TEST for sched_setaffinity to resolve that not all threads are being confined to cpu set - cpu_set_t cset; - CPU_ZERO(&cset); - int nbytes = numa_bitmask_nbytes(cpu_mask); - for (int i=0; i < nbytes*8; i++ ){ - if ( numa_bitmask_isbitset( cpu_mask, i ) ) { - RH_NL_DEBUG("gpp::affinity", "PTHREAD setting affinity to cpu :" << i ); - CPU_SET(i,&cset); - } - } - - //if ( !pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cset) ) {y - if ( !sched_setaffinity(pid, sizeof(cpu_set_t), &cset) ) { - RH_NL_ERROR("gpp::affinity", "Setting PROCESSOR SOCKET (cpu select), unable to set processor affinity"); - } - - - } + { + // TEST for sched_setaffinity to resolve that not all threads are being confined to cpu set + cpu_set_t cset; + CPU_ZERO(&cset); + int nbytes = numa_bitmask_nbytes(cpu_mask); + for (int i=0; i < nbytes*8; i++ ){ + if ( numa_bitmask_isbitset( cpu_mask, i ) ) { + RH_DEBUG(get_affinity_logger(), "PTHREAD setting affinity to cpu :" << i ); + CPU_SET(i,&cset); + } + } + + //if ( !pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cset) ) {y + if ( 
!sched_setaffinity(pid, sizeof(cpu_set_t), &cset) ) { + RH_ERROR(get_affinity_logger(), "Setting PROCESSOR SOCKET (cpu select), unable to set processor affinity"); + } + + + } #endif - RH_NL_DEBUG("gpp::affinity", "Setting PROCESSOR SOCKET (cpu select) affinity, pid/constraint:" << pid << "/" << nodestr ); - if ( numa_sched_setaffinity( pid, cpu_mask) ) { - std::ostringstream e; - e << "Binding to PROCESSOR SOCKET with blacklisted cpus, socket=" << nodestr; - throw redhawk::affinity::AffinityFailed(e.str()); - } - numa_bitmask_free(cpu_mask); - } + RH_DEBUG(get_affinity_logger(), "Setting PROCESSOR SOCKET (cpu select) affinity, pid/constraint:" << pid << "/" << nodestr ); + if ( numa_sched_setaffinity( pid, cpu_mask) ) { + std::ostringstream e; + e << "Binding to PROCESSOR SOCKET with blacklisted cpus, socket=" << nodestr; + throw redhawk::affinity::AffinityFailed(e.str()); + } + numa_bitmask_free(cpu_mask); + } - numa_bitmask_free(node_mask); + numa_bitmask_free(node_mask); - } + } - // cpu -- assign via cpu id - if ( affinity_spec.first == "cpu" ) { - std::string cpustr = affinity_spec.second; - struct bitmask *cpu_mask = numa_parse_cpustring((char*)cpustr.c_str()); - if ( !cpu_mask ) { - throw redhawk::affinity::AffinityFailed("CPU affinity failed, unable to parse: <" + cpustr + ">" ); - } - - // apply black list - redhawk::affinity::CpuList::const_iterator biter = blacklist.begin(); - for ( ; biter != blacklist.end() ; biter++ ) { - RH_NL_DEBUG("gpp::affinity", "Setting CPU affinity, blacklist :" << *biter ); - numa_bitmask_clearbit(cpu_mask, *biter); - } + // cpu -- assign via cpu id + if ( affinity_spec.first == "cpu" ) { + std::string cpustr = affinity_spec.second; + struct bitmask *cpu_mask = numa_parse_cpustring((char*)cpustr.c_str()); + if ( !cpu_mask ) { + throw redhawk::affinity::AffinityFailed("CPU affinity failed, unable to parse: <" + cpustr + ">" ); + } - RH_NL_DEBUG("gpp::affinity", "Setting CPU affinity to constraint :" << cpustr ); - if ( 
numa_sched_setaffinity( pid, cpu_mask ) ) { - std::ostringstream e; - e << "Binding to CPU: " << cpustr; - throw redhawk::affinity::AffinityFailed(e.str()); - } + // apply black list + redhawk::affinity::CpuList::const_iterator biter = blacklist.begin(); + for ( ; biter != blacklist.end() ; biter++ ) { + RH_DEBUG(get_affinity_logger(), "Setting CPU affinity, blacklist :" << *biter ); + numa_bitmask_clearbit(cpu_mask, *biter); + } + + RH_DEBUG(get_affinity_logger(), "Setting CPU affinity to constraint :" << cpustr ); + if ( numa_sched_setaffinity( pid, cpu_mask ) ) { + std::ostringstream e; + e << "Binding to CPU: " << cpustr; + throw redhawk::affinity::AffinityFailed(e.str()); + } - } } + #else - RH_NL_WARN("gpp::affinity", "Missing affinity support from Redhawk libraries, ... ignoring numa affinity based requests "); + RH_WARN(get_affinity_logger(), "Missing affinity support from Redhawk libraries, ... ignoring numa affinity based requests "); #endif // cpuset -- assign via cpuset @@ -463,7 +467,7 @@ namespace gpp { } os << pid << std::endl; os.close(); - RH_NL_DEBUG("gpp::affinity", "Setting CPUSET affinity to constraint :" << cpuset_name ); + RH_DEBUG(get_affinity_logger(), "Setting CPUSET affinity to constraint :" << cpuset_name ); } // cgroup - assign to cgroup @@ -480,7 +484,7 @@ namespace gpp { } os << pid << std::endl; os.close(); - RH_NL_DEBUG("gpp::affinity", "Setting CGROUP affinity to constraint :" << cgroup_name ); + RH_DEBUG(get_affinity_logger(), "Setting CGROUP affinity to constraint :" << cgroup_name ); } } diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/affinity.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/affinity.h index ea4c063ce..274ca6134 100644 --- a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/affinity.h +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/affinity.h @@ -35,6 +35,8 @@ namespace gpp namespace affinity { + bool check_numa(); + /** Find the socket assocated with a particular network interface 
diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/popen.cpp b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/popen.cpp new file mode 100644 index 000000000..7e1b90b97 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/popen.cpp @@ -0,0 +1,61 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK GPP. + * + * REDHAWK GPP is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef DEBUG_ON +#define DEBUG(x) x +#else +#define DEBUG(x) +#endif + +namespace utils { + + std::string popen(const std::string &cmd, const bool first_or_last) { + DEBUG(std::cout << "CMD:" << cmd << std::endl); + FILE* pipe = ::popen(cmd.c_str(), "r"); + if (!pipe) return "ERROR"; + // make sure to popen and it succeeds +#if BOOST_VERSION > 104300 + boost::iostreams::file_descriptor_source pipe_src(fileno(pipe), boost::iostreams::never_close_handle ); +#else + boost::iostreams::file_descriptor_source pipe_src(fileno(pipe) ); +#endif + boost::iostreams::stream stream(pipe_src); + stream.set_auto_close(false); // https://svn.boost.org/trac/boost/ticket/3517 + std::string line; + while(std::getline(stream,line)) { + if ( first_or_last ) break; + DEBUG(std::cout << "LINE-> " + line + " length: " << line.length() << std::endl); + } + pclose(pipe); + return line; + } + + +}; diff --git a/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/popen.h b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/popen.h new file mode 100644 index 000000000..0b82b1ed8 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/GPP/cpp/utils/popen.h @@ -0,0 +1,31 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK GPP. + * + * REDHAWK GPP is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK GPP is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef _UTILS_POPEN_H_ +#define _UTILS_POPEN_H_ +#include + +namespace utils { + + std::string popen(const std::string &cmd, const bool first_or_last); + +}; + +#endif + diff --git a/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.prf.xml b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.prf.xml new file mode 100644 index 000000000..133a4328b --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.prf.xml @@ -0,0 +1,84 @@ + + + + + + This specifies the device kind + GPP + + + + + This specifies the specific device + NicExecDevice + + + + + SCA required property describing the CPU type + x86_64 + + + + + SCA required property describing the Operating System Name + Linux + + + + + SCA required property describing the Operating System Version + + + + + + + + + + Uniquely identifies the allocation. Used for updates and deallocations. + +Required + + + + + + Requires this specific interface. + +Optional + + + + + + + + Uniquely identifies the allocation. 
+ + + The allocated interface + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.scd.xml b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.scd.xml new file mode 100644 index 000000000..3c6e0efb5 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.scd.xml @@ -0,0 +1,76 @@ + + + + + 2.2 + + executabledevice + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.spd.xml b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.spd.xml new file mode 100644 index 000000000..123e354ba --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/NicExecDevice.spd.xml @@ -0,0 +1,45 @@ + + + + + + + + + Executable device with configurable virtual NICs to support testing components with NIC allocations. + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/NicExecDevice.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/NicExecDevice/python/NicExecDevice.py b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/python/NicExecDevice.py new file mode 100755 index 000000000..daa19b246 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/python/NicExecDevice.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. 
+# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +# +# AUTO-GENERATED +# +# Source: NicExecDevice.spd.xml +from ossie.device import start_device +import logging + +from NicExecDevice_base import * + +class NicExecDevice_i(NicExecDevice_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your device registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + self._allocatedNics = {} + self.setAllocationImpl('nic_allocation', self.allocate_nic, self.deallocate_nic) + + def allocate_nic(self, value): + if value.interface: + nic = value.interface + if not nic in self.nic_list: + # Bad interface + return False + elif nic in self._allocatedNics: + # Interface in use + return False + else: + nic = self._findAvailableNic() + if not nic: + return False + + if value.identifier in self._allocatedNics: + # Duplicate identifier + return False + + self._allocatedNics[value.identifier] = nic + return True + + def _findAvailableNic(self): + all_nics = self.nic_list[:] + for nic in self._allocatedNics.itervalues(): + all_nics.remove(nic) + if not all_nics: + return None + return all_nics[0] + + def deallocate_nic(self, value): + del self._allocatedNics[value.identifier] + + def get_nic_allocation_status(self): + return [self.NicAllocationStatusStruct(k,v) for k, v in self._allocatedNics.iteritems()] + + nic_allocation_status = 
NicExecDevice_base.nic_allocation_status.rebind(fget=get_nic_allocation_status) + + def updateUsageState(self): + """ + This is called automatically after allocateCapacity or deallocateCapacity are called. + Your implementation should determine the current state of the device: + self._usageState = CF.Device.IDLE # not in use + self._usageState = CF.Device.ACTIVE # in use, with capacity remaining for allocation + self._usageState = CF.Device.BUSY # in use, with no capacity remaining for allocation + """ + if len(self._allocatedNics) == len(self.nic_list): + self._usageState = CF.Device.BUSY + elif len(self._allocatedNics) > 0: + self._usageState = CF.Device.ACTIVE + else: + self._usageState = CF.Device.IDLE + + def process(self): + return FINISH + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + start_device(NicExecDevice_i) + diff --git a/redhawk/src/testing/sdr/dev/devices/NicExecDevice/python/NicExecDevice_base.py b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/python/NicExecDevice_base.py new file mode 100644 index 000000000..d8e160d54 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/NicExecDevice/python/NicExecDevice_base.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +# AUTO-GENERATED CODE. DO NOT MODIFY! +# +# Source: NicExecDevice.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.device import ExecutableDevice +from ossie.threadedcomponent import * +from ossie.properties import simple_property +from ossie.properties import simpleseq_property +from ossie.properties import struct_property +from ossie.properties import structseq_property + +import Queue, copy, time, threading + +class NicExecDevice_base(CF__POA.ExecutableDevice, ExecutableDevice, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams): + ExecutableDevice.__init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 devices. 
This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this device + + def start(self): + ExecutableDevice.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + ExecutableDevice.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + ExecutableDevice.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file + # or by using the IDE. 
+ device_kind = simple_property(id_="DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", + name="device_kind", + type_="string", + defvalue="GPP", + mode="readonly", + action="eq", + kinds=("allocation",), + description="""This specifies the device kind""") + + + device_model = simple_property(id_="DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", + name="device_model", + type_="string", + defvalue="NicExecDevice", + mode="readonly", + action="eq", + kinds=("allocation",), + description=""" This specifies the specific device""") + + + processor_name = simple_property(id_="DCE:9B445600-6C7F-11d4-A226-0050DA314CD6", + name="processor_name", + type_="string", + defvalue="x86_64", + mode="readonly", + action="eq", + kinds=("allocation",), + description="""SCA required property describing the CPU type""") + + + os_name = simple_property(id_="DCE:80BF17F0-6C7F-11d4-A226-0050DA314CD6", + name="os_name", + type_="string", + defvalue="Linux", + mode="readonly", + action="eq", + kinds=("allocation",), + description="""SCA required property describing the Operating System Name""") + + + os_version = simple_property(id_="DCE:0f3a9a37-a342-43d8-9b7f-78dc6da74192", + name="os_version", + type_="string", + mode="readonly", + action="eq", + kinds=("allocation",), + description="""SCA required property describing the Operating System Version""") + + + nic_list = simpleseq_property(id_="nic_list", + type_="string", + defvalue=[], + mode="readonly", + action="external", + kinds=("property",)) + + + class NicAllocation(object): + identifier = simple_property( + id_="nic_allocation::identifier", + name="identifier", + type_="string", + defvalue="" + ) + + interface = simple_property( + id_="nic_allocation::interface", + name="interface", + type_="string", + defvalue="" + ) + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + 
classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["identifier"] = self.identifier + d["interface"] = self.interface + return str(d) + + @classmethod + def getId(cls): + return "nic_allocation" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("identifier",self.identifier),("interface",self.interface)] + + nic_allocation = struct_property(id_="nic_allocation", + structdef=NicAllocation, + configurationkind=("allocation",), + mode="readwrite") + + + class NicAllocationStatusStruct(object): + identifier = simple_property( + id_="nic_allocation_status::identifier", + name="identifier", + type_="string") + + interface = simple_property( + id_="nic_allocation_status::interface", + name="interface", + type_="string") + + def __init__(self, identifier="", interface=""): + self.identifier = identifier + self.interface = interface + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["identifier"] = self.identifier + d["interface"] = self.interface + return str(d) + + @classmethod + def getId(cls): + return "nic_allocation_status_struct" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("identifier",self.identifier),("interface",self.interface)] + + nic_allocation_status = structseq_property(id_="nic_allocation_status", + structdef=NicAllocationStatusStruct, + defvalue=[], + configurationkind=("property",), + mode="readonly") + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/PortTestDevice/PortTestDevice.prf.xml b/redhawk/src/testing/sdr/dev/devices/PortTestDevice/PortTestDevice.prf.xml index 7005550de..2dc5402ba 100644 --- a/redhawk/src/testing/sdr/dev/devices/PortTestDevice/PortTestDevice.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/PortTestDevice/PortTestDevice.prf.xml @@ -42,7 +42,7 @@ with this program. 
If not, see http://www.gnu.org/licenses/. SCA required property describing the CPU type - i686 + x86 diff --git a/redhawk/src/testing/sdr/dev/devices/PortTestDeviceService/PortTestDeviceService.prf.xml b/redhawk/src/testing/sdr/dev/devices/PortTestDeviceService/PortTestDeviceService.prf.xml index 7005550de..2dc5402ba 100644 --- a/redhawk/src/testing/sdr/dev/devices/PortTestDeviceService/PortTestDeviceService.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/PortTestDeviceService/PortTestDeviceService.prf.xml @@ -42,7 +42,7 @@ with this program. If not, see http://www.gnu.org/licenses/. SCA required property describing the CPU type - i686 + x86 diff --git a/redhawk/src/testing/sdr/dev/devices/bad_init_device/bad_init_device.spec b/redhawk/src/testing/sdr/dev/devices/bad_init_device/bad_init_device.spec deleted file mode 100644 index b49a3b897..000000000 --- a/redhawk/src/testing/sdr/dev/devices/bad_init_device/bad_init_device.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: bad_init_device -Summary: Device %{name} -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Devices -Source: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-root - -Requires: redhawk >= 1.9 -BuildRequires: redhawk-devel >= 1.9 -BuildRequires: autoconf automake libtool - -BuildArch: noarch - -%description -Device %{name} - - -%prep -%setup - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dev/devices/bad_init_device/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dev/devices/bad_init_device/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk) -%dir %{_prefix}/dev/devices/%{name} -%{_prefix}/dev/devices/%{name}/bad_init_device.scd.xml -%{_prefix}/dev/devices/%{name}/bad_init_device.prf.xml -%{_prefix}/dev/devices/%{name}/bad_init_device.spd.xml -%{_prefix}/dev/devices/%{name}/python - diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/Makefile.am new file mode 100644 index 000000000..d7a3d6f72 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/Makefile.am @@ -0,0 +1,32 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. 
+# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +CFDIR = $(top_srcdir)/base + +noinst_PROGRAMS = dev_alloc_cpp + +dev_alloc_cpp_SOURCES = dev_alloc_cpp.cpp \ +dev_alloc_cpp.h \ +dev_alloc_cpp_base.cpp \ +dev_alloc_cpp_base.h \ +main.cpp \ +struct_props.h + +dev_alloc_cpp_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) -I$(CFDIR)/include -I$(CFDIR)/include/ossie +dev_alloc_cpp_LDADD = $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_FILESYSTEM_LIB) $(BOOST_THREAD_LIB) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/Makefile.am.ide b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/Makefile.am.ide new file mode 100644 index 000000000..ad9ce2a07 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/Makefile.am.ide @@ -0,0 +1,9 @@ +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! +# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. 
Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = dev_alloc_cpp.cpp +redhawk_SOURCES_auto += dev_alloc_cpp.h +redhawk_SOURCES_auto += dev_alloc_cpp_base.cpp +redhawk_SOURCES_auto += dev_alloc_cpp_base.h diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp.cpp b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp.cpp new file mode 100644 index 000000000..ccd3f9206 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp.cpp @@ -0,0 +1,106 @@ +/************************************************************************** + + This is the device code. This file contains the child class where + custom functionality can be added to the device. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "dev_alloc_cpp.h" + +PREPARE_LOGGING(dev_alloc_cpp_i) + +dev_alloc_cpp_i::dev_alloc_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : + dev_alloc_cpp_base(devMgr_ior, id, lbl, sftwrPrfl) +{ +} + +dev_alloc_cpp_i::dev_alloc_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : + dev_alloc_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, compDev) +{ +} + +dev_alloc_cpp_i::dev_alloc_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : + dev_alloc_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, capacities) +{ +} + +dev_alloc_cpp_i::dev_alloc_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : + dev_alloc_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev) +{ +} + +dev_alloc_cpp_i::~dev_alloc_cpp_i() +{ +} + +void dev_alloc_cpp_i::constructor() +{ + 
/*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ + this->setAllocationImpl(s_prop, this, &dev_alloc_cpp_i::alloc_s_prop, &dev_alloc_cpp_i::dealloc_s_prop); + this->setAllocationImpl(si_prop, this, &dev_alloc_cpp_i::alloc_si_prop, &dev_alloc_cpp_i::dealloc_si_prop); + this->setAllocationImpl(se_prop, this, &dev_alloc_cpp_i::alloc_se_prop, &dev_alloc_cpp_i::dealloc_se_prop); + this->setAllocationImpl(sq_prop, this, &dev_alloc_cpp_i::alloc_sq_prop, &dev_alloc_cpp_i::dealloc_sq_prop); +} + +/************************************************************************** + + This is called automatically after allocateCapacity or deallocateCapacity are called. + Your implementation should determine the current state of the device: + + setUsageState(CF::Device::IDLE); // not in use + setUsageState(CF::Device::ACTIVE); // in use, with capacity remaining for allocation + setUsageState(CF::Device::BUSY); // in use, with no capacity remaining for allocation + +**************************************************************************/ +void dev_alloc_cpp_i::updateUsageState() +{ +} + +bool dev_alloc_cpp_i::alloc_s_prop(const s_prop_struct &value) +{ + // perform logic + return true; // successful allocation +} +void dev_alloc_cpp_i::dealloc_s_prop(const s_prop_struct &value) +{ + // perform logic +} +bool dev_alloc_cpp_i::alloc_si_prop(const short &value) +{ + // perform logic + return true; // successful allocation +} +void dev_alloc_cpp_i::dealloc_si_prop(const short &value) +{ + // perform logic +} +bool dev_alloc_cpp_i::alloc_se_prop(const std::vector &value) +{ + // perform logic + return true; // successful allocation +} +void dev_alloc_cpp_i::dealloc_se_prop(const std::vector &value) +{ + // perform logic +} +bool dev_alloc_cpp_i::alloc_sq_prop(const 
std::vector &value) +{ + // perform logic + return true; // successful allocation +} +void dev_alloc_cpp_i::dealloc_sq_prop(const std::vector &value) +{ + // perform logic +} + +int dev_alloc_cpp_i::serviceFunction() +{ + LOG_DEBUG(dev_alloc_cpp_i, "serviceFunction() example log message"); + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp.h b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp.h new file mode 100644 index 000000000..93f7376e8 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp.h @@ -0,0 +1,32 @@ +#ifndef DEV_ALLOC_CPP_I_IMPL_H +#define DEV_ALLOC_CPP_I_IMPL_H + +#include "dev_alloc_cpp_base.h" + +class dev_alloc_cpp_i : public dev_alloc_cpp_base +{ + ENABLE_LOGGING + public: + dev_alloc_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); + dev_alloc_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); + dev_alloc_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities); + dev_alloc_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev); + ~dev_alloc_cpp_i(); + + void constructor(); + + int serviceFunction(); + bool alloc_s_prop(const s_prop_struct &value); + void dealloc_s_prop(const s_prop_struct &value); + bool alloc_si_prop(const short &value); + void dealloc_si_prop(const short &value); + bool alloc_se_prop(const std::vector &value); + void dealloc_se_prop(const std::vector &value); + bool alloc_sq_prop(const std::vector &value); + void dealloc_sq_prop(const std::vector &value); + + protected: + void updateUsageState(); +}; + +#endif // DEV_ALLOC_CPP_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp_base.cpp b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp_base.cpp new file mode 100644 index 000000000..ab0be95e4 --- /dev/null +++ 
b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp_base.cpp @@ -0,0 +1,134 @@ +#include "dev_alloc_cpp_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the device class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +dev_alloc_cpp_base::dev_alloc_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl), + ThreadedComponent() +{ + construct(); +} + +dev_alloc_cpp_base::dev_alloc_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, compDev), + ThreadedComponent() +{ + construct(); +} + +dev_alloc_cpp_base::dev_alloc_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities), + ThreadedComponent() +{ + construct(); +} + +dev_alloc_cpp_base::dev_alloc_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev), + ThreadedComponent() +{ + construct(); +} + +dev_alloc_cpp_base::~dev_alloc_cpp_base() +{ +} + +void dev_alloc_cpp_base::construct() +{ + loadProperties(); + +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. 
+*******************************************************************************************/ +void dev_alloc_cpp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Device_impl::start(); + ThreadedComponent::startThread(); +} + +void dev_alloc_cpp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Device_impl::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void dev_alloc_cpp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the device running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Device_impl::releaseObject(); +} + +void dev_alloc_cpp_base::loadProperties() +{ + addProperty(device_kind, + "DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", + "device_kind", + "readonly", + "", + "eq", + "allocation"); + + addProperty(device_model, + "DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", + "device_model", + "readonly", + "", + "eq", + "allocation"); + + addProperty(si_prop, + "si_prop", + "", + "readwrite", + "", + "external", + "allocation"); + + addProperty(se_prop, + "se_prop", + "", + "readwrite", + "", + "external", + "allocation"); + + addProperty(s_prop, + s_prop_struct(), + "s_prop", + "", + "readwrite", + "", + "external", + "allocation"); + + addProperty(sq_prop, + "sq_prop", + "", + "readwrite", + "", + "external", + "allocation"); + +} + + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp_base.h b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp_base.h new file mode 100644 index 000000000..a76c98d73 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/dev_alloc_cpp_base.h @@ -0,0 +1,45 @@ +#ifndef DEV_ALLOC_CPP_BASE_IMPL_BASE_H +#define DEV_ALLOC_CPP_BASE_IMPL_BASE_H + 
+#include +#include +#include + +#include "struct_props.h" + +class dev_alloc_cpp_base : public Device_impl, protected ThreadedComponent +{ + public: + dev_alloc_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); + dev_alloc_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); + dev_alloc_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities); + dev_alloc_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev); + ~dev_alloc_cpp_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + // Member variables exposed as properties + /// Property: device_kind + std::string device_kind; + /// Property: device_model + std::string device_model; + /// Property: si_prop + short si_prop; + /// Property: se_prop + std::vector se_prop; + /// Property: s_prop + s_prop_struct s_prop; + /// Property: sq_prop + std::vector sq_prop; + + private: + void construct(); +}; +#endif // DEV_ALLOC_CPP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/main.cpp b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/main.cpp new file mode 100644 index 000000000..90fb539db --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/main.cpp @@ -0,0 +1,26 @@ +#include +#include "ossie/ossieSupport.h" + +#include "dev_alloc_cpp.h" + +dev_alloc_cpp_i *devicePtr; + +void signal_catcher(int sig) +{ + // IMPORTANT Don't call exit(...) 
in this function + // issue all CORBA calls that you need for cleanup here before calling ORB shutdown + if (devicePtr) { + devicePtr->halt(); + } +} +int main(int argc, char* argv[]) +{ + struct sigaction sa; + sa.sa_handler = signal_catcher; + sa.sa_flags = 0; + devicePtr = 0; + + Device_impl::start_device(&devicePtr, sa, argc, argv); + return 0; +} + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/struct_props.h b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/struct_props.h new file mode 100644 index 000000000..de7f03b20 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/cpp/struct_props.h @@ -0,0 +1,124 @@ +#ifndef STRUCTPROPS_H +#define STRUCTPROPS_H + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + +*******************************************************************************************/ + +#include +#include +#include + +struct s_prop_struct { + s_prop_struct () + { + } + + static std::string getId() { + return std::string("s_prop"); + } + + static const char* getFormat() { + return "sh[d]"; + } + + std::string s_prop__a; + short s_prop__b; + std::vector abc; +}; + +inline bool operator>>= (const CORBA::Any& a, s_prop_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("s_prop::a")) { + if (!(props["s_prop::a"] >>= s.s_prop__a)) return false; + } + if (props.contains("s_prop::b")) { + if (!(props["s_prop::b"] >>= s.s_prop__b)) return false; + } + if (props.contains("abc")) { + if (!(props["abc"] >>= s.abc)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const s_prop_struct& s) { + redhawk::PropertyMap props; + + props["s_prop::a"] = s.s_prop__a; + + props["s_prop::b"] = s.s_prop__b; + + props["abc"] = s.abc; + a <<= props; +} + +inline bool operator== (const s_prop_struct& s1, 
const s_prop_struct& s2) { + if (s1.s_prop__a!=s2.s_prop__a) + return false; + if (s1.s_prop__b!=s2.s_prop__b) + return false; + if (s1.abc!=s2.abc) + return false; + return true; +} + +inline bool operator!= (const s_prop_struct& s1, const s_prop_struct& s2) { + return !(s1==s2); +} + +struct sq_prop_s_struct { + sq_prop_s_struct () + { + } + + static std::string getId() { + return std::string("sq_prop_s"); + } + + static const char* getFormat() { + return "fs"; + } + + float sq_prop__a; + std::string sq_prop__b; +}; + +inline bool operator>>= (const CORBA::Any& a, sq_prop_s_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("sq_prop::a")) { + if (!(props["sq_prop::a"] >>= s.sq_prop__a)) return false; + } + if (props.contains("sq_prop::b")) { + if (!(props["sq_prop::b"] >>= s.sq_prop__b)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const sq_prop_s_struct& s) { + redhawk::PropertyMap props; + + props["sq_prop::a"] = s.sq_prop__a; + + props["sq_prop::b"] = s.sq_prop__b; + a <<= props; +} + +inline bool operator== (const sq_prop_s_struct& s1, const sq_prop_s_struct& s2) { + if (s1.sq_prop__a!=s2.sq_prop__a) + return false; + if (s1.sq_prop__b!=s2.sq_prop__b) + return false; + return true; +} + +inline bool operator!= (const sq_prop_s_struct& s1, const sq_prop_s_struct& s2) { + return !(s1==s2); +} + +#endif // STRUCTPROPS_H diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.prf.xml b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.prf.xml new file mode 100644 index 000000000..7d592111a --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.prf.xml @@ -0,0 +1,35 @@ + + + + + This specifies the device kind + + + + + This specifies the specific device + + + + + + + + + + + + + + + + + + + + + + + + + diff --git 
a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.scd.xml b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.scd.xml new file mode 100644 index 000000000..125128ef1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.scd.xml @@ -0,0 +1,49 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.spd.xml b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.spd.xml new file mode 100644 index 000000000..9819b3354 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/dev_alloc_cpp + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.prf.xml b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.prf.xml new file mode 100644 index 000000000..34aea44cc --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.prf.xml @@ -0,0 +1,24 @@ + + + + + This specifies the device kind + + + + + This specifies the specific device + + + + + 5 + + + + + 4 + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.scd.xml b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.scd.xml new file mode 100644 index 000000000..125128ef1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.scd.xml @@ -0,0 +1,49 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.spd.xml 
b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.spd.xml new file mode 100644 index 000000000..0477452d2 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/dev_props_bad_numbers.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/dev_props_bad_numbers.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/python/dev_props_bad_numbers.py b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/python/dev_props_bad_numbers.py new file mode 100755 index 000000000..601197198 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/python/dev_props_bad_numbers.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: dev_props_bad_numbers.spd.xml +from ossie.device import start_device +import logging + +from dev_props_bad_numbers_base import * + +class dev_props_bad_numbers_i(dev_props_bad_numbers_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your device registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def updateUsageState(self): + """ + This is called automatically after allocateCapacity or deallocateCapacity are called. 
+ Your implementation should determine the current state of the device: + self._usageState = CF.Device.IDLE # not in use + self._usageState = CF.Device.ACTIVE # in use, with capacity remaining for allocation + self._usageState = CF.Device.BUSY # in use, with no capacity remaining for allocation + """ + return NOOP + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the device. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the device developer's discretion. 
+ + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", dev_props_bad_numbers_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = dev_props_bad_numbers_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Application: + app = self.getApplication().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + Allocation: + + Allocation callbacks are available to customize a Device's response to an allocation request. + Callback allocation/deallocation functions are registered using the setAllocationImpl function, + usually in the initialize() function + For example, allocation property "my_alloc" can be registered with allocation function + my_alloc_fn and deallocation function my_dealloc_fn as follows: + + self.setAllocationImpl("my_alloc", self.my_alloc_fn, self.my_dealloc_fn) + + def my_alloc_fn(self, value): + # perform logic + return True # successful allocation + + def my_dealloc_fn(self, value): + # perform logic + pass + + Example: + + # This example assumes that the device has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the device + # base class. 
+ # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + start_device(dev_props_bad_numbers_i) + diff --git a/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/python/dev_props_bad_numbers_base.py b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/python/dev_props_bad_numbers_base.py new file mode 100644 index 000000000..79c7b5018 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/dev_props_bad_numbers/python/dev_props_bad_numbers_base.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: dev_props_bad_numbers.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.device import Device +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class dev_props_bad_numbers_base(CF__POA.Device, Device, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams): + Device.__init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 devices. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this device + + def start(self): + Device.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Device.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Device.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + device_kind = simple_property(id_="DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", + name="device_kind", + type_="string", + mode="readonly", + action="eq", + kinds=("allocation",), + description="""This specifies the device kind""") + + + device_model = simple_property(id_="DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", + name="device_model", + type_="string", + mode="readonly", + action="eq", + kinds=("allocation",), + description=""" This specifies the specific device""") + + + some_float = simple_property(id_="some_float", + type_="float", + defvalue=5.0, + mode="readwrite", + action="external", + kinds=("property",)) + + + some_short = simple_property(id_="some_short", + type_="short", + defvalue=4, + mode="readwrite", + action="external", + kinds=("property",)) + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/Makefile.am new file mode 100644 index 000000000..329882437 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/Makefile.am @@ -0,0 +1,30 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# +CFDIR = $(top_srcdir)/base + +noinst_PROGRAMS = log_test_cpp + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. +include $(srcdir)/Makefile.am.ide +log_test_cpp_SOURCES = $(redhawk_SOURCES_auto) +log_test_cpp_LDADD = ../../../../dom/deps/cpp_dep1/cpp/libcpp_dep1.la $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_THREAD_LIB) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la +log_test_cpp_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) -I$(CFDIR)/include -I$(CFDIR)/include/ossie diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/Makefile.am.ide b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/Makefile.am.ide new file mode 100644 index 000000000..f62a7a223 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/Makefile.am.ide @@ -0,0 +1,10 @@ +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! +# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = log_test_cpp.cpp +redhawk_SOURCES_auto += log_test_cpp.h +redhawk_SOURCES_auto += log_test_cpp_base.cpp +redhawk_SOURCES_auto += log_test_cpp_base.h +redhawk_SOURCES_auto += main.cpp diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp.cpp b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp.cpp new file mode 100644 index 000000000..cd068968d --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp.cpp @@ -0,0 +1,322 @@ +/************************************************************************** + + This is the device code. 
This file contains the child class where + custom functionality can be added to the device. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "log_test_cpp.h" + +PREPARE_LOGGING(log_test_cpp_i) + +log_test_cpp_i::log_test_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : + log_test_cpp_base(devMgr_ior, id, lbl, sftwrPrfl) +{ +} + +log_test_cpp_i::log_test_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : + log_test_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, compDev) +{ +} + +log_test_cpp_i::log_test_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : + log_test_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, capacities) +{ +} + +log_test_cpp_i::log_test_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : + log_test_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev) +{ +} + +log_test_cpp_i::~log_test_cpp_i() +{ +} + +void log_test_cpp_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. 
All properties are properly initialized before this function is called + ***********************************************************************************/ + baseline_1_logger = this->_baseLog->getChildLogger("some_stuff"); + baseline_2_logger = this->_baseLog->getChildLogger("more_stuff"); + namespaced_logger = this->_baseLog->getChildLogger("lower", "namespace"); + basetree_logger = this->_baseLog->getChildLogger("lower", ""); + rh_logger::LoggerPtr child_ns_logger = basetree_logger->getChildLogger("first", "second"); + rh_logger::LoggerPtr child_no_ns_logger = basetree_logger->getChildLogger("third"); +} + +/************************************************************************** + + This is called automatically after allocateCapacity or deallocateCapacity are called. + Your implementation should determine the current state of the device: + + setUsageState(CF::Device::IDLE); // not in use + setUsageState(CF::Device::ACTIVE); // in use, with capacity remaining for allocation + setUsageState(CF::Device::BUSY); // in use, with no capacity remaining for allocation + +**************************************************************************/ +void log_test_cpp_i::updateUsageState() +{ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. 
+ + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. 
+ + Example: + // This example assumes that the device has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the device base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. 
Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the device developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void log_test_cpp_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &log_test_cpp_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Device Manager: + CF::DeviceManager_ptr devmgr = this->getDeviceManager()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (log_test_cpp_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &log_test_cpp_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to log_test_cpp.cpp + log_test_cpp_i::log_test_cpp_i(const char *uuid, const char *label) : + log_test_cpp_base(uuid, label) + { + addPropertyListener(scaleValue, this, &log_test_cpp_i::scaleChanged); + addPropertyListener(status, this, &log_test_cpp_i::statusChanged); + } + + void log_test_cpp_i::scaleChanged(float oldValue, float newValue) + { + RH_DEBUG(this->_baseLog, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void log_test_cpp_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + RH_DEBUG(this->_baseLog, "status changed"); + } + + //Add to log_test_cpp.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + Logging: + + The member _baseLog is a logger whose base name is the component (or device) instance name. + New logs should be created based on this logger name. + + To create a new logger, + rh_logger::LoggerPtr my_logger = this->_baseLog->getChildLogger("foo"); + + Assuming component instance name abc_1, my_logger will then be created with the + name "abc_1.user.foo". + + Allocation: + + Allocation callbacks are available to customize the Device's response to + allocation requests. For example, if the Device contains the allocation + property "my_alloc" of type string, the allocation and deallocation + callbacks follow the pattern (with arbitrary function names + my_alloc_fn and my_dealloc_fn): + + bool log_test_cpp_i::my_alloc_fn(const std::string &value) + { + // perform logic + return true; // successful allocation + } + void log_test_cpp_i::my_dealloc_fn(const std::string &value) + { + // perform logic + } + + The allocation and deallocation functions are then registered with the Device + base class with the setAllocationImpl call. 
Note that the variable for the property is used rather + than its id: + + this->setAllocationImpl(my_alloc, this, &log_test_cpp_i::my_alloc_fn, &log_test_cpp_i::my_dealloc_fn); + + + +************************************************************************************************/ +int log_test_cpp_i::serviceFunction() +{ + RH_DEBUG(_baseLog, "message from _log"); + RH_DEBUG(baseline_1_logger, "message from baseline_1_logger"); + RH_DEBUG(baseline_2_logger, "message from baseline_2_logger"); + RH_DEBUG(namespaced_logger, "message from namespaced_logger"); + RH_DEBUG(basetree_logger, "message from basetree_logger"); + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp.h b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp.h new file mode 100644 index 000000000..5f27aca05 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp.h @@ -0,0 +1,28 @@ +#ifndef LOG_TEST_CPP_I_IMPL_H +#define LOG_TEST_CPP_I_IMPL_H + +#include "log_test_cpp_base.h" + +class log_test_cpp_i : public log_test_cpp_base +{ + ENABLE_LOGGING + public: + log_test_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); + log_test_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); + log_test_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities); + log_test_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev); + ~log_test_cpp_i(); + + void constructor(); + + int serviceFunction(); + rh_logger::LoggerPtr baseline_1_logger; + rh_logger::LoggerPtr baseline_2_logger; + rh_logger::LoggerPtr namespaced_logger; + rh_logger::LoggerPtr basetree_logger; + + protected: + void updateUsageState(); +}; + +#endif // LOG_TEST_CPP_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp_base.cpp b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp_base.cpp new file 
mode 100644 index 000000000..491f751cc --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp_base.cpp @@ -0,0 +1,101 @@ +#include "log_test_cpp_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the device class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +log_test_cpp_base::log_test_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl), + ThreadedComponent() +{ + construct(); +} + +log_test_cpp_base::log_test_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, compDev), + ThreadedComponent() +{ + construct(); +} + +log_test_cpp_base::log_test_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities), + ThreadedComponent() +{ + construct(); +} + +log_test_cpp_base::log_test_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev), + ThreadedComponent() +{ + construct(); +} + +log_test_cpp_base::~log_test_cpp_base() +{ +} + +void log_test_cpp_base::construct() +{ + loadProperties(); + +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. 
+*******************************************************************************************/ +void log_test_cpp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Device_impl::start(); + ThreadedComponent::startThread(); +} + +void log_test_cpp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Device_impl::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void log_test_cpp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the device running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Device_impl::releaseObject(); +} + +void log_test_cpp_base::loadProperties() +{ + addProperty(device_kind, + "DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", + "device_kind", + "readonly", + "", + "eq", + "allocation"); + + addProperty(device_model, + "DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", + "device_model", + "readonly", + "", + "eq", + "allocation"); + +} + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp_base.h b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp_base.h new file mode 100644 index 000000000..f7e850054 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/log_test_cpp_base.h @@ -0,0 +1,36 @@ +#ifndef LOG_TEST_CPP_BASE_IMPL_BASE_H +#define LOG_TEST_CPP_BASE_IMPL_BASE_H + +#include +#include +#include + + +class log_test_cpp_base : public Device_impl, protected ThreadedComponent +{ + public: + log_test_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); + log_test_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); + log_test_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities); + log_test_cpp_base(char 
*devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev); + ~log_test_cpp_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + // Member variables exposed as properties + /// Property: device_kind + std::string device_kind; + /// Property: device_model + std::string device_model; + + private: + void construct(); +}; +#endif // LOG_TEST_CPP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/main.cpp b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/main.cpp new file mode 100644 index 000000000..690105f1a --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/cpp/main.cpp @@ -0,0 +1,26 @@ +#include +#include "ossie/ossieSupport.h" + +#include "log_test_cpp.h" + +log_test_cpp_i *devicePtr; + +void signal_catcher(int sig) +{ + // IMPORTANT Don't call exit(...) 
in this function + // issue all CORBA calls that you need for cleanup here before calling ORB shutdown + if (devicePtr) { + devicePtr->halt(); + } +} +int main(int argc, char* argv[]) +{ + struct sigaction sa; + sa.sa_handler = signal_catcher; + sa.sa_flags = 0; + devicePtr = 0; + + Device_impl::start_device(&devicePtr, sa, argc, argv); + return 0; +} + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.prf.xml b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.prf.xml new file mode 100644 index 000000000..01774f3a3 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.prf.xml @@ -0,0 +1,14 @@ + + + + + This specifies the device kind + + + + + This specifies the specific device + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.scd.xml b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.scd.xml new file mode 100644 index 000000000..125128ef1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.scd.xml @@ -0,0 +1,49 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.spd.xml b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.spd.xml new file mode 100644 index 000000000..a1ef48e45 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_cpp/log_test_cpp.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + cpp/log_test_cpp + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_java/java/Makefile.am b/redhawk/src/testing/sdr/dev/devices/log_test_java/java/Makefile.am new file mode 100644 index 000000000..5a6ed22ec --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_java/java/Makefile.am @@ -0,0 +1,42 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +if HAVE_JAVASUPPORT + +log_test_java.jar: + mkdir -p bin + find ./src -name "*.java" > fileList.txt + $(JAVAC) -cp $(OSSIE_CLASSPATH) -d bin @fileList.txt + $(JAR) cf ./log_test_java.jar -C bin . 
+ rm fileList.txt + +clean-local: + rm -rf bin + +log_test_java_jar_SOURCES := $(shell find ./src -name "*.java") + +ossieName = log_test_java +noinst_PROGRAMS = log_test_java.jar + +else + +all-local: + @echo "Java support disabled - log_test_java will not be compiled" + +endif diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_java/java/src/log_test_java/java/log_test_java.java b/redhawk/src/testing/sdr/dev/devices/log_test_java/java/src/log_test_java/java/log_test_java.java new file mode 100644 index 000000000..1f3b44d74 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_java/java/src/log_test_java/java/log_test_java.java @@ -0,0 +1,319 @@ +package log_test_java.java; + +import java.util.Properties; +import org.ossie.component.RHLogger; + +/** + * This is the device code. This file contains the derived class where custom + * functionality can be added to the device. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general device housekeeping + * + * Source: log_test_java.spd.xml + */ +public class log_test_java extends log_test_java_base { + /** + * This is the device constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your device. + * + * A device may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * _baseLog.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * device class. + * + * Devices may contain allocation properties with "external" action, which + * are used in capacity allocation and deallocation. In order to support + * this capability, allocation properties require additional functionality. + * This is implemented by calling setAllocator() on the property instance + * with an object that implements the Allocator interface for that data type. 
+ * + * Example: + * // This example makes use of the following properties + * // - A struct property called tuner_alloc + * // The following methods are defined elsewhere in your class: + * // - private boolean allocate_tuner(tuner_alloc_struct capacity) + * // - private void deallocate_tuner(tuner_alloc_struct capacity) + * // The file must import "org.ossie.properties.Allocator" + * + * this.tuner_alloc.setAllocator(new Allocator() { + * public boolean allocate(tuner_alloc_struct capacity) { + * return allocate_tuner(capacity); + * } + * public void deallocate(tuner_alloc_struct capacity) { + * deallocate_tuner(capacity); + * } + * }); + * + * The recommended practice is for the allocate() and deallocate() methods + * to contain only glue code to dispatch the call to private methods on the + * device class. + * Accessing the Device Manager and Domain Manager: + * + * Both the Device Manager hosting this Device and the Domain Manager hosting + * the Device Manager are available to the Device. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Device Manager: + * CF.DeviceManager devmgr = this.getDeviceManager().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. 
+ */ + public RHLogger baseline_1_logger; + public RHLogger baseline_2_logger; + public RHLogger namespaced_logger; + public RHLogger basetree_logger; + + public log_test_java() + { + super(); + } + + public void constructor() + { + baseline_1_logger = this._baseLog.getChildLogger("some_stuff"); + baseline_2_logger = this._baseLog.getChildLogger("more_stuff"); + namespaced_logger = this._baseLog.getChildLogger("lower", "namespace"); + basetree_logger = this._baseLog.getChildLogger("lower", ""); + RHLogger child_ns_logger = basetree_logger.getChildLogger("first", "second"); + RHLogger child_no_ns_logger = basetree_logger.getChildLogger("third"); + } + + /************************************************************************** + + This is called automatically after allocateCapacity or deallocateCapacity are called. + Your implementation should determine the current state of the device: + + setUsageState(CF.DevicePackage.UsageType.IDLE); // not in use + setUsageState(CF.DevicePackage.UsageType.ACTIVE); // in use, with capacity remaining for allocation + setUsageState(CF.DevicePackage.UsageType.BUSY); // in use, with no capacity remaining for allocation + + ***************************************************************************/ + protected void updateUsageState() + { + } + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the device's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. 
+ * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the device developer. 
+ * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Logging: + * + * The member _baseLog is a logger whose base name is the component (or device) instance name. + * New logs should be created based on this logger name. + * + * To create a new logger, + * RHLogger my_logger = this._baseLog.getChildLogger("foo"); + * + * Assuming component instance name abc_1, my_logger will then be created with the + * name "abc_1.user.foo". + * + * Example: + * + * This example assumes that the device has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the device + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + + this._baseLog.debug("message from _log"); + this.baseline_1_logger.debug("message from baseline_1_logger"); + this.baseline_2_logger.debug("message from baseline_2_logger"); + this.namespaced_logger.debug("message from namespaced_logger"); + this.basetree_logger.debug("message from basetree_logger"); + + return NOOP; + } + + /** + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_java/java/src/log_test_java/java/log_test_java_base.java b/redhawk/src/testing/sdr/dev/devices/log_test_java/java/src/log_test_java/java/log_test_java_base.java new file mode 100644 index 000000000..2e0c70bb4 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_java/java/src/log_test_java/java/log_test_java_base.java @@ -0,0 +1,117 @@ +package log_test_java.java; + + +import java.util.Properties; + +import org.apache.log4j.Logger; + +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.DevicePackage.AdminType; +import CF.DevicePackage.OperationalType; +import CF.DevicePackage.UsageType; +import CF.InvalidObjectReference; + +import org.ossie.component.*; +import org.ossie.properties.*; + + +/** + * This is the device code. 
This file contains all the access points + * you need to use to be able to access all input and output ports, + * respond to incoming data, and perform general device housekeeping + * + * Source: log_test_java.spd.xml + * + * @generated + */ + +public abstract class log_test_java_base extends ThreadedDevice { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(log_test_java_base.class.getName()); + + /** + * The property DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d + * This specifies the device kind + * + * @generated + */ + public final StringProperty device_kind = + new StringProperty( + "DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", //id + "device_kind", //name + null, //default value + Mode.READONLY, //mode + Action.EQ, //action + new Kind[] {Kind.ALLOCATION} + ); + + /** + * The property DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb + * This specifies the specific device + * + * @generated + */ + public final StringProperty device_model = + new StringProperty( + "DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", //id + "device_model", //name + null, //default value + Mode.READONLY, //mode + Action.EQ, //action + new Kind[] {Kind.ALLOCATION} + ); + + /** + * @generated + */ + public log_test_java_base() + { + super(); + + setLogger( logger, log_test_java_base.class.getName() ); + + + // Properties + addProperty(device_kind); + + addProperty(device_model); + + } + + protected void setupPortLoggers() { + } + + + + /** + * The main function of your device. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. 
+ * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + log_test_java.configureOrb(orbProps); + + try { + ThreadedDevice.start_device(log_test_java.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_java/java/startJava.sh b/redhawk/src/testing/sdr/dev/devices/log_test_java/java/startJava.sh new file mode 100755 index 000000000..f67fca717 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_java/java/startJava.sh @@ -0,0 +1,38 @@ +#!/bin/sh +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +#Sun ORB start line +# Important, the $@ must be quoted "$@" for arguments to be passed correctly +myDir=`dirname $0` +JAVA_LIBDIR=${myDir}/../../../../../base/framework/java +JAVA_CLASSPATH=${JAVA_LIBDIR}/apache-commons-lang-2.4.jar:${JAVA_LIBDIR}/log4j-1.2.15.jar:${JAVA_LIBDIR}/CFInterfaces.jar:${JAVA_LIBDIR}/ossie.jar:${myDir}/log_test_java.jar:${myDir}:${myDir}/bin:${CLASSPATH} + +# Path for Java +if test -x $JAVA_HOME/bin/java; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=java +fi + +# NOTE: the $@ must be quoted "$@" for arguments to be passed correctly + +#Sun ORB start line +exec $JAVA -cp ${JAVA_CLASSPATH} log_test_java.java.log_test_java "$@" diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.prf.xml b/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.prf.xml new file mode 100644 index 000000000..01774f3a3 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.prf.xml @@ -0,0 +1,14 @@ + + + + + This specifies the device kind + + + + + This specifies the specific device + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.scd.xml b/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.scd.xml new file mode 100644 index 000000000..125128ef1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.scd.xml @@ -0,0 +1,49 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.spd.xml b/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.spd.xml new file mode 100644 index 000000000..c7c26fa15 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_java/log_test_java.spd.xml @@ -0,0 +1,26 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + java/startJava.sh + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.prf.xml b/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.prf.xml new file mode 100644 index 000000000..01774f3a3 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.prf.xml @@ -0,0 +1,14 @@ + + + + + This specifies the device kind + + + + + This specifies the specific device + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.scd.xml b/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.scd.xml new file mode 100644 index 000000000..125128ef1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.scd.xml @@ -0,0 +1,49 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.spd.xml b/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.spd.xml new file mode 100644 index 000000000..38e915aa0 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_py/log_test_py.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/log_test_py.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_py/python/log_test_py.py b/redhawk/src/testing/sdr/dev/devices/log_test_py/python/log_test_py.py new file mode 100755 index 000000000..91e02ef39 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_py/python/log_test_py.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: log_test_py.spd.xml +from ossie.device import start_device +import logging + +from log_test_py_base import * + +class log_test_py_i(log_test_py_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your device registers with the system. 
+ + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + self.baseline_1_logger = self._baseLog.getChildLogger("some_stuff"); + self.baseline_2_logger = self._baseLog.getChildLogger("more_stuff"); + self.namespaced_logger = self._baseLog.getChildLogger("lower", "namespace"); + self.basetree_logger = self._baseLog.getChildLogger("lower", ""); + child_ns_logger = self.basetree_logger.getChildLogger("first", "second") + child_no_ns_logger = self.basetree_logger.getChildLogger("third") + + def updateUsageState(self): + """ + This is called automatically after allocateCapacity or deallocateCapacity are called. + Your implementation should determine the current state of the device: + self._usageState = CF.Device.IDLE # not in use + self._usageState = CF.Device.ACTIVE # in use, with capacity remaining for allocation + self._usageState = CF.Device.BUSY # in use, with no capacity remaining for allocation + """ + return NOOP + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the device. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the device developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", log_test_py_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = log_test_py_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Application: + app = self.getApplication().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + Logging: + + The member _baseLog is a logger whose base name is the component (or device) instance name. + New logs should be created based on this logger name. 
+ + To create a new logger, + my_logger = self._baseLog.getChildLogger("foo") + + Assuming component instance name abc_1, my_logger will then be created with the + name "abc_1.user.foo". + + Allocation: + + Allocation callbacks are available to customize a Device's response to an allocation request. + Callback allocation/deallocation functions are registered using the setAllocationImpl function, + usually in the initialize() function + For example, allocation property "my_alloc" can be registered with allocation function + my_alloc_fn and deallocation function my_dealloc_fn as follows: + + self.setAllocationImpl("my_alloc", self.my_alloc_fn, self.my_dealloc_fn) + + def my_alloc_fn(self, value): + # perform logic + return True # successful allocation + + def my_dealloc_fn(self, value): + # perform logic + pass + + Example: + + # This example assumes that the device has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the device + # base class. 
+ # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._baseLog.debug("message from _log") + self.baseline_1_logger.debug("message from baseline_1_logger") + self.baseline_2_logger.debug("message from baseline_2_logger") + self.namespaced_logger.debug("message from namespaced_logger") + self.basetree_logger.debug("message from basetree_logger") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + start_device(log_test_py_i) + diff --git a/redhawk/src/testing/sdr/dev/devices/log_test_py/python/log_test_py_base.py b/redhawk/src/testing/sdr/dev/devices/log_test_py/python/log_test_py_base.py new file mode 100644 index 000000000..f66aede1c --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/log_test_py/python/log_test_py_base.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: log_test_py.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.device import Device +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class log_test_py_base(CF__POA.Device, Device, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams): + Device.__init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 devices. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this device + + def start(self): + Device.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Device.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._baseLog.exception("Error stopping") + Device.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + device_kind = simple_property(id_="DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", + name="device_kind", + type_="string", + mode="readonly", + action="eq", + kinds=("allocation",), + description="""This specifies the device kind""") + + + device_model = simple_property(id_="DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", + name="device_model", + type_="string", + mode="readonly", + action="eq", + kinds=("allocation",), + description=""" This specifies the specific device""") + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/start_event_device/.md5sums b/redhawk/src/testing/sdr/dev/devices/start_event_device/.md5sums new file mode 100644 index 000000000..f4d643d57 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/start_event_device/.md5sums @@ -0,0 +1,2 @@ +69770cf49c995719d70d0a5d22e618e4 build.sh +b1057bb4da19becda3800f7d05b4209d start_event_device.spec diff --git a/redhawk/src/testing/sdr/dev/devices/start_event_device/.start_event_device.wavedev b/redhawk/src/testing/sdr/dev/devices/start_event_device/.start_event_device.wavedev new file mode 100644 index 000000000..b7ae64e09 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/start_event_device/.start_event_device.wavedev @@ -0,0 +1,25 @@ + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/start_event_device/python/.md5sums b/redhawk/src/testing/sdr/dev/devices/start_event_device/python/.md5sums new file mode 100644 index 000000000..1882b620d --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/start_event_device/python/.md5sums @@ -0,0 +1,6 @@ +8bfcd22353c3a57fee561ad86ee2a56b reconf +0859f314dda606edc04b24a4680de616 start_event_device.py +9438c5c435102516c5dada8f8b972ddc configure.ac +1e96b4beb34a59ab9f9945325f4081ad start_event_device_base.py +76016779fcd2da4ea65a4122d0520ce3 Makefile.am.ide +abe151480dc78386afe9158da3db6a65 Makefile.am diff --git 
a/redhawk/src/testing/sdr/dev/devices/start_event_device/python/start_event_device.py b/redhawk/src/testing/sdr/dev/devices/start_event_device/python/start_event_device.py new file mode 100755 index 000000000..2483ba466 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/start_event_device/python/start_event_device.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +# +# AUTO-GENERATED +# +# Source: start_event_device.spd.xml +from ossie.device import start_device +import logging + +from start_event_device_base import * + +class start_event_device_i(start_event_device_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your device registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. 
+ + def start(self): + self._log.info('starting %s', self._id) + + message = start_event_device_i.StateChange() + message.identifier = self._id + message.event = 'start' + self.port_message_out.sendMessage(message) + + if self.failures.start: + raise CF.Resource.StartError(CF.CF_NOTSET, 'failure requested') + + start_event_device_base.start(self) + + def stop(self): + self._log.info('stopping %s', self._id) + + message = start_event_device_i.StateChange() + message.identifier = self._id + message.event = 'stop' + self.port_message_out.sendMessage(message) + + if self.failures.stop: + raise CF.Resource.StopError(CF.CF_NOTSET, 'failure requested') + + start_event_device_base.stop(self) + + def updateUsageState(self): + """ + This is called automatically after allocateCapacity or deallocateCapacity are called. + Your implementation should determine the current state of the device: + self._usageState = CF.Device.IDLE # not in use + self._usageState = CF.Device.ACTIVE # in use, with capacity remaining for allocation + self._usageState = CF.Device.BUSY # in use, with no capacity remaining for allocation + """ + return NOOP + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the device. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. 
The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the device developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", start_event_device_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = start_event_device_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. 
+ + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Application: + app = self.getApplication().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + Allocation: + + Allocation callbacks are available to customize a Device's response to an allocation request. + Callback allocation/deallocation functions are registered using the setAllocationImpl function, + usually in the initialize() function + For example, allocation property "my_alloc" can be registered with allocation function + my_alloc_fn and deallocation function my_dealloc_fn as follows: + + self.setAllocationImpl("my_alloc", self.my_alloc_fn, self.my_dealloc_fn) + + def my_alloc_fn(self, value): + # perform logic + return True # successful allocation + + def my_dealloc_fn(self, value): + # perform logic + pass + + Example: + + # This example assumes that the device has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the device + # base class. 
+ # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + start_device(start_event_device_i) + diff --git a/redhawk/src/testing/sdr/dev/devices/start_event_device/python/start_event_device_base.py b/redhawk/src/testing/sdr/dev/devices/start_event_device/python/start_event_device_base.py new file mode 100644 index 000000000..4eacfbbec --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/start_event_device/python/start_event_device_base.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +# +# AUTO-GENERATED CODE. DO NOT MODIFY! +# +# Source: start_event_device.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.device import Device +from ossie.threadedcomponent import * +from ossie.properties import simple_property +from ossie.properties import simpleseq_property +from ossie.properties import struct_property + +import Queue, copy, time, threading +from ossie.resource import usesport, providesport +from ossie.events import MessageSupplierPort + +class start_event_device_base(CF__POA.Device, Device, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams): + Device.__init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 devices. 
This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this device + self.port_message_out = MessageSupplierPort() + + def start(self): + Device.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Device.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Device.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + port_message_out = usesport(name="message_out", + repid="IDL:ExtendedEvent/MessageEvent:1.0", + type_="responses") + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file + # or by using the IDE. 
+ device_kind = simple_property(id_="DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", + name="device_kind", + type_="string", + defvalue="start_event_device", + mode="readonly", + action="eq", + kinds=("allocation",), + description="""This specifies the device kind""") + + + device_model = simple_property(id_="DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", + name="device_model", + type_="string", + mode="readonly", + action="eq", + kinds=("allocation",), + description=""" This specifies the specific device""") + + + class StateChange(object): + identifier = simple_property( + id_="state_change::identifier", + name="identifier", + type_="string") + + event = simple_property( + id_="state_change::event", + name="event", + type_="string") + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["identifier"] = self.identifier + d["event"] = self.event + return str(d) + + @classmethod + def getId(cls): + return "state_change" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("identifier",self.identifier),("event",self.event)] + + state_change = struct_property(id_="state_change", + structdef=StateChange, + configurationkind=("message",), + mode="readwrite") + + + class Failures(object): + start = simple_property( + id_="failures::start", + name="start", + type_="boolean", + defvalue=False + ) + + stop = simple_property( + id_="failures::stop", + name="stop", + type_="boolean", + defvalue=False + ) + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + 
classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["start"] = self.start + d["stop"] = self.stop + return str(d) + + @classmethod + def getId(cls): + return "failures" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("start",self.start),("stop",self.stop)] + + failures = struct_property(id_="failures", + structdef=Failures, + configurationkind=("property",), + mode="readwrite") + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.prf.xml b/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.prf.xml new file mode 100644 index 000000000..93bfce6fe --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.prf.xml @@ -0,0 +1,48 @@ + + + + + + This specifies the device kind + start_event_device + + + + + This specifies the specific device + + + + + + + + + + + false + + + false + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.scd.xml b/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.scd.xml new file mode 100644 index 000000000..17ac03dcf --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.scd.xml @@ -0,0 +1,78 @@ + + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.spd.xml b/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.spd.xml new file mode 100644 index 000000000..680480a08 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/start_event_device/start_event_device.spd.xml @@ -0,0 +1,44 @@ + + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software 
resource. + + + python/start_event_device.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/test_collocation_device/python/test_collocation_device.py b/redhawk/src/testing/sdr/dev/devices/test_collocation_device/python/test_collocation_device.py index 2ef1841c7..0620562c0 100755 --- a/redhawk/src/testing/sdr/dev/devices/test_collocation_device/python/test_collocation_device.py +++ b/redhawk/src/testing/sdr/dev/devices/test_collocation_device/python/test_collocation_device.py @@ -79,6 +79,12 @@ def deallocateCapacity(self, properties): self._log.exception("Error sending properties event") + def allocate_redhawk__reservation_request(self, value): + return True + + def deallocate_redhawk__reservation_request(self, value): + pass + def allocate_additional_supported_components(self, value): return True diff --git a/redhawk/src/testing/sdr/dev/devices/test_collocation_device/python/test_collocation_device_base.py b/redhawk/src/testing/sdr/dev/devices/test_collocation_device/python/test_collocation_device_base.py index 35626abbd..947cece38 100644 --- a/redhawk/src/testing/sdr/dev/devices/test_collocation_device/python/test_collocation_device_base.py +++ b/redhawk/src/testing/sdr/dev/devices/test_collocation_device/python/test_collocation_device_base.py @@ -32,6 +32,8 @@ from ossie.device import ExecutableDevice from ossie.properties import simple_property +from ossie.properties import simpleseq_property +from ossie.properties import struct_property from ossie.events import PropertyEventSupplier @@ -138,6 +140,57 @@ def releaseObject(self): # # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file # or by using the IDE. 
+ class RedhawkReservationRequest(object): + redhawk__reservation_request__obj_id = simple_property( + id_="redhawk::reservation_request::obj_id", + + type_="string") + + redhawk__reservation_request__kinds = simpleseq_property( + id_="redhawk::reservation_request::kinds", + + type_="string", + defvalue=[] + ) + + redhawk__reservation_request__values = simpleseq_property( + id_="redhawk::reservation_request::values", + + type_="string", + defvalue=[] + ) + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["redhawk__reservation_request__obj_id"] = self.redhawk__reservation_request__obj_id + d["redhawk__reservation_request__kinds"] = self.redhawk__reservation_request__kinds + d["redhawk__reservation_request__values"] = self.redhawk__reservation_request__values + return str(d) + + @classmethod + def getId(cls): + return "redhawk::reservation_request" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("redhawk__reservation_request__obj_id",self.redhawk__reservation_request__obj_id),("redhawk__reservation_request__kinds",self.redhawk__reservation_request__kinds),("redhawk__reservation_request__values",self.redhawk__reservation_request__values)] + + redhawk__reservation_request = struct_property(id_="redhawk::reservation_request", + structdef=RedhawkReservationRequest, + configurationkind=("allocation",), + mode="readwrite") device_kind = simple_property(id_="DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", name="device_kind", type_="string", diff --git a/redhawk/src/testing/sdr/dev/devices/test_collocation_device/test_collocation_device.prf.xml 
b/redhawk/src/testing/sdr/dev/devices/test_collocation_device/test_collocation_device.prf.xml index 5fe58a758..74b5f595d 100644 --- a/redhawk/src/testing/sdr/dev/devices/test_collocation_device/test_collocation_device.prf.xml +++ b/redhawk/src/testing/sdr/dev/devices/test_collocation_device/test_collocation_device.prf.xml @@ -56,4 +56,10 @@ with this program. If not, see http://www.gnu.org/licenses/. + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/ticket_1502/ticket_1502.spec b/redhawk/src/testing/sdr/dev/devices/ticket_1502/ticket_1502.spec deleted file mode 100644 index e58201f7f..000000000 --- a/redhawk/src/testing/sdr/dev/devices/ticket_1502/ticket_1502.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ticket_1502 -Version: 1.0.0 -Release: 1%{?dist} -Summary: Device %{name} - -Group: REDHAWK/Devices -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.9 -Requires: redhawk >= 1.9 - -BuildArch: noarch - -%description -Device %{name} - - -%prep -%setup -q - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dev/devices/ticket_1502/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dev/devices/ticket_1502/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dev/devices/%{name} -%{_prefix}/dev/devices/%{name}/ticket_1502.scd.xml -%{_prefix}/dev/devices/%{name}/ticket_1502.prf.xml -%{_prefix}/dev/devices/%{name}/ticket_1502.spd.xml -%{_prefix}/dev/devices/%{name}/python - diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/Makefile.am new file mode 100644 index 000000000..6ca410e13 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/Makefile.am @@ -0,0 +1,26 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. 
+# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +CFDIR = $(top_srcdir)/base + +noinst_PROGRAMS = writeonly_cpp + +writeonly_cpp_SOURCES = writeonly_cpp.cpp writeonly_cpp.h writeonly_cpp_base.cpp writeonly_cpp_base.h main.cpp +writeonly_cpp_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) -I$(CFDIR)/include -I$(CFDIR)/include/ossie +writeonly_cpp_LDADD = ../../../../dom/deps/cpp_dep1/cpp/libcpp_dep1.la $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_THREAD_LIB) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/main.cpp b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/main.cpp new file mode 100644 index 000000000..a4a46eb66 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/main.cpp @@ -0,0 +1,26 @@ +#include +#include "ossie/ossieSupport.h" + +#include "writeonly_cpp.h" + +writeonly_cpp_i *devicePtr; + +void signal_catcher(int sig) +{ + // IMPORTANT Don't call exit(...) 
in this function + // issue all CORBA calls that you need for cleanup here before calling ORB shutdown + if (devicePtr) { + devicePtr->halt(); + } +} +int main(int argc, char* argv[]) +{ + struct sigaction sa; + sa.sa_handler = signal_catcher; + sa.sa_flags = 0; + devicePtr = 0; + + Device_impl::start_device(&devicePtr, sa, argc, argv); + return 0; +} + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/struct_props.h b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/struct_props.h new file mode 100644 index 000000000..e310c54d7 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/struct_props.h @@ -0,0 +1,101 @@ +#ifndef STRUCTPROPS_H +#define STRUCTPROPS_H + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + +*******************************************************************************************/ + +#include +#include +#include + +struct foo_struct_struct { + foo_struct_struct () + { + abc = "def"; + } + + static std::string getId() { + return std::string("foo_struct"); + } + + static const char* getFormat() { + return "s"; + } + + std::string abc; +}; + +inline bool operator>>= (const CORBA::Any& a, foo_struct_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("abc")) { + if (!(props["abc"] >>= s.abc)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const foo_struct_struct& s) { + redhawk::PropertyMap props; + + props["abc"] = s.abc; + a <<= props; +} + +inline bool operator== (const foo_struct_struct& s1, const foo_struct_struct& s2) { + if (s1.abc!=s2.abc) + return false; + return true; +} + +inline bool operator!= (const foo_struct_struct& s1, const foo_struct_struct& s2) { + return !(s1==s2); +} + +struct ghi_struct { + ghi_struct () + { + } + + static std::string getId() { + return 
std::string("ghi"); + } + + static const char* getFormat() { + return "s"; + } + + std::string jkl; +}; + +inline bool operator>>= (const CORBA::Any& a, ghi_struct& s) { + CF::Properties* temp; + if (!(a >>= temp)) return false; + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("jkl")) { + if (!(props["jkl"] >>= s.jkl)) return false; + } + return true; +} + +inline void operator<<= (CORBA::Any& a, const ghi_struct& s) { + redhawk::PropertyMap props; + + props["jkl"] = s.jkl; + a <<= props; +} + +inline bool operator== (const ghi_struct& s1, const ghi_struct& s2) { + if (s1.jkl!=s2.jkl) + return false; + return true; +} + +inline bool operator!= (const ghi_struct& s1, const ghi_struct& s2) { + return !(s1==s2); +} + +#endif // STRUCTPROPS_H diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp.cpp b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp.cpp new file mode 100644 index 000000000..0a8b4829c --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp.cpp @@ -0,0 +1,301 @@ +/************************************************************************** + + This is the device code. This file contains the child class where + custom functionality can be added to the device. Custom + functionality to the base class can be extended here. 
Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "writeonly_cpp.h" + +PREPARE_LOGGING(writeonly_cpp_i) + +writeonly_cpp_i::writeonly_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : + writeonly_cpp_base(devMgr_ior, id, lbl, sftwrPrfl) +{ +} + +writeonly_cpp_i::writeonly_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : + writeonly_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, compDev) +{ +} + +writeonly_cpp_i::writeonly_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : + writeonly_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, capacities) +{ +} + +writeonly_cpp_i::writeonly_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : + writeonly_cpp_base(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev) +{ +} + +writeonly_cpp_i::~writeonly_cpp_i() +{ +} + +void writeonly_cpp_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/************************************************************************** + + This is called automatically after allocateCapacity or deallocateCapacity are called. 
+ Your implementation should determine the current state of the device: + + setUsageState(CF::Device::IDLE); // not in use + setUsageState(CF::Device::ACTIVE); // in use, with capacity remaining for allocation + setUsageState(CF::Device::BUSY); // in use, with no capacity remaining for allocation + +**************************************************************************/ +void writeonly_cpp_i::updateUsageState() +{ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. 
+ + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. + + Example: + // This example assumes that the device has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the device base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < 
inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the device developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void writeonly_cpp_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &writeonly_cpp_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. 
+ + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Device Manager: + CF::DeviceManager_ptr devmgr = this->getDeviceManager()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (writeonly_cpp_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &writeonly_cpp_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). 
The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to writeonly_cpp.cpp + writeonly_cpp_i::writeonly_cpp_i(const char *uuid, const char *label) : + writeonly_cpp_base(uuid, label) + { + addPropertyListener(scaleValue, this, &writeonly_cpp_i::scaleChanged); + addPropertyListener(status, this, &writeonly_cpp_i::statusChanged); + } + + void writeonly_cpp_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(writeonly_cpp_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void writeonly_cpp_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(writeonly_cpp_i, "status changed"); + } + + //Add to writeonly_cpp.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + Allocation: + + Allocation callbacks are available to customize the Device's response to + allocation requests. For example, if the Device contains the allocation + property "my_alloc" of type string, the allocation and deallocation + callbacks follow the pattern (with arbitrary function names + my_alloc_fn and my_dealloc_fn): + + bool writeonly_cpp_i::my_alloc_fn(const std::string &value) + { + // perform logic + return true; // successful allocation + } + void writeonly_cpp_i::my_dealloc_fn(const std::string &value) + { + // perform logic + } + + The allocation and deallocation functions are then registered with the Device + base class with the setAllocationImpl call. 
Note that the variable for the property is used rather + than its id: + + this->setAllocationImpl(my_alloc, this, &writeonly_cpp_i::my_alloc_fn, &writeonly_cpp_i::my_dealloc_fn); + + + +************************************************************************************************/ +int writeonly_cpp_i::serviceFunction() +{ + LOG_DEBUG(writeonly_cpp_i, "serviceFunction() example log message"); + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp.h b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp.h new file mode 100644 index 000000000..77ad9ec7f --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp.h @@ -0,0 +1,24 @@ +#ifndef WRITEONLY_CPP_I_IMPL_H +#define WRITEONLY_CPP_I_IMPL_H + +#include "writeonly_cpp_base.h" + +class writeonly_cpp_i : public writeonly_cpp_base +{ + ENABLE_LOGGING + public: + writeonly_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); + writeonly_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); + writeonly_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities); + writeonly_cpp_i(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev); + ~writeonly_cpp_i(); + + void constructor(); + + int serviceFunction(); + + protected: + void updateUsageState(); +}; + +#endif // WRITEONLY_CPP_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp_base.cpp b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp_base.cpp new file mode 100644 index 000000000..1dbaa770e --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp_base.cpp @@ -0,0 +1,145 @@ +#include "writeonly_cpp_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. 
DO NOT MODIFY + + The following class functions are for the base class for the device class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +writeonly_cpp_base::writeonly_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl), + ThreadedComponent() +{ + construct(); +} + +writeonly_cpp_base::writeonly_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, compDev), + ThreadedComponent() +{ + construct(); +} + +writeonly_cpp_base::writeonly_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities), + ThreadedComponent() +{ + construct(); +} + +writeonly_cpp_base::writeonly_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev) : + Device_impl(devMgr_ior, id, lbl, sftwrPrfl, capacities, compDev), + ThreadedComponent() +{ + construct(); +} + +writeonly_cpp_base::~writeonly_cpp_base() +{ +} + +void writeonly_cpp_base::construct() +{ + loadProperties(); + +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. 
+*******************************************************************************************/ +void writeonly_cpp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Device_impl::start(); + ThreadedComponent::startThread(); +} + +void writeonly_cpp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Device_impl::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void writeonly_cpp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the device running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Device_impl::releaseObject(); +} + +void writeonly_cpp_base::loadProperties() +{ + addProperty(device_kind, + "DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", + "device_kind", + "readonly", + "", + "eq", + "allocation"); + + addProperty(device_model, + "DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", + "device_model", + "readonly", + "", + "eq", + "allocation"); + + addProperty(foo, + "something", + "foo", + "", + "writeonly", + "", + "external", + "allocation"); + + // Set the sequence with its initial values + foo_seq.push_back("abc"); + addProperty(foo_seq, + foo_seq, + "foo_seq", + "", + "writeonly", + "", + "external", + "allocation"); + + addProperty(foo_struct, + foo_struct_struct(), + "foo_struct", + "", + "writeonly", + "", + "external", + "allocation"); + + addProperty(foo_struct_seq, + foo_struct_seq, + "foo_struct_seq", + "", + "writeonly", + "", + "external", + "allocation"); + + { + ghi_struct __tmp; + __tmp.jkl = "mno"; + foo_struct_seq.push_back(__tmp); + } + +} + + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp_base.h b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp_base.h new file mode 100644 index 
000000000..19b0c5337 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/cpp/writeonly_cpp_base.h @@ -0,0 +1,45 @@ +#ifndef WRITEONLY_CPP_BASE_IMPL_BASE_H +#define WRITEONLY_CPP_BASE_IMPL_BASE_H + +#include +#include +#include + +#include "struct_props.h" + +class writeonly_cpp_base : public Device_impl, protected ThreadedComponent +{ + public: + writeonly_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl); + writeonly_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, char *compDev); + writeonly_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities); + writeonly_cpp_base(char *devMgr_ior, char *id, char *lbl, char *sftwrPrfl, CF::Properties capacities, char *compDev); + ~writeonly_cpp_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + // Member variables exposed as properties + /// Property: device_kind + std::string device_kind; + /// Property: device_model + std::string device_model; + /// Property: foo + std::string foo; + /// Property: foo_seq + std::vector foo_seq; + /// Property: foo_struct + foo_struct_struct foo_struct; + /// Property: foo_struct_seq + std::vector foo_struct_seq; + + private: + void construct(); +}; +#endif // WRITEONLY_CPP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.prf.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.prf.xml new file mode 100644 index 000000000..5f5f67f3f --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.prf.xml @@ -0,0 +1,42 @@ + + + + + This specifies the device kind + + + + + This specifies the specific device + + + + + something + + + + + + def + + + + + + abc + + + + + + + + + + + + + + + diff --git 
a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.scd.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.scd.xml new file mode 100644 index 000000000..125128ef1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.scd.xml @@ -0,0 +1,49 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.spd.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.spd.xml new file mode 100644 index 000000000..428633f94 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_cpp/writeonly_cpp.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/writeonly_cpp + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/Makefile.am b/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/Makefile.am new file mode 100644 index 000000000..ff44ad4ea --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/Makefile.am @@ -0,0 +1,29 @@ +writeonly_java.jar$(EXEEXT): $(writeonly_java_jar_SOURCES) + mkdir -p bin + $(JAVAC) -cp $(OSSIE_CLASSPATH):../../../../dom/deps/java_dep1/java/java_dep1.jar -d bin $(writeonly_java_jar_SOURCES) + $(JAR) cf ./writeonly_java.jar -C bin . 
+ +clean-local: + rm -rf bin + +distclean-local: + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + +ossieName = writeonly_java +bindir = $(prefix)/dev/devices/writeonly_java/java/ +noinst_PROGRAMS = writeonly_java.jar +writeonly_java_jar_SOURCES := $(shell find ./src -name "*.java") + +xmldir = $(prefix)/dev/devices/writeonly_java/ + +devdir = $(prefix)/dev/devices/writeonly_java/java/ diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/src/writeonly_java/java/writeonly_java.java b/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/src/writeonly_java/java/writeonly_java.java new file mode 100644 index 000000000..fabbe90c0 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/src/writeonly_java/java/writeonly_java.java @@ -0,0 +1,292 @@ +package writeonly_java.java; + +import java.util.Properties; + +/** + * This is the device code. This file contains the derived class where custom + * functionality can be added to the device. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general device housekeeping + * + * Source: writeonly_java.spd.xml + */ +public class writeonly_java extends writeonly_java_base { + /** + * This is the device constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your device. + * + * A device may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). 
More + * than one listener can be connected to a property. + * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * device class. + * + * Devices may contain allocation properties with "external" action, which + * are used in capacity allocation and deallocation. In order to support + * this capability, allocation properties require additional functionality. + * This is implemented by calling setAllocator() on the property instance + * with an object that implements the Allocator interface for that data type. 
+ * + * Example: + * // This example makes use of the following properties + * // - A struct property called tuner_alloc + * // The following methods are defined elsewhere in your class: + * // - private boolean allocate_tuner(tuner_alloc_struct capacity) + * // - private void deallocate_tuner(tuner_alloc_struct capacity) + * // The file must import "org.ossie.properties.Allocator" + * + * this.tuner_alloc.setAllocator(new Allocator() { + * public boolean allocate(tuner_alloc_struct capacity) { + * return allocate_tuner(capacity); + * } + * public void deallocate(tuner_alloc_struct capacity) { + * deallocate_tuner(capacity); + * } + * }); + * + * The recommended practice is for the allocate() and deallocate() methods + * to contain only glue code to dispatch the call to private methods on the + * device class. + * Accessing the Device Manager and Domain Manager: + * + * Both the Device Manager hosting this Device and the Domain Manager hosting + * the Device Manager are available to the Device. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Device Manager: + * CF.DeviceManager devmgr = this.getDeviceManager().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. + */ + + public writeonly_java() + { + super(); + } + + public void constructor() + { + } + + /************************************************************************** + + This is called automatically after allocateCapacity or deallocateCapacity are called. 
+ Your implementation should determine the current state of the device: + + setUsageState(CF.DevicePackage.UsageType.IDLE); // not in use + setUsageState(CF.DevicePackage.UsageType.ACTIVE); // in use, with capacity remaining for allocation + setUsageState(CF.DevicePackage.UsageType.BUSY); // in use, with no capacity remaining for allocation + + ***************************************************************************/ + protected void updateUsageState() + { + } + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the device's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. 
If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the device developer. + * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the device has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the device + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + logger.debug("serviceFunction() example log message"); + + return NOOP; + } + + /** + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/src/writeonly_java/java/writeonly_java_base.java b/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/src/writeonly_java/java/writeonly_java_base.java new file mode 100644 index 000000000..1a88298f7 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/src/writeonly_java/java/writeonly_java_base.java @@ -0,0 +1,280 @@ +package writeonly_java.java; + + +import java.util.Properties; + +import org.apache.log4j.Logger; + +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.DevicePackage.AdminType; +import CF.DevicePackage.OperationalType; +import CF.DevicePackage.UsageType; +import CF.InvalidObjectReference; + +import org.ossie.component.*; +import org.ossie.properties.*; + + +/** + * This is the device code. 
This file contains all the access points + * you need to use to be able to access all input and output ports, + * respond to incoming data, and perform general device housekeeping + * + * Source: writeonly_java.spd.xml + * + * @generated + */ + +public abstract class writeonly_java_base extends ThreadedDevice { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(writeonly_java_base.class.getName()); + + /** + * The property DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d + * This specifies the device kind + * + * @generated + */ + public final StringProperty device_kind = + new StringProperty( + "DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", //id + "device_kind", //name + null, //default value + Mode.READONLY, //mode + Action.EQ, //action + new Kind[] {Kind.ALLOCATION} + ); + + /** + * The property DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb + * This specifies the specific device + * + * @generated + */ + public final StringProperty device_model = + new StringProperty( + "DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", //id + "device_model", //name + null, //default value + Mode.READONLY, //mode + Action.EQ, //action + new Kind[] {Kind.ALLOCATION} + ); + + /** + * The property foo + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final StringProperty foo = + new StringProperty( + "foo", //id + null, //name + "something", //default value + Mode.WRITEONLY, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.ALLOCATION} + ); + + /** + * The property foo_seq + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + public final StringSequenceProperty foo_seq = + new StringSequenceProperty( + "foo_seq", //id + null, //name + StringSequenceProperty.asList("abc"), //default value + Mode.WRITEONLY, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.ALLOCATION} + ); + + /** + * The property foo_struct + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + /** + * The structure for property foo_struct + * + * @generated + */ + public static class foo_struct_struct extends StructDef { + public final StringProperty abc = + new StringProperty( + "abc", //id + null, //name + "def", //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + + /** + * @generated + */ + public foo_struct_struct(String abc) { + this(); + this.abc.setValue(abc); + } + + /** + * @generated + */ + public void set_abc(String abc) { + this.abc.setValue(abc); + } + public String get_abc() { + return this.abc.getValue(); + } + + /** + * @generated + */ + public foo_struct_struct() { + addElement(this.abc); + } + + public String getId() { + return "foo_struct"; + } + }; + + public final StructProperty foo_struct = + new StructProperty( + "foo_struct", //id + null, //name + foo_struct_struct.class, //type + new foo_struct_struct(), //default value + Mode.WRITEONLY, //mode + new Kind[] {Kind.ALLOCATION} //kind + ); + + /** + * The property foo_struct_seq + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + /** + * The structure for property ghi + * + * @generated + */ + public static class ghi_struct extends StructDef { + public final StringProperty jkl = + new StringProperty( + "jkl", //id + null, //name + null, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + + /** + * @generated + */ + public ghi_struct(String jkl) { + this(); + this.jkl.setValue(jkl); + } + + /** + * @generated + */ + public void set_jkl(String jkl) { + this.jkl.setValue(jkl); + } + public String get_jkl() { + return this.jkl.getValue(); + } + + /** + * @generated + */ + public ghi_struct() { + addElement(this.jkl); + } + + public String getId() { + return "ghi"; + } + }; + + public final StructSequenceProperty foo_struct_seq = + new StructSequenceProperty ( + "foo_struct_seq", //id + null, //name + ghi_struct.class, //type + StructSequenceProperty.asList( + new ghi_struct("mno") + ), //defaultValue + Mode.WRITEONLY, //mode + new Kind[] { Kind.ALLOCATION } //kind + ); + + /** + * @generated + */ + public writeonly_java_base() + { + super(); + + setLogger( logger, writeonly_java_base.class.getName() ); + + + // Properties + addProperty(device_kind); + + addProperty(device_model); + + addProperty(foo); + + addProperty(foo_seq); + + addProperty(foo_struct); + + addProperty(foo_struct_seq); + + } + + + + /** + * The main function of your device. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. 
+ * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + writeonly_java.configureOrb(orbProps); + + try { + ThreadedDevice.start_device(writeonly_java.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/startJava.sh b/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/startJava.sh new file mode 100755 index 000000000..1dfbdf015 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_java/java/startJava.sh @@ -0,0 +1,28 @@ +#!/bin/sh +myDir=`dirname $0` + +# Setup the OSSIEHOME Lib jars on the classpath +libDir=$OSSIEHOME/lib +libFiles=`ls -1 $libDir/*.jar` +for file in $libFiles +do + if [ x"$CLASSPATH" = "x" ] + then + export CLASSPATH=$file + else + export CLASSPATH=$file:$CLASSPATH + fi +done + +# Path for Java +if test -x $JAVA_HOME/bin/java; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=java +fi + +# NOTE: the $@ must be quoted "$@" for arguments to be passed correctly + +#Sun ORB start line +exec $JAVA -cp :$myDir/writeonly_java.jar:$myDir/bin:$CLASSPATH writeonly_java.java.writeonly_java "$@" + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.prf.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.prf.xml new file mode 100644 index 000000000..81f867d16 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.prf.xml @@ -0,0 +1,42 @@ + + + + + This specifies the device kind + + + + + This specifies the specific device + + + + + something + + + + + + abc + + + + + + + def + + + + + + + + + + + + + + diff --git 
a/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.scd.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.scd.xml new file mode 100644 index 000000000..125128ef1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.scd.xml @@ -0,0 +1,49 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.spd.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.spd.xml new file mode 100644 index 000000000..41483d1f1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_java/writeonly_java.spd.xml @@ -0,0 +1,26 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_py/python/writeonly_py.py b/redhawk/src/testing/sdr/dev/devices/writeonly_py/python/writeonly_py.py new file mode 100755 index 000000000..71416ebf8 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_py/python/writeonly_py.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: writeonly_py.spd.xml +from ossie.device import start_device +import logging + +from writeonly_py_base import * + +class writeonly_py_i(writeonly_py_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your device registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. 
+ + def updateUsageState(self): + """ + This is called automatically after allocateCapacity or deallocateCapacity are called. + Your implementation should determine the current state of the device: + self._usageState = CF.Device.IDLE # not in use + self._usageState = CF.Device.ACTIVE # in use, with capacity remaining for allocation + self._usageState = CF.Device.BUSY # in use, with no capacity remaining for allocation + """ + return NOOP + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the device. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. 
+ + Interactions with non-BULKIO ports are left up to the device developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", writeonly_py_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = writeonly_py_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Application: + app = self.getApplication().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + Allocation: + + Allocation callbacks are available to customize a Device's response to an allocation request. + Callback allocation/deallocation functions are registered using the setAllocationImpl function, + usually in the initialize() function + For example, allocation property "my_alloc" can be registered with allocation function + my_alloc_fn and deallocation function my_dealloc_fn as follows: + + self.setAllocationImpl("my_alloc", self.my_alloc_fn, self.my_dealloc_fn) + + def my_alloc_fn(self, value): + # perform logic + return True # successful allocation + + def my_dealloc_fn(self, value): + # perform logic + pass + + Example: + + # This example assumes that the device has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the device + # base class. 
+ # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Device") + start_device(writeonly_py_i) + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_py/python/writeonly_py_base.py b/redhawk/src/testing/sdr/dev/devices/writeonly_py/python/writeonly_py_base.py new file mode 100644 index 000000000..1ee6f1fd6 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_py/python/writeonly_py_base.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: writeonly_py.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.device import Device +from ossie.threadedcomponent import * +from ossie.properties import simple_property +from ossie.properties import simpleseq_property +from ossie.properties import struct_property +from ossie.properties import structseq_property + +import Queue, copy, time, threading + +class writeonly_py_base(CF__POA.Device, Device, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams): + Device.__init__(self, devmgr, uuid, label, softwareProfile, compositeDevice, execparams) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 devices. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this device + + def start(self): + Device.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Device.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Device.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. 
+ + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + device_kind = simple_property(id_="DCE:cdc5ee18-7ceb-4ae6-bf4c-31f983179b4d", + name="device_kind", + type_="string", + mode="readonly", + action="eq", + kinds=("allocation",), + description="""This specifies the device kind""") + + + device_model = simple_property(id_="DCE:0f99b2e4-9903-4631-9846-ff349d18ecfb", + name="device_model", + type_="string", + mode="readonly", + action="eq", + kinds=("allocation",), + description=""" This specifies the specific device""") + + + foo = simple_property(id_="foo", + type_="string", + defvalue="something", + mode="writeonly", + action="external", + kinds=("allocation",)) + + + foo_seq = simpleseq_property(id_="foo_seq", + type_="string", + defvalue=["abc" ], + mode="writeonly", + action="external", + kinds=("allocation",)) + + + class FooStruct(object): + abc = simple_property( + id_="abc", + + type_="string", + defvalue="def" + ) + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["abc"] = self.abc + return str(d) + + @classmethod + def getId(cls): + return "foo_struct" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("abc",self.abc)] + + foo_struct = struct_property(id_="foo_struct", + structdef=FooStruct, + configurationkind=("allocation",), + mode="writeonly") + + + class Ghi(object): + jkl = simple_property( + id_="jkl", + + type_="string") + + def __init__(self, jkl=""): + self.jkl = jkl + + def __str__(self): + """Return a string 
representation of this structure""" + d = {} + d["jkl"] = self.jkl + return str(d) + + @classmethod + def getId(cls): + return "ghi" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("jkl",self.jkl)] + + foo_struct_seq = structseq_property(id_="foo_struct_seq", + structdef=Ghi, + defvalue=[Ghi(jkl="mno")], + configurationkind=("allocation",), + mode="writeonly") + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.prf.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.prf.xml new file mode 100644 index 000000000..14023a96d --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.prf.xml @@ -0,0 +1,41 @@ + + + + + This specifies the device kind + + + + + This specifies the specific device + + + + + something + + + + + + def + + + + + + abc + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.scd.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.scd.xml new file mode 100644 index 000000000..125128ef1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.scd.xml @@ -0,0 +1,49 @@ + + + + 2.2 + + device + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.spd.xml b/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.spd.xml new file mode 100644 index 000000000..e6677646d --- /dev/null +++ b/redhawk/src/testing/sdr/dev/devices/writeonly_py/writeonly_py.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/writeonly_py.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/DuplicateService_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/DuplicateService_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..2d5b9e001 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/DuplicateService_node/DeviceManager.dcd.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + BasicService1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/PersonaNode/PersonaNode.spec b/redhawk/src/testing/sdr/dev/nodes/PersonaNode/PersonaNode.spec deleted file mode 100644 index 179332587..000000000 --- a/redhawk/src/testing/sdr/dev/nodes/PersonaNode/PersonaNode.spec +++ /dev/null @@ -1,56 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# RPM package for PersonaNode -# This file is regularly AUTO-GENERATED by the IDE. DO NOT MODIFY. 
- -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -Name: PersonaNode -Summary: Node PersonaNode -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Nodes -Source: %{name}-%{version}.tar.gz -# Require the device manager whose SPD is referenced -Requires: DeviceManager -# Require each referenced device/service -Requires: PersonaDevice ProgrammableDevice -BuildArch: noarch -BuildRoot: %{_tmppath}/%{name}-%{version} - -%description - -%prep -%setup - -%install -%__rm -rf $RPM_BUILD_ROOT -%__mkdir_p "$RPM_BUILD_ROOT%{_prefix}/dev/nodes/%{name}" -%__install -m 644 DeviceManager.dcd.xml $RPM_BUILD_ROOT%{_prefix}/dev/nodes/%{name}/DeviceManager.dcd.xml - -%files -%defattr(-,redhawk,redhawk) -%dir %{_prefix}/dev/nodes/%{name} -%{_prefix}/dev/nodes/%{name}/DeviceManager.dcd.xml diff --git a/redhawk/src/testing/sdr/dev/nodes/bad_init_device_node/bad_init_device_node.spec b/redhawk/src/testing/sdr/dev/nodes/bad_init_device_node/bad_init_device_node.spec deleted file mode 100644 index 8dd35d883..000000000 --- a/redhawk/src/testing/sdr/dev/nodes/bad_init_device_node/bad_init_device_node.spec +++ /dev/null @@ -1,56 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. 
-# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# RPM package for bad_init_device_node -# This file is regularly AUTO-GENERATED by the IDE. DO NOT MODIFY. - -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -Name: bad_init_device_node -Summary: Node bad_init_device_node -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Nodes -Source: %{name}-%{version}.tar.gz -# Require the device manager whose SPD is referenced -Requires: DeviceManager -# Require each referenced device/service -Requires: bad_init_device -BuildArch: noarch -BuildRoot: %{_tmppath}/%{name}-%{version} - -%description - -%prep -%setup - -%install -%__rm -rf $RPM_BUILD_ROOT -%__mkdir_p "$RPM_BUILD_ROOT%{_prefix}/dev/nodes/%{name}" -%__install -m 644 DeviceManager.dcd.xml $RPM_BUILD_ROOT%{_prefix}/dev/nodes/%{name}/DeviceManager.dcd.xml - -%files -%defattr(-,redhawk,redhawk) -%dir %{_prefix}/dev/nodes/%{name} -%{_prefix}/dev/nodes/%{name}/DeviceManager.dcd.xml diff --git a/redhawk/src/testing/sdr/dev/nodes/dev_alloc_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/dev_alloc_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..ed242e778 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/dev_alloc_node/DeviceManager.dcd.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + dev_alloc_cpp_1 + + + + + + + diff --git 
a/redhawk/src/testing/sdr/dev/nodes/dev_props_bad_numbers_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/dev_props_bad_numbers_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..3caf4e5b6 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/dev_props_bad_numbers_node/DeviceManager.dcd.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + dev_props_bad_numbers_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/log_test_cpp_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/log_test_cpp_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..97f06729b --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/log_test_cpp_node/DeviceManager.dcd.xml @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + log_test_cpp_1 + + + + + + log_test_cpp_2 + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/log_test_cpp_override_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/log_test_cpp_override_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..87843fa0f --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/log_test_cpp_override_node/DeviceManager.dcd.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + log_test_cpp_1 + + + + + + log_test_cpp_2 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/log_test_java_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/log_test_java_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..480d1c51b --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/log_test_java_node/DeviceManager.dcd.xml @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + log_test_java_1 + + + + + + log_test_java_2 + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/log_test_py_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/log_test_py_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..39c184dff --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/log_test_py_node/DeviceManager.dcd.xml @@ -0,0 +1,29 
@@ + + + + + + + + + + + + + + + + log_test_py_1 + + + + + + log_test_py_2 + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/loggingconfig/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/loggingconfig/DeviceManager.dcd.xml index 541424279..0d80724ea 100644 --- a/redhawk/src/testing/sdr/dev/nodes/loggingconfig/DeviceManager.dcd.xml +++ b/redhawk/src/testing/sdr/dev/nodes/loggingconfig/DeviceManager.dcd.xml @@ -1,21 +1,21 @@ @@ -25,7 +25,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + @@ -34,7 +34,8 @@ with this program. If not, see http://www.gnu.org/licenses/. - BasicTestDevice1 + BasicTestDevice1 + sca:///logcfg/log.basic.props diff --git a/redhawk/src/testing/sdr/dev/nodes/startorder_events/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/startorder_events/DeviceManager.dcd.xml new file mode 100644 index 000000000..6996653bc --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/startorder_events/DeviceManager.dcd.xml @@ -0,0 +1,115 @@ + + + + + + + + + + + + + + + + + + + + start_event_device_1 + + + + + + no_start_event_device_2 + + + + + + start_event_service_1 + + + + + + start_event_device_3 + + + + + + + message_out + + + + IDL:ExtendedEvent/MessageEvent:1.0 + + + + + + + + message_out + + + + IDL:ExtendedEvent/MessageEvent:1.0 + + + + + + + + message_out + + + + IDL:ExtendedEvent/MessageEvent:1.0 + + + + + + + + message_out + + + + + + IDL:ExtendedEvent/MessageEvent:1.0 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/startorder_fail/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/startorder_fail/DeviceManager.dcd.xml new file mode 100644 index 000000000..5b018bbde --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/startorder_fail/DeviceManager.dcd.xml @@ -0,0 +1,98 @@ + + + + + + + + + + + + + + + + + start_event_device_1 + + + + + + fail_device_1 + + + + + + + + + + + + start_event_device_2 + + + + + + + message_out + + + + IDL:ExtendedEvent/MessageEvent:1.0 + + + + + + + + 
message_out + + + + IDL:ExtendedEvent/MessageEvent:1.0 + + + + + + + + message_out + + + + IDL:ExtendedEvent/MessageEvent:1.0 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/test_GPP_green/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/test_GPP_green/DeviceManager.dcd.xml new file mode 100644 index 000000000..c59cae8d1 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/test_GPP_green/DeviceManager.dcd.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + test_GPP_green::GPP_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.cache_working_dir b/redhawk/src/testing/sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.cache_working_dir new file mode 100644 index 000000000..e905e3c13 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.cache_working_dir @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + GPP_1 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.working_dir b/redhawk/src/testing/sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.working_dir new file mode 100644 index 000000000..ffdf9a1b0 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.working_dir @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + GPP_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/test_GPP_red/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/test_GPP_red/DeviceManager.dcd.xml new file mode 100644 index 000000000..7c871e729 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/test_GPP_red/DeviceManager.dcd.xml @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + test_GPP_red::GPP_1 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/test_NicAllocation_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/test_NicAllocation_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..04cef2bfb --- /dev/null +++ 
b/redhawk/src/testing/sdr/dev/nodes/test_NicAllocation_node/DeviceManager.dcd.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + NicExecDevice_1 + + + + eth0 + eth1 + eth2 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/nodes/test_service_startup_node/DeviceManager.dcd.xml b/redhawk/src/testing/sdr/dev/nodes/test_service_startup_node/DeviceManager.dcd.xml new file mode 100644 index 000000000..e0336b972 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/nodes/test_service_startup_node/DeviceManager.dcd.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + + + + + S1_1 + + + + + + S2_1 + + + + + + + + + + S2_pre_1 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/BasicService/BasicService.py b/redhawk/src/testing/sdr/dev/services/BasicService/BasicService.py index 8ceacd6fd..122304fdb 100755 --- a/redhawk/src/testing/sdr/dev/services/BasicService/BasicService.py +++ b/redhawk/src/testing/sdr/dev/services/BasicService/BasicService.py @@ -53,7 +53,7 @@ def query(self, configProperties): else: result = [] for p in configProperties: - result.append(CF.DataType(id=p.id, value=any.to_any(self.parms[p.id]))) + result.append(CF.DataType(id=p.id, value=any.to_any(self.params[p.id]))) return result if __name__ == "__main__": diff --git a/redhawk/src/testing/sdr/dev/services/BasicService_cpp/BasicService_cpp.scd.xml b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/BasicService_cpp.scd.xml new file mode 100644 index 000000000..f18f8f6b3 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/BasicService_cpp.scd.xml @@ -0,0 +1,18 @@ + + + + 2.2 + + service + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/BasicService_cpp/BasicService_cpp.spd.xml b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/BasicService_cpp.spd.xml new file mode 100644 index 000000000..e4a0c7232 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/BasicService_cpp.spd.xml @@ -0,0 +1,24 @@ + + + + 
+ + null + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/BasicService_cpp + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp.cpp b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp.cpp new file mode 100644 index 000000000..da6ef962d --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp.cpp @@ -0,0 +1,102 @@ +/************************************************************************** + + This is the service code. This file contains the child class where + custom functionality can be added to the service. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "BasicService_cpp.h" + +PREPARE_LOGGING(BasicService_cpp_i) +BasicService_cpp_i::BasicService_cpp_i(char *devMgr_ior, char *name) : + BasicService_cpp_base(devMgr_ior, name) +{ +} + +BasicService_cpp_i::~BasicService_cpp_i() +{ +} + +void BasicService_cpp_i::remove(const char* fileName) +{ + // TODO: Fill in this function +} + +void BasicService_cpp_i::copy(const char* sourceFileName, const char* destinationFileName) +{ + // TODO: Fill in this function +} + +void BasicService_cpp_i::move(const char* sourceFileName, const char* destinationFileName) +{ + // TODO: Fill in this function +} + +CORBA::Boolean BasicService_cpp_i::exists(const char* fileName) +{ + CORBA::Boolean tmpVal = false; + // TODO: Fill in this function + + return tmpVal; +} + +CF::FileSystem::FileInformationSequence* BasicService_cpp_i::list(const char* pattern) +{ + CF::FileSystem::FileInformationSequence* tmpVal = new CF::FileSystem::FileInformationSequence(); + // TODO: Fill in this function + + return tmpVal; +} + +CF::File_ptr BasicService_cpp_i::create(const char* fileName) +{ + CF::File_ptr tmpVal = 
CF::File::_nil(); + // TODO: Fill in this function + + return tmpVal; +} + +CF::File_ptr BasicService_cpp_i::open(const char* fileName, CORBA::Boolean read_Only) +{ + CF::File_ptr tmpVal = CF::File::_nil(); + // TODO: Fill in this function + + return tmpVal; +} + +void BasicService_cpp_i::mkdir(const char* directoryName) +{ + // TODO: Fill in this function +} + +void BasicService_cpp_i::rmdir(const char* directoryName) +{ + // TODO: Fill in this function +} + +void BasicService_cpp_i::query(CF::Properties& fileSystemProperties) +{ + // TODO: Fill in this function +} + +void BasicService_cpp_i::mount(const char* mountPoint, CF::FileSystem_ptr file_System) +{ + // TODO: Fill in this function +} + +void BasicService_cpp_i::unmount(const char* mountPoint) +{ + // TODO: Fill in this function +} + +CF::FileManager::MountSequence* BasicService_cpp_i::getMounts() +{ + CF::FileManager::MountSequence* tmpVal = new CF::FileManager::MountSequence(); + // TODO: Fill in this function + + return tmpVal; +} + + diff --git a/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp.h b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp.h new file mode 100644 index 000000000..211623e14 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp.h @@ -0,0 +1,29 @@ +#ifndef BASICSERVICE_CPP_I_IMPL_H +#define BASICSERVICE_CPP_I_IMPL_H + +#include "BasicService_cpp_base.h" + +class BasicService_cpp_i; + +class BasicService_cpp_i : public BasicService_cpp_base +{ + ENABLE_LOGGING + public: + BasicService_cpp_i(char *devMgr_ior, char *name); + ~BasicService_cpp_i(); + void remove(const char* fileName); + void copy(const char* sourceFileName, const char* destinationFileName); + void move(const char* sourceFileName, const char* destinationFileName); + CORBA::Boolean exists(const char* fileName); + CF::FileSystem::FileInformationSequence* list(const char* pattern); + CF::File_ptr create(const char* fileName); + 
CF::File_ptr open(const char* fileName, CORBA::Boolean read_Only); + void mkdir(const char* directoryName); + void rmdir(const char* directoryName); + void query(CF::Properties& fileSystemProperties); + void mount(const char* mountPoint, CF::FileSystem_ptr file_System); + void unmount(const char* mountPoint); + CF::FileManager::MountSequence* getMounts(); +}; + +#endif // BASICSERVICE_CPP_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp_base.cpp b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp_base.cpp new file mode 100644 index 000000000..9b598e8b2 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp_base.cpp @@ -0,0 +1,34 @@ +#include "BasicService_cpp_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the service class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +BasicService_cpp_base::BasicService_cpp_base(char *devMgr_ior, char *name) : + Service_impl(devMgr_ior, name) +{ +} + +void BasicService_cpp_base::registerServiceWithDevMgr () +{ + _deviceManager->registerService(this->_this(), this->_name.c_str()); +} + +void BasicService_cpp_base::terminateService () +{ + try { + _deviceManager->unregisterService(this->_this(), this->_name.c_str()); + } catch (...) 
{ + } + + PortableServer::POA_ptr root_poa = ossie::corba::RootPOA(); + PortableServer::ObjectId_var oid = root_poa->servant_to_id(this); + root_poa->deactivate_object(oid); +} + diff --git a/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp_base.h b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp_base.h new file mode 100644 index 000000000..0594edafb --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp_base.h @@ -0,0 +1,18 @@ +#ifndef BASICSERVICE_CPP_BASE_IMPL_BASE_H +#define BASICSERVICE_CPP_BASE_IMPL_BASE_H + +#include +#include +#include + +class BasicService_cpp_base : public Service_impl, public virtual POA_CF::FileManager +{ + public: + BasicService_cpp_base(char *devMgr_ior, char *name); + + void registerServiceWithDevMgr (); + void terminateService (); + void construct (); + +}; +#endif // BASICSERVICE_CPP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/Makefile.am new file mode 100644 index 000000000..e7e490412 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/Makefile.am @@ -0,0 +1,31 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +CFDIR = $(top_srcdir)/base + +noinst_PROGRAMS = BasicService_cpp + +BasicService_cpp_SOURCES = BasicService_cpp.cpp \ +BasicService_cpp.h \ +BasicService_cpp_base.cpp \ +BasicService_cpp_base.h \ +main.cpp + +BasicService_cpp_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) -I$(CFDIR)/include -I$(CFDIR)/include/ossie +BasicService_cpp_LDADD = $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_FILESYSTEM_LIB) $(BOOST_THREAD_LIB) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la diff --git a/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/main.cpp b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/main.cpp new file mode 100644 index 000000000..d0ab6b2ac --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/BasicService_cpp/cpp/main.cpp @@ -0,0 +1,26 @@ +#include +#include "ossie/ossieSupport.h" + +#include "BasicService_cpp.h" + +BasicService_cpp_i *servicePtr; + +void signal_catcher(int sig) +{ + // IMPORTANT Don't call exit(...) 
in this function + // issue all CORBA calls that you need for cleanup here before calling ORB shutdown + if (servicePtr) { + servicePtr->halt(); + } +} + +int main(int argc, char* argv[]) +{ + struct sigaction sa; + sa.sa_handler = signal_catcher; + sa.sa_flags = 0; + servicePtr = 0; + + Service_impl::start_service(&servicePtr, sa, argc, argv); + return 0; +} diff --git a/redhawk/src/testing/sdr/dev/services/S2/S2.prf.xml b/redhawk/src/testing/sdr/dev/services/S2/S2.prf.xml new file mode 100644 index 000000000..2cc5b7c52 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/S2/S2.prf.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/S2/S2.scd.xml b/redhawk/src/testing/sdr/dev/services/S2/S2.scd.xml new file mode 100644 index 000000000..6296f0f91 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/S2/S2.scd.xml @@ -0,0 +1,18 @@ + + + + 2.2 + + service + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/S2/S2.spd.xml b/redhawk/src/testing/sdr/dev/services/S2/S2.spd.xml new file mode 100644 index 000000000..a008159f6 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/S2/S2.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software component. 
+ + + python/S2.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/S2/python/S2.py b/redhawk/src/testing/sdr/dev/services/S2/python/S2.py new file mode 100755 index 000000000..799825164 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/S2/python/S2.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED +# +# Source: S2.spd.xml + +import sys, signal, copy, os +import logging + +from ossie.cf import CF, CF__POA #@UnusedImport +from ossie.service import start_service +from omniORB import any, CORBA, URI, PortableServer + +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie import properties; + + +class S2(CF__POA.PropertyEmitter): + + def __init__(self, name="S2", execparams={}): + self.name = name + self._log = logging.getLogger(self.name) + self._props = properties.PropertyStorage(self, (), execparams) + try: + self._props._addProperty( S2.p1 ) + self._props._addProperty( S2.p2) + except KeyError, e: + pass + except Exceptiopn, e: + raise e + self._props.initialize() + + def terminateService(self): + pass + + def configure(self, configProperties): + notSet = [] + error_message = '' + for prop in configProperties: + try: + if self._props.has_id(prop.id) and self._props.isConfigurable(prop.id): + try: + self._props.configure(prop.id, prop.value) + except Exception, e: + self._log.warning("Invalid value provided to configure for property %s: %s", prop.id, e) + notSet.append(prop) + else: + self._log.warning("Tried to configure non-existent, readonly, or property with action not equal to external %s", prop.id) + notSet.append(prop) + except Exception, e: + error_message += str(e) + self._log.exception("Unexpected exception.") + notSet.append(prop) + + if len(notSet) > 0 and len(notSet) < len(configProperties): + self._log.warning("Configure failed with partial configuration, %s", notSet) + raise CF.PropertySet.PartialConfiguration(notSet) + elif len(notSet) > 0 and len(notSet) >= len(configProperties): + 
self._log.warning("Configure failed with invalid configuration, %s", notSet) + raise CF.PropertySet.InvalidConfiguration("Failure: "+error_message, notSet) + self._log.trace("configure(%s)", configProperties) + + def query(self, configProperties): + if configProperties == []: + self._log.trace("query all properties") + try: + rv = [] + for propid in self._props.keys(): + if self._props.has_id(propid) and self._props.isQueryable(propid): + try: + value = self._props.query(propid) + except Exception, e: + self._log.error('Failed to query %s: %s', propid, e) + value = any.to_any(None) + prp = self._props.getPropDef(propid) + if type(prp) == properties.struct_property: + newvalval = [] + for v in value.value(): + if prp.fields[v.id][1].optional == True: + if isinstance(v.value.value(), list): + if v.value.value() != []: + newvalval.append(v) + else: + if v.value.value() != None: + newvalval.append(v) + else: + newvalval.append(v) + value = CORBA.Any(value.typecode(), newvalval) + + rv.append(CF.DataType(propid, value)) + except: + raise + + # otherwise get only the requested ones + else: + self._log.trace("query %s properties", len(configProperties)) + try: + unknownProperties = [] + for prop in configProperties: + if self._props.has_id(prop.id) and self._props.isQueryable(prop.id): + try: + prop.value = self._props.query(prop.id) + except Exception, e: + self._log.error('Failed to query %s: %s', prop.id, e) + prp = self._props.getPropDef(prop.id) + if type(prp) == properties.struct_property: + newvalval = [] + for v in prop.value.value(): + if prp.fields[v.id][1].optional == True: + if isinstance(v.value.value(), list): + if v.value.value() != []: + newvalval.append(v) + else: + if v.value.value() != None: + newvalval.append(v) + else: + newvalval.append(v) + prop.value = CORBA.Any(prop.value.typecode(), newvalval) + else: + self._log.warning("property %s cannot be queried. 
valid Id: %s", + prop.id, self._props.has_id(prop.id)) + unknownProperties.append(prop) + except: + raise + + if len(unknownProperties) > 0: + self._log.warning("query called with invalid properties %s", unknownProperties) + raise CF.UnknownProperties(unknownProperties) + + rv = configProperties + self._log.trace("query -> %s properties", len(rv)) + return rv + + def initializeProperties(self, ctorProps): + notSet = [] + for prop in ctorProps: + try: + if self._props.has_id(prop.id) and self._props.isProperty(prop.id): + try: + # run configure on property.. disable callback feature + self._props.construct(prop.id, prop.value) + except ValueError, e: + self._log.warning("Invalid value provided to construct for property %s %s", prop.id, e) + notSet.append(prop) + else: + self._log.warning("Tried to construct non-existent, readonly, or property with action not equal to external %s", prop.id) + notSet.append(prop) + except Exception, e: + self._log.exception("Unexpected exception.") + notSet.append(prop) + + + def registerPropertyListener(self, obj, prop_ids, interval): + # TODO + pass + + def unregisterPropertyListener(self, id): + # TODO + pass + + p1 = properties.simple_property(id_="p1", + name="p1", + type_="string", + mode="readwrite", + action="external", + kinds=("property",), + description=""" """) + + + p2 = properties.simple_property(id_="p2", + name="p2", + type_="long", + mode="readwrite", + action="external", + kinds=("property",)) + + +if __name__ == '__main__': + start_service(S2, thread_policy=PortableServer.SINGLE_THREAD_MODEL) diff --git a/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.prf.xml b/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.prf.xml new file mode 100644 index 000000000..724e67e97 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.prf.xml @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.scd.xml 
b/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.scd.xml new file mode 100644 index 000000000..117aaa6b9 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.scd.xml @@ -0,0 +1,14 @@ + + + + 2.2 + + service + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.spd.xml b/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.spd.xml new file mode 100644 index 000000000..e0f1422fb --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/S2_pre/S2_pre.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software component. + + + python/S2_pre.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/S2_pre/python/S2_pre.py b/redhawk/src/testing/sdr/dev/services/S2_pre/python/S2_pre.py new file mode 100755 index 000000000..daa0f9329 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/S2_pre/python/S2_pre.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED +# +# Source: S2_pre.spd.xml + +import sys, signal, copy, os +import logging + +from ossie.cf import CF, CF__POA #@UnusedImport +from ossie.service import start_service +from omniORB import any, CORBA, URI, PortableServer + +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie import properties; + + +class S2_pre(CF__POA.PropertySet): + + def __init__(self, name="S2_pre", execparams={}): + self.name = name + self._log = logging.getLogger(self.name) + self._props = properties.PropertyStorage(self, (), execparams) + try: + self._props._addProperty( S2_pre.p1 ) + self._props._addProperty( S2_pre.p2) + except KeyError, e: + pass + except Exceptiopn, e: + raise e + self._props.initialize() + + def terminateService(self): + pass + + def configure(self, configProperties): + notSet = [] + error_message = '' + for prop in configProperties: + try: + if self._props.has_id(prop.id) and self._props.isConfigurable(prop.id): + try: + self._props.configure(prop.id, 
prop.value) + except Exception, e: + self._log.warning("Invalid value provided to configure for property %s: %s", prop.id, e) + notSet.append(prop) + else: + self._log.warning("Tried to configure non-existent, readonly, or property with action not equal to external %s", prop.id) + notSet.append(prop) + except Exception, e: + error_message += str(e) + self._log.exception("Unexpected exception.") + notSet.append(prop) + + if len(notSet) > 0 and len(notSet) < len(configProperties): + self._log.warning("Configure failed with partial configuration, %s", notSet) + raise CF.PropertySet.PartialConfiguration(notSet) + elif len(notSet) > 0 and len(notSet) >= len(configProperties): + self._log.warning("Configure failed with invalid configuration, %s", notSet) + raise CF.PropertySet.InvalidConfiguration("Failure: "+error_message, notSet) + self._log.trace("configure(%s)", configProperties) + + + def query(self, configProperties): + if configProperties == []: + self._log.trace("query all properties") + try: + rv = [] + for propid in self._props.keys(): + if self._props.has_id(propid) and self._props.isQueryable(propid): + try: + value = self._props.query(propid) + except Exception, e: + self._log.error('Failed to query %s: %s', propid, e) + value = any.to_any(None) + prp = self._props.getPropDef(propid) + if type(prp) == properties.struct_property: + newvalval = [] + for v in value.value(): + if prp.fields[v.id][1].optional == True: + if isinstance(v.value.value(), list): + if v.value.value() != []: + newvalval.append(v) + else: + if v.value.value() != None: + newvalval.append(v) + else: + newvalval.append(v) + value = CORBA.Any(value.typecode(), newvalval) + + rv.append(CF.DataType(propid, value)) + except: + raise + + # otherwise get only the requested ones + else: + self._log.trace("query %s properties", len(configProperties)) + try: + unknownProperties = [] + for prop in configProperties: + if self._props.has_id(prop.id) and self._props.isQueryable(prop.id): + try: + 
prop.value = self._props.query(prop.id) + except Exception, e: + self._log.error('Failed to query %s: %s', prop.id, e) + prp = self._props.getPropDef(prop.id) + if type(prp) == properties.struct_property: + newvalval = [] + for v in prop.value.value(): + if prp.fields[v.id][1].optional == True: + if isinstance(v.value.value(), list): + if v.value.value() != []: + newvalval.append(v) + else: + if v.value.value() != None: + newvalval.append(v) + else: + newvalval.append(v) + prop.value = CORBA.Any(prop.value.typecode(), newvalval) + else: + self._log.warning("property %s cannot be queried. valid Id: %s", + prop.id, self._props.has_id(prop.id)) + unknownProperties.append(prop) + except: + raise + + if len(unknownProperties) > 0: + self._log.warning("query called with invalid properties %s", unknownProperties) + raise CF.UnknownProperties(unknownProperties) + + rv = configProperties + self._log.trace("query -> %s properties", len(rv)) + return rv + + def initializeProperties(self, ctorProps): + notSet = [] + for prop in ctorProps: + try: + if self._props.has_id(prop.id) and self._props.isProperty(prop.id): + try: + # run configure on property.. 
disable callback feature + self._props.construct(prop.id, prop.value) + except ValueError, e: + self._log.warning("Invalid value provided to construct for property %s %s", prop.id, e) + notSet.append(prop) + else: + self._log.warning("Tried to construct non-existent, readonly, or property with action not equal to external %s", prop.id) + notSet.append(prop) + except Exception, e: + self._log.exception("Unexpected exception.") + notSet.append(prop) + + def registerPropertyListener(self, obj, prop_ids, interval): + # TODO + pass + + def unregisterPropertyListener(self, id): + # TODO + pass + + p1 = properties.simple_property(id_="p1", + name="p1", + type_="string", + mode="readwrite", + action="external", + kinds=("configure",), + description=""" """) + + + p2 = properties.simple_property(id_="p2", + name="p2", + type_="long", + mode="readwrite", + action="external", + kinds=("configure",)) + + +if __name__ == '__main__': + start_service(S2_pre, thread_policy=PortableServer.SINGLE_THREAD_MODEL) diff --git a/redhawk/src/testing/sdr/dev/services/start_event_service/.md5sums b/redhawk/src/testing/sdr/dev/services/start_event_service/.md5sums new file mode 100644 index 000000000..1064ed268 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/start_event_service/.md5sums @@ -0,0 +1,2 @@ +f3669e41bf5b56f111e3d0aa8de4e0fe build.sh +8dae566ff2a389ea89cb56a9bf103fad start_event_service.spec diff --git a/redhawk/src/testing/sdr/dev/services/start_event_service/.start_event_service.wavedev b/redhawk/src/testing/sdr/dev/services/start_event_service/.start_event_service.wavedev new file mode 100644 index 000000000..1feb9a91e --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/start_event_service/.start_event_service.wavedev @@ -0,0 +1,25 @@ + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/start_event_service/python/.md5sums b/redhawk/src/testing/sdr/dev/services/start_event_service/python/.md5sums new file mode 100644 index 000000000..e8f46ff91 --- 
/dev/null +++ b/redhawk/src/testing/sdr/dev/services/start_event_service/python/.md5sums @@ -0,0 +1,5 @@ +30ef5796724d2d3c157af12171e40c2e configure.ac +2bd5b53db27fc46ae7fb4ca9b4d8b6ea Makefile.am +8455e788a3d419e446753db0ae6443dc Makefile.am.ide +cb125bc3726241f5e766fde990546f6d start_event_service.py +8bfcd22353c3a57fee561ad86ee2a56b reconf diff --git a/redhawk/src/testing/sdr/dev/services/start_event_service/python/start_event_service.py b/redhawk/src/testing/sdr/dev/services/start_event_service/python/start_event_service.py new file mode 100755 index 000000000..7c76c9f9e --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/start_event_service/python/start_event_service.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# +# +# AUTO-GENERATED +# +# Source: start_event_service.spd.xml + +import logging + +from ossie.cf import CF, CF__POA +from ossie.service import start_service +from omniORB import PortableServer + +from ossie.properties import simple_property, simpleseq_property +from ossie.events import MessageSupplierPort + +class start_event_service(CF__POA.Resource): + class StateChange(object): + identifier = simple_property( + id_="state_change::identifier", + name="identifier", + type_="string") + + event = simple_property( + id_="state_change::event", + name="event", + type_="string") + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["identifier"] = self.identifier + d["event"] = self.event + return str(d) + + @classmethod + def getId(cls): + return "state_change" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("identifier",self.identifier),("event",self.event)] + + def __init__(self, name="start_event_service", execparams={}): + self.name = name + self._log = logging.getLogger(self.name) + self._started = False + self.port_message_out = MessageSupplierPort() + + def terminateService(self): + pass + + def initialize(self): + # TODO + pass + + def releaseObject(self): + # TODO + pass + + def runTest(self, testid, testValues): + # TODO + pass + + def configure(self, configProperties): + # TODO + pass + + def query(self, configProperties): + # TODO + pass + + def initializeProperties(self, initialProperties): + # TODO + pass + + def registerPropertyListener(self, obj, prop_ids, interval): + # TODO + pass + + def unregisterPropertyListener(self, id): + # TODO + pass + + def getPort(self, name): + if name != 
'message_out': + raise CF.PortSupplier.UnknownPort() + return self.port_message_out._this() + + def getPortSet(self): + # TODO + pass + + def retrieve_records(self, howMany, startingRecord): + # TODO + pass + + def retrieve_records_by_date(self, howMany, to_timeStamp): + # TODO + pass + + def retrieve_records_from_date(self, howMany, from_timeStamp): + # TODO + pass + + def setLogLevel(self, logger_id, newLevel): + # TODO + pass + + def getLogConfig(self): + # TODO + pass + + def setLogConfig(self, config_contents): + # TODO + pass + + def setLogConfigURL(self, config_url): + # TODO + pass + + def start(self): + if self._started: + return + self._log.info('starting %s', self.name) + self._started = True + message = start_event_service.StateChange() + message.identifier = self.name + message.event = "start" + self.port_message_out.sendMessage(message) + + def stop(self): + if not self._started: + return + self._log.info('stopping %s', self.name) + self._started = False + message = start_event_service.StateChange() + message.identifier = self.name + message.event = "stop" + self.port_message_out.sendMessage(message) + + def _get_log_level(self): + # TODO + pass + + def _set_log_level(self, data): + # TODO + pass + + def _get_identifier(self): + # TODO + pass + + def _get_started(self): + return self._started + + def _get_softwareProfile(self): + # TODO + pass + + +if __name__ == '__main__': + start_service(start_event_service, thread_policy=PortableServer.SINGLE_THREAD_MODEL) diff --git a/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.prf.xml b/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.prf.xml new file mode 100644 index 000000000..b1d8faf31 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.prf.xml @@ -0,0 +1,28 @@ + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.scd.xml 
b/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.scd.xml new file mode 100644 index 000000000..d1fc5e5e5 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.scd.xml @@ -0,0 +1,74 @@ + + + + + 2.2 + + service + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.spd.xml b/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.spd.xml new file mode 100644 index 000000000..657808a31 --- /dev/null +++ b/redhawk/src/testing/sdr/dev/services/start_event_service/start_event_service.spd.xml @@ -0,0 +1,44 @@ + + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/start_event_service.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/BasicAC/basicac_java_impl1/src/basicac_java_impl1/ports/CF_ResourceOutPort.java b/redhawk/src/testing/sdr/dom/components/BasicAC/basicac_java_impl1/src/basicac_java_impl1/ports/CF_ResourceOutPort.java index 229f2a65a..5926fa111 100644 --- a/redhawk/src/testing/sdr/dom/components/BasicAC/basicac_java_impl1/src/basicac_java_impl1/ports/CF_ResourceOutPort.java +++ b/redhawk/src/testing/sdr/dom/components/BasicAC/basicac_java_impl1/src/basicac_java_impl1/ports/CF_ResourceOutPort.java @@ -116,6 +116,40 @@ public boolean started() return retval; } + public void resetLog() { + synchronized(this.updatingPortsLock) { // don't want to process while command information is coming in + if (this.active) { + for (ResourceOperations p : this.outConnections.values()) { + p.resetLog(); + } + } + } // don't want to process while command information is coming in + } + + public String[] getNamedLoggers() { + String[] retval = new String[0]; + synchronized(this.updatingPortsLock) { // don't want to process while command information is 
coming in + if (this.active) { + for (ResourceOperations p : this.outConnections.values()) { + retval = p.getNamedLoggers(); + } + } + } // don't want to process while command information is coming in + return retval; + } + + public int getLogLevel(String logger_id) throws CF.UnknownIdentifier { + int retval = -1; + synchronized(this.updatingPortsLock) { // don't want to process while command information is coming in + if (this.active) { + for (ResourceOperations p : this.outConnections.values()) { + retval = p.getLogLevel(logger_id); + } + } + } // don't want to process while command information is coming in + return retval; + } + public void initializeProperties(CF.DataType[] configProperties) throws CF.PropertySetPackage.InvalidConfiguration, CF.PropertySetPackage.PartialConfiguration { } diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/.BasicShared.wavedev b/redhawk/src/testing/sdr/dom/components/BasicShared/.BasicShared.wavedev new file mode 100644 index 000000000..f7936e04b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/.BasicShared.wavedev @@ -0,0 +1,6 @@ + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.prf.xml b/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.prf.xml new file mode 100644 index 000000000..4d895bb3a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.prf.xml @@ -0,0 +1,31 @@ + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.scd.xml b/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.scd.xml new file mode 100644 index 000000000..1d4932fdf --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.scd.xml @@ -0,0 +1,64 @@ + + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.spd.xml 
b/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.spd.xml new file mode 100644 index 000000000..d3997ac58 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/BasicShared.spd.xml @@ -0,0 +1,46 @@ + + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/BasicShared.so + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/.md5sums b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/.md5sums new file mode 100644 index 000000000..dfb58942f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/.md5sums @@ -0,0 +1,10 @@ +ebab982ed6b7711d65ef5c4ed652f729 main.cpp +d23f51d099f8c3b419d46aa7a2d953de reconf +8f558812db58086174799f58d769c6cf BasicShared_base.cpp +322b9b06a8a11aab55b091600bba67d2 configure.ac +91b35020418b07c156987bace0f23823 BasicShared.cpp +4e5b9b0356f5fb46ac9aefa910763516 Makefile.am.ide +b640eb40dc780b01f8872f6a9bcc9372 BasicShared.h +b1835160ba93e9e1248209acc174363d build.sh +03c8ef5a559ed7754644d158f5f8693c Makefile.am +68959d7d49f65106bf146314ed681aed BasicShared_base.h diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared.cpp b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared.cpp new file mode 100644 index 000000000..132f678ba --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared.cpp @@ -0,0 +1,275 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "BasicShared.h" + +PREPARE_LOGGING(BasicShared_i) + +BasicShared_i::BasicShared_i(const char *uuid, const char *label) : + BasicShared_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + +} + +BasicShared_i::~BasicShared_i() +{ +} + +void BasicShared_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ + setPropertyQueryImpl(pid, this, &BasicShared_i::get_pid); + nic_name = getNetwork()->getNic(); +} + +pid_t BasicShared_i::get_pid() +{ + return ::getpid(); +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. 
+ If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) and string-based (dataString, dataXML and + dataFile) do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. 
+ + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + // The component class must have an output stream member; add to + // BasicShared.h: + // bulkio::OutFloatStream outputStream; + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + short* inputData = block.data(); + std::vector outputData; + outputData.resize(block.size()); + for (size_t index = 0; index < block.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // If there is no output stream open, create one + if (!outputStream) { + outputStream = dataFloat_out->createStream(block.sri()); + } else if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Write to the output stream + outputStream.write(outputData, block.getTimestamps()); + + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. 
Data blocks + provide functions that return the correct interpretation of the data + buffer and number of complex elements: + + if (block.complex()) { + std::complex* data = block.cxdata(); + for (size_t index = 0; index < block.cxsize(); ++index) { + data[index] = std::abs(data[index]); + } + outputStream.write(data, block.cxsize(), bulkio::time::utils::now()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void BasicShared_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &BasicShared_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (BasicShared_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &BasicShared_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to BasicShared.cpp + BasicShared_i::BasicShared_i(const char *uuid, const char *label) : + BasicShared_base(uuid, label) + { + addPropertyListener(scaleValue, this, &BasicShared_i::scaleChanged); + addPropertyListener(status, this, &BasicShared_i::statusChanged); + } + + void BasicShared_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(BasicShared_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void BasicShared_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(BasicShared_i, "status changed"); + } + + //Add to BasicShared.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int BasicShared_i::serviceFunction() +{ + LOG_DEBUG(BasicShared_i, "serviceFunction() example log message"); + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared.h b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared.h new file mode 100644 index 000000000..bf8b5c988 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared.h @@ -0,0 +1,40 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef BASICSHARED_I_IMPL_H +#define BASICSHARED_I_IMPL_H + +#include "BasicShared_base.h" + +class BasicShared_i : public BasicShared_base +{ + ENABLE_LOGGING + public: + BasicShared_i(const char *uuid, const char *label); + ~BasicShared_i(); + + void constructor(); + + int serviceFunction(); + + private: + pid_t get_pid(); +}; + +#endif // BASICSHARED_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared_base.cpp b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared_base.cpp new file mode 100644 index 000000000..843b15b16 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared_base.cpp @@ -0,0 +1,95 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#include "BasicShared_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +BasicShared_base::BasicShared_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); +} + +BasicShared_base::~BasicShared_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. +*******************************************************************************************/ +void BasicShared_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void BasicShared_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void BasicShared_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void BasicShared_base::loadProperties() +{ + addProperty(pid, + "pid", + "", + "readonly", + "", + "external", + "property"); + + addProperty(nic_name, + "nic_name", + "", + "readonly", + "", + "external", + "property"); + +} + + diff --git 
a/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared_base.h b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared_base.h new file mode 100644 index 000000000..43c8783a2 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/BasicShared_base.h @@ -0,0 +1,51 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef BASICSHARED_BASE_IMPL_BASE_H +#define BASICSHARED_BASE_IMPL_BASE_H + +#include +#include +#include + + +class BasicShared_base : public Component, protected ThreadedComponent +{ + public: + BasicShared_base(const char *uuid, const char *label); + ~BasicShared_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + // Member variables exposed as properties + /// Property: pid + CORBA::ULong pid; + /// Property: nic_name + std::string nic_name; + + private: +}; +#endif // BASICSHARED_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/Makefile.am new file mode 100644 index 000000000..61aaae577 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/Makefile.am @@ -0,0 +1,62 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# +CFDIR = $(top_srcdir)/base + +ossieName = BasicShared +libdir = $(prefix)/dom/components/BasicShared/cpp +lib_LTLIBRARIES = BasicShared.la + +.PHONY: convenience-link clean-convenience-link + +install: + +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : BasicShared.la + @ln -fs .libs/BasicShared.so + +clean-convenience-link: + @rm -f BasicShared.so + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. +include $(srcdir)/Makefile.am.ide +BasicShared_la_SOURCES = $(redhawk_SOURCES_auto) +BasicShared_la_LIBADD = $(SOFTPKG_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la +BasicShared_la_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) -I$(CFDIR)/include +BasicShared_la_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) + diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/Makefile.am.ide b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/Makefile.am.ide new file mode 100644 index 000000000..7b9de3784 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/Makefile.am.ide @@ -0,0 +1,30 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. 
+# +# REDHAWK core is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! +# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = main.cpp +redhawk_SOURCES_auto += BasicShared.cpp +redhawk_SOURCES_auto += BasicShared.h +redhawk_SOURCES_auto += BasicShared_base.cpp +redhawk_SOURCES_auto += BasicShared_base.h + diff --git a/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/main.cpp new file mode 100644 index 000000000..812960c91 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/BasicShared/cpp/main.cpp @@ -0,0 +1,30 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. 
+ * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#include +#include "ossie/ossieSupport.h" + +#include "BasicShared.h" +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new BasicShared_i(uuid.c_str(), identifier.c_str()); + } +} + diff --git a/redhawk/src/testing/sdr/dom/components/C1/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/C1/cpp/Makefile.am index 8a29c9b31..1a0f64ceb 100644 --- a/redhawk/src/testing/sdr/dom/components/C1/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/C1/cpp/Makefile.am @@ -41,6 +41,6 @@ distclean-local: # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide C1_SOURCES = $(redhawk_SOURCES_auto) -C1_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(redhawk_LDADD_auto) +C1_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) C1_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include C1_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/C2/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/C2/cpp/Makefile.am index b105c0e49..1a397af0f 100644 --- a/redhawk/src/testing/sdr/dom/components/C2/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/C2/cpp/Makefile.am @@ -41,6 +41,6 @@ distclean-local: # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide C2_SOURCES = $(redhawk_SOURCES_auto) -C2_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(redhawk_LDADD_auto) +C2_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) C2_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include C2_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/CommandWrapperEmptyDir/Makefile b/redhawk/src/testing/sdr/dom/components/CommandWrapperEmptyDir/Makefile index af9470e97..2f6fe40f3 100644 --- a/redhawk/src/testing/sdr/dom/components/CommandWrapperEmptyDir/Makefile +++ b/redhawk/src/testing/sdr/dom/components/CommandWrapperEmptyDir/Makefile @@ -14,3 +14,4 @@ distclean: clean: +check: diff --git a/redhawk/src/testing/sdr/dom/components/CommandWrapperNestedSPDDep/CommandWrapperNestedSPDDep.spd.xml b/redhawk/src/testing/sdr/dom/components/CommandWrapperNestedSPDDep/CommandWrapperNestedSPDDep.spd.xml index ba404df82..e7fa52cf9 100644 --- a/redhawk/src/testing/sdr/dom/components/CommandWrapperNestedSPDDep/CommandWrapperNestedSPDDep.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/CommandWrapperNestedSPDDep/CommandWrapperNestedSPDDep.spd.xml @@ -46,6 +46,7 @@ with this program. If not, see http://www.gnu.org/licenses/. 
+ diff --git a/redhawk/src/testing/sdr/dom/components/ECM_CPP/ECM_CPP.spec b/redhawk/src/testing/sdr/dom/components/ECM_CPP/ECM_CPP.spec deleted file mode 100644 index 46edc99da..000000000 --- a/redhawk/src/testing/sdr/dom/components/ECM_CPP/ECM_CPP.spec +++ /dev/null @@ -1,84 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ECM_CPP -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.11 -Requires: redhawk >= 1.11 - - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation cpp -pushd cpp -./reconf -%define _bindir %{_prefix}/dom/components/ECM_CPP/cpp -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation cpp -pushd cpp -%define _bindir %{_prefix}/dom/components/ECM_CPP/cpp -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/ECM_CPP.scd.xml -%{_prefix}/dom/components/%{name}/ECM_CPP.prf.xml -%{_prefix}/dom/components/%{name}/ECM_CPP.spd.xml -%{_prefix}/dom/components/%{name}/cpp - diff --git a/redhawk/src/testing/sdr/dom/components/ECM_CPP/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/ECM_CPP/cpp/Makefile.am index 3790de0bc..04d5128a8 100644 --- a/redhawk/src/testing/sdr/dom/components/ECM_CPP/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/ECM_CPP/cpp/Makefile.am @@ -41,7 +41,7 @@ distclean-local: # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide ECM_CPP_SOURCES = $(redhawk_SOURCES_auto) -ECM_CPP_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(redhawk_LDADD_auto) +ECM_CPP_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) ECM_CPP_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include ECM_CPP_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/ECM_JAVA/ECM_JAVA.spec b/redhawk/src/testing/sdr/dom/components/ECM_JAVA/ECM_JAVA.spec deleted file mode 100644 index bd18d847f..000000000 --- a/redhawk/src/testing/sdr/dom/components/ECM_JAVA/ECM_JAVA.spec +++ /dev/null @@ -1,90 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ECM_JAVA -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.11 -Requires: redhawk >= 1.11 - -BuildArch: noarch - -# Java requirements -Requires: java >= 1.6 -BuildRequires: java-devel >= 1.6 - - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation java -pushd java -./reconf -%define _bindir %{_prefix}/dom/components/ECM_JAVA/java -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation java -pushd java -%define _bindir %{_prefix}/dom/components/ECM_JAVA/java -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/ECM_JAVA.scd.xml -%{_prefix}/dom/components/%{name}/ECM_JAVA.prf.xml -%{_prefix}/dom/components/%{name}/ECM_JAVA.spd.xml -%{_prefix}/dom/components/%{name}/java - diff --git a/redhawk/src/testing/sdr/dom/components/ECM_PY/ECM_PY.spec b/redhawk/src/testing/sdr/dom/components/ECM_PY/ECM_PY.spec deleted file mode 100644 index 820e90bfa..000000000 --- a/redhawk/src/testing/sdr/dom/components/ECM_PY/ECM_PY.spec +++ /dev/null @@ -1,86 +0,0 @@ -# -# This file is protected by Copyright. 
Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ECM_PY -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.11 -Requires: redhawk >= 1.11 - -BuildArch: noarch - - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/ECM_PY/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/ECM_PY/python -make 
install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/ECM_PY.scd.xml -%{_prefix}/dom/components/%{name}/ECM_PY.prf.xml -%{_prefix}/dom/components/%{name}/ECM_PY.spd.xml -%{_prefix}/dom/components/%{name}/python - diff --git a/redhawk/src/testing/sdr/dom/components/ECM_PY/python/ECM_PY.py b/redhawk/src/testing/sdr/dom/components/ECM_PY/python/ECM_PY.py index 77e0db9a9..3c64093c9 100755 --- a/redhawk/src/testing/sdr/dom/components/ECM_PY/python/ECM_PY.py +++ b/redhawk/src/testing/sdr/dom/components/ECM_PY/python/ECM_PY.py @@ -162,8 +162,9 @@ def mycallback(self, id, old_value, new_value): time.sleep(.10) if self.msg_limit > self.msg_recv: msgin=0; - msgin = self.sub.getData() - if msgin != None: + inany = self.sub.getData() + if inany != None: + msgin = any.from_any(inany) self._log.info("Received MSG msgid =" +str(msgin)) if msgin == self.p_msgid : self.msg_recv = self.msg_recv + 1 diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.cpp.spd.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.cpp.spd.xml new file mode 100644 index 000000000..b91706cec --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.cpp.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/EmptyString + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.java.spd.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.java.spd.xml new file mode 100644 index 000000000..8246e8254 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.java.spd.xml @@ -0,0 +1,26 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + java/startJava.sh + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.cpp.spd.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.cpp.spd.xml new file mode 100644 index 000000000..e57364c74 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.cpp.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/EmptyString + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.java.spd.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.java.spd.xml new file mode 100644 index 000000000..36533cf32 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.java.spd.xml @@ -0,0 +1,26 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.prf.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.prf.xml new file mode 100644 index 000000000..a9a78dd08 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.prf.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.py.spd.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.py.spd.xml new file mode 100644 index 000000000..a4d0a4433 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.novalue.py.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/EmptyString.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.prf.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.prf.xml new file mode 100644 index 000000000..623ed15ee --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.prf.xml @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.py.spd.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.py.spd.xml new file mode 100644 index 000000000..ae7af9696 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.py.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/EmptyString.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.scd.xml b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/EmptyString.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString.cpp b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString.cpp new file mode 100644 index 000000000..e82843385 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString.cpp @@ -0,0 +1,34 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. 
Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "EmptyString.h" + +PREPARE_LOGGING(EmptyString_i) + +EmptyString_i::EmptyString_i(const char *uuid, const char *label) : + EmptyString_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + estr="ctor-value"; + +} + +EmptyString_i::~EmptyString_i() +{ +} + +void EmptyString_i::constructor() +{ +} + +int EmptyString_i::serviceFunction() +{ + + return NOOP; +} diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString.h b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString.h new file mode 100644 index 000000000..e8a97c09b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString.h @@ -0,0 +1,18 @@ +#ifndef EMPTYSTRING_I_IMPL_H +#define EMPTYSTRING_I_IMPL_H + +#include "EmptyString_base.h" + +class EmptyString_i : public EmptyString_base +{ + ENABLE_LOGGING + public: + EmptyString_i(const char *uuid, const char *label); + ~EmptyString_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // EMPTYSTRING_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString_base.cpp b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString_base.cpp new file mode 100644 index 000000000..0c203b144 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString_base.cpp @@ -0,0 +1,65 @@ +#include "EmptyString_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. 
Instead, overload them + on the child class + +******************************************************************************************/ + +EmptyString_base::EmptyString_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + loadProperties(); +} + +EmptyString_base::~EmptyString_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. +*******************************************************************************************/ +void EmptyString_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void EmptyString_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void EmptyString_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void EmptyString_base::loadProperties() +{ + addProperty(estr, + "", + "estr", + "estr", + "readwrite", + "", + "external", + "property"); + +} diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString_base.h b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString_base.h new file mode 100644 index 000000000..a6716cca4 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/EmptyString_base.h @@ -0,0 +1,30 @@ +#ifndef EMPTYSTRING_BASE_IMPL_BASE_H +#define EMPTYSTRING_BASE_IMPL_BASE_H + +#include +#include +#include + + +class EmptyString_base : public 
Component, protected ThreadedComponent +{ + public: + EmptyString_base(const char *uuid, const char *label); + ~EmptyString_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + // Member variables exposed as properties + /// Property: estr + std::string estr; + + private: +}; +#endif // EMPTYSTRING_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/Makefile.am new file mode 100644 index 000000000..3ae9a5e2d --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/Makefile.am @@ -0,0 +1,47 @@ + +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +CFDIR = $(top_srcdir)/base + +noinst_PROGRAMS = EmptyString + +distclean-local: + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. +include $(srcdir)/Makefile.am.ide +EmptyString_SOURCES = $(redhawk_SOURCES_auto) +EmptyString_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(redhawk_LDADD_auto) +EmptyString_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include +EmptyString_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/Makefile.am.ide b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/Makefile.am.ide new file mode 100644 index 000000000..7010e81be --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/Makefile.am.ide @@ -0,0 +1,10 @@ +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! +# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. 
Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = EmptyString.cpp +redhawk_SOURCES_auto += EmptyString.h +redhawk_SOURCES_auto += EmptyString_base.cpp +redhawk_SOURCES_auto += EmptyString_base.h +redhawk_SOURCES_auto += main.cpp diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/main.cpp new file mode 100644 index 000000000..fcc0cc704 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/cpp/main.cpp @@ -0,0 +1,10 @@ +#include +#include "ossie/ossieSupport.h" + +#include "EmptyString.h" +int main(int argc, char* argv[]) +{ + EmptyString_i* EmptyString_servant; + Component::start_component(EmptyString_servant, argc, argv); + return 0; +} diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/java/Makefile.am b/redhawk/src/testing/sdr/dom/components/EmptyString/java/Makefile.am new file mode 100644 index 000000000..b26570fa1 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/java/Makefile.am @@ -0,0 +1,45 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. 
If not, see http://www.gnu.org/licenses/. +# +EmptyString_jar_CLASSPATH = $(CLASSPATH_SOFTPKG_DEP)$(OSSIE_HOME)/lib/CFInterfaces.jar:$(OSSIE_HOME)/lib/log4j-1.2.15.jar:$(OSSIE_HOME)/lib/ossie.jar + +EmptyString.jar$(EXEEXT): $(EmptyString_jar_SOURCES) + mkdir -p bin + $(JAVAC) -cp $(OSSIE_CLASSPATH) -d bin $(EmptyString_jar_SOURCES) + $(JAR) cf ./EmptyString.jar -C bin . + +clean-local: + rm -rf bin + +distclean-local: + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +noinst_PROGRAMS = EmptyString.jar +EmptyString_jar_SOURCES := $(shell find ./src -name "*.java") diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/java/src/emptyString/java/EmptyString.java b/redhawk/src/testing/sdr/dom/components/EmptyString/java/src/emptyString/java/EmptyString.java new file mode 100644 index 000000000..4785f6545 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/java/src/emptyString/java/EmptyString.java @@ -0,0 +1,24 @@ +package emptyString.java; + +import java.util.Properties; + +public class EmptyString extends EmptyString_base { + + public EmptyString() + { + super(); + this.estr.setValue("ctor-value"); + } + + public void constructor() + { + } + + protected int serviceFunction() { + return NOOP; + } + + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/java/src/emptyString/java/EmptyString_base.java b/redhawk/src/testing/sdr/dom/components/EmptyString/java/src/emptyString/java/EmptyString_base.java new file mode 100644 index 000000000..5b602c683 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/java/src/emptyString/java/EmptyString_base.java @@ -0,0 +1,100 @@ +package emptyString.java; + +import java.util.Properties; + +import org.apache.log4j.Logger; + +import 
org.omg.CosNaming.NamingContextPackage.CannotProceed; +import org.omg.CosNaming.NamingContextPackage.InvalidName; +import org.omg.CosNaming.NamingContextPackage.NotFound; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.InvalidObjectReference; + +import org.ossie.component.*; +import org.ossie.properties.*; + + +public abstract class EmptyString_base extends Component { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(EmptyString_base.class.getName()); + + /** + * The property estr + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final StringProperty estr = + new StringProperty( + "estr", //id + "estr", //name + "", //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * @generated + */ + public EmptyString_base() + { + super(); + + setLogger( logger, EmptyString_base.class.getName() ); + + + // Properties + addProperty(estr); + + } + + public void start() throws CF.ResourcePackage.StartError + { + super.start(); + } + + public void stop() throws CF.ResourcePackage.StopError + { + super.stop(); + } + + + /** + * The main function of your component. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. 
+ * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + EmptyString.configureOrb(orbProps); + + try { + Component.start_component(EmptyString.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (NotFound e) { + e.printStackTrace(); + } catch (CannotProceed e) { + e.printStackTrace(); + } catch (InvalidName e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/java/startJava.sh b/redhawk/src/testing/sdr/dom/components/EmptyString/java/startJava.sh new file mode 100755 index 000000000..3f5789e05 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/java/startJava.sh @@ -0,0 +1,47 @@ +#!/bin/sh +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +myDir=`dirname $0` + +# Setup the OSSIEHOME Lib jars on the classpath +libDir=${SDRROOT}/../../base/framework/java +libFiles=`ls -1 $libDir/*.jar` +for file in $libFiles +do + if [ x"$CLASSPATH" = "x" ] + then + export CLASSPATH=$file + else + export CLASSPATH=$file:$CLASSPATH + fi +done + +# Path for Java +if test -x $JAVA_HOME/bin/java; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=java +fi + +# NOTE: the $@ must be quoted "$@" for arguments to be passed correctly + +#Sun ORB start line +exec $JAVA -cp :$myDir/EmptyString.jar:$myDir/bin:$CLASSPATH emptyString.java.EmptyString "$@" diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/python/EmptyString.py b/redhawk/src/testing/sdr/dom/components/EmptyString/python/EmptyString.py new file mode 100755 index 000000000..c7e7cdffc --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/python/EmptyString.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: EmptyString.spd.xml +from ossie.resource import start_component +import logging + +from EmptyString_base import * + +class EmptyString_i(EmptyString_base): + """""" + def __init__(self, identifier, execparams): + EmptyString_base.__init__(self,identifier,execparams) + self.estr="ctor-value" + + def constructor(self): + """ + """ + # TODO add customization here. + + def process(self): + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(EmptyString_i) diff --git a/redhawk/src/testing/sdr/dom/components/EmptyString/python/EmptyString_base.py b/redhawk/src/testing/sdr/dom/components/EmptyString/python/EmptyString_base.py new file mode 100644 index 000000000..a0ca735f2 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/EmptyString/python/EmptyString_base.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. 
DO NOT MODIFY! +# +# Source: EmptyString.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class EmptyString_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + estr = simple_property(id_="estr", + name="estr", + type_="string", + defvalue="", + mode="readwrite", + action="external", + kinds=("property",)) diff --git a/redhawk/src/testing/sdr/dom/components/EventReceive/EventReceive.scd.xml b/redhawk/src/testing/sdr/dom/components/EventReceive/EventReceive.scd.xml index cbf694df5..aea5f6fd7 100644 --- a/redhawk/src/testing/sdr/dom/components/EventReceive/EventReceive.scd.xml +++ b/redhawk/src/testing/sdr/dom/components/EventReceive/EventReceive.scd.xml @@ -31,8 +31,12 @@ with this program. If not, see http://www.gnu.org/licenses/. - - + + + + + + @@ -46,6 +50,5 @@ with this program. If not, see http://www.gnu.org/licenses/. - diff --git a/redhawk/src/testing/sdr/dom/components/EventSend/EventSend.scd.xml b/redhawk/src/testing/sdr/dom/components/EventSend/EventSend.scd.xml index 9d8e3e3ef..92373711b 100644 --- a/redhawk/src/testing/sdr/dom/components/EventSend/EventSend.scd.xml +++ b/redhawk/src/testing/sdr/dom/components/EventSend/EventSend.scd.xml @@ -31,7 +31,9 @@ with this program. If not, see http://www.gnu.org/licenses/. - + + + @@ -45,6 +47,5 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- diff --git a/redhawk/src/testing/sdr/dom/components/FailStartup/python/FailStartup.py b/redhawk/src/testing/sdr/dom/components/FailStartup/python/FailStartup.py index 14f331a8d..c7a5df24c 100755 --- a/redhawk/src/testing/sdr/dom/components/FailStartup/python/FailStartup.py +++ b/redhawk/src/testing/sdr/dom/components/FailStartup/python/FailStartup.py @@ -25,6 +25,7 @@ # Source: FailStartup.spd.xml from ossie.resource import start_component import logging +import sys from FailStartup_base import * @@ -34,6 +35,11 @@ def __init__(self, identifier, execparams): raise StandardError FailStartup_base.__init__(self, identifier, execparams) + def _get_identifier(self): + if self.FAIL_AT == "identifier": + sys.exit(-1) + return FailStartup_base._get_identifier(self) + def initializeProperties(self, initProperties): if self.FAIL_AT == "initializeProperties": raise CF.PropertySet.InvalidConfiguration('Failure requested', initProperties) diff --git a/redhawk/src/testing/sdr/dom/components/FileManagerPortTest/FileManagerPortTest.spd.xml b/redhawk/src/testing/sdr/dom/components/FileManagerPortTest/FileManagerPortTest.spd.xml index 92ce7f78a..d747e50d0 100644 --- a/redhawk/src/testing/sdr/dom/components/FileManagerPortTest/FileManagerPortTest.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/FileManagerPortTest/FileManagerPortTest.spd.xml @@ -38,7 +38,8 @@ with this program. If not, see http://www.gnu.org/licenses/. - + + @@ -54,7 +55,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + diff --git a/redhawk/src/testing/sdr/dom/components/MessageReceiverCpp/MessageReceiverCpp.spd.xml b/redhawk/src/testing/sdr/dom/components/MessageReceiverCpp/MessageReceiverCpp.spd.xml index d5149610f..0bf50786e 100644 --- a/redhawk/src/testing/sdr/dom/components/MessageReceiverCpp/MessageReceiverCpp.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/MessageReceiverCpp/MessageReceiverCpp.spd.xml @@ -42,5 +42,6 @@ with this program. If not, see http://www.gnu.org/licenses/. 
+ diff --git a/redhawk/src/testing/sdr/dom/components/MessageReceiverPy/MessageReceiverPy.spd.xml b/redhawk/src/testing/sdr/dom/components/MessageReceiverPy/MessageReceiverPy.spd.xml index cb8b04d86..9c136f2f1 100644 --- a/redhawk/src/testing/sdr/dom/components/MessageReceiverPy/MessageReceiverPy.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/MessageReceiverPy/MessageReceiverPy.spd.xml @@ -41,5 +41,6 @@ with this program. If not, see http://www.gnu.org/licenses/. + diff --git a/redhawk/src/testing/sdr/dom/components/MessageSenderPy/MessageSenderPy.py b/redhawk/src/testing/sdr/dom/components/MessageSenderPy/MessageSenderPy.py old mode 100644 new mode 100755 index 8cd387791..83e8d1c5d --- a/redhawk/src/testing/sdr/dom/components/MessageSenderPy/MessageSenderPy.py +++ b/redhawk/src/testing/sdr/dom/components/MessageSenderPy/MessageSenderPy.py @@ -47,7 +47,7 @@ class TestMessagePort(CF__POA.Resource, Resource): Example component to demonstrate REDHAWK sender-side messaging port. """ message_out = usesport(name="message_out", - repid="IDL:CosEventComm/PushConsumer:1.0", + repid="IDL:ExtendedEvent/MessageEvent:1.0", type_="data") def __init__(self, identifier, execparams): diff --git a/redhawk/src/testing/sdr/dom/components/MessageSenderPy/MessageSenderPy.scd.xml b/redhawk/src/testing/sdr/dom/components/MessageSenderPy/MessageSenderPy.scd.xml index 511b588cc..4247cfc02 100644 --- a/redhawk/src/testing/sdr/dom/components/MessageSenderPy/MessageSenderPy.scd.xml +++ b/redhawk/src/testing/sdr/dom/components/MessageSenderPy/MessageSenderPy.scd.xml @@ -31,9 +31,9 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- - - + + + diff --git a/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCpp.spd.xml b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCpp.spd.xml new file mode 100644 index 000000000..34e5cec58 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCpp.spd.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + The implementation contains descriptive information about the template for a software component. + + + ../cpp_comp/cpp/cpp_comp + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppIdentifier.spd.xml b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppIdentifier.spd.xml new file mode 100644 index 000000000..c72645bb3 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppIdentifier.spd.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + The implementation contains descriptive information about the template for a software component. + + + ../cpp_comp/cpp/cpp_comp + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppShared.spd.xml b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppShared.spd.xml new file mode 100644 index 000000000..033079a64 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppShared.spd.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + The implementation contains descriptive information about the template for a software component. 
+ + + ../BasicShared/cpp/BasicShared.so + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppSharedIdentifier.spd.xml b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppSharedIdentifier.spd.xml new file mode 100644 index 000000000..e58e01bcd --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocCppSharedIdentifier.spd.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + The implementation contains descriptive information about the template for a software component. + + + ../BasicShared/cpp/BasicShared.so + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocJava.spd.xml b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocJava.spd.xml new file mode 100644 index 000000000..1acf98568 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocJava.spd.xml @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + The implementation contains descriptive information about the template for a software component. + + + ../java_comp/java/startJava.sh + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocJavaIdentifier.spd.xml b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocJavaIdentifier.spd.xml new file mode 100644 index 000000000..f733e4fb1 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocJavaIdentifier.spd.xml @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + The implementation contains descriptive information about the template for a software component. 
+ + + ../java_comp/java/startJava.sh + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocPy.spd.xml b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocPy.spd.xml new file mode 100644 index 000000000..efb06c15f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocPy.spd.xml @@ -0,0 +1,50 @@ + + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software component. + + + ../py_comp/python/py_comp.py + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocPyIdentifier.spd.xml b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocPyIdentifier.spd.xml new file mode 100644 index 000000000..78dac917b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/NicAlloc/NicAllocPyIdentifier.spd.xml @@ -0,0 +1,50 @@ + + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software component. + + + ../py_comp/python/py_comp.py + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/PortTest/PortTest.spd.xml b/redhawk/src/testing/sdr/dom/components/PortTest/PortTest.spd.xml index 543b81ccc..294923d01 100644 --- a/redhawk/src/testing/sdr/dom/components/PortTest/PortTest.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/PortTest/PortTest.spd.xml @@ -41,7 +41,8 @@ with this program. If not, see http://www.gnu.org/licenses/. - + + @@ -57,7 +58,7 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- + diff --git a/redhawk/src/testing/sdr/dom/components/PropertyChangeEvents/PropertyChangeEvents.spd.xml b/redhawk/src/testing/sdr/dom/components/PropertyChangeEvents/PropertyChangeEvents.spd.xml index c787a4486..1bee8f53a 100644 --- a/redhawk/src/testing/sdr/dom/components/PropertyChangeEvents/PropertyChangeEvents.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/PropertyChangeEvents/PropertyChangeEvents.spd.xml @@ -41,5 +41,6 @@ with this program. If not, see http://www.gnu.org/licenses/. + diff --git a/redhawk/src/testing/sdr/dom/components/PropertyChange_C1/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/PropertyChange_C1/cpp/Makefile.am index 078778386..0d5628881 100644 --- a/redhawk/src/testing/sdr/dom/components/PropertyChange_C1/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/PropertyChange_C1/cpp/Makefile.am @@ -41,7 +41,7 @@ distclean-local: # you wish to manually control these options. include $(srcdir)/Makefile.am.ide PropertyChange_C1_SOURCES = $(redhawk_SOURCES_auto) -PropertyChange_C1_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(redhawk_LDADD_auto) +PropertyChange_C1_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) PropertyChange_C1_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include PropertyChange_C1_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/Property_CPP/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/Property_CPP/cpp/Makefile.am index 3a91f2ca7..8a3bb61db 100644 --- 
a/redhawk/src/testing/sdr/dom/components/Property_CPP/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/Property_CPP/cpp/Makefile.am @@ -42,7 +42,7 @@ distclean-local: # you wish to manually control these options. include $(srcdir)/Makefile.am.ide Property_CPP_SOURCES = $(redhawk_SOURCES_auto) -Property_CPP_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(redhawk_LDADD_auto) +Property_CPP_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) Property_CPP_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include -I$(CFDIR)/include/ossie Property_CPP_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/ServiceComponent/python/ServiceComponent_base.py b/redhawk/src/testing/sdr/dom/components/ServiceComponent/python/ServiceComponent_base.py index 66b1e79b9..26a4c3b75 100644 --- a/redhawk/src/testing/sdr/dom/components/ServiceComponent/python/ServiceComponent_base.py +++ b/redhawk/src/testing/sdr/dom/components/ServiceComponent/python/ServiceComponent_base.py @@ -28,6 +28,8 @@ # Version:M.1.8.2 # Build id: v201211021737RC2 from ossie.cf import CF, CF__POA +from ossie.cf import ExtendedCF +from ossie.cf import ExtendedCF__POA from ossie.utils import uuid from ossie.resource import Resource @@ -166,7 +168,7 @@ def compareSRI(self, a, b): # 'CF/PropertySet' port - class PortCFPropertySetOut(CF__POA.Port): + class PortCFPropertySetOut(ExtendedCF__POA.QueryablePort): """This class is a port template for the output port and should not be instantiated nor modified. 
@@ -209,16 +211,25 @@ def disconnectPort(self, connectionId): self.outConnections.pop(str(connectionId), None) finally: self.port_lock.release() - + + def _get_connections(self): + self.port_lock.acquire() + try: + return [ExtendedCF.UsesConnection(name, port) for name, port in self.outConnections.iteritems()] + finally: + self.port_lock.release() + def configure(self, configProperties): self.port_lock.acquire() - try: - try: - for connId, port in self.outConnections.items(): - if port != None: port.configure(configProperties) - except Exception: - self.parent._log.exception("The call to configure failed on port %s connection %s instance %s", self.name, connId, port) + try: + for connId, port in self.outConnections.items(): + if port != None: + try: + port.configure(configProperties) + except Exception: + self.parent._log.exception("The call to configure failed on port %s connection %s instance %s", self.name, connId, port) + raise finally: self.port_lock.release() @@ -226,14 +237,18 @@ def query(self, configProperties): retVal = None self.port_lock.acquire() - try: - try: - for connId, port in self.outConnections.items(): - if port != None:retVal = port.query(configProperties) - except Exception: - self.parent._log.exception("The call to query failed on port %s connection %s instance %s", self.name, connId, port) + try: + for connId, port in self.outConnections.items(): + if port != None: + try: + retVal = port.query(configProperties) + except Exception: + self.parent._log.exception("The call to query failed on port %s connection %s instance %s", self.name, connId, port) + raise finally: self.port_lock.release() return retVal - + + + diff --git a/redhawk/src/testing/sdr/dom/components/SlowComponent/SlowComponent.spd.xml b/redhawk/src/testing/sdr/dom/components/SlowComponent/SlowComponent.spd.xml index 581df59c0..b8e7a8b3f 100644 --- a/redhawk/src/testing/sdr/dom/components/SlowComponent/SlowComponent.spd.xml +++ 
b/redhawk/src/testing/sdr/dom/components/SlowComponent/SlowComponent.spd.xml @@ -43,6 +43,7 @@ with this program. If not, see http://www.gnu.org/licenses/. + diff --git a/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/TestAllPropTypes.prf.xml b/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/TestAllPropTypes.prf.xml index c742e7d43..6d7addc78 100644 --- a/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/TestAllPropTypes.prf.xml +++ b/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/TestAllPropTypes.prf.xml @@ -73,6 +73,10 @@ with this program. If not, see http://www.gnu.org/licenses/. + + + + @@ -125,6 +129,10 @@ with this program. If not, see http://www.gnu.org/licenses/. + + + + diff --git a/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/cpp/TestAllPropTypes_base.cpp b/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/cpp/TestAllPropTypes_base.cpp index 4eea84484..5ab93d4f1 100644 --- a/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/cpp/TestAllPropTypes_base.cpp +++ b/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/cpp/TestAllPropTypes_base.cpp @@ -216,6 +216,14 @@ void TestAllPropTypes_base::loadProperties() "external", "configure"); + addProperty(simple_utctime, + "simple_utctime", + "", + "readwrite", + "", + "external", + "property"); + addProperty(simple_sequence_string, "simple_sequence_string", "", @@ -320,6 +328,14 @@ void TestAllPropTypes_base::loadProperties() "external", "configure"); + addProperty(simple_sequence_utctime, + "simple_sequence_utctime", + "", + "readwrite", + "", + "external", + "property"); + addProperty(struct_vars, struct_vars_struct(), "struct_vars", diff --git a/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/cpp/TestAllPropTypes_base.h b/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/cpp/TestAllPropTypes_base.h index eeb07d4df..f3a5b8a98 100644 --- a/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/cpp/TestAllPropTypes_base.h +++ 
b/redhawk/src/testing/sdr/dom/components/TestAllPropTypes/cpp/TestAllPropTypes_base.h @@ -136,6 +136,7 @@ class TestAllPropTypes_base : public Resource_impl CORBA::Long simple_long; CORBA::LongLong simple_longlong; CORBA::ULongLong simple_ulonglong; + CF::UTCTime simple_utctime; std::vector simple_sequence_string; std::vector simple_sequence_boolean; std::vector simple_sequence_ulong; @@ -149,6 +150,7 @@ class TestAllPropTypes_base : public Resource_impl std::vector simple_sequence_long; std::vector simple_sequence_longlong; std::vector simple_sequence_ulonglong; + std::vector simple_sequence_utctime; struct_vars_struct struct_vars; std::vector struct_seq; diff --git a/redhawk/src/testing/sdr/dom/components/TestComplexProps/TestComplexProps.prf.xml b/redhawk/src/testing/sdr/dom/components/TestComplexProps/TestComplexProps.prf.xml index 347b46509..8377f9b14 100644 --- a/redhawk/src/testing/sdr/dom/components/TestComplexProps/TestComplexProps.prf.xml +++ b/redhawk/src/testing/sdr/dom/components/TestComplexProps/TestComplexProps.prf.xml @@ -22,57 +22,52 @@ with this program. If not, see http://www.gnu.org/licenses/. 0+j1 - + 4+j5 - + 4+j5 - + 4+j5 - + 4+j5 - - - - - 4+j5 - + 4+j5 - + 4+j5 - + 4+j5 - + 4+j5 - + 4+j5 - + @@ -80,42 +75,59 @@ with this program. If not, see http://www.gnu.org/licenses/. 6+j7 4+j5 - 4+j5 + 8+j9 - + 6 - + - + 6+j7 - + - + + + 3+j4 + + + 6 + + + 3 + + - + - - 6+j5 - + + - + + + + + 6+j5 + + + + diff --git a/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/TestComplexProps_base.cpp b/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/TestComplexProps_base.cpp index dfe987c4e..00271e349 100644 --- a/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/TestComplexProps_base.cpp +++ b/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/TestComplexProps_base.cpp @@ -1,22 +1,3 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. 
- * - * This file is part of REDHAWK core. - * - * REDHAWK core is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ #include "TestComplexProps_base.h" /******************************************************************************************* @@ -30,55 +11,31 @@ ******************************************************************************************/ TestComplexProps_base::TestComplexProps_base(const char *uuid, const char *label) : - Resource_impl(uuid, label), - serviceThread(0) + Component(uuid, label), + ThreadedComponent() { - construct(); + loadProperties(); } -void TestComplexProps_base::construct() +TestComplexProps_base::~TestComplexProps_base() { - Resource_impl::_started = false; - loadProperties(); - serviceThread = 0; - - PortableServer::ObjectId_var oid; } /******************************************************************************************* Framework-level functions These functions are generally called by the framework to perform housekeeping. 
*******************************************************************************************/ -void TestComplexProps_base::initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemException) -{ -} - void TestComplexProps_base::start() throw (CORBA::SystemException, CF::Resource::StartError) { - boost::mutex::scoped_lock lock(serviceThreadLock); - if (serviceThread == 0) { - serviceThread = new ProcessThread(this, 0.1); - serviceThread->start(); - } - - if (!Resource_impl::started()) { - Resource_impl::start(); - } + Component::start(); + ThreadedComponent::startThread(); } void TestComplexProps_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) { - boost::mutex::scoped_lock lock(serviceThreadLock); - // release the child thread (if it exists) - if (serviceThread != 0) { - if (!serviceThread->release(2)) { - throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); - } - serviceThread = 0; - } - - if (Resource_impl::started()) { - Resource_impl::stop(); + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); } } @@ -91,119 +48,105 @@ void TestComplexProps_base::releaseObject() throw (CORBA::SystemException, CF::L // TODO - this should probably be logged instead of ignored } - // deactivate ports - releaseInPorts(); - releaseOutPorts(); - - - Resource_impl::releaseObject(); + Component::releaseObject(); } void TestComplexProps_base::loadProperties() { addProperty(complexBooleanProp, - std::complex (0,1), + std::complex(false,true), "complexBooleanProp", "", "readwrite", "", "external", - "configure"); + "property"); addProperty(complexULongProp, - std::complex (4,5), + std::complex(4,5), "complexULongProp", "", "readwrite", "", "external", - "configure"); + "property"); addProperty(complexShortProp, - std::complex (4,5), + std::complex(4,5), "complexShortProp", "", "readwrite", "", "external", - "configure"); + "property"); 
addProperty(complexFloatProp, - std::complex (4.0,5.0), + std::complex(4.0,5.0), "complexFloatProp", "", "readwrite", "", "external", - "configure"); + "property"); addProperty(complexOctetProp, - std::complex (4,5), + std::complex(4,5), "complexOctetProp", "", "readwrite", "", "external", - "configure"); - - addProperty(complexCharProp, - std::complex (4,5), - "complexCharProp", - "", - "readwrite", - "", - "external", - "configure"); + "property"); addProperty(complexUShort, - std::complex (4,5), + std::complex(4,5), "complexUShort", "", "readwrite", "", "external", - "configure"); + "property"); addProperty(complexDouble, - std::complex (4.0,5.0), + std::complex(4.0,5.0), "complexDouble", "", "readwrite", "", "external", - "configure"); + "property"); addProperty(complexLong, - std::complex (4,5), + std::complex(4,5), "complexLong", "", "readwrite", "", "external", - "configure"); + "property"); addProperty(complexLongLong, - std::complex (4,5), + std::complex(4LL,5LL), "complexLongLong", "", "readwrite", "", "external", - "configure"); + "property"); addProperty(complexULongLong, - std::complex (4,5), + std::complex(4LL,5LL), "complexULongLong", "", "readwrite", "", "external", - "configure"); + "property"); // Set the sequence with its initial values - complexFloatSequence.push_back(std::complex (4.0,5.0)); - complexFloatSequence.push_back(std::complex (4.0,5.0)); - complexFloatSequence.push_back(std::complex (4.0,5.0)); + complexFloatSequence.push_back(std::complex(6.0,7.0)); + complexFloatSequence.push_back(std::complex(4.0,5.0)); + complexFloatSequence.push_back(std::complex(4.0,5.0)); addProperty(complexFloatSequence, complexFloatSequence, "complexFloatSequence", @@ -211,7 +154,7 @@ void TestComplexProps_base::loadProperties() "readwrite", "", "external", - "configure"); + "property"); addProperty(FloatStruct, FloatStruct_struct(), @@ -220,7 +163,7 @@ void TestComplexProps_base::loadProperties() "readwrite", "", "external", - "configure"); + "property"); 
addProperty(complexFloatStruct, complexFloatStruct_struct(), @@ -229,7 +172,7 @@ void TestComplexProps_base::loadProperties() "readwrite", "", "external", - "configure"); + "property"); addProperty(FloatStructSequence, "FloatStructSequence", @@ -237,14 +180,24 @@ void TestComplexProps_base::loadProperties() "readwrite", "", "external", - "configure"); + "property"); addProperty(complexFloatStructSequence, + complexFloatStructSequence, "complexFloatStructSequence", "", "readwrite", "", "external", - "configure"); + "property"); + + { + complexFloatStructSequenceMember_struct __tmp; + __tmp.complex_float_seq.push_back(std::complex(9.0,4.0)); + __tmp.complexFloatStructSequenceMemberMemember = std::complex(6.0,5.0); + complexFloatStructSequence.push_back(__tmp); + } } + + diff --git a/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/TestComplexProps_base.h b/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/TestComplexProps_base.h index 25d378c93..e0cb11a0e 100644 --- a/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/TestComplexProps_base.h +++ b/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/TestComplexProps_base.h @@ -1,106 +1,17 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK core. - * - * REDHAWK core is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. 
- * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef TESTCOMPLEXPROPS_IMPL_BASE_H -#define TESTCOMPLEXPROPS_IMPL_BASE_H +#ifndef TESTCOMPLEXPROPS_BASE_IMPL_BASE_H +#define TESTCOMPLEXPROPS_BASE_IMPL_BASE_H #include -#include +#include +#include #include "struct_props.h" -#define NOOP 0 -#define FINISH -1 -#define NORMAL 1 - -class TestComplexProps_base; - -template < typename TargetClass > -class ProcessThread -{ - public: - ProcessThread(TargetClass *_target, float _delay) : - target(_target) - { - _mythread = 0; - _thread_running = false; - _udelay = (__useconds_t)(_delay * 1000000); - }; - - // kick off the thread - void start() { - if (_mythread == 0) { - _thread_running = true; - _mythread = new boost::thread(&ProcessThread::run, this); - } - }; - - // manage calls to target's service function - void run() { - int state = NORMAL; - while (_thread_running and (state != FINISH)) { - state = target->serviceFunction(); - if (state == NOOP) usleep(_udelay); - } - }; - - // stop thread and wait for termination - bool release(unsigned long secs = 0, unsigned long usecs = 0) { - _thread_running = false; - if (_mythread) { - if ((secs == 0) and (usecs == 0)){ - _mythread->join(); - } else { - boost::system_time waitime= boost::get_system_time() + boost::posix_time::seconds(secs) + boost::posix_time::microseconds(usecs) ; - if (!_mythread->timed_join(waitime)) { - return 0; - } - } - delete _mythread; - _mythread = 0; - } - - return 1; - }; - - virtual ~ProcessThread(){ - if (_mythread != 0) { - release(0); - _mythread = 0; - } - }; - - void updateDelay(float _delay) { _udelay = (__useconds_t)(_delay * 1000000); }; - - private: - boost::thread *_mythread; - bool _thread_running; - TargetClass *target; - __useconds_t _udelay; - boost::condition_variable _end_of_run; - boost::mutex _eor_mutex; -}; - -class TestComplexProps_base : public Resource_impl +class 
TestComplexProps_base : public Component, protected ThreadedComponent { public: TestComplexProps_base(const char *uuid, const char *label); + ~TestComplexProps_base(); void start() throw (CF::Resource::StartError, CORBA::SystemException); @@ -108,36 +19,41 @@ class TestComplexProps_base : public Resource_impl void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); - void initialize() throw (CF::LifeCycle::InitializeError, CORBA::SystemException); - void loadProperties(); - virtual int serviceFunction() = 0; - protected: - ProcessThread *serviceThread; - boost::mutex serviceThreadLock; - // Member variables exposed as properties - std::complex complexBooleanProp; - std::complex complexULongProp; - std::complex complexShortProp; - std::complex complexFloatProp; - std::complex complexOctetProp; - std::complex complexCharProp; - std::complex complexUShort; - std::complex complexDouble; - std::complex complexLong; - std::complex complexLongLong; - std::complex complexULongLong; + /// Property: complexBooleanProp + std::complex complexBooleanProp; + /// Property: complexULongProp + std::complex complexULongProp; + /// Property: complexShortProp + std::complex complexShortProp; + /// Property: complexFloatProp + std::complex complexFloatProp; + /// Property: complexOctetProp + std::complex complexOctetProp; + /// Property: complexUShort + std::complex complexUShort; + /// Property: complexDouble + std::complex complexDouble; + /// Property: complexLong + std::complex complexLong; + /// Property: complexLongLong + std::complex complexLongLong; + /// Property: complexULongLong + std::complex complexULongLong; + /// Property: complexFloatSequence std::vector > complexFloatSequence; + /// Property: FloatStruct FloatStruct_struct FloatStruct; + /// Property: complexFloatStruct complexFloatStruct_struct complexFloatStruct; + /// Property: FloatStructSequence std::vector FloatStructSequence; + /// Property: complexFloatStructSequence std::vector 
complexFloatStructSequence; private: - void construct(); - }; -#endif +#endif // TESTCOMPLEXPROPS_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/struct_props.h b/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/struct_props.h index 18a8c7943..bc193e2cc 100644 --- a/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/struct_props.h +++ b/redhawk/src/testing/sdr/dom/components/TestComplexProps/cpp/struct_props.h @@ -31,12 +31,16 @@ struct FloatStruct_struct { FloatStruct_struct () { - FloatStructMember = 4; - }; + FloatStructMember = 6; + } - std::string getId() { + static std::string getId() { return std::string("FloatStruct"); - }; + } + + static const char* getFormat() { + return "f"; + } float FloatStructMember; }; @@ -44,226 +48,188 @@ struct FloatStruct_struct { inline bool operator>>= (const CORBA::Any& a, FloatStruct_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("FloatStructMember", props[idx].id)) { - if (!(props[idx].value >>= s.FloatStructMember)) return false; - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("FloatStructMember")) { + if (!(props["FloatStructMember"] >>= s.FloatStructMember)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const FloatStruct_struct& s) { - CF::Properties props; - props.length(1); - props[0].id = CORBA::string_dup("FloatStructMember"); - props[0].value <<= s.FloatStructMember; + redhawk::PropertyMap props; + + props["FloatStructMember"] = s.FloatStructMember; a <<= props; -}; +} inline bool operator== (const FloatStruct_struct& s1, const FloatStruct_struct& s2) { if (s1.FloatStructMember!=s2.FloatStructMember) return false; return true; -}; +} inline bool operator!= (const FloatStruct_struct& s1, const FloatStruct_struct& s2) { return !(s1==s2); -}; - 
-template<> inline short StructProperty::compare (const CORBA::Any& a) { - if (super::isNil_) { - CORBA::TypeCode_var aType = a.type(); - if (aType->kind() == (CORBA::tk_null)) { - return 0; - } - return 1; - } - - FloatStruct_struct tmp; - if (fromAny(a, tmp)) { - if (tmp != this->value_) { - return 1; - } - - return 0; - } else { - return 1; - } } struct complexFloatStruct_struct { complexFloatStruct_struct () { - complexFloatStructMember = std::complex (4.0,5.0); - }; + complexFloatStructMember = std::complex(6.0,7.0); + complex_float_seq.push_back(std::complex(3.0,2.0)); + } - std::string getId() { + static std::string getId() { return std::string("complexFloatStruct"); - }; + } - std::complex complexFloatStructMember; + static const char* getFormat() { + return "2f[2f]"; + } + + std::complex complexFloatStructMember; + std::vector > complex_float_seq; }; inline bool operator>>= (const CORBA::Any& a, complexFloatStruct_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("complexFloatStructMember", props[idx].id)) { - if (!(props[idx].value >>= s.complexFloatStructMember)) return false; - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("complexFloatStructMember")) { + if (!(props["complexFloatStructMember"] >>= s.complexFloatStructMember)) return false; + } + if (props.contains("complexFloatStruct::complex_float_seq")) { + if (!(props["complexFloatStruct::complex_float_seq"] >>= s.complex_float_seq)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const complexFloatStruct_struct& s) { - CF::Properties props; - props.length(1); - props[0].id = CORBA::string_dup("complexFloatStructMember"); - props[0].value <<= s.complexFloatStructMember; + redhawk::PropertyMap props; + + props["complexFloatStructMember"] = s.complexFloatStructMember; + + 
props["complexFloatStruct::complex_float_seq"] = s.complex_float_seq; a <<= props; -}; +} inline bool operator== (const complexFloatStruct_struct& s1, const complexFloatStruct_struct& s2) { if (s1.complexFloatStructMember!=s2.complexFloatStructMember) return false; + if (s1.complex_float_seq!=s2.complex_float_seq) + return false; return true; -}; +} inline bool operator!= (const complexFloatStruct_struct& s1, const complexFloatStruct_struct& s2) { return !(s1==s2); -}; - -template<> inline short StructProperty::compare (const CORBA::Any& a) { - if (super::isNil_) { - CORBA::TypeCode_var aType = a.type(); - if (aType->kind() == (CORBA::tk_null)) { - return 0; - } - return 1; - } - - complexFloatStruct_struct tmp; - if (fromAny(a, tmp)) { - if (tmp != this->value_) { - return 1; - } - - return 0; - } else { - return 1; - } } struct FloatStructSequenceMember_struct { FloatStructSequenceMember_struct () { - FloatStructSequenceMemberMemember = 4; - }; + FloatStructSequenceMemberMemember = 6; + float_seq.push_back(3); + } - std::string getId() { + static std::string getId() { return std::string("FloatStructSequenceMember"); - }; + } + + static const char* getFormat() { + return "f[f]"; + } float FloatStructSequenceMemberMemember; + std::vector float_seq; }; inline bool operator>>= (const CORBA::Any& a, FloatStructSequenceMember_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("FloatStructSequenceMemberMemember", props[idx].id)) { - if (!(props[idx].value >>= s.FloatStructSequenceMemberMemember)) return false; - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("FloatStructSequenceMemberMemember")) { + if (!(props["FloatStructSequenceMemberMemember"] >>= s.FloatStructSequenceMemberMemember)) return false; + } + if (props.contains("FloatStructSequence::float_seq")) { + if 
(!(props["FloatStructSequence::float_seq"] >>= s.float_seq)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const FloatStructSequenceMember_struct& s) { - CF::Properties props; - props.length(1); - props[0].id = CORBA::string_dup("FloatStructSequenceMemberMemember"); - props[0].value <<= s.FloatStructSequenceMemberMemember; + redhawk::PropertyMap props; + + props["FloatStructSequenceMemberMemember"] = s.FloatStructSequenceMemberMemember; + + props["FloatStructSequence::float_seq"] = s.float_seq; a <<= props; -}; +} inline bool operator== (const FloatStructSequenceMember_struct& s1, const FloatStructSequenceMember_struct& s2) { if (s1.FloatStructSequenceMemberMemember!=s2.FloatStructSequenceMemberMemember) return false; + if (s1.float_seq!=s2.float_seq) + return false; return true; -}; +} inline bool operator!= (const FloatStructSequenceMember_struct& s1, const FloatStructSequenceMember_struct& s2) { return !(s1==s2); -}; - -template<> inline short StructProperty::compare (const CORBA::Any& a) { - if (super::isNil_) { - CORBA::TypeCode_var aType = a.type(); - if (aType->kind() == (CORBA::tk_null)) { - return 0; - } - return 1; - } - - FloatStructSequenceMember_struct tmp; - if (fromAny(a, tmp)) { - if (tmp != this->value_) { - return 1; - } - - return 0; - } else { - return 1; - } } struct complexFloatStructSequenceMember_struct { complexFloatStructSequenceMember_struct () { - complexFloatStructSequenceMemberMemember = std::complex (4.0,5.0); - }; + } - std::string getId() { + static std::string getId() { return std::string("complexFloatStructSequenceMember"); - }; + } + + static const char* getFormat() { + return "2f[2f]"; + } - std::complex complexFloatStructSequenceMemberMemember; + std::complex complexFloatStructSequenceMemberMemember; + std::vector > complex_float_seq; }; inline bool operator>>= (const CORBA::Any& a, complexFloatStructSequenceMember_struct& s) { CF::Properties* temp; if (!(a >>= temp)) return false; - 
CF::Properties& props = *temp; - for (unsigned int idx = 0; idx < props.length(); idx++) { - if (!strcmp("complexFloatStructSequenceMemberMemember", props[idx].id)) { - if (!(props[idx].value >>= s.complexFloatStructSequenceMemberMemember)) return false; - } + const redhawk::PropertyMap& props = redhawk::PropertyMap::cast(*temp); + if (props.contains("complexFloatStructSequenceMemberMemember")) { + if (!(props["complexFloatStructSequenceMemberMemember"] >>= s.complexFloatStructSequenceMemberMemember)) return false; + } + if (props.contains("complexFloatStructSequence::complex_float_seq")) { + if (!(props["complexFloatStructSequence::complex_float_seq"] >>= s.complex_float_seq)) return false; } return true; -}; +} inline void operator<<= (CORBA::Any& a, const complexFloatStructSequenceMember_struct& s) { - CF::Properties props; - props.length(1); - props[0].id = CORBA::string_dup("complexFloatStructSequenceMemberMemember"); - props[0].value <<= s.complexFloatStructSequenceMemberMemember; + redhawk::PropertyMap props; + + props["complexFloatStructSequenceMemberMemember"] = s.complexFloatStructSequenceMemberMemember; + + props["complexFloatStructSequence::complex_float_seq"] = s.complex_float_seq; a <<= props; -}; +} inline bool operator== (const complexFloatStructSequenceMember_struct& s1, const complexFloatStructSequenceMember_struct& s2) { if (s1.complexFloatStructSequenceMemberMemember!=s2.complexFloatStructSequenceMemberMemember) return false; + if (s1.complex_float_seq!=s2.complex_float_seq) + return false; return true; -}; +} inline bool operator!= (const complexFloatStructSequenceMember_struct& s1, const complexFloatStructSequenceMember_struct& s2) { return !(s1==s2); -}; +} -template<> inline short StructProperty::compare (const CORBA::Any& a) { +template<> inline short StructSequenceProperty::compare (const CORBA::Any& a) { if (super::isNil_) { CORBA::TypeCode_var aType = a.type(); if (aType->kind() == (CORBA::tk_null)) { @@ -272,7 +238,7 @@ template<> 
inline short StructProperty: return 1; } - complexFloatStructSequenceMember_struct tmp; + std::vector tmp; if (fromAny(a, tmp)) { if (tmp != this->value_) { return 1; @@ -284,23 +250,28 @@ template<> inline short StructProperty: } } -inline bool operator== (const std::vector& s1, const std::vector& s2) { - if (s1.size() != s2.size()) { - return false; - } - for (unsigned int i=0; i inline short StructProperty::compare (const CORBA::Any& a) { + if (super::isNil_) { + CORBA::TypeCode_var aType = a.type(); + if (aType->kind() == (CORBA::tk_null)) { + return 0; } + return 1; } - return true; -}; -inline bool operator!= (const std::vector& s1, const std::vector& s2) { - return !(s1==s2); -}; + FloatStruct_struct tmp; + if (fromAny(a, tmp)) { + if (tmp != this->value_) { + return 1; + } -template<> inline short StructSequenceProperty::compare (const CORBA::Any& a) { + return 0; + } else { + return 1; + } +} + +template<> inline short StructProperty::compare (const CORBA::Any& a) { if (super::isNil_) { CORBA::TypeCode_var aType = a.type(); if (aType->kind() == (CORBA::tk_null)) { @@ -309,7 +280,7 @@ template<> inline short StructSequenceProperty return 1; } - std::vector tmp; + complexFloatStruct_struct tmp; if (fromAny(a, tmp)) { if (tmp != this->value_) { return 1; @@ -320,23 +291,50 @@ template<> inline short StructSequenceProperty return 1; } } -inline bool operator== (const std::vector& s1, const std::vector& s2) { - if (s1.size() != s2.size()) { - return false; + +template<> inline short StructProperty::compare (const CORBA::Any& a) { + if (super::isNil_) { + CORBA::TypeCode_var aType = a.type(); + if (aType->kind() == (CORBA::tk_null)) { + return 0; + } + return 1; } - for (unsigned int i=0; ivalue_) { + return 1; } + + return 0; + } else { + return 1; } - return true; -}; +} -inline bool operator!= (const std::vector& s1, const std::vector& s2) { - return !(s1==s2); -}; +template<> inline short StructProperty::compare (const CORBA::Any& a) { + if (super::isNil_) { 
+ CORBA::TypeCode_var aType = a.type(); + if (aType->kind() == (CORBA::tk_null)) { + return 0; + } + return 1; + } -template<> inline short StructSequenceProperty::compare (const CORBA::Any& a) { + complexFloatStructSequenceMember_struct tmp; + if (fromAny(a, tmp)) { + if (tmp != this->value_) { + return 1; + } + + return 0; + } else { + return 1; + } +} + +template<> inline short StructSequenceProperty::compare (const CORBA::Any& a) { if (super::isNil_) { CORBA::TypeCode_var aType = a.type(); if (aType->kind() == (CORBA::tk_null)) { @@ -345,7 +343,7 @@ template<> inline short StructSequenceProperty tmp; + std::vector tmp; if (fromAny(a, tmp)) { if (tmp != this->value_) { return 1; diff --git a/redhawk/src/testing/sdr/dom/components/TestComplexProps/java/src/TestComplexProps/java/TestComplexProps.java b/redhawk/src/testing/sdr/dom/components/TestComplexProps/java/src/TestComplexProps/java/TestComplexProps.java index f32d99396..1fb21413e 100644 --- a/redhawk/src/testing/sdr/dom/components/TestComplexProps/java/src/TestComplexProps/java/TestComplexProps.java +++ b/redhawk/src/testing/sdr/dom/components/TestComplexProps/java/src/TestComplexProps/java/TestComplexProps.java @@ -1,616 +1,219 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK core. - * - * REDHAWK core is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. 
- * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ package TestComplexProps.java; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; import java.util.Properties; -import org.omg.CORBA.ORB; -import org.omg.PortableServer.POA; -import org.omg.PortableServer.POAPackage.ServantNotActive; -import org.omg.PortableServer.POAPackage.WrongPolicy; -import org.omg.CosNaming.NamingContextPackage.CannotProceed; -import org.omg.CosNaming.NamingContextPackage.InvalidName; -import org.omg.CosNaming.NamingContextPackage.NotFound; -import CF.PropertiesHolder; -import CF.ResourceHelper; -import CF.UnknownProperties; -import CF.LifeCyclePackage.InitializeError; -import CF.LifeCyclePackage.ReleaseError; -import CF.InvalidObjectReference; -import CF.PropertySetPackage.InvalidConfiguration; -import CF.PropertySetPackage.PartialConfiguration; -import CF.ResourcePackage.StartError; -import CF.ResourcePackage.StopError; -import CF.DataType; -import org.omg.CORBA.UserException; -import org.omg.CosNaming.NameComponent; -import org.apache.log4j.Logger; -import org.ossie.component.*; -import org.ossie.properties.*; - -import CF.complexBoolean; -import CF.complexULong; -import CF.complexShort; -import CF.complexFloat; -import CF.complexOctet; -import CF.complexChar; -import CF.complexUShort; -import CF.complexDouble; -import CF.complexLong; -import CF.complexLongLong; -import CF.complexULongLong; /** - * This is the component code. This file contains all the access points - * you need to use to be able to access all input and output ports, - * respond to incoming data, and perform general component housekeeping + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. 
You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping * * Source: TestComplexProps.spd.xml - * - * @generated */ -public class TestComplexProps extends Resource implements Runnable { - /** - * @generated - */ - public final static Logger logger = Logger.getLogger(TestComplexProps.class.getName()); - - /** - * The property complexBooleanProp - * If the meaning of this property isn't clear, a description should be added. - * - * - * - * @generated +public class TestComplexProps extends TestComplexProps_base { + /** + * This is the component constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. + * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app = this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. 
*/ - public final ComplexBooleanProperty complexBooleanProp = - new ComplexBooleanProperty( - "complexBooleanProp", //id - null, //name - new complexBoolean(false,true), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - /** - * The property complexULongProp - * If the meaning of this property isn't clear, a description should be added. - * - * - * - * @generated - */ - public final ComplexULongProperty complexULongProp = - new ComplexULongProperty( - "complexULongProp", //id - null, //name - new complexULong(4,5), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); + public TestComplexProps() + { + super(); + } - /** - * The property complexShortProp - * If the meaning of this property isn't clear, a description should be added. - * - * - * - * @generated - */ - public final ComplexShortProperty complexShortProp = - new ComplexShortProperty( - "complexShortProp", //id - null, //name - new complexShort((short)4,(short)5), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); + public void constructor() + { + } - /** - * The property complexFloatProp - * If the meaning of this property isn't clear, a description should be added. - * - * - * - * @generated - */ - public final ComplexFloatProperty complexFloatProp = - new ComplexFloatProperty( - "complexFloatProp", //id - null, //name - new complexFloat(4.0F,5.0F), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); /** - * The property complexOctetProp - * If the meaning of this property isn't clear, a description should be added. 
* - * - * - * @generated - */ - public final ComplexOctetProperty complexOctetProp = - new ComplexOctetProperty( - "complexOctetProp", //id - null, //name - new complexOctet((byte)4,(byte)5), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * The property complexCharProp - * If the meaning of this property isn't clear, a description should be added. + * Main processing function * - * - * - * @generated - */ - public final ComplexCharProperty complexCharProp = - new ComplexCharProperty( - "complexCharProp", //id - null, //name - new complexChar('4','5'), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * The property complexUShort - * If the meaning of this property isn't clear, a description should be added. + * General functionality: * - * - * - * @generated - */ - public final ComplexUShortProperty complexUShort = - new ComplexUShortProperty( - "complexUShort", //id - null, //name - new complexUShort((short)4,(short)5), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * The property complexDouble - * If the meaning of this property isn't clear, a description should be added. + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. * - * - * - * @generated - */ - public final ComplexDoubleProperty complexDouble = - new ComplexDoubleProperty( - "complexDouble", //id - null, //name - new complexDouble(4.0,5.0), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * The property complexLong - * If the meaning of this property isn't clear, a description should be added. 
+ * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur * - * - * - * @generated - */ - public final ComplexLongProperty complexLong = - new ComplexLongProperty( - "complexLong", //id - null, //name - new complexLong(4,5), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * The property complexLongLong - * If the meaning of this property isn't clear, a description should be added. + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; * - * - * - * @generated - */ - public final ComplexLongLongProperty complexLongLong = - new ComplexLongLongProperty( - "complexLongLong", //id - null, //name - new complexLongLong(4,5), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * The property complexULongLong - * If the meaning of this property isn't clear, a description should be added. + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); * - * - * - * @generated - */ - public final ComplexULongLongProperty complexULongLong = - new ComplexULongLongProperty( - "complexULongLong", //id - null, //name - new complexULongLong(4,5), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * The property complexFloatSequence - * If the meaning of this property isn't clear, a description should be added. 
+ * Ports: * - * - * - * @generated - */ - public final ComplexFloatSequenceProperty complexFloatSequence = - new ComplexFloatSequenceProperty( - "complexFloatSequence", //id - null, //name - new ArrayList(Arrays.asList(new complexFloat(4.0F,5.0F),new complexFloat(4.0F,5.0F),new complexFloat(4.0F,5.0F))), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * The structure for property FloatStruct - * - * - * - * @generated - */ - public static class FloatStruct_struct extends StructDef { - /** - * The property FloatStructMember - * If the meaning of this property isn't clear, a description should be added. - * - * - * - * @generated - */ - public final FloatProperty FloatStructMember = - new FloatProperty( - "FloatStructMember", //id - null, //name - 4.0F, //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * @generated - */ - public FloatStruct_struct() { - addElement(FloatStructMember); - //begin-user-code - //end-user-code - } - }; - - /** - * The property FloatStruct - * If the meaning of this property isn't clear, a description should be added. + * Each port instance is accessed through members of the following form: * - * - * - * @generated - */ - public final StructProperty FloatStruct = - new StructProperty( - "FloatStruct", //id - null, //name - new FloatStruct_struct(), //type - new FloatStruct_struct(), // tmp type - "readwrite", //mode - new String[] {"configure"} //kind - ); - - /** - * The structure for property complexFloatStruct - * - * - * - * @generated - */ - public static class complexFloatStruct_struct extends StructDef { - /** - * The property complexFloatStructMember - * If the meaning of this property isn't clear, a description should be added. 
- * - * - * - * @generated - */ - public final ComplexFloatProperty complexFloatStructMember = - new ComplexFloatProperty( - "complexFloatStructMember", //id - null, //name - new complexFloat(4.0F,5.0F), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * @generated - */ - public complexFloatStruct_struct() { - addElement(complexFloatStructMember); - //begin-user-code - //end-user-code - } - }; - - /** - * The property complexFloatStruct - * If the meaning of this property isn't clear, a description should be added. + * this.port_ * - * - * - * @generated - */ - public final StructProperty complexFloatStruct = - new StructProperty( - "complexFloatStruct", //id - null, //name - new complexFloatStruct_struct(), //type - new complexFloatStruct_struct(), // tmp type - "readwrite", //mode - new String[] {"configure"} //kind - ); - - /** - * The structure for property FloatStructSequenceMember - * - * - * - * @generated - */ - public static class FloatStructSequenceMember_struct extends StructDef { - /** - * The property FloatStructSequenceMemberMemember - * If the meaning of this property isn't clear, a description should be added. - * - * - * - * @generated - */ - public final FloatProperty FloatStructSequenceMemberMemember = - new FloatProperty( - "FloatStructSequenceMemberMemember", //id - null, //name - 4.0F, //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * @generated - */ - public FloatStructSequenceMember_struct() { - addElement(FloatStructSequenceMemberMemember); - //begin-user-code - //end-user-code - } - }; - - /** - * The property FloatStructSequence - * If the meaning of this property isn't clear, a description should be added. + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. 
A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. * - * - * - * @generated - */ - public final StructSequenceProperty FloatStructSequence; - - /** - * The structure for property complexFloatStructSequenceMember - * - * - * - * @generated - */ - public static class complexFloatStructSequenceMember_struct extends StructDef { - /** - * The property complexFloatStructSequenceMemberMemember - * If the meaning of this property isn't clear, a description should be added. - * - * - * - * @generated - */ - public final ComplexFloatProperty complexFloatStructSequenceMemberMemember = - new ComplexFloatProperty( - "complexFloatStructSequenceMemberMemember", //id - null, //name - new complexFloat(4.0F,5.0F), //default value - Mode.READWRITE, //mode - Action.EXTERNAL, //action - new Kind[] {Kind.CONFIGURE} //kind - ); - - /** - * @generated - */ - public complexFloatStructSequenceMember_struct() { - addElement(complexFloatStructSequenceMemberMemember); - //begin-user-code - //end-user-code - } - }; - - /** - * The property complexFloatStructSequence - * If the meaning of this property isn't clear, a description should be added. + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. 
* - * - * - * @generated - */ - public final StructSequenceProperty complexFloatStructSequence; - - // Provides/inputs - // Uses/outputs - /** - * @generated - */ - public TestComplexProps() - { - super(); - ArrayList structVals_FloatStructSequenceMember_struct = new ArrayList(); - - this.FloatStructSequence = new StructSequenceProperty ( - "FloatStructSequence", //id - null, //name - FloatStructSequenceMember_struct.class, //type - structVals_FloatStructSequenceMember_struct, //defaultValue - Mode.READWRITE, //mode - new Kind[] { Kind.CONFIGURE } //kind - ); - ArrayList structVals_complexFloatStructSequenceMember_struct = new ArrayList(); - - this.complexFloatStructSequence = new StructSequenceProperty ( - "complexFloatStructSequence", //id - null, //name - complexFloatStructSequenceMember_struct.class, //type - structVals_complexFloatStructSequenceMember_struct, //defaultValue - Mode.READWRITE, //mode - new Kind[] { Kind.CONFIGURE } //kind - ); - addProperty(complexBooleanProp); - addProperty(complexULongProp); - addProperty(complexShortProp); - addProperty(complexFloatProp); - addProperty(complexOctetProp); - addProperty(complexCharProp); - addProperty(complexUShort); - addProperty(complexDouble); - addProperty(complexLong); - addProperty(complexLongLong); - addProperty(complexULongLong); - addProperty(complexFloatSequence); - addProperty(FloatStruct); - addProperty(complexFloatStruct); - addProperty(FloatStructSequence); - addProperty(complexFloatStructSequence); - - // Provides/input - - // Uses/output - - //begin-user-code - //end-user-code - } - - /** + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". * - * Main processing thread + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. * - * - * - * General functionality: - * - * This function is running as a separate thread from the component's main thread. 
- * - * The IDE uses JMerge during the generation (and re-generation) process. To keep - * customizations to this file from being over-written during subsequent generations, - * put your customization in between the following tags: - * - //begin-user-code - * - //end-user-code - * or, alternatively, set the @generated flag located before the code you wish to - * modify, in the following way: - * - "@generated NOT" - * - * StreamSRI: - * To create a StreamSRI object, use the following code: - * this.stream_id = "stream"; - * StreamSRI sri = new StreamSRI(); - * sri.mode = 0; - * sri.xdelta = 0.0; - * sri.ydelta = 1.0; - * sri.subsize = 0; - * sri.xunits = 1; // TIME_S - * sri.streamID = (this.stream_id.getValue() != null) ? this.stream_id.getValue() : ""; - * - * PrecisionUTCTime: - * To create a PrecisionUTCTime object, use the following code: - * long tmp_time = System.currentTimeMillis(); - * double wsec = tmp_time / 1000; - * double fsec = tmp_time % 1000; - * PrecisionUTCTime tstamp = new PrecisionUTCTime(BULKIO.TCM_CPU.value, (short)1, (short)0, wsec, fsec); - * - * Ports: - * - * Each port instance is accessed through members of the following form: this.port_ - * - * Data is obtained in the run function through the getPacket call (BULKIO only) on a - * provides port member instance. The getPacket function call is non-blocking; it takes - * one argument which is the time to wait on new data. If you pass 0, it will return - * immediately if no data available (won't wait). - * - * To send data, call the appropriate function in the port directly. In the case of BULKIO, - * convenience functions have been added in the port classes that aid in output. - * - * Interactions with non-BULKIO ports are left up to the component developer's discretion. - * * Properties: - * - * Properties are accessed through members of the same name with helper functions. 
If the - * property name is baudRate, then reading the value is achieved by: this.baudRate.getValue(); - * and writing a new value is achieved by: this.baudRate.setValue(new_value); - * + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * * Example: - * + * * This example assumes that the component has two ports: - * - A provides (input) port of type BULKIO::dataShort called dataShort_in - * - A uses (output) port of type BULKIO::dataFloat called dataFloat_out - * The mapping between the port and the class is found the class of the same name. + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. 
* This example also makes use of the following Properties: * - A float value called amplitude with a default value of 2.0 * - A boolean called increaseAmplitude with a default value of true - * - * BULKIO_dataShortInPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); * * if (data != null) { * float[] outData = new float[data.getData().length]; @@ -628,73 +231,22 @@ public TestComplexProps() * } * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); * } - * - * - * - * @generated + * */ - public void run() - { - //begin-user-code - //end-user-code - - while(this.started()) - { - //begin-user-code - // Process data here - try { - logger.debug("run() example log message"); - Thread.sleep(1000); - } catch (InterruptedException e) { - break; - } - - //end-user-code - } - - //begin-user-code - //end-user-code + protected int serviceFunction() { + logger.debug("serviceFunction() example log message"); + + return NOOP; } /** - * The main function of your component. If no args are provided, then the - * CORBA object is not bound to an SCA Domain or NamingService and can - * be run as a standard Java application. - * - * @param args - * @generated + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps */ - public static void main(String[] args) - { - final Properties orbProps = new Properties(); - - //begin-user-code - // TODO You may add extra startup code here, for example: - // orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); - //end-user-code - - try { - Resource.start_component(TestComplexProps.class, args, orbProps); - } catch (InvalidObjectReference e) { - e.printStackTrace(); - } catch (NotFound e) { - e.printStackTrace(); - } catch (CannotProceed e) { - e.printStackTrace(); - } catch (InvalidName e) { - e.printStackTrace(); - } catch (ServantNotActive e) { - e.printStackTrace(); - } catch (WrongPolicy e) { - e.printStackTrace(); - } catch (InstantiationException e) { - e.printStackTrace(); - } catch (IllegalAccessException e) { - e.printStackTrace(); - } - - //begin-user-code - // TODO You may add extra shutdown code here - //end-user-code + public static void configureOrb(final Properties orbProps) { } + } diff --git a/redhawk/src/testing/sdr/dom/components/TestComplexProps/java/src/TestComplexProps/java/TestComplexProps_base.java b/redhawk/src/testing/sdr/dom/components/TestComplexProps/java/src/TestComplexProps/java/TestComplexProps_base.java new file mode 100644 index 000000000..ede2c6d65 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/TestComplexProps/java/src/TestComplexProps/java/TestComplexProps_base.java @@ -0,0 +1,601 @@ +package TestComplexProps.java; + + +import java.util.List; +import java.util.List; +import java.util.List; +import java.util.Properties; + +import org.apache.log4j.Logger; + +import org.omg.CosNaming.NamingContextPackage.CannotProceed; +import org.omg.CosNaming.NamingContextPackage.InvalidName; +import org.omg.CosNaming.NamingContextPackage.NotFound; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import 
org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.InvalidObjectReference; + +import org.ossie.component.*; +import org.ossie.properties.*; + + +/** + * This is the component code. This file contains all the access points + * you need to use to be able to access all input and output ports, + * respond to incoming data, and perform general component housekeeping + * + * Source: TestComplexProps.spd.xml + * + * @generated + */ + +public abstract class TestComplexProps_base extends Component { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(TestComplexProps_base.class.getName()); + + /** + * The property complexBooleanProp + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexBooleanProperty complexBooleanProp = + new ComplexBooleanProperty( + "complexBooleanProp", //id + null, //name + new CF.complexBoolean(false,true), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexULongProp + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexULongProperty complexULongProp = + new ComplexULongProperty( + "complexULongProp", //id + null, //name + new CF.complexULong(4,5), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexShortProp + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexShortProperty complexShortProp = + new ComplexShortProperty( + "complexShortProp", //id + null, //name + new CF.complexShort((short)4,(short)5), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexFloatProp + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + public final ComplexFloatProperty complexFloatProp = + new ComplexFloatProperty( + "complexFloatProp", //id + null, //name + new CF.complexFloat(4.0F,5.0F), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexOctetProp + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexOctetProperty complexOctetProp = + new ComplexOctetProperty( + "complexOctetProp", //id + null, //name + new CF.complexOctet((byte)4,(byte)5), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexUShort + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexUShortProperty complexUShort = + new ComplexUShortProperty( + "complexUShort", //id + null, //name + new CF.complexUShort((short)4,(short)5), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexDouble + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexDoubleProperty complexDouble = + new ComplexDoubleProperty( + "complexDouble", //id + null, //name + new CF.complexDouble(4.0,5.0), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexLong + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + public final ComplexLongProperty complexLong = + new ComplexLongProperty( + "complexLong", //id + null, //name + new CF.complexLong(4,5), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexLongLong + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexLongLongProperty complexLongLong = + new ComplexLongLongProperty( + "complexLongLong", //id + null, //name + new CF.complexLongLong(4,5), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexULongLong + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexULongLongProperty complexULongLong = + new ComplexULongLongProperty( + "complexULongLong", //id + null, //name + new CF.complexULongLong(4,5), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property complexFloatSequence + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final ComplexFloatSequenceProperty complexFloatSequence = + new ComplexFloatSequenceProperty( + "complexFloatSequence", //id + null, //name + ComplexFloatSequenceProperty.asList(new CF.complexFloat(6.0F,7.0F),new CF.complexFloat(4.0F,5.0F),new CF.complexFloat(4.0F,5.0F)), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property FloatStruct + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + /** + * The structure for property FloatStruct + * + * @generated + */ + public static class FloatStruct_struct extends StructDef { + public final FloatProperty FloatStructMember = + new FloatProperty( + "FloatStructMember", //id + null, //name + 6.0F, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * @generated + */ + public FloatStruct_struct(Float FloatStructMember) { + this(); + this.FloatStructMember.setValue(FloatStructMember); + } + + /** + * @generated + */ + public void set_FloatStructMember(Float FloatStructMember) { + this.FloatStructMember.setValue(FloatStructMember); + } + public Float get_FloatStructMember() { + return this.FloatStructMember.getValue(); + } + + /** + * @generated + */ + public FloatStruct_struct() { + addElement(this.FloatStructMember); + } + + public String getId() { + return "FloatStruct"; + } + }; + + public final StructProperty FloatStruct = + new StructProperty( + "FloatStruct", //id + null, //name + FloatStruct_struct.class, //type + new FloatStruct_struct(), //default value + Mode.READWRITE, //mode + new Kind[] {Kind.PROPERTY} //kind + ); + + /** + * The property complexFloatStruct + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + /** + * The structure for property complexFloatStruct + * + * @generated + */ + public static class complexFloatStruct_struct extends StructDef { + public final ComplexFloatProperty complexFloatStructMember = + new ComplexFloatProperty( + "complexFloatStructMember", //id + null, //name + new CF.complexFloat(6.0F,7.0F), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + public final ComplexFloatSequenceProperty complex_float_seq = + new ComplexFloatSequenceProperty( + "complexFloatStruct::complex_float_seq", //id + "complex_float_seq", //name + ComplexFloatSequenceProperty.asList(new CF.complexFloat(3.0F,2.0F)), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + + /** + * @generated + */ + public complexFloatStruct_struct(CF.complexFloat complexFloatStructMember, List complex_float_seq) { + this(); + this.complexFloatStructMember.setValue(complexFloatStructMember); + this.complex_float_seq.setValue(complex_float_seq); + } + + /** + * @generated + */ + public void set_complexFloatStructMember(CF.complexFloat complexFloatStructMember) { + this.complexFloatStructMember.setValue(complexFloatStructMember); + } + public CF.complexFloat get_complexFloatStructMember() { + return this.complexFloatStructMember.getValue(); + } + public void set_complex_float_seq(List complex_float_seq) { + this.complex_float_seq.setValue(complex_float_seq); + } + public List get_complex_float_seq() { + return this.complex_float_seq.getValue(); + } + + /** + * @generated + */ + public complexFloatStruct_struct() { + addElement(this.complexFloatStructMember); + addElement(this.complex_float_seq); + } + + public String getId() { + return "complexFloatStruct"; + } + }; + + public final StructProperty complexFloatStruct = + new StructProperty( + "complexFloatStruct", //id + null, //name + complexFloatStruct_struct.class, //type + new complexFloatStruct_struct(), 
//default value + Mode.READWRITE, //mode + new Kind[] {Kind.PROPERTY} //kind + ); + + /** + * The property FloatStructSequence + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + /** + * The structure for property FloatStructSequenceMember + * + * @generated + */ + public static class FloatStructSequenceMember_struct extends StructDef { + public final FloatProperty FloatStructSequenceMemberMemember = + new FloatProperty( + "FloatStructSequenceMemberMemember", //id + null, //name + 6.0F, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + public final FloatSequenceProperty float_seq = + new FloatSequenceProperty( + "FloatStructSequence::float_seq", //id + "float_seq", //name + FloatSequenceProperty.asList(3.0F), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + + /** + * @generated + */ + public FloatStructSequenceMember_struct(Float FloatStructSequenceMemberMemember, List float_seq) { + this(); + this.FloatStructSequenceMemberMemember.setValue(FloatStructSequenceMemberMemember); + this.float_seq.setValue(float_seq); + } + + /** + * @generated + */ + public void set_FloatStructSequenceMemberMemember(Float FloatStructSequenceMemberMemember) { + this.FloatStructSequenceMemberMemember.setValue(FloatStructSequenceMemberMemember); + } + public Float get_FloatStructSequenceMemberMemember() { + return this.FloatStructSequenceMemberMemember.getValue(); + } + public void set_float_seq(List float_seq) { + this.float_seq.setValue(float_seq); + } + public List get_float_seq() { + return this.float_seq.getValue(); + } + + /** + * @generated + */ + public FloatStructSequenceMember_struct() { + addElement(this.FloatStructSequenceMemberMemember); + addElement(this.float_seq); + } + + public String getId() { + return "FloatStructSequenceMember"; + } + }; + + public final StructSequenceProperty FloatStructSequence = + new 
StructSequenceProperty ( + "FloatStructSequence", //id + null, //name + FloatStructSequenceMember_struct.class, //type + StructSequenceProperty.asList(), //defaultValue + Mode.READWRITE, //mode + new Kind[] { Kind.PROPERTY } //kind + ); + + /** + * The property complexFloatStructSequence + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + /** + * The structure for property complexFloatStructSequenceMember + * + * @generated + */ + public static class complexFloatStructSequenceMember_struct extends StructDef { + public final ComplexFloatProperty complexFloatStructSequenceMemberMemember = + new ComplexFloatProperty( + "complexFloatStructSequenceMemberMemember", //id + null, //name + new CF.complexFloat(6.0F,5.0F), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + public final ComplexFloatSequenceProperty complex_float_seq = + new ComplexFloatSequenceProperty( + "complexFloatStructSequence::complex_float_seq", //id + "complex_float_seq", //name + ComplexFloatSequenceProperty.asList(new CF.complexFloat(3.0F,2.0F)), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.CONFIGURE} + ); + + /** + * @generated + */ + public complexFloatStructSequenceMember_struct(CF.complexFloat complexFloatStructSequenceMemberMemember, List complex_float_seq) { + this(); + this.complexFloatStructSequenceMemberMemember.setValue(complexFloatStructSequenceMemberMemember); + this.complex_float_seq.setValue(complex_float_seq); + } + + /** + * @generated + */ + public void set_complexFloatStructSequenceMemberMemember(CF.complexFloat complexFloatStructSequenceMemberMemember) { + this.complexFloatStructSequenceMemberMemember.setValue(complexFloatStructSequenceMemberMemember); + } + public CF.complexFloat get_complexFloatStructSequenceMemberMemember() { + return this.complexFloatStructSequenceMemberMemember.getValue(); + } + public void 
set_complex_float_seq(List complex_float_seq) { + this.complex_float_seq.setValue(complex_float_seq); + } + public List get_complex_float_seq() { + return this.complex_float_seq.getValue(); + } + + /** + * @generated + */ + public complexFloatStructSequenceMember_struct() { + addElement(this.complexFloatStructSequenceMemberMemember); + addElement(this.complex_float_seq); + } + + public String getId() { + return "complexFloatStructSequenceMember"; + } + }; + + public final StructSequenceProperty complexFloatStructSequence = + new StructSequenceProperty ( + "complexFloatStructSequence", //id + null, //name + complexFloatStructSequenceMember_struct.class, //type + StructSequenceProperty.asList(), //defaultValue + Mode.READWRITE, //mode + new Kind[] { Kind.PROPERTY } //kind + ); + + /** + * @generated + */ + public TestComplexProps_base() + { + super(); + + setLogger( logger, TestComplexProps_base.class.getName() ); + + + // Properties + addProperty(complexBooleanProp); + + addProperty(complexULongProp); + + addProperty(complexShortProp); + + addProperty(complexFloatProp); + + addProperty(complexOctetProp); + + addProperty(complexUShort); + + addProperty(complexDouble); + + addProperty(complexLong); + + addProperty(complexLongLong); + + addProperty(complexULongLong); + + addProperty(complexFloatSequence); + + addProperty(FloatStruct); + + addProperty(complexFloatStruct); + + addProperty(FloatStructSequence); + + addProperty(complexFloatStructSequence); + + } + + public void start() throws CF.ResourcePackage.StartError + { + super.start(); + } + + public void stop() throws CF.ResourcePackage.StopError + { + super.stop(); + } + + + /** + * The main function of your component. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. 
+ * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + TestComplexProps.configureOrb(orbProps); + + try { + Component.start_component(TestComplexProps.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (NotFound e) { + e.printStackTrace(); + } catch (CannotProceed e) { + e.printStackTrace(); + } catch (InvalidName e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dom/components/TestComplexProps/python/TestComplexProps_base.py b/redhawk/src/testing/sdr/dom/components/TestComplexProps/python/TestComplexProps_base.py index 3592605a2..e5f4bec6d 100644 --- a/redhawk/src/testing/sdr/dom/components/TestComplexProps/python/TestComplexProps_base.py +++ b/redhawk/src/testing/sdr/dom/components/TestComplexProps/python/TestComplexProps_base.py @@ -1,31 +1,14 @@ #!/usr/bin/env python # -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# # AUTO-GENERATED CODE. DO NOT MODIFY! # # Source: TestComplexProps.spd.xml -from ossie.cf import CF, CF__POA +from ossie.cf import CF +from ossie.cf import CF__POA from ossie.utils import uuid -from ossie.resource import Resource +from ossie.component import Component +from ossie.threadedcomponent import * from ossie.properties import simple_property from ossie.properties import simpleseq_property from ossie.properties import struct_property @@ -33,32 +16,7 @@ import Queue, copy, time, threading -NOOP = -1 -NORMAL = 0 -FINISH = 1 -class ProcessThread(threading.Thread): - def __init__(self, target, pause=0.0125): - threading.Thread.__init__(self) - self.setDaemon(True) - self.target = target - self.pause = pause - self.stop_signal = threading.Event() - - def stop(self): - self.stop_signal.set() - - def updatePause(self, pause): - self.pause = pause - - def run(self): - state = NORMAL - while (state != FINISH) and (not self.stop_signal.isSet()): - state = self.target() - if (state == NOOP): - # If there was no data to process sleep to avoid spinning - time.sleep(self.pause) - -class TestComplexProps_base(CF__POA.Resource, Resource): +class TestComplexProps_base(CF__POA.Resource, Component, ThreadedComponent): # These values can be altered in the __init__ of your derived class PAUSE = 0.0125 # The amount of time to sleep if process return NOOP @@ -67,60 +25,30 @@ class TestComplexProps_base(CF__POA.Resource, Resource): def __init__(self, identifier, execparams): loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] - Resource.__init__(self, identifier, execparams, loggerName=loggerName) - self.threadControlLock = threading.RLock() - self.process_thread = None - # self.auto_start is deprecated and is only kept for API compatability + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility # with 1.7.X and 1.8.0 
components. This variable may be removed # in future releases self.auto_start = False - - def initialize(self): - Resource.initialize(self) - # Instantiate the default implementations for all ports on this component def start(self): - self.threadControlLock.acquire() - try: - Resource.start(self) - if self.process_thread == None: - self.process_thread = ProcessThread(target=self.process, pause=self.PAUSE) - self.process_thread.start() - finally: - self.threadControlLock.release() - - def process(self): - """The process method should process a single "chunk" of data and then return. This method will be called - from the processing thread again, and again, and again until it returns FINISH or stop() is called on the - component. If no work is performed, then return NOOP""" - raise NotImplementedError + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) def stop(self): - self.threadControlLock.acquire() - try: - process_thread = self.process_thread - self.process_thread = None - - if process_thread != None: - process_thread.stop() - process_thread.join(self.TIMEOUT) - if process_thread.isAlive(): - raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") - Resource.stop(self) - finally: - self.threadControlLock.release() + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") def releaseObject(self): try: self.stop() except Exception: self._log.exception("Error stopping") - self.threadControlLock.acquire() - try: - Resource.releaseObject(self) - finally: - self.threadControlLock.release() + Component.releaseObject(self) ###################################################################### # PORTS @@ -139,110 +67,111 @@ def releaseObject(self): complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexULongProp = simple_property(id_="complexULongProp", type_="ulong", 
defvalue=complex(4,5), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexShortProp = simple_property(id_="complexShortProp", type_="short", defvalue=complex(4,5), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexFloatProp = simple_property(id_="complexFloatProp", type_="float", defvalue=complex(4.0,5.0), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexOctetProp = simple_property(id_="complexOctetProp", type_="octet", defvalue=complex(4,5), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) - complexCharProp = simple_property(id_="complexCharProp", - type_="char", - defvalue=complex(4,5), - complex=True, - mode="readwrite", - action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexUShort = simple_property(id_="complexUShort", type_="ushort", defvalue=complex(4,5), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexDouble = simple_property(id_="complexDouble", type_="double", defvalue=complex(4.0,5.0), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexLong = simple_property(id_="complexLong", type_="long", defvalue=complex(4,5), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexLongLong = simple_property(id_="complexLongLong", type_="longlong", defvalue=complex(4,5), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexULongLong = simple_property(id_="complexULongLong", type_="ulonglong", defvalue=complex(4,5), complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) + kinds=("property",)) + + complexFloatSequence = simpleseq_property(id_="complexFloatSequence", 
type_="float", - defvalue=[ - complex(4.0,5.0), - complex(4.0,5.0), - complex(4.0,5.0), - ], + defvalue=[complex(6.0,7.0), complex(4.0,5.0), complex(4.0,5.0) ], complex=True, mode="readwrite", action="external", - kinds=("configure",) - ) - class Float(object): - FloatStructMember = simple_property(id_="FloatStructMember", + kinds=("property",)) + + + class _Float(object): + FloatStructMember = simple_property( + id_="FloatStructMember", + type_="float", - defvalue=4.0, + defvalue=6.0 ) def __init__(self, **kw): """Construct an initialized instance of this struct definition""" - for attrname, classattr in type(self).__dict__.items(): - if type(classattr) == simple_property: + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): classattr.initialize(self) for k,v in kw.items(): setattr(self,k,v) @@ -253,31 +182,45 @@ def __str__(self): d["FloatStructMember"] = self.FloatStructMember return str(d) - def getId(self): + @classmethod + def getId(cls): return "FloatStruct" - def isStruct(self): + @classmethod + def isStruct(cls): return True def getMembers(self): return [("FloatStructMember",self.FloatStructMember)] FloatStruct = struct_property(id_="FloatStruct", - structdef=Float, - configurationkind=("configure",), - mode="readwrite" - ) + structdef=_Float, + configurationkind=("property",), + mode="readwrite") + + class ComplexFloat(object): - complexFloatStructMember = simple_property(id_="complexFloatStructMember", + complexFloatStructMember = simple_property( + id_="complexFloatStructMember", + type_="float", - defvalue=complex(4.0,5.0), - complex=True, - ) + defvalue=complex(6.0,7.0) + , + complex=True) + + complex_float_seq = simpleseq_property( + id_="complexFloatStruct::complex_float_seq", + + name="complex_float_seq", + type_="float", + defvalue=[complex(3.0,2.0)] + , + complex=True) def __init__(self, **kw): """Construct an initialized instance of this struct definition""" - for attrname, 
classattr in type(self).__dict__.items(): - if type(classattr) == simple_property: + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): classattr.initialize(self) for k,v in kw.items(): setattr(self,k,v) @@ -286,82 +229,117 @@ def __str__(self): """Return a string representation of this structure""" d = {} d["complexFloatStructMember"] = self.complexFloatStructMember + d["complex_float_seq"] = self.complex_float_seq return str(d) - def getId(self): + @classmethod + def getId(cls): return "complexFloatStruct" - def isStruct(self): + @classmethod + def isStruct(cls): return True def getMembers(self): - return [("complexFloatStructMember",self.complexFloatStructMember)] + return [("complexFloatStructMember",self.complexFloatStructMember),("complex_float_seq",self.complex_float_seq)] complexFloatStruct = struct_property(id_="complexFloatStruct", structdef=ComplexFloat, - configurationkind=("configure",), - mode="readwrite" - ) + configurationkind=("property",), + mode="readwrite") + - class FloatStructSequenceMember(object): - FloatStructSequenceMemberMemember = simple_property(id_="FloatStructSequenceMemberMemember", + class _FloatStructSequenceMember(object): + FloatStructSequenceMemberMemember = simple_property( + id_="FloatStructSequenceMemberMemember", + type_="float", - defvalue=4.0, + defvalue=6.0 ) - def __init__(self, FloatStructSequenceMemberMemember=4.0): + float_seq = simpleseq_property( + id_="FloatStructSequence::float_seq", + + name="float_seq", + type_="float", + defvalue=[3.0] + ) + + def __init__(self, FloatStructSequenceMemberMemember=6.0, float_seq=[3.0]): self.FloatStructSequenceMemberMemember = FloatStructSequenceMemberMemember + self.float_seq = float_seq def __str__(self): """Return a string representation of this structure""" d = {} d["FloatStructSequenceMemberMemember"] = self.FloatStructSequenceMemberMemember + d["float_seq"] = self.float_seq return str(d) - def getId(self): + 
@classmethod + def getId(cls): return "FloatStructSequenceMember" - def isStruct(self): + @classmethod + def isStruct(cls): return True def getMembers(self): - return [("FloatStructSequenceMemberMemember",self.FloatStructSequenceMemberMemember)] + return [("FloatStructSequenceMemberMemember",self.FloatStructSequenceMemberMemember),("float_seq",self.float_seq)] FloatStructSequence = structseq_property(id_="FloatStructSequence", - structdef=FloatStructSequenceMember, + structdef=_FloatStructSequenceMember, defvalue=[], - configurationkind=("configure",), - mode="readwrite" - ) + configurationkind=("property",), + mode="readwrite") + + class ComplexFloatStructSequenceMember(object): - complexFloatStructSequenceMemberMemember = simple_property(id_="complexFloatStructSequenceMemberMemember", + complexFloatStructSequenceMemberMemember = simple_property( + id_="complexFloatStructSequenceMemberMemember", + type_="float", - defvalue=complex(4.0,5.0), - complex=True, - ) + defvalue=complex(6.0,5.0) + , + complex=True) + + complex_float_seq = simpleseq_property( + id_="complexFloatStructSequence::complex_float_seq", + + name="complex_float_seq", + type_="float", + defvalue=[complex(3.0,2.0)] + , + complex=True) - def __init__(self, complexFloatStructSequenceMemberMemember=complex(4.0,5.0)): + def __init__(self, complexFloatStructSequenceMemberMemember=complex(6.0,5.0), complex_float_seq=[complex(3.0,2.0)]): self.complexFloatStructSequenceMemberMemember = complexFloatStructSequenceMemberMemember + self.complex_float_seq = complex_float_seq def __str__(self): """Return a string representation of this structure""" d = {} d["complexFloatStructSequenceMemberMemember"] = self.complexFloatStructSequenceMemberMemember + d["complex_float_seq"] = self.complex_float_seq return str(d) - def getId(self): + @classmethod + def getId(cls): return "complexFloatStructSequenceMember" - def isStruct(self): + @classmethod + def isStruct(cls): return True def getMembers(self): - return 
[("complexFloatStructSequenceMemberMemember",self.complexFloatStructSequenceMemberMemember)] + return [("complexFloatStructSequenceMemberMemember",self.complexFloatStructSequenceMemberMemember),("complex_float_seq",self.complex_float_seq)] complexFloatStructSequence = structseq_property(id_="complexFloatStructSequence", structdef=ComplexFloatStructSequenceMember, defvalue=[], - configurationkind=("configure",), - mode="readwrite" - ) + configurationkind=("property",), + mode="readwrite") + + + diff --git a/redhawk/src/testing/sdr/dom/components/TestCppOptionalProps/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/TestCppOptionalProps/cpp/Makefile.am index ae16a69bd..047909b42 100644 --- a/redhawk/src/testing/sdr/dom/components/TestCppOptionalProps/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/TestCppOptionalProps/cpp/Makefile.am @@ -39,7 +39,7 @@ distclean-local: # generated by the REDHAWK IDE. You can remove/modify the following lines if # you wish to manually control these options. 
TestCppOptionalProps_SOURCES = main.cpp TestCppOptionalProps.cpp TestCppOptionalProps.h TestCppOptionalProps_base.cpp TestCppOptionalProps_base.h struct_props.h -TestCppOptionalProps_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(redhawk_LDADD_auto) +TestCppOptionalProps_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) TestCppOptionalProps_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include TestCppOptionalProps_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.cpp b/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.cpp index ed04e02ee..552b90437 100644 --- a/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.cpp +++ b/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.cpp @@ -52,6 +52,35 @@ TestCppProps::TestCppProps(const char *uuid, const char *label) : "external", "configure"); + addProperty(simple_utctime, + "2017:2:1::12:01:00.123", + "simple_utctime", + "", + "readwrite", + "", + "external", + "property"); + + seq_utctime.push_back(redhawk::time::utils::convert("2010:2:1::12:01:00.123")); + seq_utctime.push_back(redhawk::time::utils::convert("2011:2:1::12:01:00.123")); + addProperty(seq_utctime, + seq_utctime, + "seq_utctime", + "", + "readwrite", + "", + "external", + "property"); + + addProperty(reset_utctime, + "false", + "reset_utctime", + "", + "readwrite", + "", + "external", + "property"); + addProperty(seq_oct, "DCE:f877b9ee-a682-43a6-ba21-5ea980167f55", "seq_oct", @@ -82,12 +111,20 @@ 
TestCppProps::TestCppProps(const char *uuid, const char *label) : "", "external", "property"); + addPropertyListener(reset_utctime, this, &TestCppProps::resetUTCtime); } TestCppProps::~TestCppProps (void) { } +void TestCppProps::resetUTCtime(bool oldValue, bool newValue) +{ + if (newValue) { + this->simple_utctime = redhawk::time::utils::create(); + } +} + void TestCppProps::runTest (CORBA::ULong testId, CF::Properties& testValues) throw (CF::UnknownProperties, CF::TestableObject::UnknownTest, CORBA::SystemException) { diff --git a/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.h b/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.h index 0dbc5dcb3..7df331624 100644 --- a/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.h +++ b/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.h @@ -53,6 +53,7 @@ class TestCppProps : public Resource_impl void testMemberCallbacks (CF::Properties& values); void testStaticCallbacks (CF::Properties& values); void testCallbacks (CF::Properties& values); + void resetUTCtime(bool oldValue, bool newValue); void testEnableNil (CF::Properties& values); void testSetNil (CF::Properties& values); @@ -60,6 +61,9 @@ class TestCppProps : public Resource_impl // Member variables exposed as properties CORBA::Long prop_long; std::string prop_str; + CF::UTCTime simple_utctime; + bool reset_utctime; + std::vector seq_utctime; std::vector seq_oct; std::vector seq_foo; diff --git a/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.prf.xml b/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.prf.xml index 7a721917e..11116d7ee 100644 --- a/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.prf.xml +++ b/redhawk/src/testing/sdr/dom/components/TestCppProps/TestCppProps.prf.xml @@ -30,6 +30,24 @@ with this program. If not, see http://www.gnu.org/licenses/. 
+ + 2017:2:1::12:01:00.123 + + + + + false + + + + + + 2010:2:1::12:01:00.123 + 2011:2:1::12:01:00.123 + + + + diff --git a/redhawk/src/testing/sdr/dom/components/TestJavaOptionalProps/TestJavaOptionalProps.spec b/redhawk/src/testing/sdr/dom/components/TestJavaOptionalProps/TestJavaOptionalProps.spec deleted file mode 100644 index 084b5bf87..000000000 --- a/redhawk/src/testing/sdr/dom/components/TestJavaOptionalProps/TestJavaOptionalProps.spec +++ /dev/null @@ -1,90 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: TestJavaOptionalProps -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.11 -Requires: redhawk >= 1.11 - -BuildArch: noarch - -# Java requirements -Requires: java >= 1.6 -BuildRequires: java-devel >= 1.6 - - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation java -pushd java -./reconf -%define _bindir %{_prefix}/dom/components/TestJavaOptionalProps/java -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation java -pushd java -%define _bindir %{_prefix}/dom/components/TestJavaOptionalProps/java -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/TestJavaOptionalProps.scd.xml -%{_prefix}/dom/components/%{name}/TestJavaOptionalProps.prf.xml -%{_prefix}/dom/components/%{name}/TestJavaOptionalProps.spd.xml -%{_prefix}/dom/components/%{name}/java - diff --git a/redhawk/src/testing/sdr/dom/components/TestJavaProps/TestJavaProps.java b/redhawk/src/testing/sdr/dom/components/TestJavaProps/TestJavaProps.java index c0bae293a..dacc5e5c2 100644 --- a/redhawk/src/testing/sdr/dom/components/TestJavaProps/TestJavaProps.java +++ 
b/redhawk/src/testing/sdr/dom/components/TestJavaProps/TestJavaProps.java @@ -45,6 +45,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Arrays; +import org.ossie.properties.PropertyListener; +import org.ossie.redhawk.time.utils; /** * This is the component code. This file contains all the access points @@ -125,6 +127,35 @@ public class TestJavaProps extends Resource implements Runnable { new Kind[] {Kind.CONFIGURE,} // kind ); + public final UTCTimeProperty simple_utctime = + new UTCTimeProperty( + "simple_utctime", //id + null, //name + "2017:2:1::14:01:00.123", //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + public final UTCTimeSequenceProperty seq_utctime = + new UTCTimeSequenceProperty( + "seq_utctime", //id + null, //name + UTCTimeSequenceProperty.asList("2010:2:1::12:01:00.123","2011:2:1::12:01:00.123"), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + public final BooleanProperty reset_utctime = + new BooleanProperty( + "reset_utctime", //id + null, //name + false, //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + /** * The property readOnly * If the meaning of this property isn't clear, a description should be added. 
@@ -242,14 +273,27 @@ public TestJavaProps() addProperty(struct_prop); addProperty(structseq_prop); addProperty(readOnly); + addProperty(simple_utctime); + addProperty(reset_utctime); + addProperty(seq_utctime); // Project/input // Uses/outputs //begin-user-code + this.reset_utctime.addChangeListener(new PropertyListener() { + public void valueChanged(Boolean oldValue, Boolean newValue) { + reset_utctimeValueChanged(oldValue, newValue); + } + }); //end-user-code } - + + private void reset_utctimeValueChanged(Boolean oldValue, Boolean newValue) + { + this.simple_utctime.setValue(utils.now()); + } + /** * * Main processing thread diff --git a/redhawk/src/testing/sdr/dom/components/TestJavaProps/TestJavaProps.prf.xml b/redhawk/src/testing/sdr/dom/components/TestJavaProps/TestJavaProps.prf.xml index d20f20a68..83ea0e271 100644 --- a/redhawk/src/testing/sdr/dom/components/TestJavaProps/TestJavaProps.prf.xml +++ b/redhawk/src/testing/sdr/dom/components/TestJavaProps/TestJavaProps.prf.xml @@ -41,6 +41,24 @@ with this program. If not, see http://www.gnu.org/licenses/. + + 2017:2:1::13:01:00.123 + + + + + false + + + + + + 2010:2:1::12:01:00.123 + 2011:2:1::12:01:00.123 + + + + 0 diff --git a/redhawk/src/testing/sdr/dom/components/TestLoggingAPI/TestLoggingAPI.spec b/redhawk/src/testing/sdr/dom/components/TestLoggingAPI/TestLoggingAPI.spec deleted file mode 100644 index 8a764683d..000000000 --- a/redhawk/src/testing/sdr/dom/components/TestLoggingAPI/TestLoggingAPI.spec +++ /dev/null @@ -1,109 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. 
-# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: TestLoggingAPI -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.10 -Requires: redhawk >= 1.10 - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation cpp -pushd cpp -./reconf -%define _bindir %{_prefix}/dom/components/TestLoggingAPI/cpp -%configure -make %{?_smp_mflags} -popd -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/TestLoggingAPI/python -%configure -make %{?_smp_mflags} -popd -# Implementation java -pushd java -./reconf -%define _bindir %{_prefix}/dom/components/TestLoggingAPI/java -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation cpp -pushd cpp -%define _bindir %{_prefix}/dom/components/TestLoggingAPI/cpp -make install DESTDIR=$RPM_BUILD_ROOT -popd -# Implementation python -pushd python -%define _bindir 
%{_prefix}/dom/components/TestLoggingAPI/python -make install DESTDIR=$RPM_BUILD_ROOT -popd -# Implementation java -pushd java -%define _bindir %{_prefix}/dom/components/TestLoggingAPI/java -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/TestLoggingAPI.scd.xml -%{_prefix}/dom/components/%{name}/TestLoggingAPI.prf.xml -%{_prefix}/dom/components/%{name}/TestLoggingAPI.spd.xml -%{_prefix}/dom/components/%{name}/cpp -%{_prefix}/dom/components/%{name}/python -%{_prefix}/dom/components/%{name}/java - diff --git a/redhawk/src/testing/sdr/dom/components/TestPythonProps/TestPythonProps.prf.xml b/redhawk/src/testing/sdr/dom/components/TestPythonProps/TestPythonProps.prf.xml index 0cb88da1b..76dad64e7 100644 --- a/redhawk/src/testing/sdr/dom/components/TestPythonProps/TestPythonProps.prf.xml +++ b/redhawk/src/testing/sdr/dom/components/TestPythonProps/TestPythonProps.prf.xml @@ -88,6 +88,25 @@ with this program. If not, see http://www.gnu.org/licenses/. 
+ + 2017:2:1::13:01:00.123 + + + + + false + + + + + + 2010:2:1::12:01:00.123 + 2011:2:1::12:01:00.123 + + + + + empty diff --git a/redhawk/src/testing/sdr/dom/components/TestPythonProps/TestPythonProps.py b/redhawk/src/testing/sdr/dom/components/TestPythonProps/TestPythonProps.py index f7cca8240..0f1a54132 100755 --- a/redhawk/src/testing/sdr/dom/components/TestPythonProps/TestPythonProps.py +++ b/redhawk/src/testing/sdr/dom/components/TestPythonProps/TestPythonProps.py @@ -25,6 +25,7 @@ from ossie.resource import Resource, start_component from ossie.properties import simple_property, simpleseq_property, struct_property, structseq_property from omniORB.any import from_any +from ossie.utils import rhtime class TestPythonProps (CF__POA.Resource, Resource): @@ -64,6 +65,29 @@ class TestPythonProps (CF__POA.Resource, Resource): action="external", kinds=("property",)) + simple_utctime = simple_property(id_="simple_utctime", + name="simple_utctime", + type_="utctime", + defvalue="2017:2:1::14:01:00.123", + mode="readwrite", + action="external", + kinds=("property",)) + + reset_utctime = simple_property(id_="reset_utctime", + name="reset_utctime", + type_="boolean", + defvalue=False, + mode="readwrite", + action="external", + kinds=("property",)) + + seq_utctime = simpleseq_property(id_="seq_utctime", + name="seq_utctime", + type_="utctime", + mode="readwrite", + action="external", + kinds=("property",)) + class SomeStruct(object): field1 = simple_property(id_="item1", type_="string", @@ -90,6 +114,8 @@ def __init__(self, ip_address="", port=0): self.ip_address = ip_address self.port = port + def reset_utcCallback(self, id, old_value, new_value): + self.simple_utctime = rhtime.now() multicasts = structseq_property(id_="DCE:897a5489-f680-46a8-a698-e36fd8bbae80[]", name="multicasts", @@ -98,6 +124,7 @@ def __init__(self, ip_address="", port=0): def __init__(self, identifier, execparams): Resource.__init__(self, identifier, execparams) + 
self.addPropertyChangeListener('reset_utctime', self.reset_utcCallback) def runTest(self, test, props): if test == 0: diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.prf.xml b/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.prf.xml new file mode 100644 index 000000000..02810eeca --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.prf.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.scd.xml b/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.scd.xml new file mode 100644 index 000000000..fb0b4311f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.spd.xml b/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.spd.xml new file mode 100644 index 000000000..e2d3a6b8c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/alloc_shm.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/alloc_shm.so + + + + + + + + + \ No newline at end of file diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/Makefile.am new file mode 100644 index 000000000..31bb0b52b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/Makefile.am @@ -0,0 +1,60 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. 
+# +# REDHAWK core is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +CFDIR = $(top_srcdir)/base + +ossieName = alloc_shm +libdir = $(prefix)/dom/components/alloc_shm/cpp +lib_LTLIBRARIES = alloc_shm.la + +.PHONY: convenience-link clean-convenience-link + +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : alloc_shm.la + @ln -fs .libs/alloc_shm.so + +clean-convenience-link: + @rm -f alloc_shm.so + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. 
+include $(srcdir)/Makefile.am.ide +alloc_shm_la_SOURCES = $(redhawk_SOURCES_auto) +alloc_shm_la_LIBADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la +alloc_shm_la_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) -I$(CFDIR)/include +alloc_shm_la_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) + diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/Makefile.am.ide b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/Makefile.am.ide new file mode 100644 index 000000000..b11db095e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/Makefile.am.ide @@ -0,0 +1,10 @@ +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! +# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = alloc_shm.cpp +redhawk_SOURCES_auto += alloc_shm.h +redhawk_SOURCES_auto += alloc_shm_base.cpp +redhawk_SOURCES_auto += alloc_shm_base.h +redhawk_SOURCES_auto += main.cpp diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm.cpp b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm.cpp new file mode 100644 index 000000000..b982e432f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm.cpp @@ -0,0 +1,263 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. 
Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "alloc_shm.h" +#include + +PREPARE_LOGGING(alloc_shm_i) + +alloc_shm_i::alloc_shm_i(const char *uuid, const char *label) : + alloc_shm_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + +} + +alloc_shm_i::~alloc_shm_i() +{ +} + +void alloc_shm_i::constructor() +{ + //redhawk::bitbuffer buffer(1000000); + redhawk::buffer outputData(1000000); + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. 
+ + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. 
+ + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. 
Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void alloc_shm_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &alloc_shm_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (alloc_shm_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &alloc_shm_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to alloc_shm.cpp + alloc_shm_i::alloc_shm_i(const char *uuid, const char *label) : + alloc_shm_base(uuid, label) + { + addPropertyListener(scaleValue, this, &alloc_shm_i::scaleChanged); + addPropertyListener(status, this, &alloc_shm_i::statusChanged); + } + + void alloc_shm_i::scaleChanged(float oldValue, float newValue) + { + RH_DEBUG(this->_baseLog, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void alloc_shm_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + RH_DEBUG(this->_baseLog, "status changed"); + } + + //Add to alloc_shm.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + Logging: + + The member _baseLog is a logger whose base name is the component (or device) instance name. + New logs should be created based on this logger name. + + To create a new logger, + rh_logger::LoggerPtr my_logger = this->_baseLog->getChildLogger("foo"); + + Assuming component instance name abc_1, my_logger will then be created with the + name "abc_1.user.foo". 
+ + +************************************************************************************************/ +int alloc_shm_i::serviceFunction() +{ + RH_DEBUG(this->_baseLog, "serviceFunction() example log message"); + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm.h b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm.h new file mode 100644 index 000000000..9899e4763 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm.h @@ -0,0 +1,18 @@ +#ifndef ALLOC_SHM_I_IMPL_H +#define ALLOC_SHM_I_IMPL_H + +#include "alloc_shm_base.h" + +class alloc_shm_i : public alloc_shm_base +{ + ENABLE_LOGGING + public: + alloc_shm_i(const char *uuid, const char *label); + ~alloc_shm_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // ALLOC_SHM_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm_base.cpp b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm_base.cpp new file mode 100644 index 000000000..e6008cc19 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm_base.cpp @@ -0,0 +1,60 @@ +#include "alloc_shm_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. 
Instead, overload them + on the child class + +******************************************************************************************/ + +alloc_shm_base::alloc_shm_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); +} + +alloc_shm_base::~alloc_shm_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. +*******************************************************************************************/ +void alloc_shm_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void alloc_shm_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void alloc_shm_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void alloc_shm_base::loadProperties() +{ +} + + diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm_base.h b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm_base.h new file mode 100644 index 000000000..e8b0e136e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/alloc_shm_base.h @@ -0,0 +1,27 @@ +#ifndef ALLOC_SHM_BASE_IMPL_BASE_H +#define ALLOC_SHM_BASE_IMPL_BASE_H + +#include +#include +#include + + +class alloc_shm_base : public Component, protected ThreadedComponent +{ + public: + alloc_shm_base(const char *uuid, const char 
*label); + ~alloc_shm_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + + private: +}; +#endif // ALLOC_SHM_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/main.cpp new file mode 100644 index 000000000..41f95f703 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/alloc_shm/cpp/main.cpp @@ -0,0 +1,11 @@ +#include +#include "ossie/ossieSupport.h" + +#include "alloc_shm.h" +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new alloc_shm_i(uuid.c_str(), identifier.c_str()); + } +} + diff --git a/redhawk/src/testing/sdr/dom/components/another_ticket_462/another_ticket_462.spec b/redhawk/src/testing/sdr/dom/components/another_ticket_462/another_ticket_462.spec deleted file mode 100644 index 13eac4c74..000000000 --- a/redhawk/src/testing/sdr/dom/components/another_ticket_462/another_ticket_462.spec +++ /dev/null @@ -1,90 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: another_ticket_462 -Summary: Component %{name} -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Components -Source: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-root - -Requires: redhawk >= 1.8 -BuildRequires: redhawk >= 1.8 -BuildRequires: autoconf automake libtool - -BuildArch: noarch - -# Python requirements -Requires: python omniORBpy -BuildRequires: libomniORBpy3-devel -BuildRequires: python-devel >= 2.3 - - -%description -Component %{name} - - -%prep -%setup - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/another_ticket_462/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/another_ticket_462/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/another_ticket_462.spd.xml -%{_prefix}/dom/components/%{name}/another_ticket_462.prf.xml -%{_prefix}/dom/components/%{name}/another_ticket_462.scd.xml -%{_prefix}/dom/components/%{name}/python diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.prf.xml 
b/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.prf.xml @@ -0,0 +1,3 @@ + + + diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.scd.xml b/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.spd.xml b/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.spd.xml new file mode 100644 index 000000000..1b850f0c2 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/busycomp/busycomp.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/busycomp.so + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/Makefile.am new file mode 100644 index 000000000..0cc41ceb3 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/Makefile.am @@ -0,0 +1,60 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +CFDIR = $(top_srcdir)/base + +ossieName = busycomp +libdir = $(prefix)/dom/components/busycomp/cpp +lib_LTLIBRARIES = busycomp.la + +.PHONY: convenience-link clean-convenience-link + +install: + +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : busycomp.la + @ln -fs .libs/busycomp.so + +clean-convenience-link: + @rm -f busycomp.so + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. +busycomp_la_SOURCES = main.cpp busycomp.cpp busycomp_base.cpp +busycomp_la_LIBADD = $(SOFTPKG_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la +busycomp_la_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) -I$(CFDIR)/include +busycomp_la_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp.cpp b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp.cpp new file mode 100644 index 000000000..3d56e38db --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp.cpp @@ -0,0 +1,249 @@ +/************************************************************************** + + This is the component code. 
This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "busycomp.h" + +PREPARE_LOGGING(busycomp_i) + +busycomp_i::busycomp_i(const char *uuid, const char *label) : + busycomp_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + +} + +busycomp_i::~busycomp_i() +{ +} + +void busycomp_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. 
+ + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. 
+ + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. 
Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void busycomp_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &busycomp_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (busycomp_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &busycomp_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to busycomp.cpp + busycomp_i::busycomp_i(const char *uuid, const char *label) : + busycomp_base(uuid, label) + { + addPropertyListener(scaleValue, this, &busycomp_i::scaleChanged); + addPropertyListener(status, this, &busycomp_i::statusChanged); + } + + void busycomp_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(busycomp_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void busycomp_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(busycomp_i, "status changed"); + } + + //Add to busycomp.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int busycomp_i::serviceFunction() +{ + LOG_DEBUG(busycomp_i, "serviceFunction() example log message"); + + return NORMAL; +} + diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp.h b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp.h new file mode 100644 index 000000000..f5cf1396e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp.h @@ -0,0 +1,18 @@ +#ifndef BUSYCOMP_I_IMPL_H +#define BUSYCOMP_I_IMPL_H + +#include "busycomp_base.h" + +class busycomp_i : public busycomp_base +{ + ENABLE_LOGGING + public: + busycomp_i(const char *uuid, const char *label); + ~busycomp_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // BUSYCOMP_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp_base.cpp b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp_base.cpp new file mode 100644 index 000000000..bc3ffb71c --- /dev/null +++ 
b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp_base.cpp @@ -0,0 +1,60 @@ +#include "busycomp_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +busycomp_base::busycomp_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); +} + +busycomp_base::~busycomp_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. +*******************************************************************************************/ +void busycomp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void busycomp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void busycomp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void busycomp_base::loadProperties() +{ +} + + diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp_base.h b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp_base.h new file 
mode 100644 index 000000000..1d6a69421 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/busycomp_base.h @@ -0,0 +1,27 @@ +#ifndef BUSYCOMP_BASE_IMPL_BASE_H +#define BUSYCOMP_BASE_IMPL_BASE_H + +#include +#include +#include + + +class busycomp_base : public Component, protected ThreadedComponent +{ + public: + busycomp_base(const char *uuid, const char *label); + ~busycomp_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + + private: +}; +#endif // BUSYCOMP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/busycomp/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/main.cpp new file mode 100644 index 000000000..17391581a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/busycomp/cpp/main.cpp @@ -0,0 +1,11 @@ +#include +#include "ossie/ossieSupport.h" + +#include "busycomp.h" +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new busycomp_i(uuid.c_str(), identifier.c_str()); + } +} + diff --git a/redhawk/src/testing/sdr/dom/components/commandline_prop/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/commandline_prop/cpp/Makefile.am index c4c115801..ec1cc5797 100644 --- a/redhawk/src/testing/sdr/dom/components/commandline_prop/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/commandline_prop/cpp/Makefile.am @@ -42,7 +42,7 @@ distclean-local: # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide commandline_prop_SOURCES = $(redhawk_SOURCES_auto) -commandline_prop_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(redhawk_LDADD_auto) +commandline_prop_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) commandline_prop_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include -I$(CFDIR)/include/ossie commandline_prop_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.prf.xml b/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.prf.xml index 3624eb818..8f537d89f 100644 --- a/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.prf.xml +++ b/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.prf.xml @@ -1,22 +1,3 @@ - diff --git a/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.scd.xml b/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.scd.xml index f78d4af11..1a7d502e3 100644 --- a/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.scd.xml +++ b/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.scd.xml @@ -1,23 +1,4 @@ - 2.2 @@ -35,7 +16,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + @@ -62,11 +43,11 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- - - + + + - - + + diff --git a/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.spd.xml b/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.spd.xml index 09b6b4a94..ddbf6ae67 100644 --- a/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/comp_snk/comp_snk.spd.xml @@ -1,25 +1,6 @@ - - + null @@ -31,7 +12,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - The implementation contains descriptive information about the template for a software component. + The implementation contains descriptive information about the template for a software resource. python/comp_snk.py diff --git a/redhawk/src/testing/sdr/dom/components/comp_snk/python/comp_snk.py b/redhawk/src/testing/sdr/dom/components/comp_snk/python/comp_snk.py index 3b3155a22..9d6c0ba8f 100755 --- a/redhawk/src/testing/sdr/dom/components/comp_snk/python/comp_snk.py +++ b/redhawk/src/testing/sdr/dom/components/comp_snk/python/comp_snk.py @@ -1,24 +1,5 @@ #!/usr/bin/env python # -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# # # AUTO-GENERATED # @@ -52,7 +33,7 @@ def process(self): StreamSRI: To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): - self.sri = bulkio.sri.create(self.stream_id) + sri = bulkio.sri.create("my_stream_id") PrecisionUTCTime: To create a PrecisionUTCTime object, use the following code: diff --git a/redhawk/src/testing/sdr/dom/components/comp_snk/python/comp_snk_base.py b/redhawk/src/testing/sdr/dom/components/comp_snk/python/comp_snk_base.py index 3df90b956..801985b48 100644 --- a/redhawk/src/testing/sdr/dom/components/comp_snk/python/comp_snk_base.py +++ b/redhawk/src/testing/sdr/dom/components/comp_snk/python/comp_snk_base.py @@ -1,24 +1,5 @@ #!/usr/bin/env python # -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# # AUTO-GENERATED CODE. DO NOT MODIFY! 
# # Source: comp_snk.spd.xml @@ -31,7 +12,7 @@ import Queue, copy, time, threading from ossie.resource import usesport, providesport -import bulkio +from ossie.events import MessageConsumerPort class comp_snk_base(CF__POA.Resource, Component, ThreadedComponent): # These values can be altered in the __init__ of your derived class @@ -50,7 +31,7 @@ def __init__(self, identifier, execparams): # in future releases self.auto_start = False # Instantiate the default implementations for all ports on this component - self.port_dataFloat_in = bulkio.InFloatPort("dataFloat_in", maxsize=self.DEFAULT_QUEUE_SIZE) + self.port_input = MessageConsumerPort(thread_sleep=0.1, parent = self) def start(self): Component.start(self) @@ -74,9 +55,9 @@ def releaseObject(self): # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, # or via the IDE. - port_dataFloat_in = providesport(name="dataFloat_in", - repid="IDL:BULKIO/dataFloat:1.0", - type_="control") + port_input = providesport(name="input", + repid="IDL:ExtendedEvent/MessageEvent:1.0", + type_="control") ###################################################################### # PROPERTIES diff --git a/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.prf.xml b/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.prf.xml index 3624eb818..8f537d89f 100644 --- a/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.prf.xml +++ b/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.prf.xml @@ -1,22 +1,3 @@ - diff --git a/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.scd.xml b/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.scd.xml index ae645f2c5..62a50f70f 100644 --- a/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.scd.xml +++ b/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.scd.xml @@ -1,23 +1,4 @@ - 2.2 @@ -35,7 +16,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + @@ -62,11 +43,11 @@ with this program. 
If not, see http://www.gnu.org/licenses/. - - - + + + - - + + diff --git a/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.spd.xml b/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.spd.xml index 272b11fc1..4e49aa7a1 100644 --- a/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/comp_src/comp_src.spd.xml @@ -1,25 +1,6 @@ - - + null @@ -31,7 +12,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - The implementation contains descriptive information about the template for a software component. + The implementation contains descriptive information about the template for a software resource. python/comp_src.py diff --git a/redhawk/src/testing/sdr/dom/components/comp_src/python/comp_src.py b/redhawk/src/testing/sdr/dom/components/comp_src/python/comp_src.py index 15a423a72..768583f31 100755 --- a/redhawk/src/testing/sdr/dom/components/comp_src/python/comp_src.py +++ b/redhawk/src/testing/sdr/dom/components/comp_src/python/comp_src.py @@ -1,24 +1,5 @@ #!/usr/bin/env python # -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# # # AUTO-GENERATED # @@ -52,7 +33,7 @@ def process(self): StreamSRI: To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): - self.sri = bulkio.sri.create(self.stream_id) + sri = bulkio.sri.create("my_stream_id") PrecisionUTCTime: To create a PrecisionUTCTime object, use the following code: diff --git a/redhawk/src/testing/sdr/dom/components/comp_src/python/comp_src_base.py b/redhawk/src/testing/sdr/dom/components/comp_src/python/comp_src_base.py index f361673a7..d6d0396fb 100644 --- a/redhawk/src/testing/sdr/dom/components/comp_src/python/comp_src_base.py +++ b/redhawk/src/testing/sdr/dom/components/comp_src/python/comp_src_base.py @@ -1,24 +1,5 @@ #!/usr/bin/env python # -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# # AUTO-GENERATED CODE. DO NOT MODIFY! 
# # Source: comp_src.spd.xml @@ -31,7 +12,7 @@ import Queue, copy, time, threading from ossie.resource import usesport, providesport -import bulkio +from ossie.events import MessageSupplierPort class comp_src_base(CF__POA.Resource, Component, ThreadedComponent): # These values can be altered in the __init__ of your derived class @@ -50,7 +31,7 @@ def __init__(self, identifier, execparams): # in future releases self.auto_start = False # Instantiate the default implementations for all ports on this component - self.port_dataFloat_out = bulkio.OutFloatPort("dataFloat_out") + self.port_output = MessageSupplierPort() def start(self): Component.start(self) @@ -74,9 +55,9 @@ def releaseObject(self): # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, # or via the IDE. - port_dataFloat_out = usesport(name="dataFloat_out", - repid="IDL:BULKIO/dataFloat:1.0", - type_="control") + port_output = usesport(name="output", + repid="IDL:ExtendedEvent/MessageEvent:1.0", + type_="control") ###################################################################### # PROPERTIES diff --git a/redhawk/src/testing/sdr/dom/components/cpp_with_deps/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/cpp_with_deps/cpp/Makefile.am index 40c21372e..3015d8256 100644 --- a/redhawk/src/testing/sdr/dom/components/cpp_with_deps/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/cpp_with_deps/cpp/Makefile.am @@ -53,7 +53,7 @@ distclean-local: # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide cpp_with_deps_SOURCES = $(redhawk_SOURCES_auto) -cpp_with_deps_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) $(redhawk_LDADD_auto) +cpp_with_deps_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(SOFTPKG_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(OMNIORB_LIBS) cpp_with_deps_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto ) -I$(CFDIR)/include cpp_with_deps_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/cpp_with_deps/cpp_with_deps.spec b/redhawk/src/testing/sdr/dom/components/cpp_with_deps/cpp_with_deps.spec deleted file mode 100644 index e51e28968..000000000 --- a/redhawk/src/testing/sdr/dom/components/cpp_with_deps/cpp_with_deps.spec +++ /dev/null @@ -1,87 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. 
If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: cpp_with_deps -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 2.0 -Requires: redhawk >= 2.0 - - - -%description -Component %{name} - * Commit: __REVISION__ - * Source Date/Time: __DATETIME__ - - -%prep -%setup -q - - -%build -# Implementation cpp -pushd cpp -./reconf -%define _bindir %{_prefix}/dom/components/cpp_with_deps/cpp -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation cpp -pushd cpp -%define _bindir %{_prefix}/dom/components/cpp_with_deps/cpp -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/cpp_with_deps -%{_prefix}/dom/components/cpp_with_deps/cpp_with_deps.scd.xml -%{_prefix}/dom/components/cpp_with_deps/cpp_with_deps.prf.xml -%{_prefix}/dom/components/cpp_with_deps/cpp_with_deps.spd.xml -%{_prefix}/dom/components/cpp_with_deps/cpp - diff --git a/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.prf.xml b/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.prf.xml @@ -0,0 
+1,3 @@ + + + diff --git a/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.scd.xml b/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.spd.xml b/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.spd.xml new file mode 100644 index 000000000..1a55435f8 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/hanging_stop/hanging_stop.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/hanging_stop.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/hanging_stop/python/hanging_stop.py b/redhawk/src/testing/sdr/dom/components/hanging_stop/python/hanging_stop.py new file mode 100755 index 000000000..b80fe93b1 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/hanging_stop/python/hanging_stop.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: hanging_stop.spd.xml +from ossie.resource import start_component +import logging + +from hanging_stop_base import * + +class hanging_stop_i(hanging_stop_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. 
+ + def stop(self): + while True: + time.sleep(0.1) + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", hanging_stop_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = hanging_stop_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(hanging_stop_i) + diff --git a/redhawk/src/testing/sdr/dom/components/hanging_stop/python/hanging_stop_base.py b/redhawk/src/testing/sdr/dom/components/hanging_stop/python/hanging_stop_base.py new file mode 100644 index 000000000..99bc6f672 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/hanging_stop/python/hanging_stop_base.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: hanging_stop.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * + +import Queue, copy, time, threading + +class hanging_stop_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + + diff --git a/redhawk/src/testing/sdr/dom/components/huge_msg_cpp/cpp/huge_msg_cpp.cpp b/redhawk/src/testing/sdr/dom/components/huge_msg_cpp/cpp/huge_msg_cpp.cpp index b85e54884..035702ecd 100644 --- a/redhawk/src/testing/sdr/dom/components/huge_msg_cpp/cpp/huge_msg_cpp.cpp +++ b/redhawk/src/testing/sdr/dom/components/huge_msg_cpp/cpp/huge_msg_cpp.cpp @@ -244,16 +244,28 @@ int huge_msg_cpp_i::serviceFunction() { LOG_DEBUG(huge_msg_cpp_i, "serviceFunction() example log message"); ::my_msg_struct msg; - std::string str_msg = "1234567890"; - msg.string_payload = str_msg; + std::string payload = "1234567890"; + msg.string_payload = payload; for (unsigned int i=0; i<250000; i++) { - msg.string_payload += str_msg; + msg.string_payload += payload; } this->output->sendMessage(msg); std::vector msgs; msgs.push_back(msg); this->output->sendMessages(msgs); + msgs.clear(); + std::string str_msg = payload; + for (unsigned int i=0; i<19999; i++) { + str_msg += payload; + } + for (unsigned int i=0; i<101; i++) { + ::my_msg_struct message; + message.string_payload = str_msg; + msgs.push_back(message); + } + LOG_DEBUG(huge_msg_cpp_i, "Sending " << msgs.size() << " messages"); + this->output->sendMessages(msgs); return FINISH; } diff --git a/redhawk/src/testing/sdr/dom/components/huge_msg_java/java/src/huge_msg_java/java/huge_msg_java.java b/redhawk/src/testing/sdr/dom/components/huge_msg_java/java/src/huge_msg_java/java/huge_msg_java.java index 2893820a6..f079f46a3 100644 --- a/redhawk/src/testing/sdr/dom/components/huge_msg_java/java/src/huge_msg_java/java/huge_msg_java.java +++ b/redhawk/src/testing/sdr/dom/components/huge_msg_java/java/src/huge_msg_java/java/huge_msg_java.java @@ -241,13 +241,22 @@ protected int serviceFunction() { String payload = "1234567890"; StringBuilder str_msg = new StringBuilder(); for (int i=0; i<250001; i++) { - str_msg.append(payload); + 
str_msg.append(payload); } - msg.string_payload.setValue(str_msg.toString()); + msg.string_payload.setValue(str_msg.toString()); this.port_output.sendMessage(msg); List msgs = new ArrayList(); msgs.add(msg); this.port_output.sendMessages(msgs); + + msgs.clear(); + str_msg.setLength(200000); + msg.string_payload.setValue(str_msg.toString()); + for (int i = 0; i < 101; ++i) { + msgs.add(msg); + } + this.port_output.sendMessages(msgs); + return FINISH; } diff --git a/redhawk/src/testing/sdr/dom/components/huge_msg_python/python/huge_msg_python.py b/redhawk/src/testing/sdr/dom/components/huge_msg_python/python/huge_msg_python.py index 0734af56e..7f360e570 100755 --- a/redhawk/src/testing/sdr/dom/components/huge_msg_python/python/huge_msg_python.py +++ b/redhawk/src/testing/sdr/dom/components/huge_msg_python/python/huge_msg_python.py @@ -154,9 +154,18 @@ def mycallback(self, id, old_value, new_value): msg.string_payload = str_msg self.port_output.sendMessage(msg) self.port_output.sendMessages([msg]) + + msgs = [] + str_msg = payload + for i in range(19999): + str_msg += payload + msg.string_payload = str_msg + for i in range(101): + msgs.append(msg) + self._log.debug("Sending %d messages" % len(msgs)) + self.port_output.sendMessages(msgs) return FINISH - if __name__ == '__main__': logging.getLogger().setLevel(logging.INFO) logging.debug("Starting Component") diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.javaSoftpkgJarDep.wavedev b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.javaSoftpkgJarDep.wavedev new file mode 100644 index 000000000..9c9170a72 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.javaSoftpkgJarDep.wavedev @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.md5sums b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.md5sums new file mode 100644 index 000000000..1ea7fb1f5 --- /dev/null +++ 
b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.md5sums @@ -0,0 +1,2 @@ +4995709c3260fa3898e7cf864ff8ef61 javaSoftpkgJarDep.spec +de7d9a8747d41129c575e5f7d9b502d6 build.sh diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.project b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.project new file mode 100644 index 000000000..5a69fd075 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.project @@ -0,0 +1,25 @@ + + + javaSoftpkgJarDep + + + + + + org.eclipse.jdt.core.javabuilder + + + + + gov.redhawk.ide.builders.scaproject + + + + + + gov.redhawk.ide.natures.scaproject + org.python.pydev.pythonNature + gov.redhawk.ide.natures.sca.component + org.eclipse.jdt.core.javanature + + diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.pydevproject b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.pydevproject new file mode 100644 index 000000000..d001f0aea --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.pydevproject @@ -0,0 +1,5 @@ + + +Default +python interpreter + diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.settings/gov.redhawk.ide.sdr.ui.prefs b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.settings/gov.redhawk.ide.sdr.ui.prefs new file mode 100644 index 000000000..0beee6543 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/.settings/gov.redhawk.ide.sdr.ui.prefs @@ -0,0 +1,2 @@ +eclipse.preferences.version=1 +useBuild.sh=true diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/build.sh b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/build.sh new file mode 100755 index 000000000..49227e343 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/build.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +if [ "$1" = "rpm" ]; then + # A very simplistic RPM build scenario + if [ -e javaSoftpkgJarDep.spec ]; then + mydir=`dirname $0` + tmpdir=`mktemp 
-d` + cp -r ${mydir} ${tmpdir}/javaSoftpkgJarDep-1.0.0 + tar czf ${tmpdir}/javaSoftpkgJarDep-1.0.0.tar.gz --exclude=".svn" --exclude=".git" -C ${tmpdir} javaSoftpkgJarDep-1.0.0 + rpmbuild -ta ${tmpdir}/javaSoftpkgJarDep-1.0.0.tar.gz + rm -rf $tmpdir + else + echo "Missing RPM spec file in" `pwd` + exit 1 + fi +else + for impl in java ; do + if [ ! -d "$impl" ]; then + echo "Directory '$impl' does not exist...continuing" + continue + fi + cd $impl + if [ -e build.sh ]; then + if [ $# == 1 ]; then + if [ $1 == 'clean' ]; then + rm -f Makefile + rm -f config.* + ./build.sh distclean + else + ./build.sh $* + fi + else + ./build.sh $* + fi + elif [ -e Makefile ] && [ Makefile.am -ot Makefile ]; then + make $* + elif [ -e reconf ]; then + ./reconf && ./configure && make $* + else + echo "No build.sh found for $impl" + fi + retval=$? + if [ $retval != '0' ]; then + exit $retval + fi + cd - + done +fi diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/.md5sums b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/.md5sums new file mode 100644 index 000000000..86734604b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/.md5sums @@ -0,0 +1,6 @@ +4898e1897ae4acef17e0c2445de401f4 src/javaSoftpkgDepComp/java/javaSoftpkgJarDep.java +65c00ae5ec6e4e27fc27101fc988efa4 startJava.sh +8bfcd22353c3a57fee561ad86ee2a56b reconf +b7d3f3e2bb055197ea853c28b72118ad src/javaSoftpkgDepComp/java/javaSoftpkgJarDep_base.java +7208e6d9b547ef444e39b7256365e8ac configure.ac +1966984521431b53916a812153ce8d75 Makefile.am diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/Makefile.am b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/Makefile.am new file mode 100644 index 000000000..1ebb3327f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/Makefile.am @@ -0,0 +1,38 @@ +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + 
+javaSoftpkgJarDep_jar_CLASSPATH = ../../../deps/java_dep1/java/java_dep1.jar:$(OSSIE_CLASSPATH): + +javaSoftpkgJarDep.jar$(EXEEXT): $(javaSoftpkgJarDep_jar_SOURCES) + mkdir -p bin + $(JAVAC) -cp $(javaSoftpkgJarDep_jar_CLASSPATH) -g -d bin $(javaSoftpkgJarDep_jar_SOURCES) + $(JAR) cf ./javaSoftpkgJarDep.jar -C bin . + $(JAR) uf ./javaSoftpkgJarDep.jar -C src . + +clean-local: + rm -rf bin + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + +ossieName = javaSoftpkgJarDep +bindir = $(prefix)/dom/components/javaSoftpkgJarDep/java/ +bin_PROGRAMS = javaSoftpkgJarDep.jar +javaSoftpkgJarDep_jar_SOURCES := $(shell find ./src -name "*.java") + +xmldir = $(prefix)/dom/components/javaSoftpkgJarDep/ +dist_xml_DATA = ../javaSoftpkgJarDep.scd.xml ../javaSoftpkgJarDep.prf.xml ../javaSoftpkgJarDep.spd.xml + +domdir = $(prefix)/dom/components/javaSoftpkgJarDep/java/ +dist_dom_SCRIPTS = startJava.sh diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/configure.ac b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/configure.ac new file mode 100644 index 000000000..425b38569 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/configure.ac @@ -0,0 +1,18 @@ +AC_INIT(javaSoftpkgJarDep, 1.0.0) +AM_INIT_AUTOMAKE([nostdinc foreign]) +AC_CONFIG_MACRO_DIR([m4]) + +OSSIE_CHECK_OSSIE +OSSIE_SDRROOT_AS_PREFIX + +PKG_CHECK_MODULES([OSSIE], [ossie >= 2.0]) + +RH_JAVA_HOME +RH_PROG_JAVAC([1.6]) +RH_PROG_JAR + +RH_PKG_CLASSPATH([REDHAWK], [ossie]) +RH_SOFTPKG_JAVA([/deps/java_dep1/java_dep1.spd.xml]) + +AC_CONFIG_FILES([Makefile]) +AC_OUTPUT diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/reconf b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/reconf new file mode 100755 index 000000000..8ff01d431 --- /dev/null 
+++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/reconf @@ -0,0 +1,6 @@ +#!/bin/sh + +rm -f config.cache +[ -d m4 ] || mkdir m4 +autoreconf -i + diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/src/javaSoftpkgDepComp/java/javaSoftpkgJarDep.java b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/src/javaSoftpkgDepComp/java/javaSoftpkgJarDep.java new file mode 100644 index 000000000..520019280 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/src/javaSoftpkgDepComp/java/javaSoftpkgJarDep.java @@ -0,0 +1,259 @@ +package javaSoftpkgDepComp.java; + +import java.util.Properties; +import helloworld.HelloWorld; + +/** + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping + * + * Source: javaSoftpkgJarDep.spd.xml + */ +public class javaSoftpkgJarDep extends javaSoftpkgJarDep_base { + private HelloWorld helloworld; + + /** + * This is the component constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. + * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app = this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. 
+ */ + + public javaSoftpkgJarDep() + { + super(); + } + + public void constructor() + { + logger.debug("Constructor called!"); + this.helloworld = new HelloWorld(); + logger.debug("Constructor finished!"); + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. 
In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. + * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the component has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + logger.debug("serviceFunction() example log message"); + this.hello.setValue(helloworld.b); + + return NOOP; + } + + /** + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/src/javaSoftpkgDepComp/java/javaSoftpkgJarDep_base.java b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/src/javaSoftpkgDepComp/java/javaSoftpkgJarDep_base.java new file mode 100644 index 000000000..472571bb5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/src/javaSoftpkgDepComp/java/javaSoftpkgJarDep_base.java @@ -0,0 +1,111 @@ +package javaSoftpkgDepComp.java; + + +import java.util.Properties; + +import org.apache.log4j.Logger; + +import org.omg.CosNaming.NamingContextPackage.CannotProceed; +import org.omg.CosNaming.NamingContextPackage.InvalidName; +import org.omg.CosNaming.NamingContextPackage.NotFound; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.InvalidObjectReference; + +import org.ossie.component.*; +import org.ossie.properties.*; + + +/** + * This is the component code. This file contains all the access points + * you need to use to be able to access all input and output ports, + * respond to incoming data, and perform general component housekeeping + * + * Source: javaSoftpkgJarDep.spd.xml + * + * @generated + */ + +public abstract class javaSoftpkgJarDep_base extends Component { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(javaSoftpkgJarDep_base.class.getName()); + + /** + * The property hello + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + public final StringProperty hello = + new StringProperty( + "hello", //id + "hello", //name + "world", //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * @generated + */ + public javaSoftpkgJarDep_base() + { + super(); + + setLogger( logger, javaSoftpkgJarDep_base.class.getName() ); + + + // Properties + addProperty(hello); + + } + + public void start() throws CF.ResourcePackage.StartError + { + super.start(); + } + + public void stop() throws CF.ResourcePackage.StopError + { + super.stop(); + } + + + /** + * The main function of your component. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. + * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + javaSoftpkgJarDep.configureOrb(orbProps); + + try { + Component.start_component(javaSoftpkgJarDep.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (NotFound e) { + e.printStackTrace(); + } catch (CannotProceed e) { + e.printStackTrace(); + } catch (InvalidName e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/startJava.sh b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/startJava.sh new file mode 100755 index 000000000..e2dfabdf1 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/java/startJava.sh @@ -0,0 +1,30 @@ +#!/bin/sh +myDir=`dirname $0` + +# Setup the OSSIEHOME Lib jars on the classpath +libDir=$OSSIEHOME/lib +libFiles=`ls -1 $libDir/*.jar` +for file in $libFiles +do + if [ 
x"$CLASSPATH" = "x" ] + then + export CLASSPATH=$file + else + export CLASSPATH=$file:$CLASSPATH + fi +done + +# Path for Java +if test -x $JAVA_HOME/bin/java; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=java +fi + +# NOTE: the $@ must be quoted "$@" for arguments to be passed correctly + +#Sun ORB start line +exec $JAVA -cp :$myDir/javaSoftpkgJarDep.jar:$myDir/bin:$CLASSPATH javaSoftpkgDepComp.java.javaSoftpkgJarDep "$@" + +#JacORB start lines +#exec $JAVA -cp :$myDir/jacorb.jar:$myDir/antlr.jar:$myDir/avalon-framework.jar:$myDir/backport-util-concurrent.jar:$myDir/logkit.jar:$myDir/javaSoftpkgJarDep.jar:$myDir/bin:$CLASSPATH javaSoftpkgDepComp.java.javaSoftpkgJarDep "$@" diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.prf.xml b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.prf.xml new file mode 100644 index 000000000..948a86779 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.prf.xml @@ -0,0 +1,9 @@ + + + + + world + + + + \ No newline at end of file diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.scd.xml b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.scd.xml new file mode 100644 index 000000000..fb0b4311f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.spd.xml b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.spd.xml new file mode 100644 index 000000000..a8fa70ee1 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.spd.xml @@ -0,0 +1,31 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information 
about the template for a software resource. + + + java/startJava.sh + + + + + + + + + + + + + \ No newline at end of file diff --git a/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.spec b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.spec new file mode 100644 index 000000000..7013d1d61 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.spec @@ -0,0 +1,75 @@ +# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) +# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) +%{!?_sdrroot: %global _sdrroot /var/redhawk/sdr} +%define _prefix %{_sdrroot} +Prefix: %{_prefix} + +# Point install paths to locations within our target SDR root +%define _sysconfdir %{_prefix}/etc +%define _localstatedir %{_prefix}/var +%define _mandir %{_prefix}/man +%define _infodir %{_prefix}/info + +Name: javaSoftpkgJarDep +Version: 1.0.0 +Release: 1%{?dist} +Summary: Component %{name} + +Group: REDHAWK/Components +License: None +Source0: %{name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: redhawk-devel >= 2.0 +Requires: redhawk >= 2.0 + +BuildRequires: java_dep1-devel +Requires: java_dep1 + +BuildArch: noarch + +# Implementation java +Requires: java >= 1.6 +BuildRequires: java-devel >= 1.6 + +%description +Component %{name} + * Commit: __REVISION__ + * Source Date/Time: __DATETIME__ + + +%prep +%setup -q + + +%build +# Implementation java +pushd java +./reconf +%define _bindir %{_prefix}/dom/components/javaSoftpkgJarDep/java +%configure +make %{?_smp_mflags} +popd + + +%install +rm -rf $RPM_BUILD_ROOT +# Implementation java +pushd java +%define _bindir %{_prefix}/dom/components/javaSoftpkgJarDep/java +make install DESTDIR=$RPM_BUILD_ROOT +popd + + +%clean +rm -rf $RPM_BUILD_ROOT + + +%files +%defattr(-,redhawk,redhawk,-) 
+%dir %{_sdrroot}/dom/components/javaSoftpkgJarDep +%{_prefix}/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.scd.xml +%{_prefix}/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.prf.xml +%{_prefix}/dom/components/javaSoftpkgJarDep/javaSoftpkgJarDep.spd.xml +%{_prefix}/dom/components/javaSoftpkgJarDep/java + diff --git a/redhawk/src/testing/sdr/dom/components/logger/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/logger/cpp/Makefile.am new file mode 100644 index 000000000..f409b0001 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/cpp/Makefile.am @@ -0,0 +1,60 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# +CFDIR = $(top_srcdir)/base + +ossieName = logger +libdir = $(prefix)/dom/components/logger/cpp +lib_LTLIBRARIES = logger.la + +.PHONY: convenience-link clean-convenience-link + +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : logger.la + @ln -fs .libs/logger.so + +clean-convenience-link: + @rm -f logger.so + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. +include $(srcdir)/Makefile.am.ide +logger_la_SOURCES = $(redhawk_SOURCES_auto) +logger_la_LIBADD = $(SOFTPKG_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la +logger_la_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) -I$(CFDIR)/include +logger_la_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) + diff --git a/redhawk/src/testing/sdr/dom/components/logger/cpp/Makefile.am.ide b/redhawk/src/testing/sdr/dom/components/logger/cpp/Makefile.am.ide new file mode 100644 index 000000000..fa0d76d98 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/cpp/Makefile.am.ide @@ -0,0 +1,10 @@ +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! +# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. 
Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = logger.cpp +redhawk_SOURCES_auto += logger.h +redhawk_SOURCES_auto += logger_base.cpp +redhawk_SOURCES_auto += logger_base.h +redhawk_SOURCES_auto += main.cpp diff --git a/redhawk/src/testing/sdr/dom/components/logger/cpp/logger.cpp b/redhawk/src/testing/sdr/dom/components/logger/cpp/logger.cpp new file mode 100644 index 000000000..d07954fff --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/cpp/logger.cpp @@ -0,0 +1,264 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "logger.h" + +PREPARE_LOGGING(logger_i) + +logger_i::logger_i(const char *uuid, const char *label) : + logger_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. 
+ +} + +logger_i::~logger_i() +{ +} + +void logger_i::constructor() +{ + baseline_1_logger = this->_baseLog->getChildLogger("some_stuff"); + baseline_2_logger = this->_baseLog->getChildLogger("more_stuff"); + namespaced_logger = this->_baseLog->getChildLogger("lower", "namespace"); + basetree_logger = this->_baseLog->getChildLogger("lower", ""); + rh_logger::LoggerPtr child_ns_logger = basetree_logger->getChildLogger("first", "second"); + rh_logger::LoggerPtr child_no_ns_logger = basetree_logger->getChildLogger("third"); + basel4_logger = this->_baseLog->getChildLogger("access", "l4"); +#ifdef HAVE_LOG4CXX + my_l4 = log4cxx::LoggerPtr(static_cast(basel4_logger->getUnderlyingLogger())); +#endif +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. 
The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. + + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); 
+ } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void logger_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &logger_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (logger_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &logger_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to logger.cpp + logger_i::logger_i(const char *uuid, const char *label) : + logger_base(uuid, label) + { + addPropertyListener(scaleValue, this, &logger_i::scaleChanged); + addPropertyListener(status, this, &logger_i::statusChanged); + } + + void logger_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(logger_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void logger_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(logger_i, "status changed"); + } + + //Add to logger.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int logger_i::serviceFunction() +{ + LOG_DEBUG(logger_i, "serviceFunction() example log message"); + RH_DEBUG(_baseLog, "message from _log"); + RH_DEBUG(baseline_1_logger, "message from baseline_1_logger"); + RH_DEBUG(baseline_2_logger, "message from baseline_2_logger"); + RH_DEBUG(namespaced_logger, "message 
from namespaced_logger"); + RH_DEBUG(basetree_logger, "message from basetree_logger"); +#ifdef HAVE_LOG4CXX + my_l4->info("this is the log4cxx logger"); +#endif + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dom/components/logger/cpp/logger.h b/redhawk/src/testing/sdr/dom/components/logger/cpp/logger.h new file mode 100644 index 000000000..4553a73f7 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/cpp/logger.h @@ -0,0 +1,30 @@ +#ifndef LOGGER_I_IMPL_H +#define LOGGER_I_IMPL_H + +#include "logger_base.h" + +#ifdef HAVE_LOG4CXX +#include +#endif + +class logger_i : public logger_base +{ + ENABLE_LOGGING + public: + logger_i(const char *uuid, const char *label); + ~logger_i(); + + void constructor(); + + int serviceFunction(); + rh_logger::LoggerPtr baseline_1_logger; + rh_logger::LoggerPtr baseline_2_logger; + rh_logger::LoggerPtr namespaced_logger; + rh_logger::LoggerPtr basetree_logger; + rh_logger::LoggerPtr basel4_logger; +#ifdef HAVE_LOG4CXX + log4cxx::LoggerPtr my_l4; +#endif +}; + +#endif // LOGGER_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dom/components/logger/cpp/logger_base.cpp b/redhawk/src/testing/sdr/dom/components/logger/cpp/logger_base.cpp new file mode 100644 index 000000000..51c644654 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/cpp/logger_base.cpp @@ -0,0 +1,60 @@ +#include "logger_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. 
Instead, overload them + on the child class + +******************************************************************************************/ + +logger_base::logger_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); +} + +logger_base::~logger_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. +*******************************************************************************************/ +void logger_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void logger_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void logger_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void logger_base::loadProperties() +{ +} + + diff --git a/redhawk/src/testing/sdr/dom/components/logger/cpp/logger_base.h b/redhawk/src/testing/sdr/dom/components/logger/cpp/logger_base.h new file mode 100644 index 000000000..05e12ecab --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/cpp/logger_base.h @@ -0,0 +1,27 @@ +#ifndef LOGGER_BASE_IMPL_BASE_H +#define LOGGER_BASE_IMPL_BASE_H + +#include +#include +#include + + +class logger_base : public Component, protected ThreadedComponent +{ + public: + logger_base(const char *uuid, const char *label); + ~logger_base(); + + void start() throw 
(CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + + private: +}; +#endif // LOGGER_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/logger/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/logger/cpp/main.cpp new file mode 100644 index 000000000..8fb4b9f02 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/cpp/main.cpp @@ -0,0 +1,11 @@ +#include +#include "ossie/ossieSupport.h" + +#include "logger.h" +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new logger_i(uuid.c_str(), identifier.c_str()); + } +} + diff --git a/redhawk/src/testing/sdr/dom/components/logger/logger.prf.xml b/redhawk/src/testing/sdr/dom/components/logger/logger.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/logger.prf.xml @@ -0,0 +1,3 @@ + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger/logger.scd.xml b/redhawk/src/testing/sdr/dom/components/logger/logger.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/logger.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger/logger.spd.xml b/redhawk/src/testing/sdr/dom/components/logger/logger.spd.xml new file mode 100644 index 000000000..e5d98f42e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger/logger.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + cpp/logger.so + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger_java/java/Makefile.am b/redhawk/src/testing/sdr/dom/components/logger_java/java/Makefile.am new file mode 100644 index 000000000..6b6593eb4 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_java/java/Makefile.am @@ -0,0 +1,41 @@ +## This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# # +# # This file is part of REDHAWK core. +# # +# # REDHAWK core is free software: you can redistribute it and/or modify it under +# # the terms of the GNU Lesser General Public License as published by the Free +# # Software Foundation, either version 3 of the License, or (at your option) any +# # later version. +# # +# # REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# # details. +# # +# # You should have received a copy of the GNU Lesser General Public License +# # along with this program. If not, see http://www.gnu.org/licenses/. +# # +# +if HAVE_JAVASUPPORT + +logger_java.jar: + mkdir -p bin + find ./src -name "*.java" > fileList.txt + $(JAVAC) -cp $(OSSIE_CLASSPATH) -d bin @fileList.txt + $(JAR) cf ./logger_java.jar -C bin . 
+ rm fileList.txt + +clean-local: + rm -rf bin + +logger_java_jar_SOURCES := $(shell find ./src -name "*.java") + +ossieName = logger_java +noinst_PROGRAMS = logger_java.jar + +else + +all-local: + @echo "Java support disabled - logger_java will not be compiled" +endif diff --git a/redhawk/src/testing/sdr/dom/components/logger_java/java/src/logger_java/java/logger_java.java b/redhawk/src/testing/sdr/dom/components/logger_java/java/src/logger_java/java/logger_java.java new file mode 100644 index 000000000..6507189e1 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_java/java/src/logger_java/java/logger_java.java @@ -0,0 +1,273 @@ +package logger_java.java; + +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import org.apache.log4j.Logger; +import org.ossie.component.RHLogger; + +/** + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping + * + * Source: logger_java.spd.xml + */ +public class logger_java extends logger_java_base { + /** + * This is the component constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. + * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app = this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. 
+ */ + + public RHLogger baseline_1_logger; + public RHLogger baseline_2_logger; + public RHLogger namespaced_logger; + public RHLogger basetree_logger; + + public logger_java() + { + super(); + } + + public void constructor() + { + baseline_1_logger = this._baseLog.getChildLogger("some_stuff"); + baseline_2_logger = this._baseLog.getChildLogger("more_stuff"); + namespaced_logger = this._baseLog.getChildLogger("lower", "namespace"); + basetree_logger = this._baseLog.getChildLogger("lower", ""); + RHLogger child_ns_logger = basetree_logger.getChildLogger("first", "second"); + RHLogger child_no_ns_logger = basetree_logger.getChildLogger("third"); + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. 
The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. + * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). 
+ * + * Example: + * + * This example assumes that the component has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. + * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + logger.debug("serviceFunction() example log message"); + + this._baseLog.debug("message from _log"); + this.baseline_1_logger.debug("message from baseline_1_logger"); + this.baseline_2_logger.debug("message from baseline_2_logger"); + this.namespaced_logger.debug("message from namespaced_logger"); + this.basetree_logger.debug("message from basetree_logger"); + + return NOOP; + } + + /** + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/redhawk/src/testing/sdr/dom/components/logger_java/java/src/logger_java/java/logger_java_base.java b/redhawk/src/testing/sdr/dom/components/logger_java/java/src/logger_java/java/logger_java_base.java new file mode 100644 index 000000000..e73d60769 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_java/java/src/logger_java/java/logger_java_base.java @@ -0,0 +1,81 @@ +package logger_java.java; + + +import java.util.Properties; + +import org.apache.log4j.Logger; + +import org.omg.CosNaming.NamingContextPackage.CannotProceed; +import org.omg.CosNaming.NamingContextPackage.InvalidName; +import org.omg.CosNaming.NamingContextPackage.NotFound; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.InvalidObjectReference; + +import org.ossie.component.*; + + +/** + * This is the component code. This file contains all the access points + * you need to use to be able to access all input and output ports, + * respond to incoming data, and perform general component housekeeping + * + * Source: logger_java.spd.xml + * + * @generated + */ + +public abstract class logger_java_base extends Component { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(logger_java_base.class.getName()); + + /** + * @generated + */ + public logger_java_base() + { + super(); + + setLogger( logger, logger_java_base.class.getName() ); + + } + + + + /** + * The main function of your component. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. 
+ * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + logger_java.configureOrb(orbProps); + + try { + Component.start_component(logger_java.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (NotFound e) { + e.printStackTrace(); + } catch (CannotProceed e) { + e.printStackTrace(); + } catch (InvalidName e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dom/components/logger_java/java/startJava.sh b/redhawk/src/testing/sdr/dom/components/logger_java/java/startJava.sh new file mode 100755 index 000000000..8daa2675a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_java/java/startJava.sh @@ -0,0 +1,38 @@ +#!/bin/sh +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +#Sun ORB start line +# Important, the $@ must be quoted "$@" for arguments to be passed correctly +myDir=`dirname $0` +JAVA_LIBDIR=${myDir}/../../../../../base/framework/java +JAVA_CLASSPATH=${JAVA_LIBDIR}/apache-commons-lang-2.4.jar:${JAVA_LIBDIR}/log4j-1.2.15.jar:${JAVA_LIBDIR}/CFInterfaces.jar:${JAVA_LIBDIR}/ossie.jar:${myDir}/logger_java.jar:${myDir}:${myDir}/bin:${CLASSPATH} + +# Path for Java +if test -x $JAVA_HOME/bin/java; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=java +fi + +# NOTE: the $@ must be quoted "$@" for arguments to be passed correctly + +#Sun ORB start line +exec $JAVA -cp ${JAVA_CLASSPATH} logger_java.java.logger_java "$@" diff --git a/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.prf.xml b/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.prf.xml @@ -0,0 +1,3 @@ + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.scd.xml b/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.spd.xml b/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.spd.xml new file mode 100644 index 000000000..523aa20ae --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_java/logger_java.spd.xml @@ -0,0 +1,26 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + java/startJava.sh + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.prf.xml b/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.prf.xml @@ -0,0 +1,3 @@ + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.scd.xml b/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.spd.xml b/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.spd.xml new file mode 100644 index 000000000..72c010101 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_py/logger_py.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/logger_py.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/logger_py/python/logger_py.py b/redhawk/src/testing/sdr/dom/components/logger_py/python/logger_py.py new file mode 100755 index 000000000..5a7b39e7b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_py/python/logger_py.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: logger_py.spd.xml +from ossie.resource import start_component +import logging + +from logger_py_base import * + +class logger_py_i(logger_py_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. 
If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + self.baseline_1_logger = self._baseLog.getChildLogger("some_stuff") + self.baseline_2_logger = self._baseLog.getChildLogger("more_stuff") + self.namespaced_logger = self._baseLog.getChildLogger("lower", "namespace") + self.basetree_logger = self._baseLog.getChildLogger("lower", "") + child_ns_logger = self.basetree_logger.getChildLogger("first", "second") + child_no_ns_logger = self.basetree_logger.getChildLogger("third") + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. 
+ + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", logger_py_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = logger_py_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. 
+ + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + self._baseLog.debug("message from _log") + self.baseline_1_logger.debug("message from baseline_1_logger") + self.baseline_2_logger.debug("message from baseline_2_logger") + self.namespaced_logger.debug("message from namespaced_logger") + self.basetree_logger.debug("message from basetree_logger") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(logger_py_i) + diff --git 
a/redhawk/src/testing/sdr/dom/components/logger_py/python/logger_py_base.py b/redhawk/src/testing/sdr/dom/components/logger_py/python/logger_py_base.py new file mode 100644 index 000000000..127c54f5a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/logger_py/python/logger_py_base.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! +# +# Source: logger_py.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * + +import Queue, copy, time, threading + +class logger_py_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. 
This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + + diff --git a/redhawk/src/testing/sdr/dom/components/math_py/math_py.prf.xml b/redhawk/src/testing/sdr/dom/components/math_py/math_py.prf.xml new file mode 100644 index 000000000..b0706c842 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/math_py/math_py.prf.xml @@ -0,0 +1,9 @@ + + + + + 10000 + + + + diff --git a/redhawk/src/testing/sdr/dom/components/math_py/math_py.scd.xml b/redhawk/src/testing/sdr/dom/components/math_py/math_py.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/math_py/math_py.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/math_py/math_py.spd.xml b/redhawk/src/testing/sdr/dom/components/math_py/math_py.spd.xml new file mode 100644 index 000000000..0c04fb4f3 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/math_py/math_py.spd.xml @@ -0,0 +1,28 @@ + + 
+ + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/math_py.py + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/math_py/python/math_py.py b/redhawk/src/testing/sdr/dom/components/math_py/python/math_py.py new file mode 100755 index 000000000..674ff0623 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/math_py/python/math_py.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: math_py.spd.xml +from ossie.resource import start_component +import logging + +from math_py_base import * + +class math_py_i(math_py_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. 
The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", math_py_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = math_py_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. 
+ + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. 
+ # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(math_py_i) + diff --git a/redhawk/src/testing/sdr/dom/components/math_py/python/math_py_base.py b/redhawk/src/testing/sdr/dom/components/math_py/python/math_py_base.py new file mode 100644 index 000000000..9d217e050 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/math_py/python/math_py_base.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: math_py.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class math_py_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + prop1 = simple_property(id_="prop1", + type_="longlong", + defvalue=10000L, + mode="readwrite", + action="external", + kinds=("property",)) + + + + diff --git a/redhawk/src/testing/sdr/dom/components/msg_through_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/msg_through_cpp/cpp/Makefile.am index 545559963..e45b64b96 100644 --- a/redhawk/src/testing/sdr/dom/components/msg_through_cpp/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/msg_through_cpp/cpp/Makefile.am @@ -36,6 +36,6 @@ distclean-local: msg_through_cpp_SOURCES = msg_through_cpp.cpp msg_through_cpp.h msg_through_cpp_base.cpp msg_through_cpp_base.h main.cpp struct_props.h msg_through_cpp_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include -I$(CFDIR)/include/ossie -msg_through_cpp_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(redhawk_LDADD_auto) +msg_through_cpp_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) msg_through_cpp_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/newtime/newtime.prf.xml b/redhawk/src/testing/sdr/dom/components/newtime/newtime.prf.xml new file mode 100644 index 000000000..4fec9ee15 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/newtime/newtime.prf.xml @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/newtime/newtime.scd.xml b/redhawk/src/testing/sdr/dom/components/newtime/newtime.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ 
b/redhawk/src/testing/sdr/dom/components/newtime/newtime.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/newtime/newtime.spd.xml b/redhawk/src/testing/sdr/dom/components/newtime/newtime.spd.xml new file mode 100644 index 000000000..4fed1388f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/newtime/newtime.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/newtime.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/newtime/python/newtime.py b/redhawk/src/testing/sdr/dom/components/newtime/python/newtime.py new file mode 100755 index 000000000..d7f891ed8 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/newtime/python/newtime.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: newtime.spd.xml +from ossie.resource import start_component +import logging + +from newtime_base import * + +class newtime_i(newtime_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", newtime_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = newtime_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. 
+ # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(newtime_i) + diff --git a/redhawk/src/testing/sdr/dom/components/newtime/python/newtime_base.py b/redhawk/src/testing/sdr/dom/components/newtime/python/newtime_base.py new file mode 100644 index 000000000..e125073e9 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/newtime/python/newtime_base.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: newtime.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class newtime_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + rightnow = simple_property(id_="rightnow", + type_="utctime", + mode="readwrite", + action="external", + kinds=("property",)) + + + + diff --git a/redhawk/src/testing/sdr/dom/components/nocommandline_prop/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/nocommandline_prop/cpp/Makefile.am index 903833d23..a4acfda74 100644 --- a/redhawk/src/testing/sdr/dom/components/nocommandline_prop/cpp/Makefile.am +++ b/redhawk/src/testing/sdr/dom/components/nocommandline_prop/cpp/Makefile.am @@ -42,7 +42,7 @@ distclean-local: # you wish to manually control these options. include $(srcdir)/Makefile.am.ide nocommandline_prop_SOURCES = $(redhawk_SOURCES_auto) -nocommandline_prop_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) $(redhawk_LDADD_auto) +nocommandline_prop_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(OMNIDYNAMIC_LIBS) nocommandline_prop_CXXFLAGS = -Wall $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -I$(CFDIR)/include -I$(CFDIR)/include/ossie nocommandline_prop_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) diff --git a/redhawk/src/testing/sdr/dom/components/noop_mix_one/noop_mix_one.spec b/redhawk/src/testing/sdr/dom/components/noop_mix_one/noop_mix_one.spec deleted file mode 100644 index ec27a8c9c..000000000 --- a/redhawk/src/testing/sdr/dom/components/noop_mix_one/noop_mix_one.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. 
-# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: noop_mix_one -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.10 -Requires: redhawk >= 1.10 - -BuildArch: noarch - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/noop_mix_one/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/noop_mix_one/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files 
-%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/noop_mix_one.scd.xml -%{_prefix}/dom/components/%{name}/noop_mix_one.prf.xml -%{_prefix}/dom/components/%{name}/noop_mix_one.spd.xml -%{_prefix}/dom/components/%{name}/python - diff --git a/redhawk/src/testing/sdr/dom/components/noop_mix_three/noop_mix_three.spec b/redhawk/src/testing/sdr/dom/components/noop_mix_three/noop_mix_three.spec deleted file mode 100644 index a127429c3..000000000 --- a/redhawk/src/testing/sdr/dom/components/noop_mix_three/noop_mix_three.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: noop_mix_three -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.10 -Requires: redhawk >= 1.10 - -BuildArch: noarch - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/noop_mix_three/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/noop_mix_three/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/noop_mix_three.scd.xml -%{_prefix}/dom/components/%{name}/noop_mix_three.prf.xml -%{_prefix}/dom/components/%{name}/noop_mix_three.spd.xml -%{_prefix}/dom/components/%{name}/python - diff --git a/redhawk/src/testing/sdr/dom/components/noop_mix_two/noop_mix_two.spec b/redhawk/src/testing/sdr/dom/components/noop_mix_two/noop_mix_two.spec deleted file mode 100644 index 2b4e9d93e..000000000 --- a/redhawk/src/testing/sdr/dom/components/noop_mix_two/noop_mix_two.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. 
Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: noop_mix_two -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.10 -Requires: redhawk >= 1.10 - -BuildArch: noarch - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/noop_mix_two/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir 
%{_prefix}/dom/components/noop_mix_two/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/noop_mix_two.scd.xml -%{_prefix}/dom/components/%{name}/noop_mix_two.prf.xml -%{_prefix}/dom/components/%{name}/noop_mix_two.spd.xml -%{_prefix}/dom/components/%{name}/python - diff --git a/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.prf.xml b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.prf.xml new file mode 100644 index 000000000..a3e29d771 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.prf.xml @@ -0,0 +1,14 @@ + + + + + 3 + + + + + 5 + + + + diff --git a/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.scd.xml b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.spd.xml b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.spd.xml new file mode 100644 index 000000000..42ef6d6ab --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/props_bad_numbers.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/props_bad_numbers.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/props_bad_numbers/python/props_bad_numbers.py b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/python/props_bad_numbers.py new file mode 100755 index 000000000..55cf1a895 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/python/props_bad_numbers.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: props_bad_numbers.spd.xml +from ossie.resource import start_component +import logging + +from props_bad_numbers_base import * + +class props_bad_numbers_i(props_bad_numbers_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. 
+ A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", props_bad_numbers_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = props_bad_numbers_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. 
+ + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. 
+ # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(props_bad_numbers_i) + diff --git a/redhawk/src/testing/sdr/dom/components/props_bad_numbers/python/props_bad_numbers_base.py b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/python/props_bad_numbers_base.py new file mode 100644 index 000000000..0b8293050 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/props_bad_numbers/python/props_bad_numbers_base.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: props_bad_numbers.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class props_bad_numbers_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + some_short = simple_property(id_="some_short", + type_="short", + defvalue=3, + mode="readwrite", + action="external", + kinds=("property",)) + + + some_float = simple_property(id_="some_float", + type_="float", + defvalue=5.0, + mode="readwrite", + action="external", + kinds=("property",)) + + + + diff --git a/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/build.sh b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/build.sh new file mode 100755 index 000000000..d5c48818b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/build.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +if [ "$1" = "rpm" ]; then + # A very simplistic RPM build scenario + if [ -e pythonSoftpkgDep.spec ]; then + mydir=`dirname $0` + tmpdir=`mktemp -d` + cp -r ${mydir} ${tmpdir}/pythonSoftpkgDep-1.0.0 + tar czf ${tmpdir}/pythonSoftpkgDep-1.0.0.tar.gz --exclude=".svn" --exclude=".git" -C ${tmpdir} pythonSoftpkgDep-1.0.0 + rpmbuild -ta ${tmpdir}/pythonSoftpkgDep-1.0.0.tar.gz + rm -rf $tmpdir + else + echo "Missing RPM spec file in" `pwd` + exit 1 + fi +else + for impl in python ; do + if [ ! -d "$impl" ]; then + echo "Directory '$impl' does not exist...continuing" + continue + fi + cd $impl + if [ -e build.sh ]; then + if [ $# == 1 ]; then + if [ $1 == 'clean' ]; then + rm -f Makefile + rm -f config.* + ./build.sh distclean + else + ./build.sh $* + fi + else + ./build.sh $* + fi + elif [ -e Makefile ] && [ Makefile.am -ot Makefile ]; then + make $* + elif [ -e reconf ]; then + ./reconf && ./configure && make $* + else + echo "No build.sh found for $impl" + fi + retval=$? 
+ if [ $retval != '0' ]; then + exit $retval + fi + cd - + done +fi diff --git a/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/python/pythonSoftpkgDep.py b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/python/pythonSoftpkgDep.py new file mode 100755 index 000000000..f068543f8 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/python/pythonSoftpkgDep.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: pythonSoftpkgDep.spd.xml +from ossie.resource import start_component +import logging + +from pythonSoftpkgDep_base import * + +class pythonSoftpkgDep_i(pythonSoftpkgDep_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + import helloworld + self.h1 = helloworld.HelloWorld() + from helloworld2 import helloworld2 + self.h2 = helloworld2.HelloWorld() + from helloworld3 import helloworld3 + self.h3 = helloworld3.HelloWorld() + + def start(self): + pythonSoftpkgDep_base.start(self) + self.prop1 = self.h1.c['goober'] + self.prop2 = self.h2.c['jones'] + self.prop3 = self.h3.c.keys()[2] + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", pythonSoftpkgDep_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = pythonSoftpkgDep_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(pythonSoftpkgDep_i) + diff --git a/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/python/pythonSoftpkgDep_base.py b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/python/pythonSoftpkgDep_base.py new file mode 100644 index 000000000..a9dbac9ca --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/python/pythonSoftpkgDep_base.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: pythonSoftpkgDep.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class pythonSoftpkgDep_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + prop1 = simple_property(id_="prop1", + name="prop1", + type_="string", + defvalue="hello", + mode="readwrite", + action="external", + kinds=("property",)) + + + prop2 = simple_property(id_="prop2", + name="prop2", + type_="string", + defvalue="world", + mode="readwrite", + action="external", + kinds=("property",)) + + + prop3 = simple_property(id_="prop3", + name="prop3", + type_="string", + defvalue="helloworld", + mode="readwrite", + action="external", + kinds=("property",)) + + + + diff --git a/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.prf.xml b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.prf.xml new file mode 100644 index 000000000..de2a3703a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.prf.xml @@ -0,0 +1,19 @@ + + + + + hello + + + + + world + + + + + helloworld + + + + diff --git a/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.scd.xml b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.scd.xml new file mode 100644 index 000000000..fb0b4311f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.spd.xml b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.spd.xml new file mode 100644 index 000000000..71069d88a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.spd.xml @@ -0,0 +1,40 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/pythonSoftpkgDep.py + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.spec b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.spec new file mode 100644 index 000000000..27482e5fa --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.spec @@ -0,0 +1,74 @@ +# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) +# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) +%{!?_sdrroot: %global _sdrroot /var/redhawk/sdr} +%define _prefix %{_sdrroot} +Prefix: %{_prefix} + +# Point install paths to locations within our target SDR root +%define _sysconfdir %{_prefix}/etc +%define _localstatedir %{_prefix}/var +%define _mandir %{_prefix}/man +%define _infodir %{_prefix}/info + +Name: pythonSoftpkgDep +Version: 1.0.0 +Release: 1%{?dist} +Summary: Component %{name} + +Group: REDHAWK/Components +License: None +Source0: %{name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: redhawk-devel >= 2.0 +Requires: redhawk >= 2.0 + +BuildRequires: py_dep1-devel +Requires: py_dep1 +BuildRequires: py_dep2-devel +Requires: py_dep2 + +BuildArch: noarch + + +%description +Component %{name} + * Commit: __REVISION__ + * Source Date/Time: __DATETIME__ + + +%prep +%setup -q + + +%build +# Implementation python +pushd python +./reconf +%define _bindir %{_prefix}/dom/components/pythonSoftpkgDep/python +%configure +make %{?_smp_mflags} +popd + + +%install +rm -rf $RPM_BUILD_ROOT +# Implementation python +pushd python +%define _bindir %{_prefix}/dom/components/pythonSoftpkgDep/python +make install DESTDIR=$RPM_BUILD_ROOT +popd + + +%clean +rm -rf $RPM_BUILD_ROOT + + +%files +%defattr(-,redhawk,redhawk,-) +%dir %{_sdrroot}/dom/components/pythonSoftpkgDep 
+%{_prefix}/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.scd.xml +%{_prefix}/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.prf.xml +%{_prefix}/dom/components/pythonSoftpkgDep/pythonSoftpkgDep.spd.xml +%{_prefix}/dom/components/pythonSoftpkgDep/python + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop/python/slow_stop.py b/redhawk/src/testing/sdr/dom/components/slow_stop/python/slow_stop.py new file mode 100755 index 000000000..f09c114ae --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop/python/slow_stop.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: slow_stop.spd.xml +from ossie.resource import start_component +import logging + +from slow_stop_base import * + +class slow_stop_i(slow_stop_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", slow_stop_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = slow_stop_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(slow_stop_i) + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop/python/slow_stop_base.py b/redhawk/src/testing/sdr/dom/components/slow_stop/python/slow_stop_base.py new file mode 100644 index 000000000..880a4d9b4 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop/python/slow_stop_base.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: slow_stop.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * + +import Queue, copy, time, threading + +class slow_stop_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + begin_time = time.time() + curr_time = time.time() + while (curr_time-begin_time < 5): + time.sleep(0.1) + curr_time = time.time() + try: + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + except: + pass + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + try: + Component.releaseObject(self) + except: + pass + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. 
+ + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.prf.xml b/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.prf.xml @@ -0,0 +1,3 @@ + + + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.scd.xml b/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.spd.xml b/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.spd.xml new file mode 100644 index 000000000..cc1b4303e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop/slow_stop.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/slow_stop.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/Makefile.am new file mode 100644 index 000000000..323a800b6 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/Makefile.am @@ -0,0 +1,30 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. 
+# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. +CFDIR = $(top_srcdir)/base + +noinst_PROGRAMS = slow_stop_cpp + +slow_stop_cpp_SOURCES = slow_stop_cpp.cpp slow_stop_cpp.h slow_stop_cpp_base.cpp slow_stop_cpp_base.h main.cpp +slow_stop_cpp_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) -I$(CFDIR)/include +slow_stop_cpp_LDADD = $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_THREAD_LIB) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/main.cpp new file mode 100644 index 000000000..6de8653e9 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/main.cpp @@ -0,0 +1,11 @@ +#include +#include "ossie/ossieSupport.h" + +#include "slow_stop_cpp.h" +int main(int argc, char* argv[]) +{ + slow_stop_cpp_i* slow_stop_cpp_servant; + Component::start_component(slow_stop_cpp_servant, argc, argv); + return 0; +} + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp.cpp 
b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp.cpp new file mode 100644 index 000000000..e79f7d6e8 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp.cpp @@ -0,0 +1,250 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "slow_stop_cpp.h" + +PREPARE_LOGGING(slow_stop_cpp_i) + +slow_stop_cpp_i::slow_stop_cpp_i(const char *uuid, const char *label) : + slow_stop_cpp_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + +} + +slow_stop_cpp_i::~slow_stop_cpp_i() +{ +} + +void slow_stop_cpp_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ + this->setThreadDelay(10); +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. 
+ + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) and string-based (dataString, dataXML and + dataFile) do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. 
+ + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + // The component class must have an output stream member; add to + // slow_stop_cpp.h: + // bulkio::OutFloatStream outputStream; + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + short* inputData = block.data(); + std::vector outputData; + outputData.resize(block.size()); + for (size_t index = 0; index < block.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // If there is no output stream open, create one + if (!outputStream) { + outputStream = dataFloat_out->createStream(block.sri()); + } else if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Write to the output stream + outputStream.write(outputData, block.getTimestamps()); + + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. 
Data blocks + provide functions that return the correct interpretation of the data + buffer and number of complex elements: + + if (block.complex()) { + std::complex* data = block.cxdata(); + for (size_t index = 0; index < block.cxsize(); ++index) { + data[index] = std::abs(data[index]); + } + outputStream.write(data, block.cxsize(), bulkio::time::utils::now()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void slow_stop_cpp_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &slow_stop_cpp_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. 
+ + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (slow_stop_cpp_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &slow_stop_cpp_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). 
+ + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to slow_stop_cpp.cpp + slow_stop_cpp_i::slow_stop_cpp_i(const char *uuid, const char *label) : + slow_stop_cpp_base(uuid, label) + { + addPropertyListener(scaleValue, this, &slow_stop_cpp_i::scaleChanged); + addPropertyListener(status, this, &slow_stop_cpp_i::statusChanged); + } + + void slow_stop_cpp_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(slow_stop_cpp_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void slow_stop_cpp_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(slow_stop_cpp_i, "status changed"); + } + + //Add to slow_stop_cpp.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int slow_stop_cpp_i::serviceFunction() +{ + LOG_DEBUG(slow_stop_cpp_i, "serviceFunction() example log message"); + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp.h b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp.h new file mode 100644 index 000000000..0ed62af65 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp.h @@ -0,0 +1,18 @@ +#ifndef SLOW_STOP_CPP_I_IMPL_H +#define SLOW_STOP_CPP_I_IMPL_H + +#include "slow_stop_cpp_base.h" + +class slow_stop_cpp_i : public slow_stop_cpp_base +{ + ENABLE_LOGGING + public: + slow_stop_cpp_i(const char *uuid, const char *label); + ~slow_stop_cpp_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // SLOW_STOP_CPP_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp_base.cpp 
b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp_base.cpp new file mode 100644 index 000000000..abe9bfd3a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp_base.cpp @@ -0,0 +1,58 @@ +#include "slow_stop_cpp_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +slow_stop_cpp_base::slow_stop_cpp_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + loadProperties(); +} + +slow_stop_cpp_base::~slow_stop_cpp_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. 
+*******************************************************************************************/ +void slow_stop_cpp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void slow_stop_cpp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void slow_stop_cpp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void slow_stop_cpp_base::loadProperties() +{ +} + + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp_base.h b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp_base.h new file mode 100644 index 000000000..3c8527e59 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/cpp/slow_stop_cpp_base.h @@ -0,0 +1,27 @@ +#ifndef SLOW_STOP_CPP_BASE_IMPL_BASE_H +#define SLOW_STOP_CPP_BASE_IMPL_BASE_H + +#include +#include +#include + + +class slow_stop_cpp_base : public Component, protected ThreadedComponent +{ + public: + slow_stop_cpp_base(const char *uuid, const char *label); + ~slow_stop_cpp_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + + private: +}; +#endif // SLOW_STOP_CPP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.prf.xml 
b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.prf.xml new file mode 100644 index 000000000..8f537d89f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.prf.xml @@ -0,0 +1,3 @@ + + + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.scd.xml b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.spd.xml b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.spd.xml new file mode 100644 index 000000000..88ed7f166 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/slow_stop_cpp/slow_stop_cpp.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/slow_stop_cpp + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/Makefile.am deleted file mode 100644 index 9d1d65daa..000000000 --- a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. 
-# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -CFDIR = $(top_srcdir)/base - -noinst_PROGRAMS = svc_error_cpp - -svc_error_cpp_SOURCES = svc_error_cpp.cpp svc_error_cpp.h svc_error_cpp_base.cpp svc_error_cpp_base.h main.cpp -svc_error_cpp_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) -I$(CFDIR)/include -I$(CFDIR)/include/ossie -svc_error_cpp_LDADD = $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_THREAD_LIB) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/Makefile.am.ide b/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/Makefile.am.ide deleted file mode 100644 index 093b43e7d..000000000 --- a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/Makefile.am.ide +++ /dev/null @@ -1,11 +0,0 @@ -# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! -# Files can be excluded by right-clicking on the file in the project explorer -# and choosing Resource Configurations -> Exclude from build. 
Re-include files -# by opening the Properties dialog of your project and choosing C/C++ Build -> -# Tool Chain Editor, and un-checking "Exclude resource from build " -redhawk_SOURCES_auto = main.cpp -redhawk_SOURCES_auto += svc_error_cpp.cpp -redhawk_SOURCES_auto += svc_error_cpp.h -redhawk_SOURCES_auto += svc_error_cpp_base.cpp -redhawk_SOURCES_auto += svc_error_cpp_base.h - diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/main.cpp deleted file mode 100644 index 37b3bf481..000000000 --- a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/main.cpp +++ /dev/null @@ -1,11 +0,0 @@ -#include -#include "ossie/ossieSupport.h" - -#include "svc_error_cpp.h" -int main(int argc, char* argv[]) -{ - svc_error_cpp_i* svc_error_cpp_servant; - Component::start_component(svc_error_cpp_servant, argc, argv); - return 0; -} - diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp.cpp b/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp.cpp deleted file mode 100644 index aa6a5289a..000000000 --- a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp.cpp +++ /dev/null @@ -1,247 +0,0 @@ -/************************************************************************** - - This is the component code. This file contains the child class where - custom functionality can be added to the component. Custom - functionality to the base class can be extended here. Access to - the ports can also be done from this class - -**************************************************************************/ - -#include "svc_error_cpp.h" - -PREPARE_LOGGING(svc_error_cpp_i) - -svc_error_cpp_i::svc_error_cpp_i(const char *uuid, const char *label) : - svc_error_cpp_base(uuid, label) -{ - // Avoid placing constructor code here. Instead, use the "constructor" function. 
- -} - -svc_error_cpp_i::~svc_error_cpp_i() -{ -} - -void svc_error_cpp_i::constructor() -{ - /*********************************************************************************** - This is the RH constructor. All properties are properly initialized before this function is called - ***********************************************************************************/ -} - -/*********************************************************************************************** - - Basic functionality: - - The service function is called by the serviceThread object (of type ProcessThread). - This call happens immediately after the previous call if the return value for - the previous call was NORMAL. - If the return value for the previous call was NOOP, then the serviceThread waits - an amount of time defined in the serviceThread's constructor. - - SRI: - To create a StreamSRI object, use the following code: - std::string stream_id = "testStream"; - BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); - - Time: - To create a PrecisionUTCTime object, use the following code: - BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); - - - Ports: - - Data is passed to the serviceFunction through by reading from input streams - (BulkIO only). The input stream class is a port-specific class, so each port - implementing the BulkIO interface will have its own type-specific input stream. - UDP multicast (dataSDDS and dataVITA49) and string-based (dataString, dataXML and - dataFile) do not support streams. - - The input stream from which to read can be requested with the getCurrentStream() - method. The optional argument to getCurrentStream() is a floating point number that - specifies the time to wait in seconds. A zero value is non-blocking. A negative value - is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and - bulkio::Const::NON_BLOCKING. 
- - More advanced uses of input streams are possible; refer to the REDHAWK documentation - for more details. - - Input streams return data blocks that automatically manage the memory for the data - and include the SRI that was in effect at the time the data was received. It is not - necessary to delete the block; it will be cleaned up when it goes out of scope. - - To send data using a BulkIO interface, create an output stream and write the - data to it. When done with the output stream, the close() method sends and end-of- - stream flag and cleans up. - - NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call - "port->updateStats()" to update the port statistics when appropriate. - - Example: - // This example assumes that the component has two ports: - // An input (provides) port of type bulkio::InShortPort called dataShort_in - // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out - // The mapping between the port and the class is found - // in the component base class header file - // The component class must have an output stream member; add to - // svc_error_cpp.h: - // bulkio::OutFloatStream outputStream; - - bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); - if (!inputStream) { // No streams are available - return NOOP; - } - - bulkio::ShortDataBlock block = inputStream.read(); - if (!block) { // No data available - // Propagate end-of-stream - if (inputStream.eos()) { - outputStream.close(); - } - return NOOP; - } - - short* inputData = block.data(); - std::vector outputData; - outputData.resize(block.size()); - for (size_t index = 0; index < block.size(); ++index) { - outputData[index] = (float) inputData[index]; - } - - // If there is no output stream open, create one - if (!outputStream) { - outputStream = dataFloat_out->createStream(block.sri()); - } else if (block.sriChanged()) { - // Update output SRI - outputStream.sri(block.sri()); - } - - // Write to the output stream - 
outputStream.write(outputData, block.getTimestamps()); - - // Propagate end-of-stream - if (inputStream.eos()) { - outputStream.close(); - } - - return NORMAL; - - If working with complex data (i.e., the "mode" on the SRI is set to - true), the data block's complex() method will return true. Data blocks - provide functions that return the correct interpretation of the data - buffer and number of complex elements: - - if (block.complex()) { - std::complex* data = block.cxdata(); - for (size_t index = 0; index < block.cxsize(); ++index) { - data[index] = std::abs(data[index]); - } - outputStream.write(data, block.cxsize(), bulkio::time::utils::now()); - } - - Interactions with non-BULKIO ports are left up to the component developer's discretion - - Messages: - - To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described - as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback - with the input port. - - Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of - type MessageEvent, create the following code: - - void svc_error_cpp_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ - } - - Register the message callback onto the input port with the following form: - this->msg_input->registerMessage("my_msg", this, &svc_error_cpp_i::my_message_callback); - - To send a message, you need to (1) create a message structure, (2) a message prototype described - as a structure property of kind message, and (3) send the message over the port. 
- - Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of - type MessageEvent, create the following code: - - ::my_msg_struct msg_out; - this->msg_output->sendMessage(msg_out); - - Accessing the Application and Domain Manager: - - Both the Application hosting this Component and the Domain Manager hosting - the Application are available to the Component. - - To access the Domain Manager: - CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); - To access the Application: - CF::Application_ptr app = this->getApplication()->getRef(); - - Properties: - - Properties are accessed directly as member variables. For example, if the - property name is "baudRate", it may be accessed within member functions as - "baudRate". Unnamed properties are given the property id as its name. - Property types are mapped to the nearest C++ type, (e.g. "string" becomes - "std::string"). All generated properties are declared in the base class - (svc_error_cpp_base). - - Simple sequence properties are mapped to "std::vector" of the simple type. - Struct properties, if used, are mapped to C++ structs defined in the - generated file "struct_props.h". Field names are taken from the name in - the properties file; if no name is given, a generated name of the form - "field_n" is used, where "n" is the ordinal number of the field. - - Example: - // This example makes use of the following Properties: - // - A float value called scaleValue - // - A boolean called scaleInput - - if (scaleInput) { - dataOut[i] = dataIn[i] * scaleValue; - } else { - dataOut[i] = dataIn[i]; - } - - Callback methods can be associated with a property so that the methods are - called each time the property value changes. This is done by calling - addPropertyListener(, this, &svc_error_cpp_i::) - in the constructor. - - The callback method receives two arguments, the old and new values, and - should return nothing (void). 
The arguments can be passed by value, - receiving a copy (preferred for primitive types), or by const reference - (preferred for strings, structs and vectors). - - Example: - // This example makes use of the following Properties: - // - A float value called scaleValue - // - A struct property called status - - //Add to svc_error_cpp.cpp - svc_error_cpp_i::svc_error_cpp_i(const char *uuid, const char *label) : - svc_error_cpp_base(uuid, label) - { - addPropertyListener(scaleValue, this, &svc_error_cpp_i::scaleChanged); - addPropertyListener(status, this, &svc_error_cpp_i::statusChanged); - } - - void svc_error_cpp_i::scaleChanged(float oldValue, float newValue) - { - LOG_DEBUG(svc_error_cpp_i, "scaleValue changed from" << oldValue << " to " << newValue); - } - - void svc_error_cpp_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) - { - LOG_DEBUG(svc_error_cpp_i, "status changed"); - } - - //Add to svc_error_cpp.h - void scaleChanged(float oldValue, float newValue); - void statusChanged(const status_struct& oldValue, const status_struct& newValue); - - -************************************************************************************************/ -int svc_error_cpp_i::serviceFunction() -{ - throw std::runtime_error("test exception in serviceFunction()"); -} - diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp.h b/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp.h deleted file mode 100644 index de950e690..000000000 --- a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef SVC_ERROR_CPP_I_IMPL_H -#define SVC_ERROR_CPP_I_IMPL_H - -#include "svc_error_cpp_base.h" - -class svc_error_cpp_i : public svc_error_cpp_base -{ - ENABLE_LOGGING - public: - svc_error_cpp_i(const char *uuid, const char *label); - ~svc_error_cpp_i(); - - void constructor(); - - int serviceFunction(); -}; - -#endif // SVC_ERROR_CPP_I_IMPL_H diff --git 
a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp_base.cpp b/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp_base.cpp deleted file mode 100644 index 4ebbf84dc..000000000 --- a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp_base.cpp +++ /dev/null @@ -1,58 +0,0 @@ -#include "svc_error_cpp_base.h" - -/******************************************************************************************* - - AUTO-GENERATED CODE. DO NOT MODIFY - - The following class functions are for the base class for the component class. To - customize any of these functions, do not modify them here. Instead, overload them - on the child class - -******************************************************************************************/ - -svc_error_cpp_base::svc_error_cpp_base(const char *uuid, const char *label) : - Component(uuid, label), - ThreadedComponent() -{ - loadProperties(); -} - -svc_error_cpp_base::~svc_error_cpp_base() -{ -} - -/******************************************************************************************* - Framework-level functions - These functions are generally called by the framework to perform housekeeping. 
-*******************************************************************************************/ -void svc_error_cpp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) -{ - Component::start(); - ThreadedComponent::startThread(); -} - -void svc_error_cpp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) -{ - Component::stop(); - if (!ThreadedComponent::stopThread()) { - throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); - } -} - -void svc_error_cpp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) -{ - // This function clears the component running condition so main shuts down everything - try { - stop(); - } catch (CF::Resource::StopError& ex) { - // TODO - this should probably be logged instead of ignored - } - - Component::releaseObject(); -} - -void svc_error_cpp_base::loadProperties() -{ -} - - diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp_base.h b/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp_base.h deleted file mode 100644 index a4b4a81a3..000000000 --- a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/cpp/svc_error_cpp_base.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef SVC_ERROR_CPP_BASE_IMPL_BASE_H -#define SVC_ERROR_CPP_BASE_IMPL_BASE_H - -#include -#include -#include - - -class svc_error_cpp_base : public Component, protected ThreadedComponent -{ - public: - svc_error_cpp_base(const char *uuid, const char *label); - ~svc_error_cpp_base(); - - void start() throw (CF::Resource::StartError, CORBA::SystemException); - - void stop() throw (CF::Resource::StopError, CORBA::SystemException); - - void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); - - void loadProperties(); - - protected: - - private: -}; -#endif // SVC_ERROR_CPP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/svc_error_cpp.spd.xml 
b/redhawk/src/testing/sdr/dom/components/svc_error_cpp/svc_error_cpp.spd.xml deleted file mode 100644 index 910308d75..000000000 --- a/redhawk/src/testing/sdr/dom/components/svc_error_cpp/svc_error_cpp.spd.xml +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - null - - - - - - - - - The implementation contains descriptive information about the template for a software resource. - - - cpp/svc_error_cpp - - - - - - - - - diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error/python/svc_fn_error.py b/redhawk/src/testing/sdr/dom/components/svc_fn_error/python/svc_fn_error.py index dd0b91cc1..9689bc479 100755 --- a/redhawk/src/testing/sdr/dom/components/svc_fn_error/python/svc_fn_error.py +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error/python/svc_fn_error.py @@ -162,11 +162,7 @@ def mycallback(self, id, old_value, new_value): return NORMAL """ - - # TODO fill in your code here - self._log.debug("process() example log message") - a = b - return NOOP + raise RuntimeError('test exception in process()') if __name__ == '__main__': diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/.md5sums b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/.md5sums new file mode 100644 index 000000000..a17841f0a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/.md5sums @@ -0,0 +1,2 @@ +86a352d13108642a5a08c9fe8c4e8a47 svc_fn_error_cpp.spec +cb201b3f32a196109f65d4da9e11ea16 build.sh diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/.svc_fn_error_cpp.wavedev b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/.svc_fn_error_cpp.wavedev new file mode 100644 index 000000000..2668804c3 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/.svc_fn_error_cpp.wavedev @@ -0,0 +1,25 @@ + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/.md5sums b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/.md5sums new file mode 100644 index 
000000000..04d532fec --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/.md5sums @@ -0,0 +1,10 @@ +934a036cb6d1d632e463be733e484fb3 svc_fn_error_cpp_base.cpp +6fff1615e36ce7a3b335ed7d3bfabb95 main.cpp +d23f51d099f8c3b419d46aa7a2d953de reconf +65a7972a03ea68b984777395e7d74cee svc_fn_error_cpp_base.h +c43620d52d78f531a172e559b6e124d3 configure.ac +7c989e7d0850431d36cf51ed9c26f829 Makefile.am +a664b33bc1038cb1d0a7a41f290c6bc1 Makefile.am.ide +2c076f8e3fccffed1199ef5c8adac4b1 svc_fn_error_cpp.h +b1835160ba93e9e1248209acc174363d build.sh +f3060fccac98f1aa5a65109e1569c738 svc_fn_error_cpp.cpp diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/Makefile.am new file mode 100644 index 000000000..50213e7c6 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/Makefile.am @@ -0,0 +1,34 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +CFDIR = $(top_srcdir)/base + +ossieName = svc_fn_error_cpp +noinst_PROGRAMS = svc_fn_error_cpp + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. 
You can remove/modify the following lines if +# you wish to manually control these options. +include $(srcdir)/Makefile.am.ide +svc_fn_error_cpp_SOURCES = $(redhawk_SOURCES_auto) +svc_fn_error_cpp_LDADD = $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +svc_fn_error_cpp_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +svc_fn_error_cpp_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/Makefile.am.ide b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/Makefile.am.ide new file mode 100644 index 000000000..7f0c124c3 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/Makefile.am.ide @@ -0,0 +1,30 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! 
+# Files can be excluded by right-clicking on the file in the project explorer +# and choosing Resource Configurations -> Exclude from build. Re-include files +# by opening the Properties dialog of your project and choosing C/C++ Build -> +# Tool Chain Editor, and un-checking "Exclude resource from build " +redhawk_SOURCES_auto = main.cpp +redhawk_SOURCES_auto += svc_fn_error_cpp.cpp +redhawk_SOURCES_auto += svc_fn_error_cpp.h +redhawk_SOURCES_auto += svc_fn_error_cpp_base.cpp +redhawk_SOURCES_auto += svc_fn_error_cpp_base.h + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/main.cpp new file mode 100644 index 000000000..f5409d364 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/main.cpp @@ -0,0 +1,30 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#include +#include "ossie/ossieSupport.h" + +#include "svc_fn_error_cpp.h" +int main(int argc, char* argv[]) +{ + svc_fn_error_cpp_i* svc_fn_error_cpp_servant; + Component::start_component(svc_fn_error_cpp_servant, argc, argv); + return 0; +} + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp.cpp b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp.cpp new file mode 100644 index 000000000..c290e54c5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp.cpp @@ -0,0 +1,266 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. 
Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "svc_fn_error_cpp.h" + +PREPARE_LOGGING(svc_fn_error_cpp_i) + +svc_fn_error_cpp_i::svc_fn_error_cpp_i(const char *uuid, const char *label) : + svc_fn_error_cpp_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. + +} + +svc_fn_error_cpp_i::~svc_fn_error_cpp_i() +{ +} + +void svc_fn_error_cpp_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. 
The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. + + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); 
+ } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void svc_fn_error_cpp_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &svc_fn_error_cpp_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (svc_fn_error_cpp_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". 
Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &svc_fn_error_cpp_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to svc_fn_error_cpp.cpp + svc_fn_error_cpp_i::svc_fn_error_cpp_i(const char *uuid, const char *label) : + svc_fn_error_cpp_base(uuid, label) + { + addPropertyListener(scaleValue, this, &svc_fn_error_cpp_i::scaleChanged); + addPropertyListener(status, this, &svc_fn_error_cpp_i::statusChanged); + } + + void svc_fn_error_cpp_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(svc_fn_error_cpp_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void svc_fn_error_cpp_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(svc_fn_error_cpp_i, "status changed"); + } + + //Add to svc_fn_error_cpp.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int 
svc_fn_error_cpp_i::serviceFunction() +{ + throw std::runtime_error("test exception in serviceFunction()"); +} + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp.h b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp.h new file mode 100644 index 000000000..39bc98fea --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp.h @@ -0,0 +1,37 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ +#ifndef SVC_FN_ERROR_CPP_I_IMPL_H +#define SVC_FN_ERROR_CPP_I_IMPL_H + +#include "svc_fn_error_cpp_base.h" + +class svc_fn_error_cpp_i : public svc_fn_error_cpp_base +{ + ENABLE_LOGGING + public: + svc_fn_error_cpp_i(const char *uuid, const char *label); + ~svc_fn_error_cpp_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // SVC_FN_ERROR_CPP_I_IMPL_H diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp_base.cpp b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp_base.cpp new file mode 100644 index 000000000..a1bbfbcd8 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp_base.cpp @@ -0,0 +1,79 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#include "svc_fn_error_cpp_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. 
Instead, overload them + on the child class + +******************************************************************************************/ + +svc_fn_error_cpp_base::svc_fn_error_cpp_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); +} + +svc_fn_error_cpp_base::~svc_fn_error_cpp_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. +*******************************************************************************************/ +void svc_fn_error_cpp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void svc_fn_error_cpp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void svc_fn_error_cpp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void svc_fn_error_cpp_base::loadProperties() +{ +} + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp_base.h b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp_base.h new file mode 100644 index 000000000..9df91a298 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/cpp/svc_fn_error_cpp_base.h @@ -0,0 +1,46 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. 
+ * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +#ifndef SVC_FN_ERROR_CPP_BASE_IMPL_BASE_H +#define SVC_FN_ERROR_CPP_BASE_IMPL_BASE_H + +#include +#include +#include + + +class svc_fn_error_cpp_base : public Component, protected ThreadedComponent +{ + public: + svc_fn_error_cpp_base(const char *uuid, const char *label); + ~svc_fn_error_cpp_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + + private: +}; +#endif // SVC_FN_ERROR_CPP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.prf.xml b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.prf.xml new file mode 100644 index 000000000..3624eb818 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.prf.xml @@ -0,0 +1,22 @@ + + + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.scd.xml b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.scd.xml new file mode 100644 index 000000000..1d4932fdf --- /dev/null +++ 
b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.scd.xml @@ -0,0 +1,64 @@ + + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.spd.xml b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.spd.xml new file mode 100644 index 000000000..04e83a0aa --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_cpp/svc_fn_error_cpp.spd.xml @@ -0,0 +1,46 @@ + + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/svc_fn_error_cpp + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/.md5sums b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/.md5sums new file mode 100644 index 000000000..bc7509352 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/.md5sums @@ -0,0 +1,2 @@ +8a63fa0181a8a7cc08b5ca1be581f282 build.sh +a061c18f2ff92e0b86bb9d5a9f7f6c45 svc_fn_error_java.spec diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/.svc_fn_error_java.wavedev b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/.svc_fn_error_java.wavedev new file mode 100644 index 000000000..a4bc474c1 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/.svc_fn_error_java.wavedev @@ -0,0 +1,27 @@ + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/.md5sums b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/.md5sums new file mode 100644 index 000000000..51d98c46e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/.md5sums @@ -0,0 +1,6 @@ +dae876691e3921f8ca9c44cdc5b7385f src/svc_fn_error_java/java/svc_fn_error_java.java +c98e20c0d3998cfdf2d5d553850a28ef startJava.sh +8bfcd22353c3a57fee561ad86ee2a56b reconf 
+396a7d30cdc00a18bf0b7c82771f5547 src/svc_fn_error_java/java/svc_fn_error_java_base.java +c11aad975ee01664a4d30750be2008bf configure.ac +4fcc54af3b88cca523b14c2b8c9d11a2 Makefile.am diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/Makefile.am b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/Makefile.am new file mode 100644 index 000000000..e6aceccd7 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/Makefile.am @@ -0,0 +1,31 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +svc_fn_error_java.jar$(EXEEXT): $(svc_fn_error_java_jar_SOURCES) + mkdir -p bin + $(JAVAC) -cp $(OSSIE_CLASSPATH) -g -d bin $(svc_fn_error_java_jar_SOURCES) + $(JAR) cf ./svc_fn_error_java.jar -C bin . + $(JAR) uf ./svc_fn_error_java.jar -C src . 
+ +clean-local: + rm -rf bin + +noinst_PROGRAMS = svc_fn_error_java.jar +svc_fn_error_java_jar_SOURCES := $(shell find ./src -name "*.java") diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/src/svc_fn_error_java/java/svc_fn_error_java.java b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/src/svc_fn_error_java/java/svc_fn_error_java.java new file mode 100644 index 000000000..7a002ba43 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/src/svc_fn_error_java/java/svc_fn_error_java.java @@ -0,0 +1,269 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package svc_fn_error_java.java; + +import java.util.Properties; + +/** + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping + * + * Source: svc_fn_error_java.spd.xml + */ +public class svc_fn_error_java extends svc_fn_error_java_base { + /** + * This is the component constructor. 
In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. + * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. + * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. 
+ * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app = this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. + * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. 
+ * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. + */ + + public svc_fn_error_java() + { + super(); + } + + public void constructor() + { + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. 
A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. + * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the component has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + throw new RuntimeException("test exception in serviceFunction()"); + } + + /** + * Set additional options for ORB startup. For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/src/svc_fn_error_java/java/svc_fn_error_java_base.java b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/src/svc_fn_error_java/java/svc_fn_error_java_base.java new file mode 100644 index 000000000..c525f771e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/src/svc_fn_error_java/java/svc_fn_error_java_base.java @@ -0,0 +1,99 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. 
+ * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ +package svc_fn_error_java.java; + +import java.util.Properties; + +import org.apache.log4j.Logger; + +import org.omg.CosNaming.NamingContextPackage.CannotProceed; +import org.omg.CosNaming.NamingContextPackage.InvalidName; +import org.omg.CosNaming.NamingContextPackage.NotFound; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.InvalidObjectReference; + +import org.ossie.component.*; + + +/** + * This is the component code. This file contains all the access points + * you need to use to be able to access all input and output ports, + * respond to incoming data, and perform general component housekeeping + * + * Source: svc_fn_error_java.spd.xml + * + * @generated + */ + +public abstract class svc_fn_error_java_base extends Component { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(svc_fn_error_java_base.class.getName()); + + /** + * @generated + */ + public svc_fn_error_java_base() + { + super(); + + setLogger( logger, svc_fn_error_java_base.class.getName() ); + + } + + + + /** + * The main function of your component. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. 
+ * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + svc_fn_error_java.configureOrb(orbProps); + + try { + Component.start_component(svc_fn_error_java.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (NotFound e) { + e.printStackTrace(); + } catch (CannotProceed e) { + e.printStackTrace(); + } catch (InvalidName e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/startJava.sh b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/startJava.sh new file mode 100755 index 000000000..509ae3f77 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/java/startJava.sh @@ -0,0 +1,50 @@ +#!/bin/sh +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +myDir=`dirname $0` + +# Setup the OSSIEHOME Lib jars on the classpath +libDir=${SDRROOT}/../../base/framework/java +libFiles=`ls -1 $libDir/*.jar` +for file in $libFiles +do + if [ x"$CLASSPATH" = "x" ] + then + export CLASSPATH=$file + else + export CLASSPATH=$file:$CLASSPATH + fi +done + +# Path for Java +if test -x $JAVA_HOME/bin/java; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=java +fi + +# NOTE: the $@ must be quoted "$@" for arguments to be passed correctly + +#Sun ORB start line +exec $JAVA -cp :$myDir/svc_fn_error_java.jar:$myDir/bin:$CLASSPATH svc_fn_error_java.java.svc_fn_error_java "$@" + +#JacORB start lines +#exec $JAVA -cp :$myDir/jacorb.jar:$myDir/antlr.jar:$myDir/avalon-framework.jar:$myDir/backport-util-concurrent.jar:$myDir/logkit.jar:$myDir/svc_fn_error_java.jar:$myDir/bin:$CLASSPATH svc_fn_error_java.java.svc_fn_error_java "$@" diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.prf.xml b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.prf.xml new file mode 100644 index 000000000..3624eb818 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.prf.xml @@ -0,0 +1,22 @@ + + + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.scd.xml b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.scd.xml new file mode 100644 index 000000000..1d4932fdf --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.scd.xml @@ -0,0 +1,64 @@ + + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.spd.xml b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.spd.xml new file mode 100644 index 000000000..7777bae6b --- /dev/null +++ 
b/redhawk/src/testing/sdr/dom/components/svc_fn_error_java/svc_fn_error_java.spd.xml @@ -0,0 +1,45 @@ + + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/ticket_490_double/ticket_490_double.spd.xml b/redhawk/src/testing/sdr/dom/components/ticket_490_double/ticket_490_double.spd.xml index 69c46738c..295f8f12b 100644 --- a/redhawk/src/testing/sdr/dom/components/ticket_490_double/ticket_490_double.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/ticket_490_double/ticket_490_double.spd.xml @@ -41,6 +41,7 @@ with this program. If not, see http://www.gnu.org/licenses/. + diff --git a/redhawk/src/testing/sdr/dom/components/ticket_490_double/ticket_490_double.spec b/redhawk/src/testing/sdr/dom/components/ticket_490_double/ticket_490_double.spec deleted file mode 100644 index 82e2b4115..000000000 --- a/redhawk/src/testing/sdr/dom/components/ticket_490_double/ticket_490_double.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ticket_490_double -Summary: Component %{name} -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Components -Source: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-root - -Requires: redhawk >= 1.9 -BuildRequires: redhawk-devel >= 1.9 -BuildRequires: autoconf automake libtool - -BuildArch: noarch - -%description -Component %{name} - - -%prep -%setup - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/ticket_490_double/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/ticket_490_double/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/ticket_490_double.scd.xml -%{_prefix}/dom/components/%{name}/ticket_490_double.prf.xml -%{_prefix}/dom/components/%{name}/ticket_490_double.spd.xml -%{_prefix}/dom/components/%{name}/python - diff --git a/redhawk/src/testing/sdr/dom/components/ticket_490_none/ticket_490_none.spd.xml b/redhawk/src/testing/sdr/dom/components/ticket_490_none/ticket_490_none.spd.xml index 1f2219a3a..72caef0c9 100644 --- a/redhawk/src/testing/sdr/dom/components/ticket_490_none/ticket_490_none.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/ticket_490_none/ticket_490_none.spd.xml @@ -41,6 +41,7 
@@ with this program. If not, see http://www.gnu.org/licenses/. + diff --git a/redhawk/src/testing/sdr/dom/components/ticket_490_none/ticket_490_none.spec b/redhawk/src/testing/sdr/dom/components/ticket_490_none/ticket_490_none.spec deleted file mode 100644 index ce3b80061..000000000 --- a/redhawk/src/testing/sdr/dom/components/ticket_490_none/ticket_490_none.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ticket_490_none -Summary: Component %{name} -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Components -Source: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-root - -Requires: redhawk >= 1.9 -BuildRequires: redhawk-devel >= 1.9 -BuildRequires: autoconf automake libtool - -BuildArch: noarch - -%description -Component %{name} - - -%prep -%setup - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/ticket_490_none/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/ticket_490_none/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/ticket_490_none.scd.xml -%{_prefix}/dom/components/%{name}/ticket_490_none.prf.xml -%{_prefix}/dom/components/%{name}/ticket_490_none.spd.xml -%{_prefix}/dom/components/%{name}/python - diff --git a/redhawk/src/testing/sdr/dom/components/ticket_490_single/ticket_490_single.spd.xml b/redhawk/src/testing/sdr/dom/components/ticket_490_single/ticket_490_single.spd.xml index 86f94962e..2221051e5 100644 --- a/redhawk/src/testing/sdr/dom/components/ticket_490_single/ticket_490_single.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/ticket_490_single/ticket_490_single.spd.xml @@ -41,6 
+41,7 @@ with this program. If not, see http://www.gnu.org/licenses/. + diff --git a/redhawk/src/testing/sdr/dom/components/ticket_490_single/ticket_490_single.spec b/redhawk/src/testing/sdr/dom/components/ticket_490_single/ticket_490_single.spec deleted file mode 100644 index 7a0feb245..000000000 --- a/redhawk/src/testing/sdr/dom/components/ticket_490_single/ticket_490_single.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ticket_490_single -Summary: Component %{name} -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Components -Source: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-root - -Requires: redhawk >= 1.9 -BuildRequires: redhawk-devel >= 1.9 -BuildRequires: autoconf automake libtool - -BuildArch: noarch - -%description -Component %{name} - - -%prep -%setup - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/ticket_490_single/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/ticket_490_single/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/ticket_490_single.scd.xml -%{_prefix}/dom/components/%{name}/ticket_490_single.prf.xml -%{_prefix}/dom/components/%{name}/ticket_490_single.spd.xml -%{_prefix}/dom/components/%{name}/python - diff --git a/redhawk/src/testing/sdr/dom/components/ticket_cf_1066_comp/ticket_cf_1066_comp.spec b/redhawk/src/testing/sdr/dom/components/ticket_cf_1066_comp/ticket_cf_1066_comp.spec deleted file mode 100644 index 7bb70989e..000000000 --- a/redhawk/src/testing/sdr/dom/components/ticket_cf_1066_comp/ticket_cf_1066_comp.spec +++ /dev/null @@ -1,90 +0,0 @@ -# -# This file is protected by 
Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ticket_cf_1066_comp -Summary: Component %{name} -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Components -Source: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-root - -Requires: redhawk >= 1.8 -BuildRequires: redhawk >= 1.8 -BuildRequires: autoconf automake libtool - -BuildArch: noarch - -# Python requirements -Requires: python omniORBpy -BuildRequires: libomniORBpy3-devel -BuildRequires: python-devel >= 2.3 - - -%description -Component %{name} - - -%prep -%setup - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/ticket_cf_1066_comp/python -%configure -make %{?_smp_mflags} -popd - 
- -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/ticket_cf_1066_comp/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/ticket_cf_1066_comp.spd.xml -%{_prefix}/dom/components/%{name}/ticket_cf_1066_comp.prf.xml -%{_prefix}/dom/components/%{name}/ticket_cf_1066_comp.scd.xml -%{_prefix}/dom/components/%{name}/python diff --git a/redhawk/src/testing/sdr/dom/components/ticket_cf_1067_comp/ticket_cf_1067_comp.spec b/redhawk/src/testing/sdr/dom/components/ticket_cf_1067_comp/ticket_cf_1067_comp.spec deleted file mode 100644 index d7c46669c..000000000 --- a/redhawk/src/testing/sdr/dom/components/ticket_cf_1067_comp/ticket_cf_1067_comp.spec +++ /dev/null @@ -1,85 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -# Point install paths to locations within our target SDR root -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: ticket_cf_1067_comp -Version: 1.0.0 -Release: 1%{?dist} -Summary: Component %{name} - -Group: REDHAWK/Components -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 1.10 -Requires: redhawk >= 1.10 - -BuildArch: noarch - -%description -Component %{name} - - -%prep -%setup -q - - -%build -# Implementation python -pushd python -./reconf -%define _bindir %{_prefix}/dom/components/ticket_cf_1067_comp/python -%configure -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation python -pushd python -%define _bindir %{_prefix}/dom/components/ticket_cf_1067_comp/python -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_prefix}/dom/components/%{name} -%{_prefix}/dom/components/%{name}/ticket_cf_1067_comp.scd.xml -%{_prefix}/dom/components/%{name}/ticket_cf_1067_comp.prf.xml -%{_prefix}/dom/components/%{name}/ticket_cf_1067_comp.spd.xml -%{_prefix}/dom/components/%{name}/python - diff --git a/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/Makefile.am new file mode 100644 index 000000000..a18f6f9c5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/Makefile.am @@ -0,0 +1,61 @@ +# +# This file is protected by Copyright. 
Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License +# for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# +CFDIR = $(top_srcdir)/base + +ossieName = time_cp_now +libdir = $(prefix)/dom/components/time_cp_now/cpp +lib_LTLIBRARIES = time_cp_now.la + +.PHONY: convenience-link clean-convenience-link + +install: + +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : time_cp_now.la + @ln -fs .libs/time_cp_now.so + +clean-convenience-link: + @rm -f time_cp_now.so + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + + +# Sources, libraries and library directories are auto-included from a file +# generated by the REDHAWK IDE. You can remove/modify the following lines if +# you wish to manually control these options. 
+time_cp_now_la_SOURCES = main.cpp time_cp_now.cpp time_cp_now_base.cpp +time_cp_now_la_LIBADD = $(SOFTPKG_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la +time_cp_now_la_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) -I$(CFDIR)/include +time_cp_now_la_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) + diff --git a/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/main.cpp new file mode 100644 index 000000000..1a186306d --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/main.cpp @@ -0,0 +1,11 @@ +#include +#include "ossie/ossieSupport.h" + +#include "time_cp_now.h" +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new time_cp_now_i(uuid.c_str(), identifier.c_str()); + } +} + diff --git a/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now.cpp b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now.cpp new file mode 100644 index 000000000..7a3c5379b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now.cpp @@ -0,0 +1,248 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "time_cp_now.h" + +PREPARE_LOGGING(time_cp_now_i) + +time_cp_now_i::time_cp_now_i(const char *uuid, const char *label) : + time_cp_now_base(uuid, label) +{ + // Avoid placing constructor code here. 
Instead, use the "constructor" function. +} + +time_cp_now_i::~time_cp_now_i() +{ +} + +void time_cp_now_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) ports do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. 
+ + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. + + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + // Get the output stream, creating it if it doesn't exist yet + bulkio::OutFloatStream outputStream = dataFloat_out->getStream(inputStream.streamID()); + if (!outputStream) { + outputStream = dataFloat_out->createStream(inputStream.sri()); + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Get read-only access to the input data + redhawk::shared_buffer inputData = block.buffer(); + + // Acquire a new buffer to hold the output data + redhawk::buffer outputData(inputData.size()); + + // Transform input data into output data + for (size_t index = 0; index < inputData.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // Write to the output 
stream; outputData must not be modified after + // this method call + outputStream.write(outputData, block.getStartTime()); + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. Data blocks + provide a cxbuffer() method that returns a complex interpretation of the + buffer without making a copy: + + if (block.complex()) { + redhawk::shared_buffer > inData = block.cxbuffer(); + redhawk::buffer > outData(inData.size()); + for (size_t index = 0; index < inData.size(); ++index) { + outData[index] = inData[index]; + } + outputStream.write(outData, block.getStartTime()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void time_cp_now_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &time_cp_now_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. 
+ + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (time_cp_now_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &time_cp_now_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). 
The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to time_cp_now.cpp + time_cp_now_i::time_cp_now_i(const char *uuid, const char *label) : + time_cp_now_base(uuid, label) + { + addPropertyListener(scaleValue, this, &time_cp_now_i::scaleChanged); + addPropertyListener(status, this, &time_cp_now_i::statusChanged); + } + + void time_cp_now_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(time_cp_now_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void time_cp_now_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(time_cp_now_i, "status changed"); + } + + //Add to time_cp_now.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int time_cp_now_i::serviceFunction() +{ + LOG_DEBUG(time_cp_now_i, "serviceFunction() example log message"); + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now.h b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now.h new file mode 100644 index 000000000..e15ad700b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now.h @@ -0,0 +1,18 @@ +#ifndef TIME_CP_NOW_I_IMPL_H +#define TIME_CP_NOW_I_IMPL_H + +#include "time_cp_now_base.h" + +class time_cp_now_i : public time_cp_now_base +{ + ENABLE_LOGGING + public: + time_cp_now_i(const char *uuid, const char *label); + ~time_cp_now_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // TIME_CP_NOW_I_IMPL_H diff --git 
a/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now_base.cpp b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now_base.cpp new file mode 100644 index 000000000..57a92b834 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now_base.cpp @@ -0,0 +1,108 @@ +#include "time_cp_now_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +time_cp_now_base::time_cp_now_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); +} + +time_cp_now_base::~time_cp_now_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. 
+*******************************************************************************************/ +void time_cp_now_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void time_cp_now_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void time_cp_now_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void time_cp_now_base::loadProperties() +{ + addProperty(rightnow, + "now", + "rightnow", + "", + "readwrite", + "", + "external", + "property"); + + addProperty(simple1970, + "1970:01:01::00:00:00", + "simple1970", + "", + "readwrite", + "", + "external", + "property"); + + // Set the sequence with its initial values + simpleSeqDefNow.push_back(redhawk::time::utils::convert("now")); + addProperty(simpleSeqDefNow, + simpleSeqDefNow, + "simpleSeqDefNow", + "", + "readwrite", + "", + "external", + "property"); + + addProperty(simpleSeqNoDef, + "simpleSeqNoDef", + "", + "readwrite", + "", + "external", + "property"); + + // Set the sequence with its initial values + simpleSeq1970.push_back(redhawk::time::utils::convert("1970:01:01::00:00:00")); + addProperty(simpleSeq1970, + simpleSeq1970, + "simpleSeq1970", + "", + "readwrite", + "", + "external", + "property"); + +} + + diff --git a/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now_base.h b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now_base.h new file mode 100644 index 000000000..edc1d9f26 --- /dev/null +++ 
b/redhawk/src/testing/sdr/dom/components/time_cp_now/cpp/time_cp_now_base.h @@ -0,0 +1,38 @@ +#ifndef TIME_CP_NOW_BASE_IMPL_BASE_H +#define TIME_CP_NOW_BASE_IMPL_BASE_H + +#include +#include +#include + + +class time_cp_now_base : public Component, protected ThreadedComponent +{ + public: + time_cp_now_base(const char *uuid, const char *label); + ~time_cp_now_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + // Member variables exposed as properties + /// Property: rightnow + CF::UTCTime rightnow; + /// Property: simple1970 + CF::UTCTime simple1970; + /// Property: simpleSeqDefNow + std::vector simpleSeqDefNow; + /// Property: simpleSeqNoDef + std::vector simpleSeqNoDef; + /// Property: simpleSeq1970 + std::vector simpleSeq1970; + + private: +}; +#endif // TIME_CP_NOW_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.prf.xml b/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.prf.xml new file mode 100644 index 000000000..07dadd7c8 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.prf.xml @@ -0,0 +1,32 @@ + + + + + now + + + + + 1970:01:01::00:00:00 + + + + + + now + + + + + + + + + + + 1970:01:01::00:00:00 + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.scd.xml b/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.spd.xml 
b/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.spd.xml new file mode 100644 index 000000000..a87476b2a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_cp_now/time_cp_now.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/time_cp_now.so + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/time_ja_now/java/Makefile.am b/redhawk/src/testing/sdr/dom/components/time_ja_now/java/Makefile.am new file mode 100644 index 000000000..18d6baf71 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_ja_now/java/Makefile.am @@ -0,0 +1,41 @@ +## This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# # +# # This file is part of REDHAWK core. +# # +# # REDHAWK core is free software: you can redistribute it and/or modify it under +# # the terms of the GNU Lesser General Public License as published by the Free +# # Software Foundation, either version 3 of the License, or (at your option) any +# # later version. +# # +# # REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# # details. +# # +# # You should have received a copy of the GNU Lesser General Public License +# # along with this program. If not, see http://www.gnu.org/licenses/. +# # +# +if HAVE_JAVASUPPORT + +time_ja_now.jar: + mkdir -p bin + find ./src -name "*.java" > fileList.txt + $(JAVAC) -cp $(OSSIE_CLASSPATH) -d bin @fileList.txt + $(JAR) cf ./time_ja_now.jar -C bin . 
+ rm fileList.txt + +clean-local: + rm -rf bin + +time_ja_now_jar_SOURCES := $(shell find ./src -name "*.java") + +ossieName = time_ja_now +noinst_PROGRAMS = time_ja_now.jar + +else + +all-local: + @echo "Java support disabled - time_ja_now will not be compiled" +endif diff --git a/redhawk/src/testing/sdr/dom/components/time_ja_now/java/src/time_ja_now/java/time_ja_now.java b/redhawk/src/testing/sdr/dom/components/time_ja_now/java/src/time_ja_now/java/time_ja_now.java new file mode 100644 index 000000000..745c95cd0 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_ja_now/java/src/time_ja_now/java/time_ja_now.java @@ -0,0 +1,252 @@ +package time_ja_now.java; + +import java.util.Properties; + +/** + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping + * + * Source: time_ja_now.spd.xml + */ +public class time_ja_now extends time_ja_now_base { + /** + * This is the component constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. 
+ * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. + * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app = this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. 
+ * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". + * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. + */ + + public time_ja_now() + { + super(); + } + + public void constructor() + { + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. 
Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. + * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. 
+ * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the component has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + logger.debug("serviceFunction() example log message"); + + return NOOP; + } + + /** + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/redhawk/src/testing/sdr/dom/components/time_ja_now/java/src/time_ja_now/java/time_ja_now_base.java b/redhawk/src/testing/sdr/dom/components/time_ja_now/java/src/time_ja_now/java/time_ja_now_base.java new file mode 100644 index 000000000..c885ac560 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_ja_now/java/src/time_ja_now/java/time_ja_now_base.java @@ -0,0 +1,173 @@ +package time_ja_now.java; + + +import java.util.Properties; + +import org.apache.log4j.Logger; + +import org.omg.CosNaming.NamingContextPackage.CannotProceed; +import org.omg.CosNaming.NamingContextPackage.InvalidName; +import org.omg.CosNaming.NamingContextPackage.NotFound; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.InvalidObjectReference; + +import org.ossie.component.*; +import org.ossie.properties.*; + + +/** + * This is the component code. This file contains all the access points + * you need to use to be able to access all input and output ports, + * respond to incoming data, and perform general component housekeeping + * + * Source: time_ja_now.spd.xml + * + * @generated + */ + +public abstract class time_ja_now_base extends Component { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(time_ja_now_base.class.getName()); + + /** + * The property rightnow + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + public final UTCTimeProperty rightnow = + new UTCTimeProperty( + "rightnow", //id + null, //name + "now", //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + /** + * The property simple1970 + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final UTCTimeProperty simple1970 = + new UTCTimeProperty( + "simple1970", //id + null, //name + "1970:01:01::00:00:00", //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property simpleSeqDefNow + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final UTCTimeSequenceProperty simpleSeqDefNow = + new UTCTimeSequenceProperty( + "simpleSeqDefNow", //id + null, //name + UTCTimeSequenceProperty.asList("now"), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property simpleSeqNoDef + * If the meaning of this property isn't clear, a description should be added. + * + * @generated + */ + public final UTCTimeSequenceProperty simpleSeqNoDef = + new UTCTimeSequenceProperty( + "simpleSeqNoDef", //id + null, //name + UTCTimeSequenceProperty.asList(), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * The property simpleSeq1970 + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + public final UTCTimeSequenceProperty simpleSeq1970 = + new UTCTimeSequenceProperty( + "simpleSeq1970", //id + null, //name + UTCTimeSequenceProperty.asList("1970:01:01::00:00:00"), //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * @generated + */ + public time_ja_now_base() + { + super(); + + setLogger( logger, time_ja_now_base.class.getName() ); + + + // Properties + addProperty(rightnow); + + addProperty(simple1970); + + addProperty(simpleSeqDefNow); + + addProperty(simpleSeqNoDef); + + addProperty(simpleSeq1970); + + } + + + + /** + * The main function of your component. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. + * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + time_ja_now.configureOrb(orbProps); + + try { + Component.start_component(time_ja_now.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (NotFound e) { + e.printStackTrace(); + } catch (CannotProceed e) { + e.printStackTrace(); + } catch (InvalidName e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dom/components/time_ja_now/java/startJava.sh b/redhawk/src/testing/sdr/dom/components/time_ja_now/java/startJava.sh new file mode 100755 index 000000000..bac09c081 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_ja_now/java/startJava.sh @@ -0,0 +1,38 @@ +#!/bin/sh +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. 
+# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +#Sun ORB start line +# Important, the $@ must be quoted "$@" for arguments to be passed correctly +myDir=`dirname $0` +JAVA_LIBDIR=${myDir}/../../../../../base/framework/java +JAVA_CLASSPATH=${JAVA_LIBDIR}/apache-commons-lang-2.4.jar:${JAVA_LIBDIR}/log4j-1.2.15.jar:${JAVA_LIBDIR}/CFInterfaces.jar:${JAVA_LIBDIR}/ossie.jar:${myDir}/time_ja_now.jar:${myDir}:${myDir}/bin:${CLASSPATH} + +# Path for Java +if test -x $JAVA_HOME/bin/java; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=java +fi + +# NOTE: the $@ must be quoted "$@" for arguments to be passed correctly + +#Sun ORB start line +exec $JAVA -cp ${JAVA_CLASSPATH} time_ja_now.java.time_ja_now "$@" diff --git a/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.prf.xml b/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.prf.xml new file mode 100644 index 000000000..07dadd7c8 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.prf.xml @@ -0,0 +1,32 @@ + + + + + now + + + + + 1970:01:01::00:00:00 + + + + + + now + + + + + + + + + + + 1970:01:01::00:00:00 + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.scd.xml b/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.scd.xml new file mode 100644 index 000000000..df94ceaf5 
--- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.spd.xml b/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.spd.xml new file mode 100644 index 000000000..e41a2c85e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_ja_now/time_ja_now.spd.xml @@ -0,0 +1,26 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/time_py_now/python/time_py_now.py b/redhawk/src/testing/sdr/dom/components/time_py_now/python/time_py_now.py new file mode 100755 index 000000000..a155d9602 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_py_now/python/time_py_now.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: time_py_now.spd.xml +from ossie.resource import start_component +import logging + +from time_py_now_base import * + +class time_py_now_i(time_py_now_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", time_py_now_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = time_py_now_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(time_py_now_i) + diff --git a/redhawk/src/testing/sdr/dom/components/time_py_now/python/time_py_now_base.py b/redhawk/src/testing/sdr/dom/components/time_py_now/python/time_py_now_base.py new file mode 100644 index 000000000..9b4d9e0f2 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_py_now/python/time_py_now_base.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: time_py_now.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property +from ossie.properties import simpleseq_property + +import Queue, copy, time, threading + +class time_py_now_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. 
+ + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + rightnow = simple_property(id_="rightnow", + type_="utctime", + defvalue="now", + mode="readwrite", + action="external", + kinds=("property",)) + simple1970 = simple_property(id_="simple1970", + type_="utctime", + defvalue="1970:01:01::00:00:00", + mode="readwrite", + action="external", + kinds=("property",)) + + simpleSeqDefNow = simpleseq_property(id_="simpleSeqDefNow", + type_="utctime", + defvalue=["now"], + mode="readwrite", + action="external", + kinds=("property",)) + + simpleSeqNoDef = simpleseq_property(id_="simpleSeqNoDef", + type_="utctime", + defvalue=[], + mode="readwrite", + action="external", + kinds=("property",)) + + simpleSeq1970 = simpleseq_property(id_="simpleSeq1970", + type_="utctime", + defvalue=["1970:01:01::00:00:00"], + mode="readwrite", + action="external", + kinds=("property",)) diff --git a/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.prf.xml b/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.prf.xml new file mode 100644 index 000000000..07dadd7c8 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.prf.xml @@ -0,0 +1,32 @@ + + + + + now + + + + + 1970:01:01::00:00:00 + + + + + + now + + + + + + + + + + + 1970:01:01::00:00:00 + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.scd.xml b/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.spd.xml 
b/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.spd.xml new file mode 100644 index 000000000..b6acd707d --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/time_py_now/time_py_now.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + python/time_py_now.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/Makefile.am b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/Makefile.am new file mode 100644 index 000000000..bf05a4933 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/Makefile.am @@ -0,0 +1,27 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# +CFDIR = $(top_srcdir)/base + +noinst_PROGRAMS = timeprop_cpp + +timeprop_cpp_SOURCES = timeprop_cpp.cpp timeprop_cpp.h timeprop_cpp_base.cpp timeprop_cpp_base.h main.cpp +timeprop_cpp_CXXFLAGS = -Wall $(BOOST_CPPFLAGS) -I$(CFDIR)/include +timeprop_cpp_LDADD = $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(BOOST_THREAD_LIB) $(OMNIDYNAMIC_LIBS) $(OMNICOS_LIBS) $(CFDIR)/framework/libossiecf.la $(CFDIR)/framework/idl/libossieidl.la + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/main.cpp b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/main.cpp new file mode 100644 index 000000000..2a4a62239 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/main.cpp @@ -0,0 +1,11 @@ +#include +#include "ossie/ossieSupport.h" + +#include "timeprop_cpp.h" +int main(int argc, char* argv[]) +{ + timeprop_cpp_i* timeprop_cpp_servant; + Component::start_component(timeprop_cpp_servant, argc, argv); + return 0; +} + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp.cpp b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp.cpp new file mode 100644 index 000000000..b8e9b830a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp.cpp @@ -0,0 +1,249 @@ +/************************************************************************** + + This is the component code. This file contains the child class where + custom functionality can be added to the component. Custom + functionality to the base class can be extended here. Access to + the ports can also be done from this class + +**************************************************************************/ + +#include "timeprop_cpp.h" + +PREPARE_LOGGING(timeprop_cpp_i) + +timeprop_cpp_i::timeprop_cpp_i(const char *uuid, const char *label) : + timeprop_cpp_base(uuid, label) +{ + // Avoid placing constructor code here. Instead, use the "constructor" function. 
+ +} + +timeprop_cpp_i::~timeprop_cpp_i() +{ +} + +void timeprop_cpp_i::constructor() +{ + /*********************************************************************************** + This is the RH constructor. All properties are properly initialized before this function is called + ***********************************************************************************/ +} + +/*********************************************************************************************** + + Basic functionality: + + The service function is called by the serviceThread object (of type ProcessThread). + This call happens immediately after the previous call if the return value for + the previous call was NORMAL. + If the return value for the previous call was NOOP, then the serviceThread waits + an amount of time defined in the serviceThread's constructor. + + SRI: + To create a StreamSRI object, use the following code: + std::string stream_id = "testStream"; + BULKIO::StreamSRI sri = bulkio::sri::create(stream_id); + + Time: + To create a PrecisionUTCTime object, use the following code: + BULKIO::PrecisionUTCTime tstamp = bulkio::time::utils::now(); + + + Ports: + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). The input stream class is a port-specific class, so each port + implementing the BulkIO interface will have its own type-specific input stream. + UDP multicast (dataSDDS and dataVITA49) and string-based (dataString, dataXML and + dataFile) do not support streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio::Const::BLOCKING and + bulkio::Const::NON_BLOCKING. 
+ + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that automatically manage the memory for the data + and include the SRI that was in effect at the time the data was received. It is not + necessary to delete the block; it will be cleaned up when it goes out of scope. + + To send data using a BulkIO interface, create an output stream and write the + data to it. When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + NOTE: If you have a BULKIO dataSDDS or dataVITA49 port, you must manually call + "port->updateStats()" to update the port statistics when appropriate. + + Example: + // This example assumes that the component has two ports: + // An input (provides) port of type bulkio::InShortPort called dataShort_in + // An output (uses) port of type bulkio::OutFloatPort called dataFloat_out + // The mapping between the port and the class is found + // in the component base class header file + // The component class must have an output stream member; add to + // timeprop_cpp.h: + // bulkio::OutFloatStream outputStream; + + bulkio::InShortStream inputStream = dataShort_in->getCurrentStream(); + if (!inputStream) { // No streams are available + return NOOP; + } + + bulkio::ShortDataBlock block = inputStream.read(); + if (!block) { // No data available + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + return NOOP; + } + + short* inputData = block.data(); + std::vector outputData; + outputData.resize(block.size()); + for (size_t index = 0; index < block.size(); ++index) { + outputData[index] = (float) inputData[index]; + } + + // If there is no output stream open, create one + if (!outputStream) { + outputStream = dataFloat_out->createStream(block.sri()); + } else if (block.sriChanged()) { + // Update output SRI + outputStream.sri(block.sri()); + } + + // Write to the output stream + 
outputStream.write(outputData, block.getTimestamps()); + + // Propagate end-of-stream + if (inputStream.eos()) { + outputStream.close(); + } + + return NORMAL; + + If working with complex data (i.e., the "mode" on the SRI is set to + true), the data block's complex() method will return true. Data blocks + provide functions that return the correct interpretation of the data + buffer and number of complex elements: + + if (block.complex()) { + std::complex* data = block.cxdata(); + for (size_t index = 0; index < block.cxsize(); ++index) { + data[index] = std::abs(data[index]); + } + outputStream.write(data, block.cxsize(), bulkio::time::utils::now()); + } + + Interactions with non-BULKIO ports are left up to the component developer's discretion + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + void timeprop_cpp_i::my_message_callback(const std::string& id, const my_msg_struct &msg){ + } + + Register the message callback onto the input port with the following form: + this->msg_input->registerMessage("my_msg", this, &timeprop_cpp_i::my_message_callback); + + To send a message, you need to (1) create a message structure, (2) a message prototype described + as a structure property of kind message, and (3) send the message over the port. 
+ + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + ::my_msg_struct msg_out; + this->msg_output->sendMessage(msg_out); + + Accessing the Application and Domain Manager: + + Both the Application hosting this Component and the Domain Manager hosting + the Application are available to the Component. + + To access the Domain Manager: + CF::DomainManager_ptr dommgr = this->getDomainManager()->getRef(); + To access the Application: + CF::Application_ptr app = this->getApplication()->getRef(); + + Properties: + + Properties are accessed directly as member variables. For example, if the + property name is "baudRate", it may be accessed within member functions as + "baudRate". Unnamed properties are given the property id as its name. + Property types are mapped to the nearest C++ type, (e.g. "string" becomes + "std::string"). All generated properties are declared in the base class + (timeprop_cpp_base). + + Simple sequence properties are mapped to "std::vector" of the simple type. + Struct properties, if used, are mapped to C++ structs defined in the + generated file "struct_props.h". Field names are taken from the name in + the properties file; if no name is given, a generated name of the form + "field_n" is used, where "n" is the ordinal number of the field. + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A boolean called scaleInput + + if (scaleInput) { + dataOut[i] = dataIn[i] * scaleValue; + } else { + dataOut[i] = dataIn[i]; + } + + Callback methods can be associated with a property so that the methods are + called each time the property value changes. This is done by calling + addPropertyListener(, this, &timeprop_cpp_i::) + in the constructor. + + The callback method receives two arguments, the old and new values, and + should return nothing (void). 
The arguments can be passed by value, + receiving a copy (preferred for primitive types), or by const reference + (preferred for strings, structs and vectors). + + Example: + // This example makes use of the following Properties: + // - A float value called scaleValue + // - A struct property called status + + //Add to timeprop_cpp.cpp + timeprop_cpp_i::timeprop_cpp_i(const char *uuid, const char *label) : + timeprop_cpp_base(uuid, label) + { + addPropertyListener(scaleValue, this, &timeprop_cpp_i::scaleChanged); + addPropertyListener(status, this, &timeprop_cpp_i::statusChanged); + } + + void timeprop_cpp_i::scaleChanged(float oldValue, float newValue) + { + LOG_DEBUG(timeprop_cpp_i, "scaleValue changed from" << oldValue << " to " << newValue); + } + + void timeprop_cpp_i::statusChanged(const status_struct& oldValue, const status_struct& newValue) + { + LOG_DEBUG(timeprop_cpp_i, "status changed"); + } + + //Add to timeprop_cpp.h + void scaleChanged(float oldValue, float newValue); + void statusChanged(const status_struct& oldValue, const status_struct& newValue); + + +************************************************************************************************/ +int timeprop_cpp_i::serviceFunction() +{ + LOG_DEBUG(timeprop_cpp_i, "serviceFunction() example log message"); + + return NOOP; +} + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp.h b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp.h new file mode 100644 index 000000000..a19366112 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp.h @@ -0,0 +1,18 @@ +#ifndef TIMEPROP_CPP_I_IMPL_H +#define TIMEPROP_CPP_I_IMPL_H + +#include "timeprop_cpp_base.h" + +class timeprop_cpp_i : public timeprop_cpp_base +{ + ENABLE_LOGGING + public: + timeprop_cpp_i(const char *uuid, const char *label); + ~timeprop_cpp_i(); + + void constructor(); + + int serviceFunction(); +}; + +#endif // TIMEPROP_CPP_I_IMPL_H diff --git 
a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp_base.cpp b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp_base.cpp new file mode 100644 index 000000000..95d0e897b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp_base.cpp @@ -0,0 +1,69 @@ +#include "timeprop_cpp_base.h" + +/******************************************************************************************* + + AUTO-GENERATED CODE. DO NOT MODIFY + + The following class functions are for the base class for the component class. To + customize any of these functions, do not modify them here. Instead, overload them + on the child class + +******************************************************************************************/ + +timeprop_cpp_base::timeprop_cpp_base(const char *uuid, const char *label) : + Component(uuid, label), + ThreadedComponent() +{ + setThreadName(label); + + loadProperties(); +} + +timeprop_cpp_base::~timeprop_cpp_base() +{ +} + +/******************************************************************************************* + Framework-level functions + These functions are generally called by the framework to perform housekeeping. 
+*******************************************************************************************/ +void timeprop_cpp_base::start() throw (CORBA::SystemException, CF::Resource::StartError) +{ + Component::start(); + ThreadedComponent::startThread(); +} + +void timeprop_cpp_base::stop() throw (CORBA::SystemException, CF::Resource::StopError) +{ + Component::stop(); + if (!ThreadedComponent::stopThread()) { + throw CF::Resource::StopError(CF::CF_NOTSET, "Processing thread did not die"); + } +} + +void timeprop_cpp_base::releaseObject() throw (CORBA::SystemException, CF::LifeCycle::ReleaseError) +{ + // This function clears the component running condition so main shuts down everything + try { + stop(); + } catch (CF::Resource::StopError& ex) { + // TODO - this should probably be logged instead of ignored + } + + Component::releaseObject(); +} + +void timeprop_cpp_base::loadProperties() +{ + addProperty(prop, + "value", + "prop", + "", + "readwrite", + "", + "external", + "property"); + +} + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp_base.h b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp_base.h new file mode 100644 index 000000000..036c26fc0 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/cpp/timeprop_cpp_base.h @@ -0,0 +1,30 @@ +#ifndef TIMEPROP_CPP_BASE_IMPL_BASE_H +#define TIMEPROP_CPP_BASE_IMPL_BASE_H + +#include +#include +#include + + +class timeprop_cpp_base : public Component, protected ThreadedComponent +{ + public: + timeprop_cpp_base(const char *uuid, const char *label); + ~timeprop_cpp_base(); + + void start() throw (CF::Resource::StartError, CORBA::SystemException); + + void stop() throw (CF::Resource::StopError, CORBA::SystemException); + + void releaseObject() throw (CF::LifeCycle::ReleaseError, CORBA::SystemException); + + void loadProperties(); + + protected: + // Member variables exposed as properties + /// Property: prop + std::string prop; + + private: +}; +#endif // 
TIMEPROP_CPP_BASE_IMPL_BASE_H diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.prf.xml b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.prf.xml new file mode 100644 index 000000000..fac40ad7c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.prf.xml @@ -0,0 +1,9 @@ + + + + + value + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.scd.xml b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.spd.xml b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.spd.xml new file mode 100644 index 000000000..97905b81a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_cpp/timeprop_cpp.spd.xml @@ -0,0 +1,27 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + cpp/timeprop_cpp + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_java/java/Makefile.am b/redhawk/src/testing/sdr/dom/components/timeprop_java/java/Makefile.am new file mode 100644 index 000000000..c48f8b23f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_java/java/Makefile.am @@ -0,0 +1,41 @@ +## This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# # +# # This file is part of REDHAWK core. 
+# # +# # REDHAWK core is free software: you can redistribute it and/or modify it under +# # the terms of the GNU Lesser General Public License as published by the Free +# # Software Foundation, either version 3 of the License, or (at your option) any +# # later version. +# # +# # REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# # details. +# # +# # You should have received a copy of the GNU Lesser General Public License +# # along with this program. If not, see http://www.gnu.org/licenses/. +# # +# +if HAVE_JAVASUPPORT + +timeprop_java.jar: + mkdir -p bin + find ./src -name "*.java" > fileList.txt + $(JAVAC) -cp $(OSSIE_CLASSPATH) -d bin @fileList.txt + $(JAR) cf ./timeprop_java.jar -C bin . + rm fileList.txt + +clean-local: + rm -rf bin + +timeprop_java_jar_SOURCES := $(shell find ./src -name "*.java") + +ossieName = timeprop_java +noinst_PROGRAMS = timeprop_java.jar + +else + +all-local: + @echo "Java support disabled - timeprop_java will not be compiled" +endif diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_java/java/src/timeprop_java/java/timeprop_java.java b/redhawk/src/testing/sdr/dom/components/timeprop_java/java/src/timeprop_java/java/timeprop_java.java new file mode 100644 index 000000000..5c7bf3ba7 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_java/java/src/timeprop_java/java/timeprop_java.java @@ -0,0 +1,252 @@ +package timeprop_java.java; + +import java.util.Properties; + +/** + * This is the component code. This file contains the derived class where custom + * functionality can be added to the component. 
You may add methods and code to + * this class to handle property changes, respond to incoming data, and perform + * general component housekeeping + * + * Source: timeprop_java.spd.xml + */ +public class timeprop_java extends timeprop_java_base { + /** + * This is the component constructor. In this method, you may add + * additional functionality to properties such as listening for changes + * or handling allocation, register message handlers and set up internal + * state for your component. + * + * A component may listen for external changes to properties (i.e., by a + * call to configure) using the PropertyListener interface. Listeners are + * registered by calling addChangeListener() on the property instance + * with an object that implements the PropertyListener interface for that + * data type (e.g., "PropertyListener" for a float property). More + * than one listener can be connected to a property. + * + * Example: + * // This example makes use of the following properties: + * // - A float value called scaleValue + * // The file must import "org.ossie.properties.PropertyListener" + * // Add the following import to the top of the file: + * import org.ossie.properties.PropertyListener; + * + * //Add the following to the class constructor: + * this.scaleValue.addChangeListener(new PropertyListener() { + * public void valueChanged(Float oldValue, Float newValue) { + * scaleValueChanged(oldValue, newValue); + * } + * }); + * + * //Add the following method to the class: + * private void scaleValueChanged(Float oldValue, Float newValue) + * { + * logger.debug("Changed scaleValue " + oldValue + " to " + newValue); + * } + * + * The recommended practice is for the implementation of valueChanged() to + * contain only glue code to dispatch the call to a private method on the + * component class. 
+ * Accessing the Application and Domain Manager: + * + * Both the Application hosting this Component and the Domain Manager hosting + * the Application are available to the Component. + * + * To access the Domain Manager: + * CF.DomainManager dommgr = this.getDomainManager().getRef(); + * To access the Application: + * CF.Application app = this.getApplication().getRef(); + * + * Messages: + * + * To send or receive messages, you must have at least one message + * prototype described as a struct property of kind "message." + * + * Receiving: + * + * To receive a message, you must have an input port of type MessageEvent + * (marked as "bi-dir" in the Ports editor). For each message type the + * component supports, you must register a message handler callback with + * the message input port. Message handlers implement the MessageListener + * interface. + * + * A callback is registered by calling registerMessage() on the message + * input port with the message ID, the message struct's Class object and + * an object that implements the MessageListener interface for that + * message struct (e.g., "MessageListener" for a + * message named "my_message"). + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an input MessageEvent port called "message_in". 
+ * // Add the following to the top of the file: + * import org.ossie.events.MessageListener; + * + * // Register the callback in the class constructor: + * this.message_in.registerMessage("my_message", my_message_struct.class, new MessageListener() { + * public void messageReceived(String messageId, my_message_struct messageData) { + * my_message_received(messageData); + * } + * }); + * + * // Implement the message handler method: + * private void my_message_received(my_message_struct messageData) { + * // Respond to the message + * } + * + * The recommended practice is for the implementation of messageReceived() + * to contain only glue code to dispatch the call to a private method on + * the component class. + * + * Sending: + * + * To send a message, you must have an output port of type MessageEvent. + * Create an instance of the message struct type and call sendMessage() + * to send a single message. + * + * Example: + * // Assume the component has a message type called "my_message" and + * // an output MessageEvent port called "message_out". + * my_message_struct message = new my_message_struct(); + * this.message_out.sendMessage(message); + * + * You may also send a batch of messages at once with the sendMessages() + * method. + */ + + public timeprop_java() + { + super(); + } + + public void constructor() + { + } + + + /** + * + * Main processing function + * + * General functionality: + * + * The serviceFunction() is called repeatedly by the component's processing + * thread, which runs independently of the main thread. Each invocation + * should perform a single unit of work, such as reading and processing one + * data packet. 
+ * + * The return status of serviceFunction() determines how soon the next + * invocation occurs: + * - NORMAL: the next call happens immediately + * - NOOP: the next call happens after a pre-defined delay (100 ms) + * - FINISH: no more calls occur + * + * StreamSRI: + * To create a StreamSRI object, use the following code: + * String stream_id = "testStream"; + * BULKIO.StreamSRI sri = new BULKIO.StreamSRI(); + * sri.mode = 0; + * sri.xdelta = 0.0; + * sri.ydelta = 1.0; + * sri.subsize = 0; + * sri.xunits = 1; // TIME_S + * sri.streamID = (stream_id != null) ? stream_id : ""; + * + * PrecisionUTCTime: + * To create a PrecisionUTCTime object, use the following code: + * BULKIO.PrecisionUTCTime tstamp = bulkio.time.utils.now(); + * + * Ports: + * + * Each port instance is accessed through members of the following form: + * + * this.port_ + * + * Input BULKIO data is obtained by calling getPacket on the provides + * port. The getPacket method takes one argument: the time to wait for + * data to arrive, in milliseconds. A timeout of 0 causes getPacket to + * return immediately, while a negative timeout indicates an indefinite + * wait. If no data is queued and no packet arrives during the waiting + * period, getPacket returns null. + * + * Output BULKIO data is sent by calling pushPacket on the uses port. In + * the case of numeric data, the pushPacket method takes a primitive + * array (e.g., "float[]"), a timestamp, an end-of-stream flag and a + * stream ID. You must make at least one call to pushSRI to associate a + * StreamSRI with the stream ID before calling pushPacket, or receivers + * may drop the data. + * + * When all processing on a stream is complete, a call should be made to + * pushPacket with the end-of-stream flag set to "true". + * + * Interactions with non-BULKIO ports are left up to the discretion of + * the component developer. 
+ * + * Properties: + * + * Properties are accessed through members of the same name; characters + * that are invalid for a Java identifier are replaced with "_". The + * current value of the property is read with getValue and written with + * setValue: + * + * float val = this.float_prop.getValue(); + * ... + * this.float_prop.setValue(1.5f); + * + * Primitive data types are stored using the corresponding Java object + * wrapper class. For example, a property of type "float" is stored as a + * Float. Java will automatically box and unbox primitive types where + * appropriate. + * + * Numeric properties support assignment via setValue from any numeric + * type. The standard Java type coercion rules apply (e.g., truncation + * of floating point values when converting to integer types). + * + * Example: + * + * This example assumes that the component has two ports: + * - A bulkio.InShortPort provides (input) port called dataShort_in + * - A bulkio.OutFloatPort uses (output) port called dataFloat_out + * The mapping between the port and the class is found in the component + * base class file. 
+ * This example also makes use of the following Properties: + * - A float value called amplitude with a default value of 2.0 + * - A boolean called increaseAmplitude with a default value of true + * + * bulkio.InShortPort.Packet data = this.port_dataShort_in.getPacket(125); + * + * if (data != null) { + * float[] outData = new float[data.getData().length]; + * for (int i = 0; i < data.getData().length; i++) { + * if (this.increaseAmplitude.getValue()) { + * outData[i] = (float)data.getData()[i] * this.amplitude.getValue(); + * } else { + * outData[i] = (float)data.getData()[i]; + * } + * } + * + * // NOTE: You must make at least one valid pushSRI call + * if (data.sriChanged()) { + * this.port_dataFloat_out.pushSRI(data.getSRI()); + * } + * this.port_dataFloat_out.pushPacket(outData, data.getTime(), data.getEndOfStream(), data.getStreamID()); + * } + * + */ + protected int serviceFunction() { + logger.debug("serviceFunction() example log message"); + + return NOOP; + } + + /** + * Set additional options for ORB startup. 
For example: + * + * orbProps.put("com.sun.CORBA.giop.ORBFragmentSize", Integer.toString(fragSize)); + * + * @param orbProps + */ + public static void configureOrb(final Properties orbProps) { + } + +} diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_java/java/src/timeprop_java/java/timeprop_java_base.java b/redhawk/src/testing/sdr/dom/components/timeprop_java/java/src/timeprop_java/java/timeprop_java_base.java new file mode 100644 index 000000000..401c36829 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_java/java/src/timeprop_java/java/timeprop_java_base.java @@ -0,0 +1,102 @@ +package timeprop_java.java; + + +import java.util.Properties; + +import org.apache.log4j.Logger; + +import org.omg.CosNaming.NamingContextPackage.CannotProceed; +import org.omg.CosNaming.NamingContextPackage.InvalidName; +import org.omg.CosNaming.NamingContextPackage.NotFound; +import org.omg.PortableServer.POAPackage.ServantNotActive; +import org.omg.PortableServer.POAPackage.WrongPolicy; + +import CF.InvalidObjectReference; + +import org.ossie.component.*; +import org.ossie.properties.*; + + +/** + * This is the component code. This file contains all the access points + * you need to use to be able to access all input and output ports, + * respond to incoming data, and perform general component housekeeping + * + * Source: timeprop_java.spd.xml + * + * @generated + */ + +public abstract class timeprop_java_base extends Component { + /** + * @generated + */ + public final static Logger logger = Logger.getLogger(timeprop_java_base.class.getName()); + + /** + * The property prop + * If the meaning of this property isn't clear, a description should be added. 
+ * + * @generated + */ + public final StringProperty prop = + new StringProperty( + "prop", //id + null, //name + "value", //default value + Mode.READWRITE, //mode + Action.EXTERNAL, //action + new Kind[] {Kind.PROPERTY} + ); + + /** + * @generated + */ + public timeprop_java_base() + { + super(); + + setLogger( logger, timeprop_java_base.class.getName() ); + + + // Properties + addProperty(prop); + + } + + + + /** + * The main function of your component. If no args are provided, then the + * CORBA object is not bound to an SCA Domain or NamingService and can + * be run as a standard Java application. + * + * @param args + * @generated + */ + public static void main(String[] args) + { + final Properties orbProps = new Properties(); + timeprop_java.configureOrb(orbProps); + + try { + Component.start_component(timeprop_java.class, args, orbProps); + } catch (InvalidObjectReference e) { + e.printStackTrace(); + } catch (NotFound e) { + e.printStackTrace(); + } catch (CannotProceed e) { + e.printStackTrace(); + } catch (InvalidName e) { + e.printStackTrace(); + } catch (ServantNotActive e) { + e.printStackTrace(); + } catch (WrongPolicy e) { + e.printStackTrace(); + } catch (InstantiationException e) { + e.printStackTrace(); + } catch (IllegalAccessException e) { + e.printStackTrace(); + } + } +} diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_java/java/startJava.sh b/redhawk/src/testing/sdr/dom/components/timeprop_java/java/startJava.sh new file mode 100755 index 000000000..a76c5a36d --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_java/java/startJava.sh @@ -0,0 +1,38 @@ +#!/bin/sh +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. 
+# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +#Sun ORB start line +# Important, the $@ must be quoted "$@" for arguments to be passed correctly +myDir=`dirname $0` +JAVA_LIBDIR=${myDir}/../../../../../base/framework/java +JAVA_CLASSPATH=${JAVA_LIBDIR}/apache-commons-lang-2.4.jar:${JAVA_LIBDIR}/log4j-1.2.15.jar:${JAVA_LIBDIR}/CFInterfaces.jar:${JAVA_LIBDIR}/ossie.jar:${myDir}/timeprop_java.jar:${myDir}:${myDir}/bin:${CLASSPATH} + +# Path for Java +if test -x $JAVA_HOME/bin/java; then + JAVA=$JAVA_HOME/bin/java +else + JAVA=java +fi + +# NOTE: the $@ must be quoted "$@" for arguments to be passed correctly + +#Sun ORB start line +exec $JAVA -cp ${JAVA_CLASSPATH} timeprop_java.java.timeprop_java "$@" diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.prf.xml b/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.prf.xml new file mode 100644 index 000000000..fac40ad7c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.prf.xml @@ -0,0 +1,9 @@ + + + + + value + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.scd.xml b/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.scd.xml @@ -0,0 
+1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.spd.xml b/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.spd.xml new file mode 100644 index 000000000..97d2d1971 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_java/timeprop_java.spd.xml @@ -0,0 +1,26 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. + + + java/startJava.sh + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_py/python/timeprop_py.py b/redhawk/src/testing/sdr/dom/components/timeprop_py/python/timeprop_py.py new file mode 100755 index 000000000..ca56d272c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_py/python/timeprop_py.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: timeprop_py.spd.xml +from ossie.resource import start_component +import logging + +from timeprop_py_base import * + +class timeprop_py_i(timeprop_py_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. + + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. 
+ + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is obtained in the process function through the getPacket call (BULKIO only) on a + provides port member instance. The optional argument is a timeout value, in seconds. + A zero value is non-blocking, while a negative value is blocking. Constants have been + defined for these values, bulkio.const.BLOCKING and bulkio.const.NON_BLOCKING. If no + timeout is given, it defaults to non-blocking. + + The return value is a named tuple with the following fields: + - dataBuffer + - T + - EOS + - streamID + - SRI + - sriChanged + - inputQueueFlushed + If no data is available due to a timeout, all fields are None. + + To send data, call the appropriate function in the port directly. In the case of BULKIO, + convenience functions have been added in the port classes that aid in output. + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. 
+ + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", timeprop_py_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = timeprop_py_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. 
+ + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + packet = self.port_dataShort_in.getPacket() + + if packet.dataBuffer is None: + return NOOP + + outData = range(len(packet.dataBuffer)) + for i in range(len(packet.dataBuffer)): + if self.increaseAmplitude: + outData[i] = float(packet.dataBuffer[i]) * self.amplitude + else: + outData[i] = float(packet.dataBuffer[i]) + + # NOTE: You must make at least one valid pushSRI call + if packet.sriChanged: + self.port_dataFloat_out.pushSRI(packet.SRI); + + self.port_dataFloat_out.pushPacket(outData, packet.T, packet.EOS, packet.streamID) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(timeprop_py_i) + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_py/python/timeprop_py_base.py b/redhawk/src/testing/sdr/dom/components/timeprop_py/python/timeprop_py_base.py new file mode 100644 index 000000000..1ef931abb --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_py/python/timeprop_py_base.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! 
+# +# Source: timeprop_py.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property + +import Queue, copy, time, threading + +class timeprop_py_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. 
You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + prop = simple_property(id_="prop", + type_="string", + defvalue="value", + mode="readwrite", + action="external", + kinds=("property",)) + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.prf.xml b/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.prf.xml new file mode 100644 index 000000000..fac40ad7c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.prf.xml @@ -0,0 +1,9 @@ + + + + + value + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.scd.xml b/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.scd.xml new file mode 100644 index 000000000..df94ceaf5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.scd.xml @@ -0,0 +1,45 @@ + + + + 2.2 + + resource + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.spd.xml b/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.spd.xml new file mode 100644 index 000000000..2aa65201c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/timeprop_py/timeprop_py.spd.xml @@ -0,0 +1,25 @@ + + + + + + null + + + + + + + + + The implementation contains descriptive information about the template for a software resource. 
+ + + python/timeprop_py.py + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/components/zero_length/python/Makefile.am b/redhawk/src/testing/sdr/dom/components/zero_length/python/Makefile.am new file mode 100644 index 000000000..8ccd2ba8e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/zero_length/python/Makefile.am @@ -0,0 +1,27 @@ +# vim: noet: softtabstop=0 +ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie +AUTOMAKE_OPTIONS = subdir-objects + +xmldir = $(prefix)/dom/components/zero_length/ +dist_xml_DATA = ../zero_length.scd.xml ../zero_length.prf.xml ../zero_length.spd.xml +domdir = $(prefix)/dom/components/zero_length/python/ + +distclean-local: + rm -rf m4 + rm -f config.* + rm -rf autom4te.cache + rm -f acinclude.m4 + rm -f aclocal.m4 + rm -f configure + rm -f depcomp + rm -f install-sh + rm -f ltmain.sh + rm -f Makefile.in + rm -f missing + rm -rf .deps + +# Sources are auto-included from a file generated by the REDHAWK IDE. You may +# remove the following lines if you wish to manually control these options. +include $(srcdir)/Makefile.am.ide +nobase_dist_dom_SCRIPTS = $(redhawk_SCRIPTS_auto) +nobase_dist_dom_DATA = $(redhawk_DATA_auto) diff --git a/redhawk/src/testing/sdr/dom/components/zero_length/python/Makefile.am.ide b/redhawk/src/testing/sdr/dom/components/zero_length/python/Makefile.am.ide new file mode 100644 index 000000000..b5b885448 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/zero_length/python/Makefile.am.ide @@ -0,0 +1,3 @@ +# This file is regularly auto-generated by the REDHAWK IDE. Do not modify! 
+redhawk_SCRIPTS_auto = zero_length.py +redhawk_DATA_auto = zero_length_base.py diff --git a/redhawk/src/testing/sdr/dom/components/zero_length/python/configure.ac b/redhawk/src/testing/sdr/dom/components/zero_length/python/configure.ac new file mode 100644 index 000000000..b39f00b31 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/zero_length/python/configure.ac @@ -0,0 +1,20 @@ +AC_INIT(zero_length, 1.0.0) +AM_INIT_AUTOMAKE([nostdinc foreign]) +AC_CONFIG_MACRO_DIR([m4]) + +AC_PROG_INSTALL + +AC_CORBA_ORB +OSSIE_CHECK_OSSIE +OSSIE_SDRROOT_AS_PREFIX +AM_PATH_PYTHON([2.4]) + +PKG_CHECK_MODULES([OSSIE], [ossie >= 2.1]) +AC_CHECK_PYMODULE(ossie, [], [AC_MSG_ERROR([the python ossie module is required])]) +PKG_CHECK_MODULES([OMNIORB], [omniORB4 >= 4.1.0]) +AC_CHECK_PYMODULE(omniORB, [], [AC_MSG_ERROR([the python omniORB module is required])]) + + +AC_CONFIG_FILES(Makefile) + +AC_OUTPUT diff --git a/redhawk/src/testing/sdr/dom/components/zero_length/python/reconf b/redhawk/src/testing/sdr/dom/components/zero_length/python/reconf new file mode 100755 index 000000000..8ff01d431 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/zero_length/python/reconf @@ -0,0 +1,6 @@ +#!/bin/sh + +rm -f config.cache +[ -d m4 ] || mkdir m4 +autoreconf -i + diff --git a/redhawk/src/testing/sdr/dom/components/zero_length/python/zero_length.py b/redhawk/src/testing/sdr/dom/components/zero_length/python/zero_length.py new file mode 100755 index 000000000..c22a3871b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/zero_length/python/zero_length.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python +# +# +# AUTO-GENERATED +# +# Source: zero_length.spd.xml +from ossie.resource import start_component +import logging + +from zero_length_base import * + +class zero_length_i(zero_length_base): + """""" + def constructor(self): + """ + This is called by the framework immediately after your component registers with the system. 
+ + In general, you should add customization here and not in the __init__ constructor. If you have + a custom port implementation you can override the specific implementation here with a statement + similar to the following: + self.some_port = MyPortImplementation() + + """ + # TODO add customization here. + + def process(self): + """ + Basic functionality: + + The process method should process a single "chunk" of data and then return. This method + will be called from the processing thread again, and again, and again until it returns + FINISH or stop() is called on the component. If no work is performed, then return NOOP. + + StreamSRI: + To create a StreamSRI object, use the following code (this generates a normalized SRI that does not flush the queue when full): + sri = bulkio.sri.create("my_stream_id") + + PrecisionUTCTime: + To create a PrecisionUTCTime object, use the following code: + tstamp = bulkio.timestamp.now() + + Ports: + + Each port instance is accessed through members of the following form: self.port_ + + Data is passed to the serviceFunction through by reading from input streams + (BulkIO only). UDP multicast (dataSDDS and dataVITA49) ports do not support + streams. + + The input stream from which to read can be requested with the getCurrentStream() + method. The optional argument to getCurrentStream() is a floating point number that + specifies the time to wait in seconds. A zero value is non-blocking. A negative value + is blocking. Constants have been defined for these values, bulkio.const.BLOCKING and + bulkio.const.NON_BLOCKING. + + More advanced uses of input streams are possible; refer to the REDHAWK documentation + for more details. + + Input streams return data blocks that include the SRI that was in effect at the time + the data was received, and the time stamps associated with that data. + + To send data using a BulkIO interface, create an output stream and write the + data to it. 
When done with the output stream, the close() method sends and end-of- + stream flag and cleans up. + + If working with complex data (i.e., the "mode" on the SRI is set to 1), + the data block's complex attribute will return True. Data blocks provide a + cxdata attribute that gives the data as a list of complex values: + + if block.complex: + outData = [val.conjugate() for val in block.cxdata] + outputStream.write(outData, block.getStartTime()) + + Interactions with non-BULKIO ports are left up to the component developer's discretion. + + Messages: + + To receive a message, you need (1) an input port of type MessageEvent, (2) a message prototype described + as a structure property of kind message, (3) a callback to service the message, and (4) to register the callback + with the input port. + + Assuming a property of type message is declared called "my_msg", an input port called "msg_input" is declared of + type MessageEvent, create the following code: + + def msg_callback(self, msg_id, msg_value): + print msg_id, msg_value + + Register the message callback onto the input port with the following form: + self.port_input.registerMessage("my_msg", zero_length_i.MyMsg, self.msg_callback) + + To send a message, you need to (1) create a message structure, and (2) send the message over the port. + + Assuming a property of type message is declared called "my_msg", an output port called "msg_output" is declared of + type MessageEvent, create the following code: + + msg_out = zero_length_i.MyMsg() + this.port_msg_output.sendMessage(msg_out) + + Accessing the Device Manager and Domain Manager: + + Both the Device Manager hosting this Device and the Domain Manager hosting + the Device Manager are available to the Device. + + To access the Domain Manager: + dommgr = self.getDomainManager().getRef(); + To access the Device Manager: + devmgr = self.getDeviceManager().getRef(); + Properties: + + Properties are accessed directly as member variables. 
If the property name is baudRate, + then accessing it (for reading or writing) is achieved in the following way: self.baudRate. + + To implement a change callback notification for a property, create a callback function with the following form: + + def mycallback(self, id, old_value, new_value): + pass + + where id is the property id, old_value is the previous value, and new_value is the updated value. + + The callback is then registered on the component as: + self.addPropertyChangeListener('baudRate', self.mycallback) + + + Example: + + # This example assumes that the component has two ports: + # - A provides (input) port of type bulkio.InShortPort called dataShort_in + # - A uses (output) port of type bulkio.OutFloatPort called dataFloat_out + # The mapping between the port and the class if found in the component + # base class. + # This example also makes use of the following Properties: + # - A float value called amplitude + # - A boolean called increaseAmplitude + + inputStream = self.port_dataShort_in.getCurrentStream() + if not inputStream: + return NOOP + + outputStream = self.port_dataFloat_out.getStream(inputStream.streamID) + if not outputStream: + outputStream = self.port_dataFloat_out.createStream(inputStream.sri) + + block = inputStream.read() + if not block: + if inputStream.eos(): + outputStream.close() + return NOOP + + if self.increaseAmplitude: + scale = self.amplitude + else: + scale = 1.0 + outData = [float(val) * scale for val in block.data] + + if block.sriChanged: + outputStream.sri = block.sri + + outputStream.write(outData, block.getStartTime()) + return NORMAL + + """ + + # TODO fill in your code here + self._log.debug("process() example log message") + return NOOP + + +if __name__ == '__main__': + logging.getLogger().setLevel(logging.INFO) + logging.debug("Starting Component") + start_component(zero_length_i) + diff --git a/redhawk/src/testing/sdr/dom/components/zero_length/python/zero_length_base.py 
b/redhawk/src/testing/sdr/dom/components/zero_length/python/zero_length_base.py new file mode 100644 index 000000000..7ebcef73a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/components/zero_length/python/zero_length_base.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# +# AUTO-GENERATED CODE. DO NOT MODIFY! +# +# Source: zero_length.spd.xml +from ossie.cf import CF +from ossie.cf import CF__POA +from ossie.utils import uuid + +from ossie.component import Component +from ossie.threadedcomponent import * +from ossie.properties import simple_property +from ossie.properties import simpleseq_property +from ossie.properties import struct_property + +import Queue, copy, time, threading + +class zero_length_base(CF__POA.Resource, Component, ThreadedComponent): + # These values can be altered in the __init__ of your derived class + + PAUSE = 0.0125 # The amount of time to sleep if process return NOOP + TIMEOUT = 5.0 # The amount of time to wait for the process thread to die when stop() is called + DEFAULT_QUEUE_SIZE = 100 # The number of BulkIO packets that can be in the queue before pushPacket will block + + def __init__(self, identifier, execparams): + loggerName = (execparams['NAME_BINDING'].replace('/', '.')).rsplit("_", 1)[0] + Component.__init__(self, identifier, execparams, loggerName=loggerName) + ThreadedComponent.__init__(self) + + # self.auto_start is deprecated and is only kept for API compatibility + # with 1.7.X and 1.8.0 components. 
This variable may be removed + # in future releases + self.auto_start = False + # Instantiate the default implementations for all ports on this component + + def start(self): + Component.start(self) + ThreadedComponent.startThread(self, pause=self.PAUSE) + + def stop(self): + Component.stop(self) + if not ThreadedComponent.stopThread(self, self.TIMEOUT): + raise CF.Resource.StopError(CF.CF_NOTSET, "Processing thread did not die") + + def releaseObject(self): + try: + self.stop() + except Exception: + self._log.exception("Error stopping") + Component.releaseObject(self) + + ###################################################################### + # PORTS + # + # DO NOT ADD NEW PORTS HERE. You can add ports in your derived class, in the SCD xml file, + # or via the IDE. + + ###################################################################### + # PROPERTIES + # + # DO NOT ADD NEW PROPERTIES HERE. You can add properties in your derived class, in the PRF xml file + # or by using the IDE. + class Mystruct(object): + mystruct__mysimple = simple_property( + id_="mystruct::mysimple", + + type_="string", + defvalue="x" + ) + + mystruct__mysimpleseq = simpleseq_property( + id_="mystruct::mysimpleseq", + + type_="string", + defvalue=[] + ) + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, (simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["mystruct__mysimple"] = self.mystruct__mysimple + d["mystruct__mysimpleseq"] = self.mystruct__mysimpleseq + return str(d) + + @classmethod + def getId(cls): + return "mystruct" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("mystruct__mysimple",self.mystruct__mysimple),("mystruct__mysimpleseq",self.mystruct__mysimpleseq)] 
+ + mystruct = struct_property(id_="mystruct", + structdef=Mystruct, + configurationkind=("property",), + mode="readwrite") + + + + diff --git a/redhawk/src/testing/sdr/dom/components/zero_length/zero_length.spd.xml b/redhawk/src/testing/sdr/dom/components/zero_length/zero_length.spd.xml index f9af1ff14..880e63baf 100644 --- a/redhawk/src/testing/sdr/dom/components/zero_length/zero_length.spd.xml +++ b/redhawk/src/testing/sdr/dom/components/zero_length/zero_length.spd.xml @@ -1,6 +1,6 @@ - + null @@ -24,4 +24,15 @@ + + The implementation contains descriptive information about the template for a software resource. + + + python/zero_length.py + + + + + + diff --git a/redhawk/src/testing/sdr/dom/deps/cpp_dep1/cpp_dep1.spec b/redhawk/src/testing/sdr/dom/deps/cpp_dep1/cpp_dep1.spec deleted file mode 100644 index 18871a0fa..000000000 --- a/redhawk/src/testing/sdr/dom/deps/cpp_dep1/cpp_dep1.spec +++ /dev/null @@ -1,97 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. 
-# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot}/dom/deps/cpp_dep1 - -# Point install paths to locations within our target SDR root -%define _libdir %{_prefix}/cpp/lib -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: cpp_dep1 -Version: 1.0.0 -Release: 1%{?dist} -Summary: Shared package %{name} - -Group: REDHAWK/Shared Packages -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 2.0 -BuildRequires: autoconf automake libtool - - - -%description -Shared package %{name} - -%package devel -Summary: Shared package %{name} -Group: REDHAWK/Shared Packages -Requires: %{name} = %{version}-%{release} - -%description devel -Libraries and header files for shared package %{name} - -%prep -%setup -q - - -%build -# Implementation cpp -pushd cpp -./reconf -%configure --with-sdr=%{_sdrroot} -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation cpp -pushd cpp -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_sdrroot}/dom/deps/cpp_dep1 -%{_prefix}/cpp_dep1.spd.xml -%{_prefix}/cpp -%exclude %{_libdir}/libcpp_dep1.la -%exclude %{_libdir}/libcpp_dep1.so -%exclude %{_libdir}/pkgconfig - -%files devel -%defattr(-,redhawk,redhawk,-) -%{_libdir}/libcpp_dep1.la -%{_libdir}/libcpp_dep1.so -%{_libdir}/pkgconfig -%{_prefix}/include - diff --git a/redhawk/src/testing/sdr/dom/deps/cpp_dep2/cpp_dep2.spec b/redhawk/src/testing/sdr/dom/deps/cpp_dep2/cpp_dep2.spec deleted file mode 100644 index f5df298c4..000000000 --- a/redhawk/src/testing/sdr/dom/deps/cpp_dep2/cpp_dep2.spec +++ /dev/null @@ -1,97 +0,0 @@ -# -# This file is protected by Copyright. 
Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot}/dom/deps/cpp_dep2 - -# Point install paths to locations within our target SDR root -%define _libdir %{_prefix}/cpp/lib -%define _sysconfdir %{_prefix}/etc -%define _localstatedir %{_prefix}/var -%define _mandir %{_prefix}/man -%define _infodir %{_prefix}/info - -Name: cpp_dep2 -Version: 1.0.0 -Release: 1%{?dist} -Summary: Shared package %{name} - -Group: REDHAWK/Shared Packages -License: None -Source0: %{name}-%{version}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) - -BuildRequires: redhawk-devel >= 2.0 -BuildRequires: autoconf automake libtool - - - -%description -Shared package %{name} - -%package devel -Summary: Shared package %{name} -Group: REDHAWK/Shared Packages -Requires: %{name} = %{version}-%{release} - -%description devel -Libraries and header files for shared package %{name} - -%prep -%setup -q - - -%build -# Implementation cpp -pushd cpp -./reconf -%configure --with-sdr=%{_sdrroot} -make %{?_smp_mflags} -popd - - -%install -rm -rf $RPM_BUILD_ROOT -# Implementation cpp 
-pushd cpp -make install DESTDIR=$RPM_BUILD_ROOT -popd - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,redhawk,redhawk,-) -%dir %{_sdrroot}/dom/deps/cpp_dep2 -%{_prefix}/cpp_dep2.spd.xml -%{_prefix}/cpp -%exclude %{_libdir}/libcpp_dep2.la -%exclude %{_libdir}/libcpp_dep2.so -%exclude %{_libdir}/pkgconfig - -%files devel -%defattr(-,redhawk,redhawk,-) -%{_libdir}/libcpp_dep2.la -%{_libdir}/libcpp_dep2.so -%{_libdir}/pkgconfig -%{_prefix}/include - diff --git a/redhawk/src/testing/sdr/dom/deps/py_dep2/lib/python/helloworld/helloworld.py b/redhawk/src/testing/sdr/dom/deps/py_dep2/lib/python/helloworld/helloworld.py deleted file mode 100644 index 5abb29dc6..000000000 --- a/redhawk/src/testing/sdr/dom/deps/py_dep2/lib/python/helloworld/helloworld.py +++ /dev/null @@ -1,14 +0,0 @@ - -import os -import sys - - -class HelloWorld(): - def __init__(self): - self.a = 100 - self.b = [ 1.0, 2.0, 3.0 ] - self.c = { 'testing' : 1, 'foobar' : 2, 'goober': 'jones' } - - - def __str__(self): - return "a: " + str(self.a) + "\nb: " + str(self.b) + "\nc: " + str(self.c) diff --git a/redhawk/src/testing/sdr/dom/deps/py_dep2/lib/python/helloworld2/__init__.py b/redhawk/src/testing/sdr/dom/deps/py_dep2/lib/python/helloworld2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/redhawk/src/testing/sdr/dom/deps/py_dep2/lib/python/helloworld2/helloworld2.py b/redhawk/src/testing/sdr/dom/deps/py_dep2/lib/python/helloworld2/helloworld2.py new file mode 100644 index 000000000..8339267b5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/deps/py_dep2/lib/python/helloworld2/helloworld2.py @@ -0,0 +1,14 @@ + +import os +import sys + + +class HelloWorld(): + def __init__(self): + self.a = 100 + self.b = [ 1.0, 2.0, 3.0 ] + self.c = { 'testing' : 1, 'foobar' : 2, 'jones': 'goober' } + + + def __str__(self): + return "a: " + str(self.a) + "\nb: " + str(self.b) + "\nc: " + str(self.c) diff --git a/redhawk/src/testing/sdr/dom/deps/py_dep2/py_dep2.spd.xml 
b/redhawk/src/testing/sdr/dom/deps/py_dep2/py_dep2.spd.xml index 929a91a6d..792030995 100644 --- a/redhawk/src/testing/sdr/dom/deps/py_dep2/py_dep2.spd.xml +++ b/redhawk/src/testing/sdr/dom/deps/py_dep2/py_dep2.spd.xml @@ -32,7 +32,7 @@ this program. If not, see http://www.gnu.org/licenses/. - + diff --git a/redhawk/src/testing/sdr/dom/deps/py_dep3/helloworld3/__init__.py b/redhawk/src/testing/sdr/dom/deps/py_dep3/helloworld3/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/redhawk/src/testing/sdr/dom/deps/py_dep3/helloworld3/helloworld3.py b/redhawk/src/testing/sdr/dom/deps/py_dep3/helloworld3/helloworld3.py new file mode 100644 index 000000000..8339267b5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/deps/py_dep3/helloworld3/helloworld3.py @@ -0,0 +1,14 @@ + +import os +import sys + + +class HelloWorld(): + def __init__(self): + self.a = 100 + self.b = [ 1.0, 2.0, 3.0 ] + self.c = { 'testing' : 1, 'foobar' : 2, 'jones': 'goober' } + + + def __str__(self): + return "a: " + str(self.a) + "\nb: " + str(self.b) + "\nc: " + str(self.c) diff --git a/redhawk/src/testing/sdr/dom/deps/py_dep3/py_dep3.spd.xml b/redhawk/src/testing/sdr/dom/deps/py_dep3/py_dep3.spd.xml new file mode 100644 index 000000000..3f54e6780 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/deps/py_dep3/py_dep3.spd.xml @@ -0,0 +1,41 @@ + + + + + + + + + + + HelloWorld Example + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/deps/ticket_490_dep_single/softpkgDep.spd.xml b/redhawk/src/testing/sdr/dom/deps/ticket_490_dep_single/softpkgDep.spd.xml index 04b695b8e..81e27d10c 100644 --- a/redhawk/src/testing/sdr/dom/deps/ticket_490_dep_single/softpkgDep.spd.xml +++ b/redhawk/src/testing/sdr/dom/deps/ticket_490_dep_single/softpkgDep.spd.xml @@ -29,6 +29,16 @@ with this program. If not, see http://www.gnu.org/licenses/. An empty SPD to test softpkg dependencies + + + This implementation should never be matchable. 
+ + + + + + + diff --git a/redhawk/src/testing/sdr/dom/deps/ticket_490_dep_single/ticket_490_dep_single_x86_64/__init__.py b/redhawk/src/testing/sdr/dom/deps/ticket_490_dep_single/ticket_490_dep_single_x86_64/__init__.py new file mode 100644 index 000000000..5c719e450 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/deps/ticket_490_dep_single/ticket_490_dep_single_x86_64/__init__.py @@ -0,0 +1,20 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + diff --git a/redhawk/src/testing/sdr/dom/logcfg/log4j.kickdomain.cfg b/redhawk/src/testing/sdr/dom/logcfg/log4j.kickdomain.cfg new file mode 100644 index 000000000..cd3f3b8b0 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/logcfg/log4j.kickdomain.cfg @@ -0,0 +1,21 @@ +log4j.rootLogger=INFO,stdout + +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +log4j.appender.dom_stdout=org.apache.log4j.ConsoleAppender +log4j.appender.dom_stdout.Target=System.out +log4j.appender.dom_stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.dom_stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +log4j.appender.dev_stdout=org.apache.log4j.ConsoleAppender +log4j.appender.dev_stdout.Target=System.out +log4j.appender.dev_stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.dev_stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +log4j.category.DomainManager_impl=INFO, dom_stdout +log4j.additivity.DomainManager_impl=False + +log4j.category.DeviceManager_impl=INFO, dev_stdout +log4j.additivity.DeviceManager_impl=False diff --git a/redhawk/src/testing/sdr/dom/waveforms/BasicSharedCollocWave/BasicSharedCollocWave.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/BasicSharedCollocWave/BasicSharedCollocWave.sad.xml new file mode 100644 index 000000000..03154e070 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/BasicSharedCollocWave/BasicSharedCollocWave.sad.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + BasicShared_1 + + + + + + + + + BasicShared_2 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/BasicSharedWave/BasicSharedWave.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/BasicSharedWave/BasicSharedWave.sad.xml new file mode 100644 index 000000000..6a9c59d3e --- /dev/null +++ 
b/redhawk/src/testing/sdr/dom/waveforms/BasicSharedWave/BasicSharedWave.sad.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + BasicShared_1 + + + + + + + + + BasicShared_2 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.cpp b/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.cpp new file mode 100644 index 000000000..ea2f04a4d --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.cpp @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + MessageSenderCpp_1 + + + + + + + + + + + + + output + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.java b/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.java new file mode 100644 index 000000000..e58cdec56 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.java @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + MessageSenderJava_1 + + + + + + + + + + + + + output + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.py b/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.py new file mode 100644 index 000000000..e5afb56ea --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/MessageEventTest/MessageEventTest.sad.xml.py @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + MessageSenderPy_1 + + + + + + + + + + + + + output + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/MessageTestCpp/MessageTestCpp.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/MessageTestCpp/MessageTestCpp.sad.xml index f2b3fcbc9..955217aee 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/MessageTestCpp/MessageTestCpp.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/MessageTestCpp/MessageTestCpp.sad.xml @@ -32,7 +32,7 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- + MessageReceiverCpp_1 @@ -41,7 +41,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + MessageSenderCpp_1 @@ -50,32 +50,32 @@ with this program. If not, see http://www.gnu.org/licenses/. - + - + message_out - + message_in - + - + message_in - + - + message_out - + diff --git a/redhawk/src/testing/sdr/dom/waveforms/MessageTestJava/MessageTestJava.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/MessageTestJava/MessageTestJava.sad.xml index f6340ca37..056e50f30 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/MessageTestJava/MessageTestJava.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/MessageTestJava/MessageTestJava.sad.xml @@ -32,7 +32,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + EventReceiveJava_1 @@ -41,7 +41,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + EventSenderJava_1 @@ -50,32 +50,32 @@ with this program. If not, see http://www.gnu.org/licenses/. - + - + message_out - + message_in - + - + message_in - + - + message_out - + diff --git a/redhawk/src/testing/sdr/dom/waveforms/MessageTestPy/MessageTestPy.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/MessageTestPy/MessageTestPy.sad.xml index 71f2c44e8..b8069f349 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/MessageTestPy/MessageTestPy.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/MessageTestPy/MessageTestPy.sad.xml @@ -32,7 +32,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + MessageReceiverPy_1 @@ -41,7 +41,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + MessageSenderPy_1 @@ -50,32 +50,32 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- + - + message_out - + message_in - + - + message_in - + - + message_out - + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCpp.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCpp.sad.xml new file mode 100644 index 000000000..72070bb08 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCpp.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + NicAllocCpp_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppCollocated.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppCollocated.sad.xml new file mode 100644 index 000000000..d0094b29b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppCollocated.sad.xml @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + NicAllocCppIdentifier_1 + + + + + + + + + NicAllocCpp_1 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppIdentifier.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppIdentifier.sad.xml new file mode 100644 index 000000000..08b19352d --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppIdentifier.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + NicAllocCppIdentifier_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppShared.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppShared.sad.xml new file mode 100644 index 000000000..3e0ca4162 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppShared.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + NicAllocCppShared_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppSharedCollocated.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppSharedCollocated.sad.xml new file mode 100644 index 
000000000..b36111c06 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppSharedCollocated.sad.xml @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + NicAllocCppSharedIdentifier_1 + + + + + + + + + NicAllocCppShared_1 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppSharedIdentifier.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppSharedIdentifier.sad.xml new file mode 100644 index 000000000..fdcb67e00 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveCppSharedIdentifier.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + NicAllocCppSharedIdentifier_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJava.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJava.sad.xml new file mode 100644 index 000000000..da0cec070 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJava.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + NicAllocJava_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJavaCollocated.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJavaCollocated.sad.xml new file mode 100644 index 000000000..f7424cec2 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJavaCollocated.sad.xml @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + NicAllocJava_1 + + + + + + + + + NicAllocJavaIdentifier_1 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJavaIdentifier.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJavaIdentifier.sad.xml new file mode 100644 index 000000000..5ffba3b15 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWaveJavaIdentifier.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + 
NicAllocJavaIdentifier_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePy.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePy.sad.xml new file mode 100644 index 000000000..718af013c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePy.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + NicAllocPy_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePyCollocated.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePyCollocated.sad.xml new file mode 100644 index 000000000..4c9793a94 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePyCollocated.sad.xml @@ -0,0 +1,56 @@ + + + + + + + + + + + + + + + + + + NicAllocPy_1 + + + + + + + + + NicAllocPyIdentifier_1 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePyIdentifier.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePyIdentifier.sad.xml new file mode 100644 index 000000000..c7635c2ed --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/NicAllocWave/NicAllocWavePyIdentifier.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + NicAllocPyIdentifier_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/PortConnectExternalPort/PortConnectExternalPort.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/PortConnectExternalPort/PortConnectExternalPort.sad.xml index fa966f5ba..0260235cd 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/PortConnectExternalPort/PortConnectExternalPort.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/PortConnectExternalPort/PortConnectExternalPort.sad.xml @@ -29,7 +29,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + PortTest1 @@ -38,7 +38,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + PortTest2 @@ -47,7 +47,7 @@ with this program. 
If not, see http://www.gnu.org/licenses/. - + PortTest2-1 @@ -56,20 +56,20 @@ with this program. If not, see http://www.gnu.org/licenses/. - + resource_out - + resource_out - + resource_in - + diff --git a/redhawk/src/testing/sdr/dom/waveforms/TestComplexPropsSADOverrides/TestComplexPropsSADOverrides.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/TestComplexPropsSADOverrides/TestComplexPropsSADOverrides.sad.xml index cef445147..7b174e001 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/TestComplexPropsSADOverrides/TestComplexPropsSADOverrides.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/TestComplexPropsSADOverrides/TestComplexPropsSADOverrides.sad.xml @@ -42,6 +42,49 @@ with this program. If not, see http://www.gnu.org/licenses/. + + + + -5+j5 + 9-j8 + -13-j24 + 21-j22 + 31 + j431 + -j567 + -3567 + -5.25+j5.25 + 9.25-j8.25 + + + + + + 1+j2 + 10+j20 + + + + + + + + 45+j55 + 69+j78 + + + + + + + + 145+j155 + 169+j178 + 279+j998 + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/TestCppProps/TestCppProps.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/TestCppProps/TestCppProps.sad.xml index 875ad83d9..9dc5b0501 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/TestCppProps/TestCppProps.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/TestCppProps/TestCppProps.sad.xml @@ -38,6 +38,7 @@ with this program. If not, see http://www.gnu.org/licenses/. + diff --git a/redhawk/src/testing/sdr/dom/waveforms/TestJavaProps/TestJavaProps.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/TestJavaProps/TestJavaProps.sad.xml index 4fb2a9dee..2c6d63fb1 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/TestJavaProps/TestJavaProps.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/TestJavaProps/TestJavaProps.sad.xml @@ -46,6 +46,7 @@ with this program. If not, see http://www.gnu.org/licenses/. 
+ diff --git a/redhawk/src/testing/sdr/dom/waveforms/TestPythonProps/TestPythonProps.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/TestPythonProps/TestPythonProps.sad.xml index 3c8e85a4f..ea97336b3 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/TestPythonProps/TestPythonProps.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/TestPythonProps/TestPythonProps.sad.xml @@ -31,6 +31,9 @@ with this program. If not, see http://www.gnu.org/licenses/. TestPythonProps1 + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/busycomp_w/busycomp_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/busycomp_w/busycomp_w.sad.xml new file mode 100644 index 000000000..d8eb080cc --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/busycomp_w/busycomp_w.sad.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + busycomp_1 + + + + + + + + + busycomp_2 + + + + + + + + + msg_through_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/cpp_comp_aware_w/cpp_comp_aware_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/cpp_comp_aware_w/cpp_comp_aware_w.sad.xml new file mode 100644 index 000000000..5e88c0b6c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/cpp_comp_aware_w/cpp_comp_aware_w.sad.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + cpp_comp_1 + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_green/device_requires_green.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_green/device_requires_green.sad.xml new file mode 100644 index 000000000..f12b1cb1a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_green/device_requires_green.sad.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + SimpleComponent_Green + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_multicolor/device_requires_multicolor.sad.xml 
b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_multicolor/device_requires_multicolor.sad.xml new file mode 100644 index 000000000..5854961ff --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_multicolor/device_requires_multicolor.sad.xml @@ -0,0 +1,58 @@ + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + + + + + SimpleComponent_Green + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_multicolor_colloc/device_requires_multicolor_colloc.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_multicolor_colloc/device_requires_multicolor_colloc.sad.xml new file mode 100644 index 000000000..7f22f8d33 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_multicolor_colloc/device_requires_multicolor_colloc.sad.xml @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + + + + + SimpleComponent_Green + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_nocolor/device_requires_nocolor.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_nocolor/device_requires_nocolor.sad.xml new file mode 100644 index 000000000..44abca3a9 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_nocolor/device_requires_nocolor.sad.xml @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + SimpleComponent_Green + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red/device_requires_red.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red/device_requires_red.sad.xml new file mode 100644 index 000000000..99a848495 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red/device_requires_red.sad.xml @@ -0,0 +1,46 
@@ + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc/device_requires_red_colloc.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc/device_requires_red_colloc.sad.xml new file mode 100644 index 000000000..23081c3fd --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc/device_requires_red_colloc.sad.xml @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + + + + + C2_Red + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc/device_requires_red_colloc_and_nocolloc.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc/device_requires_red_colloc_and_nocolloc.sad.xml new file mode 100644 index 000000000..03f25bc81 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc/device_requires_red_colloc_and_nocolloc.sad.xml @@ -0,0 +1,69 @@ + + + + + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + + + + + C2_Red_2 + + + + + + + + + + C2_Red + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc_green/device_requires_red_colloc_and_nocolloc_green.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc_green/device_requires_red_colloc_and_nocolloc_green.sad.xml new file mode 100644 index 000000000..37711eaef --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc_green/device_requires_red_colloc_and_nocolloc_green.sad.xml @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + + + + + + C2_Green + + + + + + + + + + + + + diff --git 
a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc_red/device_requires_red_colloc_and_nocolloc_red.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc_red/device_requires_red_colloc_and_nocolloc_red.sad.xml new file mode 100644 index 000000000..2c56edfef --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_colloc_and_nocolloc_red/device_requires_red_colloc_and_nocolloc_red.sad.xml @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + + + + + + C2_Red + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_so/device_requires_red_so.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_so/device_requires_red_so.sad.xml new file mode 100644 index 000000000..06d2ffa8a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/device_requires/device_requires_red_so/device_requires_red_so.sad.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + SF1_Red + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.emptyvalue.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.emptyvalue.sad.xml new file mode 100644 index 000000000..29070f1a0 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.emptyvalue.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.eparam.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.eparam.sad.xml new file mode 100644 index 000000000..34a6752dc --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.eparam.sad.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + + + + diff --git 
a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.novalue.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.novalue.sad.xml new file mode 100644 index 000000000..5e3958f3f --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.novalue.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.sad.xml new file mode 100644 index 000000000..29070f1a0 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.cpp.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.emptyvalue.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.emptyvalue.sad.xml new file mode 100644 index 000000000..9a70f819d --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.emptyvalue.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.eparam.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.eparam.sad.xml new file mode 100644 index 000000000..e4e66e3e5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.eparam.sad.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.novalue.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.novalue.sad.xml new file mode 100644 index 000000000..517e737cc --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.novalue.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git 
a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.sad.xml new file mode 100644 index 000000000..9a70f819d --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.java.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.emptyvalue.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.emptyvalue.sad.xml new file mode 100644 index 000000000..796062444 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.emptyvalue.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.eparam.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.eparam.sad.xml new file mode 100644 index 000000000..ce17a28fa --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.eparam.sad.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.novalue.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.novalue.sad.xml new file mode 100644 index 000000000..dfee19295 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.novalue.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.sad.xml new file mode 100644 index 000000000..796062444 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.py.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.sad.xml 
b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.sad.xml new file mode 100644 index 000000000..2db1fd1d7 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/empty_wf/empty_wf.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + EmptyString_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/java_softpkg_deps/java_softpkg_deps.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/java_softpkg_deps/java_softpkg_deps.sad.xml new file mode 100644 index 000000000..35a5d6ab3 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/java_softpkg_deps/java_softpkg_deps.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + javaSoftpkgJarDep_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/logger_config/logger_config.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/logger_config/logger_config.sad.xml new file mode 100644 index 000000000..ce401888a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/logger_config/logger_config.sad.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + + logger_1 + file:///@@@CWD@@@/high_thresh.cfg + + + + + + + + + logger_2 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/logger_java_w/logger_java_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/logger_java_w/logger_java_w.sad.xml new file mode 100644 index 000000000..13817e7ae --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/logger_java_w/logger_java_w.sad.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + logger_java_1 + + + + + + + + + logger_java_2 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/logger_overload_w/logger_overload_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/logger_overload_w/logger_overload_w.sad.xml new file mode 100644 index 000000000..989e5b44b --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/logger_overload_w/logger_overload_w.sad.xml @@ -0,0 +1,35 @@ + + + + + + + + + + + + + logger_1 + + + + + + + + + logger_2 + + + + + + + + + + + + + diff --git 
a/redhawk/src/testing/sdr/dom/waveforms/logger_py_w/logger_py_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/logger_py_w/logger_py_w.sad.xml new file mode 100644 index 000000000..23b9c3fac --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/logger_py_w/logger_py_w.sad.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + logger_py_1 + + + + + + + + + logger_py_2 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/logger_w/logger_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/logger_w/logger_w.sad.xml new file mode 100644 index 000000000..97a630372 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/logger_w/logger_w.sad.xml @@ -0,0 +1,32 @@ + + + + + + + + + + + + + logger_1 + + + + + + + + + logger_2 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp.sad.xml index 8541c3264..ddbc20f36 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp.sad.xml @@ -1,21 +1,21 @@ diff --git a/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_logcfg.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_logcfg.sad.xml new file mode 100644 index 000000000..03d51517c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_logcfg.sad.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + C2_1 + sca://logcfg/log.logcfg.c2 + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_logcfg_debug.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_logcfg_debug.sad.xml new file mode 100644 index 000000000..03d51517c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_logcfg_debug.sad.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + C2_1 + 
sca://logcfg/log.logcfg.c2 + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_props.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_props.sad.xml index 2b02540dc..ec0e9b06c 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_props.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_props.sad.xml @@ -1,21 +1,21 @@ diff --git a/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_props_debug.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_props_debug.sad.xml index 9a117254c..d50100c7f 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_props_debug.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/loggingconfig/TestCpp/TestCpp_props_debug.sad.xml @@ -1,21 +1,21 @@ diff --git a/redhawk/src/testing/sdr/dom/waveforms/long_stop/long_stop.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/long_stop/long_stop.sad.xml new file mode 100644 index 000000000..8b881cd8c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/long_stop/long_stop.sad.xml @@ -0,0 +1,55 @@ + + + + + + + + + + + + + + hanging_stop_1 + + + + + + + + + hanging_stop_2 + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/long_stop_builtin_def/long_stop_builtin_def.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/long_stop_builtin_def/long_stop_builtin_def.sad.xml new file mode 100644 index 000000000..16f4be7a6 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/long_stop_builtin_def/long_stop_builtin_def.sad.xml @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + hanging_stop_1 + + + + + + + + + hanging_stop_2 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/math_py_w/math_py_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/math_py_w/math_py_w.sad.xml new file mode 100644 index 000000000..35d8fa63a --- /dev/null +++ 
b/redhawk/src/testing/sdr/dom/waveforms/math_py_w/math_py_w.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + math_py_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/newtime_w/newtime_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/newtime_w/newtime_w.sad.xml new file mode 100644 index 000000000..55ed55dc9 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/newtime_w/newtime_w.sad.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + newtime_1 + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/noop_waveform/noop_waveform.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/noop_waveform/noop_waveform.sad.xml new file mode 100644 index 000000000..5ae4e39e4 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/noop_waveform/noop_waveform.sad.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + NOOP_SIMP_1 + + + + + + + + + NOOP_SIMP_2 + + + + + + + + + NOOP_SIMP_3 + + + + + + + + + NOOP_SIMP_4 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/props_bad_numbers_w/props_bad_numbers_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/props_bad_numbers_w/props_bad_numbers_w.sad.xml new file mode 100644 index 000000000..cbe734022 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/props_bad_numbers_w/props_bad_numbers_w.sad.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + props_bad_numbers_1 + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/python_softpkg_deps/python_softpkg_deps.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/python_softpkg_deps/python_softpkg_deps.sad.xml new file mode 100644 index 000000000..7a199ea9c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/python_softpkg_deps/python_softpkg_deps.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + pythonSoftpkgDep_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/slow_stop_cpp_w/slow_stop_cpp_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/slow_stop_cpp_w/slow_stop_cpp_w.sad.xml 
new file mode 100644 index 000000000..ad5797ab5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/slow_stop_cpp_w/slow_stop_cpp_w.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + slow_stop_cpp_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/slow_stop_w/slow_stop_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/slow_stop_w/slow_stop_w.sad.xml new file mode 100644 index 000000000..173c31cc5 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/slow_stop_w/slow_stop_w.sad.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + slow_stop_1 + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/svc_connect/svc_connect.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/svc_connect/svc_connect.sad.xml new file mode 100644 index 000000000..266522a82 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/svc_connect/svc_connect.sad.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + ServiceComponent_1 + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/svc_error_cpp_w/svc_error_cpp_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/svc_error_cpp_w/svc_error_cpp_w.sad.xml deleted file mode 100644 index 558b4d2b3..000000000 --- a/redhawk/src/testing/sdr/dom/waveforms/svc_error_cpp_w/svc_error_cpp_w.sad.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - - - - - - - - - - svc_fn_error_cpp_1 - - - - - - - - - - diff --git a/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_cpp_w/svc_fn_error_cpp_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_cpp_w/svc_fn_error_cpp_w.sad.xml new file mode 100644 index 000000000..cb9b0e777 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_cpp_w/svc_fn_error_cpp_w.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + svc_fn_error_cpp_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_java_w/svc_fn_error_java_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_java_w/svc_fn_error_java_w.sad.xml new file 
mode 100644 index 000000000..c8b93ac1e --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_java_w/svc_fn_error_java_w.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + svc_fn_error_java_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_py_w/svc_fn_error_py_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_py_w/svc_fn_error_py_w.sad.xml new file mode 100644 index 000000000..2566bb69a --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/svc_fn_error_py_w/svc_fn_error_py_w.sad.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + svc_fn_error_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/svc_one_error_w/svc_one_error_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/svc_one_error_w/svc_one_error_w.sad.xml index eb1d80d01..3d24138c5 100644 --- a/redhawk/src/testing/sdr/dom/waveforms/svc_one_error_w/svc_one_error_w.sad.xml +++ b/redhawk/src/testing/sdr/dom/waveforms/svc_one_error_w/svc_one_error_w.sad.xml @@ -22,7 +22,7 @@ with this program. If not, see http://www.gnu.org/licenses/. - + @@ -40,10 +40,10 @@ with this program. If not, see http://www.gnu.org/licenses/. - - svc_error_cpp_1 + + svc_fn_error_cpp_1 - + @@ -58,6 +58,6 @@ with this program. If not, see http://www.gnu.org/licenses/. 
- + diff --git a/redhawk/src/testing/sdr/dom/waveforms/test_wav_res/test_wav_res.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/test_wav_res/test_wav_res.sad.xml new file mode 100644 index 000000000..97033a910 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/test_wav_res/test_wav_res.sad.xml @@ -0,0 +1,58 @@ + + + + + + + + + + + + + + + + + + + NOOP_ADDITIONAL_DEP_1 + + + + + + + + + NOOP_ROLL_2 + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/ticket_cf_1067_wf/ticket_cf_1067_wf.spec b/redhawk/src/testing/sdr/dom/waveforms/ticket_cf_1067_wf/ticket_cf_1067_wf.spec deleted file mode 100644 index 5c015e908..000000000 --- a/redhawk/src/testing/sdr/dom/waveforms/ticket_cf_1067_wf/ticket_cf_1067_wf.spec +++ /dev/null @@ -1,56 +0,0 @@ -# -# This file is protected by Copyright. Please refer to the COPYRIGHT file -# distributed with this source distribution. -# -# This file is part of REDHAWK core. -# -# REDHAWK core is free software: you can redistribute it and/or modify it under -# the terms of the GNU Lesser General Public License as published by the Free -# Software Foundation, either version 3 of the License, or (at your option) any -# later version. -# -# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more -# details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see http://www.gnu.org/licenses/. -# -# RPM package for ticket_cf_1067_wf -# This file is regularly AUTO-GENERATED by the IDE. DO NOT MODIFY. 
- -# By default, the RPM will install to the standard REDHAWK SDR root location (/var/redhawk/sdr) -# You can override this at install time using --prefix /new/sdr/root when invoking rpm (preferred method, if you must) -%{!?_sdrroot: %define _sdrroot /var/redhawk/sdr} -%define _prefix %{_sdrroot} -Prefix: %{_prefix} - -Name: ticket_cf_1067_wf -Summary: Waveform ticket_cf_1067_wf -Version: 1.0.0 -Release: 1 -License: None -Group: REDHAWK/Waveforms -Source: %{name}-%{version}.tar.gz -# Require the controller whose SPD is referenced -Requires: ticket_cf_1067_comp -# Require each referenced component -Requires: ticket_cf_1067_comp -BuildArch: noarch -BuildRoot: %{_tmppath}/%{name}-%{version} - -%description - -%prep -%setup - -%install -%__rm -rf $RPM_BUILD_ROOT -%__mkdir_p "$RPM_BUILD_ROOT%{_prefix}/dom/waveforms/%{name}" -%__install -m 644 ticket_cf_1067_wf.sad.xml $RPM_BUILD_ROOT%{_prefix}/dom/waveforms/%{name}/ticket_cf_1067_wf.sad.xml - -%files -%defattr(-,redhawk,redhawk) -%dir %{_prefix}/dom/waveforms/%{name} -%{_prefix}/dom/waveforms/%{name}/ticket_cf_1067_wf.sad.xml diff --git a/redhawk/src/testing/sdr/dom/waveforms/time_cp_now_w/time_cp_now_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/time_cp_now_w/time_cp_now_w.sad.xml new file mode 100644 index 000000000..ec67247e1 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/time_cp_now_w/time_cp_now_w.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + time_cp_now_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/time_ja_now_w/time_ja_now_w.sad.xml b/redhawk/src/testing/sdr/dom/waveforms/time_ja_now_w/time_ja_now_w.sad.xml new file mode 100644 index 000000000..e1992c8a3 --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/time_ja_now_w/time_ja_now_w.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + time_ja_now_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/dom/waveforms/time_py_now_w/time_py_now_w.sad.xml 
b/redhawk/src/testing/sdr/dom/waveforms/time_py_now_w/time_py_now_w.sad.xml new file mode 100644 index 000000000..38757263c --- /dev/null +++ b/redhawk/src/testing/sdr/dom/waveforms/time_py_now_w/time_py_now_w.sad.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + time_py_now_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/logcfg/log4j.event_appender b/redhawk/src/testing/sdr/logcfg/log4j.event_appender new file mode 100644 index 000000000..a82308165 --- /dev/null +++ b/redhawk/src/testing/sdr/logcfg/log4j.event_appender @@ -0,0 +1,21 @@ + + + +#log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n + +log4j.rootLogger=ALL,stdout, pse + +# Direct log messages to stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +# Direct log messages to stdout +log4j.appender.pse=org.ossie.logging.RH_LogEventAppender +log4j.appender.pse.name_context=TEST_APPENDER +log4j.appender.pse.event_channel=TEST_EVT_CH1 +log4j.appender.pse.producer_id=PRODUCER1 +log4j.appender.pse.producer_name=THE BIG CHEESE +log4j.appender.pse.layout=org.apache.log4j.PatternLayout +log4j.appender.pse.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n diff --git a/redhawk/src/testing/sdr/logcfg/log4j.stdout b/redhawk/src/testing/sdr/logcfg/log4j.stdout new file mode 100644 index 000000000..a632ff660 --- /dev/null +++ b/redhawk/src/testing/sdr/logcfg/log4j.stdout @@ -0,0 +1,9 @@ + + +log4j.rootLogger=INFO,stdout + +# Direct log messages to stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n diff --git 
a/redhawk/src/testing/sdr/logcfg/log4j.sync_appender b/redhawk/src/testing/sdr/logcfg/log4j.sync_appender new file mode 100644 index 000000000..e90764d7d --- /dev/null +++ b/redhawk/src/testing/sdr/logcfg/log4j.sync_appender @@ -0,0 +1,24 @@ + +#log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n + +log4j.rootLogger=ALL,stdout, mp + +# Direct log messages to stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +# Direct log messages to stdout +log4j.appender.mp=org.ossie.logging.RH_SyncRollingAppender +log4j.appender.mp.Retries=2 +log4j.appender.mp.WaitOnLock=30 +log4j.appender.mp.MaxFileSize=5MB +log4j.appender.mp.MaxBackupIndex=10 +log4j.appender.mp.File=MP_RedhawkTest +log4j.appender.mp.Cleanup=False +log4j.appender.mp.layout=org.apache.log4j.PatternLayout +log4j.appender.mp.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + + + diff --git a/redhawk/src/testing/sdr/logcfg/log4j.sync_appender2 b/redhawk/src/testing/sdr/logcfg/log4j.sync_appender2 new file mode 100644 index 000000000..d98c5acc5 --- /dev/null +++ b/redhawk/src/testing/sdr/logcfg/log4j.sync_appender2 @@ -0,0 +1,26 @@ + + + +#log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n + +log4j.rootLogger=ALL,stdout, mp + +# Direct log messages to stdout +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n + +# Direct log messages to stdout +log4j.appender.mp=org.ossie.logging.RH_SyncRollingAppender +log4j.appender.mp.Retries=2 +log4j.appender.mp.WaitOnLock=30 +log4j.appender.mp.MaxFileSize=5MB 
+log4j.appender.mp.MaxBackupIndex=10 +log4j.appender.mp.File=MP_RedhawkTest +log4j.appender.mp.Cleanup=False +log4j.appender.mp.layout=org.apache.log4j.PatternLayout +log4j.appender.mp.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} COMP2 %-5p %c:%L - %m%n + + + diff --git a/redhawk/src/testing/sdr/parser_tests/affinity.dcd.xml b/redhawk/src/testing/sdr/parser_tests/affinity.dcd.xml new file mode 100644 index 000000000..cf8ca5ba1 --- /dev/null +++ b/redhawk/src/testing/sdr/parser_tests/affinity.dcd.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + GPP_1 + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/parser_tests/affinity.sad.xml b/redhawk/src/testing/sdr/parser_tests/affinity.sad.xml new file mode 100644 index 000000000..f165eb339 --- /dev/null +++ b/redhawk/src/testing/sdr/parser_tests/affinity.sad.xml @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + SimpleComponent_Red + + + + + path/to/my/log/file + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/parser_tests/deployerrequires.dcd.xml b/redhawk/src/testing/sdr/parser_tests/deployerrequires.dcd.xml new file mode 100644 index 000000000..140d8bf02 --- /dev/null +++ b/redhawk/src/testing/sdr/parser_tests/deployerrequires.dcd.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + test_GPP_green::GPP_1 + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/parser_tests/devicerequires.sad.xml b/redhawk/src/testing/sdr/parser_tests/devicerequires.sad.xml new file mode 100644 index 000000000..9d39f68ea --- /dev/null +++ b/redhawk/src/testing/sdr/parser_tests/devicerequires.sad.xml @@ -0,0 +1,58 @@ + + + + + + + + + + + + + + SimpleComponent_Red + + + + + + + + + + + + + SimpleComponent_Green + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/parser_tests/loggingconfig.dcd.xml b/redhawk/src/testing/sdr/parser_tests/loggingconfig.dcd.xml new file mode 100644 index 000000000..b6edd994a --- /dev/null +++ 
b/redhawk/src/testing/sdr/parser_tests/loggingconfig.dcd.xml @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + test_GPP_green::GPP_1 + path/to/my/log/file + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/parser_tests/loggingconfig.sad.xml b/redhawk/src/testing/sdr/parser_tests/loggingconfig.sad.xml new file mode 100644 index 000000000..645a06bdc --- /dev/null +++ b/redhawk/src/testing/sdr/parser_tests/loggingconfig.sad.xml @@ -0,0 +1,60 @@ + + + + + + + + + + + + + + SimpleComponent_Red + path/to/my/log/file + + + + + + + + + + + + + SimpleComponent_Green + path/to/my/log/file2 + + + + + + + + + + + + + diff --git a/redhawk/src/testing/sdr/parser_tests/usesdeviceref.sad.xml b/redhawk/src/testing/sdr/parser_tests/usesdeviceref.sad.xml new file mode 100644 index 000000000..955ce1a42 --- /dev/null +++ b/redhawk/src/testing/sdr/parser_tests/usesdeviceref.sad.xml @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + P1_1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/redhawk/src/testing/setup.py b/redhawk/src/testing/setup.py index bcd05571d..3d47a1795 100644 --- a/redhawk/src/testing/setup.py +++ b/redhawk/src/testing/setup.py @@ -47,7 +47,7 @@ '_unitTestHelpers.runtestHelpers', '_unitTestHelpers.buildconfig'] -version='2.0.9' +version='2.2.1' setup( name='unitTestHelper', diff --git a/redhawk/src/testing/tests/test_01_DeviceManager.py b/redhawk/src/testing/tests/test_01_DeviceManager.py index 7f5f59c3f..83e5d081f 100644 --- a/redhawk/src/testing/tests/test_01_DeviceManager.py +++ b/redhawk/src/testing/tests/test_01_DeviceManager.py @@ -22,6 +22,7 @@ from _unitTestHelpers import scatest from omniORB import URI, any, CORBA from ossie.cf import CF +from ossie import properties import commands import CosNaming import tempfile @@ -74,11 +75,19 @@ def tearDown(self): (status,output) = commands.getstatusoutput('rm -rf devmgr_runtest.props') def test_NoWriteCache(self): - (status,output) = commands.getstatusoutput('mkdir -p 
'+os.getcwd()+'/sdr/cache/.BasicTestDevice_node') - (status,output) = commands.getstatusoutput('chmod 000 '+os.getcwd()+'/sdr/cache/.BasicTestDevice_node') - devmgr_nb, devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml") + cachedir = os.getcwd()+'/sdr/cache/.BasicTestDevice_node' + (status,output) = commands.getstatusoutput('mkdir -p '+cachedir) + (status,output) = commands.getstatusoutput('chmod 000 '+cachedir) + self.assertFalse(os.access(cachedir, os.R_OK|os.W_OK|os.X_OK), 'Current user can still access directory') + try: + devmgr_nb, devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml") + except Exception, e: + pass + begin_time = time.time() + while time.time()-begin_time < 5 and devmgr_nb.returncode == None: + devmgr_nb.poll() + time.sleep(0.1) self.assertEquals(255, devmgr_nb.returncode) - self.assertEquals(devMgr, None) class DeviceManagerTest(scatest.CorbaTestCase): def setUp(self): @@ -232,6 +241,11 @@ def test_IgnoreDeviceDuplicate(self): self.assertEqual(len(self._domMgr._get_deviceManagers()), 1) self.assertEqual(len(devMgr._get_registeredDevices()), 1) + def test_DeviceBadOverload(self): + # This device manager fails to launch because of a bad overloaded value + devmgr_nb, devMgr = self.launchDeviceManager("/nodes/dev_props_bad_numbers_node/DeviceManager.dcd.xml") + self.assertEquals(devMgr, None) + def test_DeviceInitializeFail(self): # These two nodes use the same identifier, but have different names to distinguish them devmgr_nb, devMgr = self.launchDeviceManager("/nodes/bad_init_device_node/DeviceManager.dcd.xml") @@ -601,6 +615,17 @@ def test_ZeroLengthDev(self): # NOTE These assert check must be kept in-line with the DeviceManager.dcd.xml self.assertEqual(len(devMgr._get_registeredDevices()), 1) + dev = devMgr._get_registeredDevices()[0] + prop = dev.query([]) + for p in prop: + if p.id == 'mystruct': + val = p.value.value() + for v in val: + if v.id == 
'mystruct::mysimpleseq': + found = len(v.value.value()) == 0 + + self.assertTrue(found) + def test_ComponentPropertyOverride_cpp(self): devmgr_nb, devMgr = self.launchDeviceManager("/nodes/SimpleDevMgr/DeviceManager.dcd.xml") self.assertNotEqual(devMgr, None) @@ -622,7 +647,7 @@ def test_ComponentPropertyOverride_cpp(self): self._domMgr.installApplication(sadpath) appFact = self._domMgr._get_applicationFactories()[0] self._app = appFact.create(appFact._get_name(), [], []) - except Exception, e: + except: pass propId = "DCE:c709f95e-6b05-439a-9db9-dba95e70888e" @@ -1304,6 +1329,46 @@ def test_ValgrindOption(self): if ub_patch: os.unlink(altpath+'.bin') + def test_Service_Startup(self): + devmgr_nb, devMgr = self.launchDeviceManager("/nodes/test_service_startup_node/DeviceManager.dcd.xml") + from ossie.utils import redhawk + d=redhawk.attach(self._domainManager._get_name()) + + svc=None + svc_pre=None + for s in d.services: + if s._id == 'S2_1': + svc = s + if s._id == 'S2_pre_1': + svc_pre = s + self.assertNotEqual(svc, None) + self.assertNotEqual(svc_pre, None) + + # get p1 from service + res=svc.query([CF.DataType(id='p1', value=any.to_any(None))]) + p1=properties.props_to_dict(res) + self.assertEqual(p1['p1'],'p1 set by DCD file') + + # get p2 from service + res=svc.query([CF.DataType(id='p2', value=any.to_any(None))]) + p1=properties.props_to_dict(res) + self.assertEqual(p1['p2'],123456) + + + # get p1 from service + res=svc_pre.query([CF.DataType(id='p1', value=any.to_any(None))]) + p1=properties.props_to_dict(res) + self.assertEqual(p1['p1'],'pre p1 set by DCD file') + + # get p2 from service + res=svc_pre.query([CF.DataType(id='p2', value=any.to_any(None))]) + p1=properties.props_to_dict(res) + self.assertEqual(p1['p2'],654321) + + self.assertRaises(CF.PropertySet.InvalidConfiguration, svc.configure, [CF.DataType(id='fake', value=any.to_any(None))] ) + + self.assertRaises(CF.PropertySet.InvalidConfiguration, svc_pre.configure, [CF.DataType(id='fake', 
value=any.to_any(None))] ) + def _test_DeviceExecParamReadonly_(self, dcdfile, true_or_false ): devmgr_nb, devMgr = self.launchDeviceManager(dcdfile) self.assertNotEqual(devMgr, None) @@ -1600,6 +1665,62 @@ def test_ServiceExecParamReadonly_NoEmpty(self): self.assertNotEqual(p, None) + def test_DuplicateService(self): + # The first node provides the service + nb1, devMgr1 = self.launchDeviceManager('/nodes/test_BasicService_node/DeviceManager.dcd.xml') + + # Check that the same service is reported via the DeviceManager and the + # naming service + services = devMgr1._get_registeredServices() + self.assertEqual(1, len(services)) + service_name = URI.stringToName(scatest.getTestDomainName() + '/BasicService1') + service = self._root.resolve(service_name) + self.assertTrue(service._is_equivalent(services[0].serviceObject)) + + # Launching the second node, it should time out after about 5 seconds + # waiting for its service to show up in the registered services, which + # should never happen because the DomainManager should reject it + nb2, devMgr2 = self.launchDeviceManager('/nodes/DuplicateService_node/DeviceManager.dcd.xml') + + # The first node's service was registered first, so it should be the + # only one we find, fetching it again to be sure + services = devMgr1._get_registeredServices() + self.assertEqual(1, len(services)) + self.assertEqual('BasicService1', services[0].serviceName) + self.assertEqual(0, len(devMgr2._get_registeredServices())) + service = self._root.resolve(service_name) + self.assertTrue(service._is_equivalent(services[0].serviceObject)) + + # The duplicate service should have been terminated + self.assertEqual(0, len(getChildren(nb2.pid))) + + # Launch an executable device for the test application + nb3, devMgr3 = self.launchDeviceManager("/nodes/test_PortTestDevice2_node/DeviceManager.dcd.xml") + + # Use the service name connection test waveform to make sure that the + # DomainManager is connecting it correctly to the first service + 
sad_file = '/waveforms/PortConnectServiceName/PortConnectServiceName.sad.xml' + app = self._domMgr.createApplication(sad_file, 'good', [], []) + components = app._get_registeredComponents() + comp = components[0].componentObject + self.assertEqual(1, len(components)) + port = comp.getPort('propset_out') + connections = port._get_connections() + self.assertEqual(1, len(connections)) + service = connections[0].port + props = service.query([CF.DataType('PARAM1', any.to_any(None))]) + value = any.from_any(props[0].value) + self.assertEqual('ABCD', value) + app.releaseObject() + + # Terminate the BasicService node, which should prevent the waveform + # from launching (one more way to verify that it's the first node's + # service that's being used) + devMgr1.shutdown() + self.assertTrue(self.waitTermination(nb1), "Nodebooter did not die after shutdown") + + self.assertRaises(CF.ApplicationFactory.CreateApplicationError, self._domMgr.createApplication, sad_file, 'fail', [], []) + class DeviceManagerDepsTest(scatest.CorbaTestCase): def setUp(self): diff --git a/redhawk/src/testing/tests/test_01_DomainManager.py b/redhawk/src/testing/tests/test_01_DomainManager.py index 152f5d4c3..b94c80884 100644 --- a/redhawk/src/testing/tests/test_01_DomainManager.py +++ b/redhawk/src/testing/tests/test_01_DomainManager.py @@ -403,7 +403,7 @@ def test_installApplicationFailures_dep_missing_dep_dir(self): self.dep_dir=dep_dir shutil.move(dep_dir, dep_dir+".XXX") # should work refers to code's localfile - self._domMgr.installApplication(sadfile) + self.assertRaises(CF.DomainManager.ApplicationInstallationError, self._domMgr.installApplication, sadfile) ## reset file shutil.move( dep_dir+".XXX", dep_dir) @@ -456,7 +456,7 @@ def test_installApplicationFailures_recdep_bad_dir(self): dep_spd=scatest.getSdrPath()+"/dom/deps/cpp_dep2/cpp_dep2.spd.xml" shutil.copy( dep_spd+".TEST.bad.dir", dep_spd) # code's localfile not checked during install - self._domMgr.installApplication(sadfile) + 
self.assertRaises(CF.DomainManager.ApplicationInstallationError, self._domMgr.installApplication, sadfile) ## reset file shutil.copy(dep_spd+".ORIG", dep_spd) @@ -515,7 +515,7 @@ def test_createApplication_sad_missing_impl(self): self.assertEqual(len(self._domMgr._get_applications()), 1) def test_createApplicationFailures_sad_invalid(self): - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, "/waveforms/cpp_deps_wf.OUCH/cpp_deps_wf.sad.xml","",[],[]) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, "/waveforms/cpp_deps_wf.OUCH/cpp_deps_wf.sad.xml","",[],[]) def test_createApplicationFailures_sad_compref_refid(self): wf_name=self.wf_name @@ -524,7 +524,7 @@ def test_createApplicationFailures_sad_compref_refid(self): ## copy bad compref test 1 to sad filex wf_sad=scatest.getSdrPath()+"/dom"+sadfile shutil.copy( wf_sad+".TEST.bad.compref", wf_sad) - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset sad file shutil.copy( wf_sad+".ORIG", wf_sad) @@ -536,7 +536,7 @@ def test_createApplicationFailures_sad_compref_file(self): ## copy bad compref test 1 to sad file wf_sad=scatest.getSdrPath()+"/dom"+sadfile shutil.copy( wf_sad+".TEST.bad.comp.file", wf_sad) - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset sad file shutil.copy( wf_sad+".ORIG", wf_sad) @@ -552,7 +552,7 @@ def test_createApplicationFailures_comp_missing(self): os.remove(comp_spd) except: pass - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( 
(CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset spd file shutil.copy(comp_spd+".ORIG", comp_spd) @@ -568,7 +568,7 @@ def test_createApplicationFailures_comp_missing2(self): os.remove(comp_scd) except: pass - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.copy(comp_scd+".ORIG", comp_scd) @@ -583,7 +583,7 @@ def test_createApplicationFailures_comp_missing3(self): os.remove(comp_prf) except: pass - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.copy(comp_prf+".ORIG", comp_prf) @@ -594,7 +594,7 @@ def test_createApplicationFailures_comp_bad_prf_file(self): comp_spd=scatest.getSdrPath()+"/dom/components/cpp_with_deps/cpp_with_deps.spd.xml" shutil.copy(comp_spd+".TEST.bad.prf", comp_spd) - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.copy(comp_spd+".ORIG", comp_spd) @@ -605,7 +605,7 @@ def test_createApplicationFailures_comp_bad_scd_file(self): comp_spd=scatest.getSdrPath()+"/dom/components/cpp_with_deps/cpp_with_deps.spd.xml" shutil.copy(comp_spd+".TEST.bad.scd", comp_spd) - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.copy(comp_spd+".ORIG", comp_spd) @@ -616,28 +616,11 @@ def 
test_createApplicationFailures_comp_bad_dep_ref(self): comp_spd=scatest.getSdrPath()+"/dom/components/cpp_with_deps/cpp_with_deps.spd.xml" shutil.copy(comp_spd+".TEST.bad.dep", comp_spd) - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.copy(comp_spd+".ORIG", comp_spd) - def test_createApplicationFailures_deps_readissue_dir(self): - wf_name=self.wf_name - sadfile=self.sadfile - - dep_dir=scatest.getSdrPath()+"/dom/deps/cpp_dep2/cpp/lib" - os.chmod(dep_dir,0000) - comp_spd=scatest.getSdrPath()+"/dom/components/cpp_with_deps/cpp_with_deps.spd.xml" - shutil.copy(comp_spd+".TEST.readissue", comp_spd) - self.assertRaises( CF.ApplicationFactory.CreateApplicationError, self._domMgr.createApplication, sadfile, "", [], []) - try: - self._domMgr.createApplication(sadfile, "", [], []) - except Exception, e: - self.assertEqual(e.msg,'Failed to load file') - ## reset file - os.chmod(dep_dir,0775) - shutil.copy(comp_spd+".ORIG", comp_spd) - def test_createApplicationFailures_dep_missing(self): wf_name=self.wf_name sadfile=self.sadfile @@ -645,7 +628,7 @@ def test_createApplicationFailures_dep_missing(self): dep_dir=scatest.getSdrPath()+"/dom/deps/cpp_dep1" self.dep_dir=dep_dir shutil.move(dep_dir, dep_dir+".XXX") - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.move(dep_dir+".XXX", dep_dir) @@ -658,7 +641,7 @@ def test_createApplicationFailures_dep_missing_dep_dir(self): dep_dir=scatest.getSdrPath()+"/dom/deps/cpp_dep1/cpp" self.dep_dir = dep_dir shutil.move(dep_dir, dep_dir+".XXX") - self.assertRaises( CF.ApplicationFactory.CreateApplicationError, 
self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.move( dep_dir+".XXX", dep_dir) @@ -671,7 +654,7 @@ def test_createApplicationFailures_dep_bad_rec_dep_file(self): dep_spd=scatest.getSdrPath()+"/dom/deps/cpp_dep1/cpp_dep1.spd.xml" shutil.copy(dep_spd+".TEST.bad.rec.dep.file", dep_spd) - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.copy(dep_spd+".ORIG", dep_spd) @@ -684,7 +667,7 @@ def test_createApplicationFailures_recdep_missing_dir(self): dep_spd=scatest.getSdrPath()+"/dom/deps/cpp_dep2" self.dep_dir=dep_spd shutil.move(dep_spd, dep_spd+".XXX") - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.move(dep_spd+".XXX", dep_spd) @@ -697,7 +680,7 @@ def test_createApplicationFailures_recdep_missing_file(self): dep_spd=scatest.getSdrPath()+"/dom/deps/cpp_dep2/cpp_dep2.spd.xml" self.dep_dir=dep_spd shutil.move(dep_spd, dep_spd+".XXX") - self.assertRaises( CF.InvalidProfile, self._domMgr.createApplication, sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.move(dep_spd+".XXX", dep_spd) @@ -710,7 +693,7 @@ def test_createApplicationFailures_recdep_bad_dir(self): dep_spd=scatest.getSdrPath()+"/dom/deps/cpp_dep2/cpp_dep2.spd.xml" shutil.copy( dep_spd+".TEST.bad.dir", dep_spd) - self.assertRaises( CF.ApplicationFactory.CreateApplicationError, self._domMgr.createApplication, 
sadfile, "", [], []) + self.assertRaises( (CF.InvalidProfile,CF.DomainManager.ApplicationInstallationError), self._domMgr.createApplication, sadfile, "", [], []) ## reset file shutil.copy(dep_spd+".ORIG", dep_spd) diff --git a/redhawk/src/testing/tests/test_01_nodeBooter.py b/redhawk/src/testing/tests/test_01_nodeBooter.py index 9dac6796e..4126c7f8d 100644 --- a/redhawk/src/testing/tests/test_01_nodeBooter.py +++ b/redhawk/src/testing/tests/test_01_nodeBooter.py @@ -20,7 +20,7 @@ # import unittest, os, signal, time, sys -from subprocess import Popen +from subprocess import Popen, PIPE from _unitTestHelpers import scatest from xml.dom import minidom from omniORB import URI, any @@ -95,6 +95,48 @@ def test_nodeBooterDomainNameFromDMD(self): except CosNaming.NamingContext.NotFound: pass # This exception is expected + def test_UserOrGroupNoDaemon(self): + """Test that we read the correct domainname from the DMD file, the test domain + should have been created by the test runner""" + domainName = scatest.getTestDomainName() + # Test that we don't already have a bound domain + try: + domMgr = self._root.resolve(scatest.getDomainMgrURI()) + self.assertEqual(domMgr, None) + except CosNaming.NamingContext.NotFound: + pass # This exception is expected + + args = ["../../control/framework/nodeBooter","-D","-debug", "9","--nopersist",'--user','domuser','--group','somegroup' ] + nb = Popen( args, cwd=scatest.getSdrPath(), stderr=PIPE, stdout=PIPE) + self.assertNotEqual(nb.stderr.read().find('If either group or user are specified, daemon must be set'),-1) + + args = ["../../control/framework/nodeBooter","-D","-debug", "9","--nopersist",'--group','somegroup' ] + nb = Popen( args, cwd=scatest.getSdrPath(), stderr=PIPE, stdout=PIPE) + self.assertNotEqual(nb.stderr.read().find('If either group or user are specified, daemon must be set'),-1) + + args = ["../../control/framework/nodeBooter","-D","-debug", "9","--nopersist",'--user','domuser' ] + nb = Popen( args, 
cwd=scatest.getSdrPath(), stderr=PIPE, stdout=PIPE) + self.assertNotEqual(nb.stderr.read().find('If either group or user are specified, daemon must be set'),-1) + + def test_BadUserOrBadGroup(self): + """Test that we read the correct domainname from the DMD file, the test domain + should have been created by the test runner""" + domainName = scatest.getTestDomainName() + # Test that we don't already have a bound domain + try: + domMgr = self._root.resolve(scatest.getDomainMgrURI()) + self.assertEqual(domMgr, None) + except CosNaming.NamingContext.NotFound: + pass # This exception is expected + + args = ["../../control/framework/nodeBooter","-D","-debug", "9","--nopersist",'--user=domuser'] + nb = Popen( args, cwd=scatest.getSdrPath(), stderr=PIPE, stdout=PIPE) + self.assertNotEqual(nb.stderr.read().find('Separator must be a space'),-1) + + args = ["../../control/framework/nodeBooter","-D","-debug", "9","--nopersist",'--group=somegroup'] + nb = Popen( args, cwd=scatest.getSdrPath(), stderr=PIPE, stdout=PIPE) + self.assertNotEqual(nb.stderr.read().find('Separator must be a space'),-1) + def test_nodeBooterShutdown(self): """Test that nodeBooter correctly cleans up. 
In OSSIE 0.7.4, and possibly before, killing a nodebooter that was running diff --git a/redhawk/src/testing/tests/test_02_logging_config.py b/redhawk/src/testing/tests/test_02_logging_config.py index 7c3e39a6c..023b6f246 100644 --- a/redhawk/src/testing/tests/test_02_logging_config.py +++ b/redhawk/src/testing/tests/test_02_logging_config.py @@ -376,7 +376,7 @@ def getProcessLogArgs(self, pname ): pass - def test_Sad_NoSettings(self): + def test_DCD_LoggingConfig(self): # Double check the DomainManager LOGGING_CONFIG_URI prop = CF.DataType(id="LOGGING_CONFIG_URI", value=any.to_any(None)) @@ -386,7 +386,29 @@ def test_Sad_NoSettings(self): devLoggingConfigURI = result[0].value._v expectedDomLoggingConfigUri = "file://" + os.path.join(scatest.getSdrPath(), "dom/mgr/logging.properties") self.assertEqual(devLoggingConfigURI, expectedDomLoggingConfigUri) + + # get command line arguments for process + self.getProcessLogArgs('devices/BasicTestDevice') + + self.assertNotEqual(self.logcfg_uri,None) + self.assertEqual(self.logcfg_uri.split("?fs=")[0], "sca:///logcfg/log.basic.props") + execparamObj = self._orb.string_to_object(self.logcfg_uri.split("?fs=")[1]) + # Need to compare actual objects since the IOR strings could potentially differ for the same object + self.assert_(self._devMgr._get_fileSys()._is_equivalent(execparamObj)) + self.assertEqual(self.debug_level,"2") + + def test_Sad_NoSettings(self): + + # Double check the DomainManager LOGGING_CONFIG_URI + prop = CF.DataType(id="LOGGING_CONFIG_URI", value=any.to_any(None)) + result = self._domMgr.query([prop]) + self.assertEqual(len(result), 1) + self.assertEqual(result[0].id, "LOGGING_CONFIG_URI") + devLoggingConfigURI = result[0].value._v + expectedDomLoggingConfigUri = "file://" + os.path.join(scatest.getSdrPath(), "dom/mgr/logging.properties") + self.assertEqual(devLoggingConfigURI, expectedDomLoggingConfigUri) + app=self._rhDom.createApplication('/waveforms/loggingconfig/TestCpp/TestCpp.sad.xml') comp = 
app.comps[0] self.assertNotEqual(comp, None) @@ -396,7 +418,7 @@ def test_Sad_NoSettings(self): self.assertNotEqual(self.logcfg_uri,None) self.assertEqual(self.logcfg_uri.split("?fs=")[0], expectedDomLoggingConfigUri) - self.assertEqual(self.debug_level,"3") + self.assertEqual(self.debug_level,None) def test_Sad_CompProps_LogCfg(self): @@ -409,7 +431,7 @@ def test_Sad_CompProps_LogCfg(self): expectedDomLoggingConfigUri = "file://" + os.path.join(scatest.getSdrPath(), "dom/mgr/logging.properties") self.assertEqual(devLoggingConfigURI, expectedDomLoggingConfigUri) - + app=self._rhDom.createApplication('/waveforms/loggingconfig/TestCpp/TestCpp_props.sad.xml') comp = app.comps[0] self.assertNotEqual(comp, None) @@ -436,8 +458,7 @@ def test_Sad_CompProps_LogCfg_Debug(self): expectedDomLoggingConfigUri = "file://" + os.path.join(scatest.getSdrPath(), "dom/mgr/logging.properties") self.assertEqual(devLoggingConfigURI, expectedDomLoggingConfigUri) - - app=self._rhDom.createApplication('/waveforms/loggingconfig/TestCpp/TestCpp_props_debug.sad.xml',initConfiguration={"LOGGING_CONFIG_URI":"sca:///mgr/logging.properties"}) + app=self._rhDom.createApplication('/waveforms/loggingconfig/TestCpp/TestCpp_props_debug.sad.xml') comp = app.comps[0] self.assertNotEqual(comp, None) @@ -451,5 +472,74 @@ def test_Sad_CompProps_LogCfg_Debug(self): # Need to compare actual objects since the IOR strings could potentially differ for the same object self.assert_(self._domMgr._get_fileMgr()._is_equivalent(execparamObj)) self.assertEqual(self.debug_level,"5") + app.releaseObject() + + app_1=self._rhDom.createApplication('/waveforms/loggingconfig/TestCpp/TestCpp_props_debug.sad.xml',initConfiguration={"LOGGING_CONFIG_URI":"sca:///mgr/logging.properties"}) + comp = app.comps[0] + self.assertNotEqual(comp, None) + + # get command line arguments for proc + self.getProcessLogArgs('components/C2') + + expect_logcfg="sca:///mgr/logging.properties" + self.assertNotEqual(self.logcfg_uri,None) + 
self.assertEqual(self.logcfg_uri.split("?fs=")[0], expect_logcfg) + execparamObj = self._orb.string_to_object(self.logcfg_uri.split("?fs=")[1]) + # Need to compare actual objects since the IOR strings could potentially differ for the same object + self.assert_(self._domMgr._get_fileMgr()._is_equivalent(execparamObj)) + self.assertEqual(self.debug_level,"5") + + def test_Sad_LoggingConfig(self): + + # Double check the DomainManager LOGGING_CONFIG_URI + prop = CF.DataType(id="LOGGING_CONFIG_URI", value=any.to_any(None)) + result = self._domMgr.query([prop]) + self.assertEqual(len(result), 1) + self.assertEqual(result[0].id, "LOGGING_CONFIG_URI") + devLoggingConfigURI = result[0].value._v + expectedDomLoggingConfigUri = "file://" + os.path.join(scatest.getSdrPath(), "dom/mgr/logging.properties") + self.assertEqual(devLoggingConfigURI, expectedDomLoggingConfigUri) + + + app=self._rhDom.createApplication('/waveforms/loggingconfig/TestCpp/TestCpp_logcfg.sad.xml') + comp = app.comps[0] + self.assertNotEqual(comp, None) + + # get command line arguments for proc + self.getProcessLogArgs('components/C2') + + expect_logcfg="sca://logcfg/log.logcfg.c2" + self.assertNotEqual(self.logcfg_uri,None) + self.assertEqual(self.logcfg_uri.split("?fs=")[0], expect_logcfg) + execparamObj = self._orb.string_to_object(self.logcfg_uri.split("?fs=")[1]) + # Need to compare actual objects since the IOR strings could potentially differ for the same object + self.assert_(self._domMgr._get_fileMgr()._is_equivalent(execparamObj)) + + + def test_Sad_LoggingConfig_Debug(self): + + # Double check the DomainManager LOGGING_CONFIG_URI + prop = CF.DataType(id="LOGGING_CONFIG_URI", value=any.to_any(None)) + result = self._domMgr.query([prop]) + self.assertEqual(len(result), 1) + self.assertEqual(result[0].id, "LOGGING_CONFIG_URI") + devLoggingConfigURI = result[0].value._v + expectedDomLoggingConfigUri = "file://" + os.path.join(scatest.getSdrPath(), "dom/mgr/logging.properties") + 
self.assertEqual(devLoggingConfigURI, expectedDomLoggingConfigUri) + + app=self._rhDom.createApplication('/waveforms/loggingconfig/TestCpp/TestCpp_logcfg_debug.sad.xml') + comp = app.comps[0] + self.assertNotEqual(comp, None) + + # get command line arguments for proc + self.getProcessLogArgs('components/C2') + + expect_logcfg="sca://logcfg/log.logcfg.c2" + self.assertNotEqual(self.logcfg_uri,None) + self.assertEqual(self.logcfg_uri.split("?fs=")[0], expect_logcfg) + execparamObj = self._orb.string_to_object(self.logcfg_uri.split("?fs=")[1]) + # Need to compare actual objects since the IOR strings could potentially differ for the same object + self.assert_(self._domMgr._get_fileMgr()._is_equivalent(execparamObj)) + self.assertEqual(self.debug_level,"0") diff --git a/redhawk/src/testing/tests/test_03_DeviceLifeCycle.py b/redhawk/src/testing/tests/test_03_DeviceLifeCycle.py index 229d9b292..84134ab7c 100644 --- a/redhawk/src/testing/tests/test_03_DeviceLifeCycle.py +++ b/redhawk/src/testing/tests/test_03_DeviceLifeCycle.py @@ -21,7 +21,13 @@ import unittest, os from _unitTestHelpers import scatest from test_01_DeviceManager import killChildProcesses +from ossie.utils import redhawk +from ossie.cf import CF +from ossie.events import Subscriber +from ossie import properties +from omniORB import any as _any import time +import Queue class DeviceLifeCycleTest(scatest.CorbaTestCase): def setUp(self): @@ -46,7 +52,118 @@ def test_DeviceLifeCycle(self): def test_DeviceLifeCycleNoKill(self): pass - + +class DeviceStartorder(scatest.CorbaTestCase): + def setUp(self): + domBooter, domMgr = self.launchDomainManager() + + # Create an event channel to receive the device and service start/stop + # messages (the name must match the findbys in the DCD), and connect a + # subscriber + eventMgr = domMgr._get_eventChannelMgr() + channel = eventMgr.createForRegistrations('test_events') + self._started = Queue.Queue() + self._stopped = Queue.Queue() + self._subscriber = 
Subscriber(channel, dataArrivedCB=self._messageReceived) + + def tearDown(self): + self._subscriber.terminate() + + scatest.CorbaTestCase.tearDown(self) + + def _messageReceived(self, message): + payload = message.value(CF._tc_Properties) + if not payload: + return + for dt in payload: + if dt.id == 'state_change': + value = properties.props_to_dict(dt.value.value(CF._tc_Properties)) + identifier = value['state_change::identifier'] + if value['state_change::event'] == 'start': + self._started.put(identifier) + elif value['state_change::event'] == 'stop': + self._stopped.put(identifier) + + def _verifyStartOrder(self, startorder): + for identifier in startorder: + try: + received = self._started.get(timeout=1.0) + except Queue.Empty: + self.fail('Did not receive start message for ' + identifier) + self.assertEqual(received, identifier) + self.failUnless(self._started.empty(), msg='Too many start messages received') + + def _verifyStopOrder(self, startorder): + for identifier in startorder[::-1]: + try: + received = self._stopped.get(timeout=1.0) + except Queue.Empty: + self.fail('Did not receive stop message for ' + identifier) + self.assertEqual(received, identifier) + self.failUnless(self._stopped.empty(), msg='Too many stop messages received') + + def test_StartOrder(self): + """ + Test that device/service start order runs correctly + """ + devBooter, devMgr = self.launchDeviceManager("/nodes/startorder_events/DeviceManager.dcd.xml") + + startorder = ('startorder_events:start_event_device_3', + 'start_event_service_1', + 'startorder_events:start_event_device_1') + + # Verify that start calls were received in the right order + self._verifyStartOrder(startorder) + + # Check that the devices are started as expected + for dev in devMgr._get_registeredDevices(): + dev_id = dev._get_identifier() + expected = dev_id in startorder + self.assertEqual(expected, dev._get_started(), msg='Device '+dev_id+' started state is incorrect') + + # Also services, if supported + for 
svc in devMgr._get_registeredServices(): + expected = svc.serviceName in startorder + if svc.serviceObject._is_a(CF.Resource._NP_RepositoryId): + started = svc.serviceObject._narrow(CF.Resource)._get_started() + else: + started = False + self.assertEqual(expected, started, msg='Service '+svc.serviceName+' started state is incorrect') + + # Shut down the node so that it stops all of the devices and services + devMgr.shutdown() + + # Check that stop was called in the reverse order of start + self._verifyStopOrder(startorder) + + def test_StartOrderException(self): + """ + Test that the node continues along the device/service start order even + if one of them throws an exception + """ + devBooter, devMgr = self.launchDeviceManager("/nodes/startorder_fail/DeviceManager.dcd.xml") + + startorder = ('startorder_fail:start_event_device_1', + 'startorder_fail:fail_device_1', + 'startorder_fail:start_event_device_2') + + # Verify that start calls were received in the right order, and that + # the device manager continued after the failing device + self._verifyStartOrder(startorder) + + # Check that the devices are started as expected, with the device that + # was configured to fail not started + for dev in devMgr._get_registeredDevices(): + label = dev._get_label() + expected = not label.startswith('fail_') + self.assertEqual(expected, dev._get_started(), msg='Device '+label+' started state is incorrect') + + # Shut down the node so that it stops all of the devices and services + devMgr.shutdown() + + # Check that stop was called in the reverse order of start + self._verifyStopOrder(startorder) + class DeviceDeviceManagerTest(scatest.CorbaTestCase): def setUp(self): diff --git a/redhawk/src/testing/tests/test_04_ApplicationFactory.py b/redhawk/src/testing/tests/test_04_ApplicationFactory.py index 5ec6ef276..c5331bbb7 100644 --- a/redhawk/src/testing/tests/test_04_ApplicationFactory.py +++ b/redhawk/src/testing/tests/test_04_ApplicationFactory.py @@ -24,7 +24,7 @@ from 
xml.dom import minidom from omniORB import CORBA, URI, any import omniORB -from ossie.cf import CF, CF__POA +from ossie.cf import CF, CF__POA, ExtendedCF import commands from ossie.utils import redhawk from ossie import properties @@ -344,6 +344,20 @@ def test_NamespacedWaveformPython(self): def test_NamespacedWaveformJava(self): self._test_NamespacedWaveform('javawave') + def test_BadOverload(self): + nodebooter, domMgr = self.launchDomainManager() + self.assertNotEqual(domMgr, None) + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + self.assertNotEqual(devMgr, None) + + self.assertRaises(CF.ApplicationFactory.CreateApplicationError, domMgr.createApplication, "/waveforms/props_bad_numbers_w/props_bad_numbers_w.sad.xml", "props_app", [], [], ) + try: + app = domMgr.createApplication("/waveforms/props_bad_numbers_w/props_bad_numbers_w.sad.xml", "props_app", [], []) + except Exception, e: + pass + self.assertNotEqual(e.msg.find('Unable to perform conversion'), -1) + self.assertEqual(len(domMgr._get_applications()), 0) + def test_PartialStructConfiguration(self): nodebooter, domMgr = self.launchDomainManager() self.assertNotEqual(domMgr, None) @@ -361,6 +375,15 @@ def test_PartialStructConfiguration(self): app.releaseObject() self.assertEqual(len(domMgr._get_applications()), 0) + def test_DependencyActionDefaultKind(self): + nodebooter, domMgr = self.launchDomainManager() + self.assertNotEqual(domMgr, None) + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + self.assertNotEqual(devMgr, None) + + domMgr.createApplication("/waveforms/math_py_w/math_py_w.sad.xml", 'some_app', [], []) + self.assertEqual(len(domMgr._get_applications()), 1) + def test_PartialStructProp(self): nodebooter, domMgr = self.launchDomainManager() self.assertNotEqual(domMgr, None) @@ -413,6 +436,18 @@ def test_nocommandline_props(self): app.releaseObject() self.assertEqual(len(domMgr._get_applications()), 0) + 
def test_cppSlowStop(self): + nodebooter, domMgr = self.launchDomainManager() + self.assertNotEqual(domMgr, None) + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + self.assertNotEqual(devMgr, None) + + app = domMgr.createApplication("/waveforms/slow_stop_cpp_w/slow_stop_cpp_w.sad.xml", 'slow_stop_cpp_w', [], []) + app.start() + self.assertEquals(app._get_started(), True) + app.stop() + self.assertEquals(app._get_started(), False) + def test_NoTimeout(self): nodebooter, domMgr = self.launchDomainManager() self.assertNotEqual(domMgr, None) @@ -425,6 +460,13 @@ def test_NoTimeout(self): self.assertTrue(end_time-begin_time >= 9.5) app.releaseObject() + def test_InvalidFile(self): + nodebooter, domMgr = self.launchDomainManager() + self.assertNotEqual(domMgr, None) + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml") + self.assertNotEqual(devMgr, None) + self.assertRaises(CF.DomainManager.ApplicationInstallationError, domMgr.createApplication, "AppDoesNotExist", 'my_application', [], []) + def test_NonScaCompliant(self): nodebooter, domMgr = self.launchDomainManager() self.assertNotEqual(domMgr, None) @@ -1490,27 +1532,14 @@ def test_hostCollocationFail(self): self.fail('Application creation should fail') def test_NoAssemblyController(self): - # Test that creating an application that uses host collocation fails - # if all the components cannot be allocated on the same device. 
+ # Test that installing an application without an assembly controller fails nodebooter, domMgr = self.launchDomainManager() self.assertNotEqual(domMgr, None) nodebooter, devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml") self.assertNotEqual(devMgr, None) - domMgr.installApplication("/waveforms/CommandWrapperNoAssembly/CommandWrapper.sad.xml") - self.assertEqual(len(domMgr._get_applicationFactories()), 1) - - appFact = domMgr._get_applicationFactories()[0] - - try: - app = appFact.create(appFact._get_name(), [], []) - except: - pass - else: - app.stop() - app.releaseObject() - self.fail('Application creation should fail') - domMgr._get_identifier() + sadFile = "/waveforms/CommandWrapperNoAssembly/CommandWrapper.sad.xml" + self.assertRaises(CF.DomainManager.ApplicationInstallationError, domMgr.installApplication, sadFile) def test_hostCollocationDAS(self): # Test that creating an application that uses host collocation with @@ -1753,6 +1782,36 @@ def test_cacheCleanup(self): self.assertEqual(len(os.listdir(deviceCacheDir + "/components/CommandWrapper")), 0) self.assertEqual(len(os.listdir(deviceCacheDir + "/components/CommandWrapperWithDirectoryLoad")), 0) + def test_FailStartup2(self): + # Verify that if a component fails to start, any allocated resources are restored + nodebooter, domMgr = self.launchDomainManager() + self.assertNotEqual(domMgr, None) + + # Set the component name binding timeout to a more reasonable 2 seconds, since the + # failures are pretty quick. 
+ id = "COMPONENT_BINDING_TIMEOUT" + value = CORBA.Any(CORBA.TC_ulong, 2) + domMgr.configure([CF.DataType(id, value)]) + + self.assertEqual(len(domMgr._get_applicationFactories()), 0) + self.assertEqual(len(domMgr._get_applications()), 0) + + domMgr.installApplication("/waveforms/FailStartup/FailStartup.sad.xml") + self.assertEqual(len(domMgr._get_applicationFactories()), 1) + appFact = domMgr._get_applicationFactories()[0] + self.assertEqual(len(domMgr._get_applications()), 0) + + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml") + self.assertNotEqual(devMgr, None) + self.assertEqual(len(domMgr._get_deviceManagers()), 1) + self.assertEqual(len(devMgr._get_registeredDevices()), 1) + device = devMgr._get_registeredDevices()[0] + + failurePos="identifier" + self.assertRaises(CF.ApplicationFactory.CreateApplicationError, appFact.create, appFact._get_name(), [CF.DataType(id="FAIL_AT", value=any.to_any(failurePos))], []) + + + def test_FailStartup(self): # Verify that if a component fails to start, any allocated resources are restored nodebooter, domMgr = self.launchDomainManager() @@ -1788,7 +1847,7 @@ def test_FailStartup(self): self.assertEqual(nicCapacity.value._v, 100.0) self.assertEqual(fakeCapacity.value._v, 3) - for failurePos in ("constructor", "initializeProperties", "initialize"): + for failurePos in ("constructor", "identifier", "initializeProperties", "initialize"): self.assertRaises(CF.ApplicationFactory.CreateApplicationError, appFact.create, appFact._get_name(), [CF.DataType(id="FAIL_AT", value=any.to_any(failurePos))], []) self.assertEqual(len(domMgr._get_applications()), 0) # Verify that capacity was not allocated @@ -2697,6 +2756,45 @@ def test_NoDefaultExecParam(self): apps = domMgr._get_applications() self.assertEqual(len(apps),0) + def _test_ExecParamToConstruct(self, lang): + """ + Test execparams will pass to constructor + """ + nb, domMgr = self.launchDomainManager() + 
self.assertNotEqual(domMgr, None) + + nb, devMgr = self.launchDeviceManager('/nodes/test_GPP_node/DeviceManager.dcd.xml') + self.assertNotEqual(devMgr, None) + dom=redhawk.attach(domMgr._get_name()) + + app=dom.createApplication('/waveforms/empty_wf/empty_wf.'+lang+'.emptyvalue.sad.xml') + self.assertNotEqual(app,None) + self.assertEqual(app.comps[0].estr,"") + app.releaseObject() + + app=dom.createApplication('/waveforms/empty_wf/empty_wf.'+lang+'.eparam.sad.xml') + self.assertNotEqual(app,None) + self.assertEqual(app.comps[0].estr,"eparam1") + app.releaseObject() + + app=dom.createApplication('/waveforms/empty_wf/empty_wf.'+lang+'.novalue.sad.xml') + self.assertNotEqual(app,None) + self.assertEqual(app.comps[0].estr,"ctor-value") + app.releaseObject() + + + def test_ExecParamToConstruct_cpp(self): + self._test_ExecParamToConstruct('cpp') + + def test_ExecParamToConstruct_py(self): + self._test_ExecParamToConstruct('py') + + @scatest.requireJava + def test_ExecParamToConstruct_java(self): + self._test_ExecParamToConstruct('java') + + + def test_StopAllComponents(self): nb, domMgr = self.launchDomainManager(debug=self.debuglevel) self.assertNotEqual(domMgr, None) @@ -2734,6 +2832,138 @@ def test_StopAllComponents(self): comp.componentObject.configure(props) app.releaseObject() + def test_StopTimeout(self): + nb, domMgr = self.launchDomainManager(debug=self.debuglevel) + self.assertNotEqual(domMgr, None) + + nb, devMgr = self.launchDeviceManager('/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml', debug=self.debuglevel) + self.assertNotEqual(devMgr, None) + + # Create the application, which is pre-configured such that the last + # component in the start order (and hence, first in stop) will fail on + # stop(). 
+ app = domMgr.createApplication('/waveforms/long_stop/long_stop.sad.xml', 'long_stop', [], []) + app.start() + begin_stop = time.time() + try: + app.stop() + except: + pass + end_stop = time.time() + stop_time = end_stop-begin_stop + app.releaseObject() + end_release = time.time() + release_time = end_release-end_stop + self.assertTrue(16<=stop_time<=17) + self.assertTrue(20<=release_time<=22) + + def test_StopTimeoutBuiltinDefault(self): + nb, domMgr = self.launchDomainManager(debug=self.debuglevel) + self.assertNotEqual(domMgr, None) + + nb, devMgr = self.launchDeviceManager('/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml', debug=self.debuglevel) + self.assertNotEqual(devMgr, None) + + # Create the application, which is pre-configured such that the last + # component in the start order (and hence, first in stop) will fail on + # stop(). + app = domMgr.createApplication('/waveforms/long_stop_builtin_def/long_stop_builtin_def.sad.xml', 'long_stop', [], []) + app.start() + begin_stop = time.time() + try: + app.stop() + except: + pass + end_stop = time.time() + stop_time = end_stop-begin_stop + app.releaseObject() + end_release = time.time() + release_time = end_release-end_stop + self.assertTrue(6<=stop_time<=7) + self.assertTrue(20<=release_time<=22) + + def test_StopTimeoutLiveOverride(self): + nb, domMgr = self.launchDomainManager(debug=self.debuglevel) + self.assertNotEqual(domMgr, None) + + nb, devMgr = self.launchDeviceManager('/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml', debug=self.debuglevel) + self.assertNotEqual(devMgr, None) + + # Create the application, which is pre-configured such that the last + # component in the start order (and hence, first in stop) will fail on + # stop(). 
+ initconfig = [CF.DataType(id=ExtendedCF.WKP.STOP_TIMEOUT, value=any.to_any(4))] + app = domMgr.createApplication('/waveforms/long_stop/long_stop.sad.xml', 'long_stop', initconfig, []) + app.start() + begin_stop = time.time() + try: + app.stop() + except: + pass + end_stop = time.time() + stop_time = end_stop-begin_stop + app.releaseObject() + end_release = time.time() + release_time = end_release-end_stop + self.assertTrue(8<=stop_time<=9) + self.assertTrue(20<=release_time<=22) + + def test_StopTimeoutChange(self): + nb, domMgr = self.launchDomainManager(debug=self.debuglevel) + self.assertNotEqual(domMgr, None) + + nb, devMgr = self.launchDeviceManager('/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml', debug=self.debuglevel) + self.assertNotEqual(devMgr, None) + + # Create the application, which is pre-configured such that the last + # component in the start order (and hence, first in stop) will fail on + # stop(). + initconfig = [CF.DataType(id=ExtendedCF.WKP.STOP_TIMEOUT, value=any.to_any(4))] + app = domMgr.createApplication('/waveforms/long_stop/long_stop.sad.xml', 'long_stop', initconfig, []) + app.start() + curr_stoptimeout = app._get_stopTimeout() + self.assertEquals(curr_stoptimeout, 4.0) + app._set_stopTimeout(5) + begin_stop = time.time() + try: + app.stop() + except: + pass + end_stop = time.time() + stop_time = end_stop-begin_stop + app.releaseObject() + end_release = time.time() + release_time = end_release-end_stop + self.assertTrue(10<=stop_time<=11) + self.assertTrue(20<=release_time<=22) + + def test_StopTimeoutIndefinite(self): + nb, domMgr = self.launchDomainManager(debug=self.debuglevel) + self.assertNotEqual(domMgr, None) + + nb, devMgr = self.launchDeviceManager('/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml', debug=self.debuglevel) + self.assertNotEqual(devMgr, None) + + # Create the application, which is pre-configured such that the last + # component in the start order (and hence, first in stop) will fail on + # stop(). 
+ app = domMgr.createApplication('/waveforms/slow_stop_w/slow_stop_w.sad.xml', 'slow_stop', [], []) + app.start() + curr_stoptimeout = app._get_stopTimeout() + self.assertEquals(curr_stoptimeout, -1) + begin_stop = time.time() + try: + app.stop() + except: + pass + end_stop = time.time() + stop_time = end_stop-begin_stop + app.releaseObject() + end_release = time.time() + release_time = end_release-end_stop + self.assertTrue(5<=stop_time<=6) + self.assertTrue(8<=release_time<=9) + def _test_ValgrindCppDevice(self, appFact, valgrind): # Clear the device cache to prevent false positives deviceCacheDir = os.path.join(scatest.getSdrCache(), ".ExecutableDevice_node", "ExecutableDevice1") diff --git a/redhawk/src/testing/tests/test_04_ApplicationMetrics.py b/redhawk/src/testing/tests/test_04_ApplicationMetrics.py new file mode 100644 index 000000000..2e6d51e93 --- /dev/null +++ b/redhawk/src/testing/tests/test_04_ApplicationMetrics.py @@ -0,0 +1,205 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import unittest, time +from _unitTestHelpers import scatest +from ossie.cf import CF, ExtendedCF +from omniORB import any, CORBA +from ossie import properties +from ossie.utils import redhawk +import traceback + +class ApplicationMetrics(scatest.CorbaTestCase): + def setUp(self): + self._app = None + + def tearDown(self): + if self._app: + self._app.ref.stop() + self._app.ref.releaseObject() + + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + scatest.CorbaTestCase.tearDown(self) + + def test_AppAllMetrics(self): + domBooter, self._domMgr = self.launchDomainManager() + dommgr = redhawk.attach(self._domMgr._get_name()) + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + dommgr.devices[0].threshold_cycle_time = 100 + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._plainnode, None) + + time.sleep(1) + + self._app = dommgr.createApplication('/waveforms/busycomp_w/busycomp_w.sad.xml', 'busycomp_w', [], []) + self.assertNotEqual(self._app, None) + time.sleep(1) + self.assertRaises(CF.Application.InvalidMetric, self._app.metrics, ['utilization'], []) + + bc=self._app.metrics(['busycomp_1'], [])[0].value._v + value = -1 + for _val in bc: + if _val.id == 'cores': + value = _val.value._v + self.assertTrue(value<0.1) + bc=self._app.metrics(['busycomp_2'], [])[0].value._v + value = -1 + for _val in bc: + if _val.id == 'cores': + value = _val.value._v + self.assertTrue(value<0.1) + bc=self._app.metrics(['msg_through_1'], [])[0].value._v + value = -1 + for _val in bc: + if _val.id == 'cores': + value = _val.value._v + self.assertTrue(value<0.1) + + self._app.start() + for comp in self._app.comps: + if comp.name == 'msg_through': + comp.stop() + break + time.sleep(2) + + begin_time = time.time() + diff_ok = False + while time.time()-begin_time < 5 and not diff_ok: + bc=self._app.metrics(['busycomp_1'], [])[0].value._v + value = -1 + for 
_val in bc: + if _val.id == 'cores': + value = _val.value._v + if abs(value-2) < 0.05: + diff_ok = True + break + self.assertAlmostEquals(value, 2, places=1) + + begin_time = time.time() + diff_ok = False + while time.time()-begin_time < 5 and not diff_ok: + bc=self._app.metrics(['busycomp_2'], [])[0].value._v + value = -1 + for _val in bc: + if _val.id == 'cores': + value = _val.value._v + if abs(value-2) < 0.05: + diff_ok = True + break + self.assertAlmostEquals(value, 2, places=1) + + bc=self._app.metrics(['msg_through_1'], [])[0].value._v + value = -1 + for _val in bc: + if _val.id == 'cores': + value = _val.value._v + self.assertTrue(value<0.1) + + bc = self._app.metrics([], []) + util_total = {} + moving_total = {} + for _i in bc: + if _i.id == 'busycomp_2': + continue + if _i.id == 'application utilization': + for v in _i.value._v: + if v.id == 'valid': + continue + util_total[v.id] = v.value._v + continue + for v in _i.value._v: + if v.id == 'componenthost': + continue + if v.id == 'valid': + continue + if moving_total.has_key(v.id): + moving_total[v.id] += v.value._v + else: + moving_total[v.id] = v.value._v + for key in util_total: + self.assertAlmostEquals(util_total[key],moving_total[key], places=1) + + def test_AppIndividualMetrics(self): + domBooter, self._domMgr = self.launchDomainManager() + dommgr = redhawk.attach(self._domMgr._get_name()) + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + dommgr.devices[0].threshold_cycle_time = 100 + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._plainnode, None) + + time.sleep(1) + + self._app = dommgr.createApplication('/waveforms/busycomp_w/busycomp_w.sad.xml', 'busycomp_w', [], []) + self.assertNotEqual(self._app, None) + time.sleep(1) + self.assertRaises(CF.Application.InvalidMetric, self._app.metrics, ['utilization'], []) + + bc=self._app.metrics([], ['memory']) + self.assertEquals(len(bc), 4) + 
self.assertEquals(len(bc[0].value._v), 1) + self.assertEquals(bc[0].value._v[0].id, 'memory') + self.assertEquals(bc[3].value._v[0].id, 'memory') + bc=self._app.metrics(['busycomp_1'], ['memory']) + self.assertEquals(len(bc), 1) + self.assertEquals(bc[0].id, 'busycomp_1') + self.assertEquals(len(bc[0].value._v), 1) + self.assertEquals(bc[0].value._v[0].id, 'memory') + bc=self._app.metrics(['application utilization'], ['memory']) + self.assertEquals(len(bc), 1) + self.assertEquals(bc[0].id, 'application utilization') + self.assertEquals(len(bc[0].value._v), 1) + self.assertEquals(bc[0].value._v[0].id, 'memory') + bc=self._app.metrics(['msg_through_1','busycomp_1'], ['memory']) + self.assertEquals(len(bc), 2) + self.assertEquals(bc[0].id, 'msg_through_1') + self.assertEquals(len(bc[0].value._v), 1) + self.assertEquals(bc[0].value._v[0].id, 'memory') + self.assertEquals(bc[1].id, 'busycomp_1') + self.assertEquals(len(bc[1].value._v), 1) + self.assertEquals(bc[1].value._v[0].id, 'memory') + + bc=self._app.metrics([], ['cores', 'memory']) + self.assertEquals(len(bc), 4) + self.assertEquals(len(bc[0].value._v), 2) + self.assertEquals(bc[0].value._v[0].id, 'cores') + self.assertEquals(bc[0].value._v[1].id, 'memory') + self.assertEquals(bc[3].value._v[0].id, 'cores') + self.assertEquals(bc[3].value._v[1].id, 'memory') + bc=self._app.metrics(['busycomp_1'], ['cores', 'memory']) + self.assertEquals(len(bc), 1) + self.assertEquals(bc[0].id, 'busycomp_1') + self.assertEquals(len(bc[0].value._v), 2) + self.assertEquals(bc[0].value._v[0].id, 'cores') + self.assertEquals(bc[0].value._v[1].id, 'memory') + bc=self._app.metrics(['msg_through_1','busycomp_1'], ['cores', 'memory']) + self.assertEquals(len(bc), 2) + self.assertEquals(bc[0].id, 'msg_through_1') + self.assertEquals(len(bc[0].value._v), 2) + self.assertEquals(bc[0].value._v[0].id, 'cores') + self.assertEquals(bc[0].value._v[1].id, 'memory') + self.assertEquals(bc[1].id, 'busycomp_1') + 
self.assertEquals(len(bc[1].value._v), 2) + self.assertEquals(bc[1].value._v[0].id, 'cores') + self.assertEquals(bc[1].value._v[1].id, 'memory') + + self.assertRaises(CF.Application.InvalidMetric, self._app.metrics, [], ['cord', 'memory']) diff --git a/redhawk/src/testing/tests/test_04_ApplicationRegistrar.py b/redhawk/src/testing/tests/test_04_ApplicationRegistrar.py index 26949930e..81b162f42 100644 --- a/redhawk/src/testing/tests/test_04_ApplicationRegistrar.py +++ b/redhawk/src/testing/tests/test_04_ApplicationRegistrar.py @@ -350,6 +350,28 @@ def test_cppCompUnaware(self): app.releaseObject() self.assertEqual(len(domMgr._get_applications()), 0) + def test_cppCompWaveformOverride(self): + nodebooter, domMgr = self.launchDomainManager() + self.assertNotEqual(domMgr, None) + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + self.assertNotEqual(devMgr, None) + + domMgr.installApplication("/waveforms/cpp_comp_aware_w/cpp_comp_aware_w.sad.xml") + self.assertEqual(len(domMgr._get_applicationFactories()), 1) + appFact = domMgr._get_applicationFactories()[0] + + app = appFact.create(appFact._get_name(), [], []) + self.assertEqual(len(domMgr._get_applications()), 1) + app_id = app._get_registeredComponents()[0].componentObject.query([CF.DataType(id='app_id',value=any.to_any(None))])[0].value._v + number_components = app._get_registeredComponents()[0].componentObject.query([CF.DataType(id='number_components',value=any.to_any(None))])[0].value._v + dom_id = app._get_registeredComponents()[0].componentObject.query([CF.DataType(id='dom_id',value=any.to_any(None))])[0].value._v + self.assertEqual(app_id, app._get_identifier()) + self.assertEqual(number_components, 0) + self.assertEqual(dom_id, "") + self.assertEqual(app._get_aware(), False) + app.releaseObject() + self.assertEqual(len(domMgr._get_applications()), 0) + def test_pyCompUnaware(self): nodebooter, domMgr = self.launchDomainManager() self.assertNotEqual(domMgr, None) 
diff --git a/redhawk/src/testing/tests/test_04_ComponentHost.py b/redhawk/src/testing/tests/test_04_ComponentHost.py new file mode 100644 index 000000000..0866bd7eb --- /dev/null +++ b/redhawk/src/testing/tests/test_04_ComponentHost.py @@ -0,0 +1,67 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +from omniORB.any import to_any, from_any + +from _unitTestHelpers import scatest +from ossie.cf import CF + +class ComponentHostTest(scatest.CorbaTestCase): + def setUp(self): + self.launchDomainManager() + self.launchDeviceManager('/nodes/test_GPP_node/DeviceManager.dcd.xml') + + def test_BasicShared(self): + self.assertNotEqual(self._domainManager, None) + + app = self._domainManager.createApplication('/waveforms/BasicSharedWave/BasicSharedWave.sad.xml', + 'BasicSharedWave', [], []) + + comps = app._get_registeredComponents() + self.assertEqual(len(comps), 2) + + request = [CF.DataType('pid', to_any(None))] + props1 = comps[0].componentObject.query(request) + props2 = comps[1].componentObject.query(request) + + self.assertEqual(len(props1), 1) + self.assertEqual(props1[0].id, 'pid') + self.assertEqual(len(props2), 1) + self.assertEqual(props2[0].id, 'pid') + self.assertEqual(from_any(props1[0].value), from_any(props2[0].value)) + + def test_CollocBasicShared(self): + self.assertNotEqual(self._domainManager, None) + + app = self._domainManager.createApplication('/waveforms/BasicSharedCollocWave/BasicSharedCollocWave.sad.xml', + 'BasicSharedCollocWave', [], []) + + comps = app._get_registeredComponents() + self.assertEqual(len(comps), 2) + + request = [CF.DataType('pid', to_any(None))] + props1 = comps[0].componentObject.query(request) + props2 = comps[1].componentObject.query(request) + + self.assertEqual(len(props1), 1) + self.assertEqual(props1[0].id, 'pid') + self.assertEqual(len(props2), 1) + self.assertEqual(props2[0].id, 'pid') + self.assertEqual(from_any(props1[0].value), from_any(props2[0].value)) diff --git a/redhawk/src/testing/tests/test_04_Net.py b/redhawk/src/testing/tests/test_04_Net.py index 231c4120b..073615ff7 100644 --- a/redhawk/src/testing/tests/test_04_Net.py +++ b/redhawk/src/testing/tests/test_04_Net.py @@ -143,3 +143,67 @@ def test_javaCompNet(self): self.assertTrue(nic_name in nic_names) app.releaseObject() 
self.assertEqual(len(domMgr._get_applications()), 0) + +class NicAllocTest(scatest.CorbaTestCase): + def setUp(self): + nodebooter, self.domMgr = self.launchDomainManager() + self.assertNotEqual(self.domMgr, None) + nodebooter, self.devMgr = self.launchDeviceManager("/nodes/test_NicAllocation_node/DeviceManager.dcd.xml") + self.assertNotEqual(self.devMgr, None) + self.dev = self.devMgr._get_registeredDevices()[0] + props = self.dev.query([CF.DataType('nic_list', any.to_any(None))]) + self.nicNames = any.from_any(props[0].value) + + def _testNicAlloc(self, waveform, cmdline=True): + sad_file = '/waveforms/NicAllocWave/%s.sad.xml' % waveform + app = self.domMgr.createApplication(sad_file, waveform, [], []) + + if cmdline: + for comp in app._get_componentProcessIds(): + with open('/proc/%d/cmdline' % comp.processId, 'r') as fp: + args = fp.read().split('\0') + self.failUnless('NIC' in args, "%s did not get NIC command line argument" % comp.componentId) + + for comp in app._get_registeredComponents(): + props = comp.componentObject.query([CF.DataType(id='nic_name',value=any.to_any(None))]) + nic_name = any.from_any(props[0].value) + self.assertTrue(nic_name in self.nicNames, "%s has invalid nic '%s'" % (comp.identifier, nic_name)) + + def test_CppNicAlloc(self): + self._testNicAlloc('NicAllocWaveCpp') + + def test_CppNicAllocIdentifier(self): + self._testNicAlloc('NicAllocWaveCppIdentifier') + + def test_CppNicAllocCollocated(self): + self._testNicAlloc('NicAllocWaveCppCollocated') + + def test_CppSharedNicAlloc(self): + self._testNicAlloc('NicAllocWaveCppShared', cmdline=False) + + def test_CppSharedNicAllocIdentifier(self): + self._testNicAlloc('NicAllocWaveCppSharedIdentifier', cmdline=False) + + def test_CppSharedNicAllocCollocated(self): + self._testNicAlloc('NicAllocWaveCppSharedCollocated', cmdline=False) + + def test_PyNicAlloc(self): + self._testNicAlloc('NicAllocWavePy') + + def test_PyNicAllocIdentifier(self): + self._testNicAlloc('NicAllocWavePyIdentifier') 
+ + def test_PyNicAllocCollocated(self): + self._testNicAlloc('NicAllocWavePyCollocated') + + @scatest.requireJava + def test_JavaNicAlloc(self): + self._testNicAlloc('NicAllocWaveJava') + + @scatest.requireJava + def test_JavaNicAllocIdentifier(self): + self._testNicAlloc('NicAllocWaveJavaIdentifier') + + @scatest.requireJava + def test_JavaNicAllocCollocated(self): + self._testNicAlloc('NicAllocWaveJavaCollocated') diff --git a/redhawk/src/testing/tests/test_05_CollocationApplicationFactory.py b/redhawk/src/testing/tests/test_05_CollocationApplicationFactory.py index 3fcee65b3..f424f2c6e 100644 --- a/redhawk/src/testing/tests/test_05_CollocationApplicationFactory.py +++ b/redhawk/src/testing/tests/test_05_CollocationApplicationFactory.py @@ -139,6 +139,37 @@ def test_collocationMixed(self): self._domMgr.uninstallApplication(appFact._get_identifier()) + def test_res(self): + nodebooter, domMgr = self.launchDomainManager() + self.assertNotEqual(domMgr, None) + nodebooter, devMgr = self.launchDeviceManager("/nodes/test_collocation_nodes_1dev4cap/DeviceManager.dcd.xml") + + self.assertNotEqual(devMgr, None) + + domMgr.installApplication("/waveforms/test_wav_res/test_wav_res.sad.xml") + self.assertEqual(len(domMgr._get_applicationFactories()), 1) + + appFact = domMgr._get_applicationFactories()[0] + + app = None + try: + app = appFact.create(appFact._get_name(), [], []) + except: + pass + + ## need to check that all the comopnents were allocated to devices from test_collocation_node1_2dev2cap + + self.assertNotEqual(app, None ) + + if ( app ) : + app.stop() + app.releaseObject() + + device = devMgr._get_registeredDevices()[0] + self.assertEqual(self._getProperty(device, 'allocation_attempts'), 1) + + self._domMgr.uninstallApplication(appFact._get_identifier()) + def test_collocationCombinedAllocationCall(self): nodebooter, domMgr = self.launchDomainManager() self.assertNotEqual(domMgr, None) @@ -424,11 +455,12 @@ def test_collocationFailFast(self): allocations_pre = 
self._getProperty(device, 'allocation_attempts') try: app = appFact.create(appFact._get_name(), [], []) - app.releaseObject() - self.fail("Expected app creation to fail") - except CF.ApplicationFactory.CreateApplicationRequestError: + except CF.ApplicationFactory.CreateApplicationError: # This is expected pass + else: + app.releaseObject() + self.fail("Expected app creation to fail") # Clean up a little domMgr.uninstallApplication(appFact._get_identifier()) diff --git a/redhawk/src/testing/tests/test_05_DeviceWriteOnly.py b/redhawk/src/testing/tests/test_05_DeviceWriteOnly.py new file mode 100644 index 000000000..a496b0e2d --- /dev/null +++ b/redhawk/src/testing/tests/test_05_DeviceWriteOnly.py @@ -0,0 +1,80 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import unittest, os +from _unitTestHelpers import scatest +from ossie.cf import CF +from omniORB import any +from ossie.utils import sb + +@scatest.requireJava +class TestDeviceJavaWO(scatest.CorbaTestCase): + + def setUp(self): + self.dev=sb.launch('writeonly_java') + + def tearDown(self): + sb.release() + + def test_writeonly_java(self): + simple = CF.DataType(id='foo', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [simple]) + simple_seq = CF.DataType(id='foo_seq', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [simple_seq]) + struct = CF.DataType(id='foo_struct', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [struct]) + struct_seq = CF.DataType(id='foo_struct_seq', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [struct_seq]) + +class TestDeviceCppWO(scatest.CorbaTestCase): + + def setUp(self): + self.dev=sb.launch('writeonly_cpp') + + def tearDown(self): + sb.release() + + def test_writeonly_cpp(self): + simple = CF.DataType(id='foo', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [simple]) + simple_seq = CF.DataType(id='foo_seq', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [simple_seq]) + struct = CF.DataType(id='foo_struct', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [struct]) + struct_seq = CF.DataType(id='foo_struct_seq', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [struct_seq]) + +class TestDevicePythonWO(scatest.CorbaTestCase): + + def setUp(self): + self.dev=sb.launch('writeonly_py') + + def tearDown(self): + sb.release() + + def test_writeonly_py(self): + simple = CF.DataType(id='foo', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [simple]) + simple_seq = CF.DataType(id='foo_seq', value=any.to_any(None)) + 
self.assertRaises(CF.UnknownProperties, self.dev.query, [simple_seq]) + struct = CF.DataType(id='foo_struct', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [struct]) + struct_seq = CF.DataType(id='foo_struct_seq', value=any.to_any(None)) + self.assertRaises(CF.UnknownProperties, self.dev.query, [struct_seq]) diff --git a/redhawk/src/testing/tests/test_05_FileSystem.py b/redhawk/src/testing/tests/test_05_FileSystem.py index 9d13cdc90..736064061 100644 --- a/redhawk/src/testing/tests/test_05_FileSystem.py +++ b/redhawk/src/testing/tests/test_05_FileSystem.py @@ -630,6 +630,9 @@ def test_readException(self): # Issue #533 self.assertRaises(CF.DomainManager.ApplicationInstallationError, self._domMgr.installApplication, '/waveforms') def test_ExistsException(self): + self.assertNotEqual(self._domMgr, None) + fileMgr = self._domMgr._get_fileMgr() + # Makes sure that FileSystem::exists() throws correct exception and # doesn't kill domain for files in directories it cannot access dirname = '/noaccess' @@ -639,9 +642,8 @@ def test_ExistsException(self): else: os.chmod(testdir, 0644) - self.assertNotEqual(self._domMgr, None) - fileMgr = self._domMgr._get_fileMgr() try: + self.assertFalse(os.access(testdir, os.R_OK|os.X_OK), 'Current user can still access directory') self.assertRaises(CF.InvalidFileName, fileMgr.exists, os.path.join(dirname, 'testfile')) finally: os.rmdir(testdir) diff --git a/redhawk/src/testing/tests/test_05_LoadableDeviceTest.py b/redhawk/src/testing/tests/test_05_LoadableDeviceTest.py index 312b5f329..4913a52c6 100644 --- a/redhawk/src/testing/tests/test_05_LoadableDeviceTest.py +++ b/redhawk/src/testing/tests/test_05_LoadableDeviceTest.py @@ -18,7 +18,7 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# -import unittest, os +import unittest, os, commands from _unitTestHelpers import scatest from omniORB import CORBA, URI, any from ossie.cf import CF @@ -151,7 +151,7 @@ def test_cpp_RefreshSubdir(self): def test_cpp_DirectoryLoad(self): self.assertNotEqual(self._domMgr, None) - # Verify in the devices cache is emtpy + # Verify in the devices cache is empty componentDir = os.path.join(scatest.getSdrPath(), "dom", "components", "CommandWrapperWithDirectoryLoad") deviceCacheDir = os.path.join(scatest.getSdrCache(), ".ExecutableDevice_node", "ExecutableDevice1", "components", "CommandWrapperWithDirectoryLoad") if os.path.exists(deviceCacheDir): @@ -258,7 +258,7 @@ def test_py_DirectoryLoad(self): self.assertEqual(len(self._domMgr._get_applicationFactories()), 0) self.assertEqual(len(self._domMgr._get_applications()), 0) - # Verify in the devices cache is emtpy + # Verify in the devices cache is empty componentDir = os.path.join(scatest.getSdrPath(), "dom", "components", "CommandWrapperWithDirectoryLoad") deviceCacheDir = os.path.join(scatest.getSdrCache(), ".BasicTestDevice_node", "BasicTestDevice1", "components", "CommandWrapperWithDirectoryLoad") if os.path.exists(deviceCacheDir): @@ -583,6 +583,162 @@ def test_cpp_SharedLibraryLoad(self): except CORBA.COMM_FAILURE: self.fail('Device died loading shared library with short path') + @scatest.requireJava + def test_java_SharedLibraryLoad(self): + fp = open('sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.cache_working_dir','r') + dcd_contents = fp.read() + fp.close() + dcd_contents = dcd_contents.replace('@@DIR1@@', os.getcwd() + "/tmp_cache") + dcd_contents = dcd_contents.replace('@@DIR2@@', os.getcwd() + "/tmp_working") + fp = open('sdr/dev/nodes/test_GPP_node/tmp.dcd.xml','w') + fp.write(dcd_contents) + fp.close() + + self.assertNotEqual(self._domMgr, None) + + self.assertEqual(len(self._domMgr._get_applicationFactories()), 0) + self.assertEqual(len(self._domMgr._get_applications()), 0) + + # Verify in the 
devices cache is empty + componentDir = os.path.join(scatest.getSdrPath(), "dom", "components", "javaSoftpkgJarDep") + deviceCacheDir = os.path.join(os.getcwd(), "/tmp_cache") + if os.path.exists(deviceCacheDir): + os.system("rm -rf %s" % deviceCacheDir) + + # self._domMgr.installApplication("/waveforms/CommandWrapperWithDirectoryLoad/CommandWrapper.sad.xml") + self._domMgr.installApplication("/waveforms/java_softpkg_deps/java_softpkg_deps.sad.xml") + self.assertEqual(len(self._domMgr._get_applicationFactories()), 1) + self.assertEqual(len(self._domMgr._get_applications()), 0) + + # Ensure the expected device is available + devBooter, devMgr = self.launchDeviceManager("/nodes/test_GPP_node/tmp.dcd.xml") + self.assertNotEqual(devMgr, None) + self.assertEqual(len(devMgr._get_registeredDevices()), 1) + device = devMgr._get_registeredDevices()[0] + + appFact = self._domMgr._get_applicationFactories()[0] + + app = appFact.create(appFact._get_name(), [], []) # LOOK MA, NO DAS! + + self.assertEqual(len(self._domMgr._get_applicationFactories()), 1) + self.assertEqual(len(self._domMgr._get_applications()), 1) + + # Verify that properties have been changed from their defaults + self.assertEqual(len(app._get_componentNamingContexts()), 1) + compName = app._get_componentNamingContexts()[0] + comp = self._root.resolve(URI.stringToName(compName.elementId))._narrow(CF.Resource) + self.assertNotEqual(comp, None) + + cmd = comp.query([CF.DataType(id="hello", value=any.to_any(None))])[0] + self.assertEqual(cmd.value._v, "world") + + comp.start() + + cmd = comp.query([CF.DataType(id="hello", value=any.to_any(None))])[0] + self.assertEqual(cmd.value._v, "Java is so cool") + + app.stop() + app.releaseObject() + self.assertEqual(len(self._domMgr._get_applicationFactories()), 1) + self.assertEqual(len(self._domMgr._get_applications()), 0) + + self._domMgr.uninstallApplication(appFact._get_identifier()) + + (status,output) = commands.getstatusoutput('rm -rf tmp_cache') + (status,output) = 
commands.getstatusoutput('rm -rf tmp_working') + self._testFiles.append('sdr/dev/nodes/test_GPP_node/tmp.dcd.xml') + + def _test_py_SharedLibraryLoad(self, node): + self.assertNotEqual(self._domMgr, None) + + self.assertEqual(len(self._domMgr._get_applicationFactories()), 0) + self.assertEqual(len(self._domMgr._get_applications()), 0) + + # Verify in the devices cache is empty + componentDir = os.path.join(scatest.getSdrPath(), "dom", "components", "pythonSoftpkgDep") + deviceCacheDir = os.path.join(scatest.getSdrCache(), ".test_GPP_node", "GPP_1", "components", "pythonSoftpkgDep") + if os.path.exists(deviceCacheDir): + os.system("rm -rf %s" % deviceCacheDir) + + self._domMgr.installApplication("/waveforms/python_softpkg_deps/python_softpkg_deps.sad.xml") + self.assertEqual(len(self._domMgr._get_applicationFactories()), 1) + self.assertEqual(len(self._domMgr._get_applications()), 0) + + # Ensure the expected device is available + devBooter, devMgr = self.launchDeviceManager(node) + self.assertNotEqual(devMgr, None) + self.assertEqual(len(devMgr._get_registeredDevices()), 1) + device = devMgr._get_registeredDevices()[0] + + appFact = self._domMgr._get_applicationFactories()[0] + + app = appFact.create(appFact._get_name(), [], []) # LOOK MA, NO DAS! 
+ + self.assertEqual(len(self._domMgr._get_applicationFactories()), 1) + self.assertEqual(len(self._domMgr._get_applications()), 1) + + # Verify that properties have been changed from their defaults + self.assertEqual(len(app._get_componentNamingContexts()), 1) + compName = app._get_componentNamingContexts()[0] + comp = self._root.resolve(URI.stringToName(compName.elementId))._narrow(CF.Resource) + self.assertNotEqual(comp, None) + + cmd = comp.query([CF.DataType(id="prop1", value=any.to_any(None))])[0] + self.assertEqual(cmd.value._v, "hello") + cmd = comp.query([CF.DataType(id="prop2", value=any.to_any(None))])[0] + self.assertEqual(cmd.value._v, "world") + cmd = comp.query([CF.DataType(id="prop3", value=any.to_any(None))])[0] + self.assertEqual(cmd.value._v, "helloworld") + + comp.start() + + cmd = comp.query([CF.DataType(id="prop1", value=any.to_any(None))])[0] + self.assertEqual(cmd.value._v, "jones") + cmd = comp.query([CF.DataType(id="prop2", value=any.to_any(None))])[0] + self.assertEqual(cmd.value._v, "goober") + cmd = comp.query([CF.DataType(id="prop3", value=any.to_any(None))])[0] + self.assertEqual(cmd.value._v, "testing") + + app.stop() + app.releaseObject() + self.assertEqual(len(self._domMgr._get_applicationFactories()), 1) + self.assertEqual(len(self._domMgr._get_applications()), 0) + + self._domMgr.uninstallApplication(appFact._get_identifier()) + + def test_py_SharedLibraryLoad1(self): + self._test_py_SharedLibraryLoad("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + def test_py_SharedLibraryLoad2(self): + fp = open('sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.working_dir','r') + dcd_contents = fp.read() + fp.close() + dcd_contents = dcd_contents.replace('@@DIR1@@', os.getcwd() + "/tmp_working") + fp = open('sdr/dev/nodes/test_GPP_node/tmp.dcd.xml','w') + fp.write(dcd_contents) + fp.close() + + self._test_py_SharedLibraryLoad("/nodes/test_GPP_node/tmp.dcd.xml") + + (status,output) = commands.getstatusoutput('rm -rf tmp_working') + 
self._testFiles.append('sdr/dev/nodes/test_GPP_node/tmp.dcd.xml') + + def test_py_SharedLibraryLoad3(self): + fp = open('sdr/dev/nodes/test_GPP_node/DeviceManager.dcd.xml.cache_working_dir','r') + dcd_contents = fp.read() + fp.close() + dcd_contents = dcd_contents.replace('@@DIR1@@', os.getcwd() + "/tmp_cache") + dcd_contents = dcd_contents.replace('@@DIR2@@', os.getcwd() + "/tmp_working") + fp = open('sdr/dev/nodes/test_GPP_node/tmp.dcd.xml','w') + fp.write(dcd_contents) + fp.close() + + self._test_py_SharedLibraryLoad("/nodes/test_GPP_node/tmp.dcd.xml") + + (status,output) = commands.getstatusoutput('rm -rf tmp_cache') + (status,output) = commands.getstatusoutput('rm -rf tmp_working') + self._testFiles.append('sdr/dev/nodes/test_GPP_node/tmp.dcd.xml') + def _failIfOpen(self, fileSys, path): for info in fileSys.list(path): for prop in info.fileProperties: diff --git a/redhawk/src/testing/tests/test_07_PythonParsers.py b/redhawk/src/testing/tests/test_07_PythonParsers.py index 2d91c6e5e..500419e31 100644 --- a/redhawk/src/testing/tests/test_07_PythonParsers.py +++ b/redhawk/src/testing/tests/test_07_PythonParsers.py @@ -171,6 +171,236 @@ def test_SADParser(self): except OSError: pass + def test_SADParser_usesdeviceref(self): + sad = parsers.SADParser.parse("sdr/parser_tests/usesdeviceref.sad.xml") + self.assertEqual(sad.get_id(), "colloc_usesdev_1") + self.assertEqual(sad.get_name(), "colloc_usesdev") + self.assertEqual(len(sad.componentfiles.get_componentfile()), 1) + self.assertEqual(len(sad.partitioning.get_hostcollocation()), 1) + colloc=sad.partitioning.get_hostcollocation()[0] + self.assertEqual(len(colloc.get_componentplacement()),1) + comp_place =colloc.get_componentplacement()[0] + self.assertEqual(len(comp_place.get_componentinstantiation()),1) + comp_ci=comp_place.get_componentinstantiation()[0] + self.assertEqual(comp_ci.id_, "P1_1") + self.assertEqual(comp_ci.get_usagename(), "P1_1") + self.assertEqual(len(colloc.get_usesdeviceref()),1) + udev_ref 
=colloc.get_usesdeviceref()[0] + self.assertEqual(udev_ref.refid, "FrontEndTuner_1") + + # Verify that we can write the output and still be DTD valid + tmpfile = tempfile.mktemp() + try: + tmp = open(tmpfile, "w") + sad.export(tmp, 0) + tmp.close() + status = self._xmllint(tmpfile, "SAD") + self.assertEqual(status, 0, "Python parser did not emit DTD compliant XML") + finally: + try: + os.remove(tmpfile) + except OSError: + pass + + def test_SADParser_devicerequires(self): + sad = parsers.SADParser.parse("sdr/parser_tests/devicerequires.sad.xml") + self.assertEqual(sad.get_id(), "device_requires_multicolor") + self.assertEqual(sad.get_name(), "device_requires_multicolor") + self.assertEqual(len(sad.componentfiles.get_componentfile()), 1) + self.assertEqual(len(sad.partitioning.get_componentplacement()), 2) + comp_place=sad.partitioning.get_componentplacement()[0] + comp_in=comp_place.get_componentinstantiation()[0] + self.assertEqual(comp_place.componentfileref.refid, "SimpleComponent_SPD_1") + self.assertEqual(comp_in.id_, "SimpleComponent_Red") + self.assertEqual(comp_in.get_usagename(), "SimpleComponent_Red") + self.assertEqual(len(comp_in.devicerequires.get_requires()),2) + self.assertEqual(comp_in.devicerequires.get_requires()[0].id, "color") + self.assertEqual(comp_in.devicerequires.get_requires()[0].value, "RED") + self.assertEqual(comp_in.devicerequires.get_requires()[1].id, "rank") + self.assertEqual(comp_in.devicerequires.get_requires()[1].value, "15") + comp_place=sad.partitioning.get_componentplacement()[1] + comp_in=comp_place.get_componentinstantiation()[0] + self.assertEqual(comp_place.componentfileref.refid, "SimpleComponent_SPD_1") + self.assertEqual(comp_in.id_, "SimpleComponent_Green") + self.assertEqual(comp_in.get_usagename(), "SimpleComponent_Green") + self.assertEqual(len(comp_in.devicerequires.get_requires()),1) + self.assertEqual(comp_in.devicerequires.get_requires()[0].id, "color") + 
self.assertEqual(comp_in.devicerequires.get_requires()[0].value, "GREEN") + + + # Verify that we can write the output and still be DTD valid + tmpfile = tempfile.mktemp() + try: + tmp = open(tmpfile, "w") + sad.export(tmp, 0) + tmp.close() + status = self._xmllint(tmpfile, "SAD") + self.assertEqual(status, 0, "Python parser did not emit DTD compliant XML") + finally: + try: + os.remove(tmpfile) + except OSError: + pass + + def test_SADParser_loggingconfig(self): + sad = parsers.SADParser.parse("sdr/parser_tests/loggingconfig.sad.xml") + self.assertEqual(sad.get_id(), "device_requires_multicolor") + self.assertEqual(sad.get_name(), "device_requires_multicolor") + self.assertEqual(len(sad.componentfiles.get_componentfile()), 1) + self.assertEqual(len(sad.partitioning.get_componentplacement()), 2) + comp_place=sad.partitioning.get_componentplacement()[0] + comp_in=comp_place.get_componentinstantiation()[0] + self.assertEqual(comp_place.componentfileref.refid, "SimpleComponent_SPD_1") + self.assertEqual(comp_in.id_, "SimpleComponent_Red") + self.assertEqual(comp_in.get_usagename(), "SimpleComponent_Red") + self.assertEqual(comp_in.loggingconfig.level, "ERROR") + self.assertEqual(comp_in.loggingconfig.value, "path/to/my/log/file") + comp_place=sad.partitioning.get_componentplacement()[1] + comp_in=comp_place.get_componentinstantiation()[0] + self.assertEqual(comp_place.componentfileref.refid, "SimpleComponent_SPD_1") + self.assertEqual(comp_in.id_, "SimpleComponent_Green") + self.assertEqual(comp_in.get_usagename(), "SimpleComponent_Green") + self.assertEqual(comp_in.loggingconfig.value, "path/to/my/log/file2") + + # Verify that we can write the output and still be DTD valid + tmpfile = tempfile.mktemp() + try: + tmp = open(tmpfile, "w") + sad.export(tmp, 0) + tmp.close() + status = self._xmllint(tmpfile, "SAD") + self.assertEqual(status, 0, "Python parser did not emit DTD compliant XML") + finally: + try: + os.remove(tmpfile) + except OSError: + pass + + def 
test_SADParser_affinityconfig(self): + sad = parsers.SADParser.parse("sdr/parser_tests/affinity.sad.xml") + self.assertEqual(sad.get_id(), "device_requires_multicolor") + self.assertEqual(sad.get_name(), "device_requires_multicolor") + self.assertEqual(len(sad.componentfiles.get_componentfile()), 1) + self.assertEqual(len(sad.partitioning.get_componentplacement()), 1) + comp_place=sad.partitioning.get_componentplacement()[0] + comp_in=comp_place.get_componentinstantiation()[0] + self.assertEqual(comp_place.componentfileref.refid, "SimpleComponent_SPD_1") + self.assertEqual(comp_in.id_, "SimpleComponent_Red") + self.assertEqual(comp_in.get_usagename(), "SimpleComponent_Red") + self.assertEqual(comp_in.loggingconfig.level, "ERROR") + self.assertEqual(comp_in.loggingconfig.value, "path/to/my/log/file") + self.assertEqual(len(comp_in.affinity.get_simpleref()),2) + self.assertEqual(comp_in.affinity.get_simpleref()[0].refid, "affinity::exec_directive_class") + self.assertEqual(comp_in.affinity.get_simpleref()[0].value, "socket") + self.assertEqual(comp_in.affinity.get_simpleref()[1].refid, "affinity::exec_directive_value") + self.assertEqual(comp_in.affinity.get_simpleref()[1].value, "0") + + + # Verify that we can write the output and still be DTD valid + tmpfile = tempfile.mktemp() + try: + tmp = open(tmpfile, "w") + sad.export(tmp, 0) + tmp.close() + status = self._xmllint(tmpfile, "SAD") + self.assertEqual(status, 0, "Python parser did not emit DTD compliant XML") + finally: + try: + os.remove(tmpfile) + except OSError: + pass + + def test_DCDParser_deployerrequires(self): + dcd = parsers.DCDParser.parse("sdr/parser_tests/deployerrequires.dcd.xml") + self.assertEqual(dcd.get_id(), "test_GPP_green") + self.assertEqual(dcd.get_name(), "test_GPP_green") + self.assertEqual(len(dcd.componentfiles.get_componentfile()), 1) + self.assertEqual(len(dcd.partitioning.get_componentplacement()), 1) + gpp=dcd.partitioning.get_componentplacement()[0] + 
gpp_ci=gpp.get_componentinstantiation()[0] + self.assertEqual(gpp.get_componentfileref().get_refid(), "GPP1_file_1") + self.assertEqual(gpp_ci.get_id(), "test_GPP_green::GPP_1") + self.assertEqual(gpp_ci.get_usagename(), "test_GPP_green::GPP_1") + self.assertEqual(len(gpp_ci.deployerrequires.get_requires()), 1) + self.assertEqual(gpp_ci.deployerrequires.get_requires()[0].id, "color") + self.assertEqual(gpp_ci.deployerrequires.get_requires()[0].value, "GREEN") + + # Verify that we can write the output and still be DTD valid + tmpfile = tempfile.mktemp() + try: + tmp = open(tmpfile, "w") + dcd.export(tmp, 0) + tmp.close() + status = self._xmllint(tmpfile, "DCD") + self.assertEqual(status, 0, "Python parser did not emit DTD compliant XML") + finally: + try: + os.remove(tmpfile) + except OSError: + pass + + + def test_DCDParser_loggingconfig(self): + dcd = parsers.DCDParser.parse("sdr/parser_tests/loggingconfig.dcd.xml") + self.assertEqual(dcd.get_id(), "test_GPP_green") + self.assertEqual(dcd.get_name(), "test_GPP_green") + self.assertEqual(len(dcd.componentfiles.get_componentfile()), 1) + self.assertEqual(len(dcd.partitioning.get_componentplacement()), 1) + gpp=dcd.partitioning.get_componentplacement()[0] + gpp_ci=gpp.get_componentinstantiation()[0] + self.assertEqual(gpp.get_componentfileref().get_refid(), "GPP1_file_1") + self.assertEqual(gpp_ci.get_id(), "test_GPP_green::GPP_1") + self.assertEqual(gpp_ci.get_usagename(), "test_GPP_green::GPP_1") + self.assertEqual(gpp_ci.loggingconfig.level, "ERROR") + self.assertEqual(gpp_ci.loggingconfig.value, "path/to/my/log/file") + + # Verify that we can write the output and still be DTD valid + tmpfile = tempfile.mktemp() + try: + tmp = open(tmpfile, "w") + dcd.export(tmp, 0) + tmp.close() + status = self._xmllint(tmpfile, "DCD") + self.assertEqual(status, 0, "Python parser did not emit DTD compliant XML") + finally: + try: + os.remove(tmpfile) + except OSError: + pass + + def test_DCDParser_affinity(self): + dcd = 
parsers.DCDParser.parse("sdr/parser_tests/affinity.dcd.xml") + self.assertEqual(dcd.get_id(), "affinity_parse_1") + self.assertEqual(dcd.get_name(), "test_affinity_node_socket") + self.assertEqual(len(dcd.componentfiles.get_componentfile()), 1) + self.assertEqual(len(dcd.partitioning.get_componentplacement()), 1) + gpp=dcd.partitioning.get_componentplacement()[0] + gpp_ci=gpp.get_componentinstantiation()[0] + self.assertEqual(gpp.get_componentfileref().get_refid(), "GPP_File_1") + self.assertEqual(gpp_ci.get_id(), "test_affinity_node:GPP_1") + self.assertEqual(gpp_ci.get_usagename(), "GPP_1") + self.assertEqual(len(gpp_ci.affinity.get_simpleref()),2) + self.assertEqual(gpp_ci.affinity.get_simpleref()[0].refid, "affinity::exec_directive_class") + self.assertEqual(gpp_ci.affinity.get_simpleref()[0].value, "socket") + self.assertEqual(gpp_ci.affinity.get_simpleref()[1].refid, "affinity::exec_directive_value") + self.assertEqual(gpp_ci.affinity.get_simpleref()[1].value, "0") + + # Verify that we can write the output and still be DTD valid + tmpfile = tempfile.mktemp() + try: + tmp = open(tmpfile, "w") + dcd.export(tmp, 0) + tmp.close() + status = self._xmllint(tmpfile, "DCD") + self.assertEqual(status, 0, "Python parser did not emit DTD compliant XML") + finally: + try: + os.remove(tmpfile) + except OSError: + pass + + + def test_startorder(self): sad = parsers.SADParser.parse("sdr/dom/waveforms/CommandWrapperStartOrderTests/CommandWrapperWithOrder.sad.xml") self.assertEqual(sad.get_id(), "DCE:e6b136d5-6bf2-48ee-b2ec-52ceb9b80194") diff --git a/redhawk/src/testing/tests/test_08_DeployerRequires.py b/redhawk/src/testing/tests/test_08_DeployerRequires.py new file mode 100644 index 000000000..91d7ae0dc --- /dev/null +++ b/redhawk/src/testing/tests/test_08_DeployerRequires.py @@ -0,0 +1,516 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. 
+# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest +from _unitTestHelpers import scatest +from ossie.cf import CF, ExtendedCF +from omniORB import any, CORBA +from ossie import properties +import traceback + +class DeviceRequires(scatest.CorbaTestCase): + def setUp(self): + self._app = None + self._red_app = None + self._green_app = None + + def tearDown(self): + if self._app: + self._app.stop() + self._app.releaseObject() + + if self._red_app: + self._red_app.stop() + self._red_app.releaseObject() + + if self._green_app: + self._green_app.stop() + self._green_app.releaseObject() + + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. 
+ scatest.CorbaTestCase.tearDown(self) + + def _createApp(self, appName, exc=None, appdir='device_requires'): + self.assertNotEqual(self._domMgr, None) + app=None + + try: + sadpath = '/waveforms/'+ appdir + '/'+appName+'/'+appName+'.sad.xml' + self._domMgr.installApplication(sadpath) + except Exception, e: + return app + + appFact=None + for x in self._domMgr._get_applicationFactories(): + if x._get_name() == appName: + appFact = x + + self.assertNotEqual(appFact, None) + if exc: + self.assertRaises(exc, appFact.create, appFact._get_name(), [], []) + else: + try: + app = appFact.create(appFact._get_name(), [], []) + except: + self.fail("Did not create application ") + return app + + def test_nocolors_redprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_nocolor', CF.ApplicationFactory.CreateApplicationError) + + self.assertEqual(self._app, None) + + def test_nocolors_greenprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._greennode, None) + + self._app = self._createApp('device_requires_nocolor', CF.ApplicationFactory.CreateApplicationError) + + self.assertEqual(self._app, None) + + def test_nocolors_redmix(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = 
self._createApp('device_requires_nocolor') + + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_nocolors_greenmix(self): + domBooter, self._domMgr = self.launchDomainManager() + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._greennode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_nocolor') + + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_mixdevrequires_nocolor(self): + domBooter, self._domMgr = self.launchDomainManager() + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_multicolor', CF.ApplicationFactory.CreateApplicationError) + + self.assertEqual(self._app, None) + + + def test_reddevrequires_nocolors(self): + domBooter, self._domMgr = self.launchDomainManager() + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_red', CF.ApplicationFactory.CreateApplicationError) + + self.assertEqual(self._app, None) + + def test_greendevrequires_nocolors(self): + domBooter, self._domMgr = self.launchDomainManager() + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_green', CF.ApplicationFactory.CreateApplicationError) + + self.assertEqual(self._app, None) + + def 
test_reddevrequires_redprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_red') + + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_reddevrequires_redprovided_so(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_red_so') + + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_greendevrequires_colormismatch(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_green', CF.ApplicationFactory.CreateApplicationError) + + self.assertEqual(self._app, None) + + def test_mixdevrequires_redprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_multicolor', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + + def test_mixdevrequires_greenprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._greennode, None) + + self._app = 
self._createApp('device_requires_multicolor', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_reddevrequires_greenprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._greennode, None) + + self._app = self._createApp('device_requires_red', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_reddevrequires_redmulti(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_red') + + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_greendevrequires_redmulti(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_green', CF.ApplicationFactory.CreateApplicationError) + + self.assertEqual(self._app, None) + + + def test_mixdevrequires_redmulti(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") 
+ + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_multicolor', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_mixcolor_mixdevices(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._greennode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_multicolor') + + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + + def test_reddevrequires_mixcolors(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._greennode, None) + + self._app = self._createApp('device_requires_red') + + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_greendevrequires_mixcolors(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._greennode, None) + + self._app = 
self._createApp('device_requires_green') + + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + + def test_redgreendevrequires_mixcolors(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._greennode, None) + + self._green_app = self._createApp('device_requires_green') + self.assertNotEqual(self._green_app, None) + + self._red_app = self._createApp('device_requires_red') + self.assertNotEqual(self._red_app, None) + + + def test_mixcolors_multidevices(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._greennode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_multicolor') + self.assertNotEqual(self._app, None) + + self._green_app = self._createApp('device_requires_green') + self.assertNotEqual(self._green_app, None) + + self._red_app = self._createApp('device_requires_red') + self.assertNotEqual(self._red_app, None) + + def test_collocation_multidevicerequires_greenprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._greennode, None) + + self._app = 
self._createApp('device_requires_multicolor_colloc', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_collocation_multidevicerequires_nonprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_multicolor_colloc', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_collocation_multidevicerequires_mixedcolors(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._greennode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_multicolor_colloc', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_collocation_requiresred_redprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_red_colloc') + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_collocation_requiresred_nocolloc_nocolors_redprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = 
self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_red_colloc_and_nocolloc', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_collocation_requiresred_nocolloc_nocolors_mixprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_red_colloc_and_nocolloc') + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_collocation_requiresred_nocolloc_redrequired_redprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_red_colloc_and_nocolloc_red') + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_collocation_requiresred_nocolloc_redrequired_greenprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._greennode, None) + + self._app = self._createApp('device_requires_red_colloc_and_nocolloc_red', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_collocation_requiresred_nocolloc_redrequired_nocolors(self): + domBooter, self._domMgr = self.launchDomainManager() + 
plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_red_colloc_and_nocolloc_red', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_collocation_requiresred_nocolloc_greenrequired_redprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + + self._app = self._createApp('device_requires_red_colloc_and_nocolloc_green', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + + def test_collocation_requiresred_nocolloc_greenrequired_mixprovided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._greennode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_red_colloc_and_nocolloc_green') + self.assertNotEqual(self._app, None) + xx=self._app.query([]) + + def test_collocation_requiresred_nocolloc_greenrequired_mix1provided(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + 
self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._plainnode, None) + + self._app = self._createApp('device_requires_red_colloc_and_nocolloc_green', CF.ApplicationFactory.CreateApplicationError) + self.assertEqual(self._app, None) + +class DeployerRequiresTest(scatest.CorbaTestCase): + def setUp(self): + self._app = None + + def tearDown(self): + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + scatest.CorbaTestCase.tearDown(self) + + def test_deployerRedNode(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._devMgr, None) + + def test_deployerGreenNode(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._devMgr, None) + + def test_deployerMixNode(self): + domBooter, self._domMgr = self.launchDomainManager() + redBooter, self._rednode = self.launchDeviceManager("/nodes/test_GPP_red/DeviceManager.dcd.xml") + greenBooter, self._greennode = self.launchDeviceManager("/nodes/test_GPP_green/DeviceManager.dcd.xml") + plainBooter, self._plainnode = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self.assertNotEqual(self._domMgr, None) + self.assertNotEqual(self._rednode, None) + self.assertNotEqual(self._greennode, None) + self.assertNotEqual(self._plainnode, None) diff --git a/redhawk/src/testing/tests/test_08_EventChannelManager.py b/redhawk/src/testing/tests/test_08_EventChannelManager.py index 0d04bae2b..fd7453f92 100644 --- a/redhawk/src/testing/tests/test_08_EventChannelManager.py +++ b/redhawk/src/testing/tests/test_08_EventChannelManager.py @@ -23,6 +23,7 @@ from omniORB import URI, any from ossie.cf import 
CF from ossie.properties import * +from ossie.events import Manager import threading import time @@ -349,4 +350,65 @@ def test_ECM_redhawkUtilsAccess(self): self.assertRaises( CF.EventChannelManager.ChannelDoesNotExist, self.ecm.release, 'ecm_test') + def test_EM_selfUnregister(self): + + class domContainer: + def __init__(self, dommgr): + self.dom = dommgr + def getRef(self): + return self.dom + + class resourceContainer: + def __init__(self, dommgr): + self.dom = domContainer(dommgr) + def getDomainManager(self): + return self.dom + + self._devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml", self._domMgr) + self.assertNotEqual(self._devBooter, None) + ecm = self._domMgr._get_eventChannelMgr() + + # get list of channels (should be ODM/IDM + clist,citer = self.ecm.listChannels(2) + self.assertEqual(citer, None ) + self.assertEqual(len(clist), 2 ) + + clist,citer = self.ecm.listRegistrants('IDM_Channel', 3) + self.assertEqual(citer, None ) + self.assertEqual(len(clist), 2 ) + evt_reg = CF.EventChannelManager.EventRegistration( channel_name = 'IDM_Channel', reg_id = 'my_reg_id') + reg = ecm.registerResource(evt_reg) + res = resourceContainer(self._domMgr) + mgr = Manager.GetManager(res) + em_pub = mgr.Publisher('IDM_Channel', 'foo') + em_sub = mgr.Subscriber('IDM_Channel', 'hello') + + # push some data and make sure it arrives + em_pub.push(any.to_any(['hello'])) + time.sleep(1) + self.assertEquals(em_sub.getData()._v, ['hello']) + + # release the subscriber and push some data and make sure it does not arrive + em_sub.terminate() + em_pub.push(any.to_any(['hello'])) + time.sleep(1) + self.assertEquals(em_sub.getData(), None) + + # create a new subscriber and push some data and make sure the publisher is still ok + em_sub_2 = mgr.Subscriber('IDM_Channel', 'hello_2') + em_pub.push(any.to_any(['hello'])) + time.sleep(1) + self.assertEquals(em_sub_2.getData()._v, ['hello']) + + # release the publisher and push some 
data and make sure it does not arrive + em_pub_2 = mgr.Publisher('IDM_Channel', 'foo_2') + em_pub.terminate() + em_pub.push(any.to_any(['hello'])) + time.sleep(1) + self.assertEquals(em_sub_2.getData(), None) + + # create a new publisher and push some data and make sure the subcriber is still ok + em_pub_2.push(any.to_any(['hello'])) + time.sleep(1) + self.assertEquals(em_sub_2.getData()._v, ['hello']) diff --git a/redhawk/src/testing/tests/test_08_Messaging.py b/redhawk/src/testing/tests/test_08_Messaging.py index 3b71241c2..461af6e8b 100644 --- a/redhawk/src/testing/tests/test_08_Messaging.py +++ b/redhawk/src/testing/tests/test_08_Messaging.py @@ -25,10 +25,29 @@ import threading import time from ossie.utils import sb -from ossie.properties import simple_property, simpleseq_property +from ossie.properties import simple_property, simpleseq_property, props_to_dict +import CosEventComm,CosEventComm__POA +import CosEventChannelAdmin, CosEventChannelAdmin__POA +from ossie.cf import StandardEvent import os globalsdrRoot = os.environ['SDRROOT'] +class Consumer_i(CosEventComm__POA.PushConsumer): + def __init__(self, parent): + self.parent = parent + self.parent.messages_passed = 0 + self.valid_string = ''.join(["1234567890"]*20000) + + def push(self, data): + props=any.from_any(data)[0] + if props['value'][0]['id'] == "string_payload": + msg_val = props['value'][0]['value'] + if msg_val == self.valid_string: + self.parent.messages_passed += 1 + + def disconnect_push_consumer (self): + pass + class Foo(object): a = simple_property(id_="a",type_="string") b = simple_property(id_="b",type_="string") @@ -61,6 +80,34 @@ def isStruct(cls): def getMembers(self): return [("a",self.a),("b",self.b),("c",self.c)] +class MyMsg(object): + string_payload = simple_property(id_="string_payload",type_="string") + + def __init__(self, **kw): + """Construct an initialized instance of this struct definition""" + for classattr in type(self).__dict__.itervalues(): + if isinstance(classattr, 
(simple_property, simpleseq_property)): + classattr.initialize(self) + for k,v in kw.items(): + setattr(self,k,v) + + def __str__(self): + """Return a string representation of this structure""" + d = {} + d["string_payload"] = self.string_payload + return str(d) + + @classmethod + def getId(cls): + return "my_msg" + + @classmethod + def isStruct(cls): + return True + + def getMembers(self): + return [("string_payload",self.string_payload)] + class MessagMarshalErrorTest(scatest.CorbaTestCase): def setUp(self): sb.setDEBUG(False) @@ -71,6 +118,13 @@ def setUp(self): sb.domainless._sandbox.shutdown() sb.domainless._sandbox = None self.rcv_msg = None + self.valid_string = ''.join(["1234567890"]*20000) + self.messages_passed = 0 + + def filtering_callback(self, _id, _data): + msg_val = _data.string_payload + if msg_val == self.valid_string: + self.messages_passed += 1 def tearDown(self): sb.domainless._getSandbox().shutdown() @@ -79,11 +133,11 @@ def tearDown(self): @scatest.requireLog4cxx def test_MessageMarshalCpp(self): - snk=sb.MessageSink('') + snk=sb.MessageSink('my_msg',MyMsg,self.filtering_callback) c=sb.launch('huge_msg_cpp', execparams={'LOGGING_CONFIG_URI':'file://'+os.getcwd()+'/logconfig.cfg'}) c.connect(snk) sb.start() - time.sleep(1) + time.sleep(5) fp = None try: fp = open('foo/bar/test.log','r') @@ -104,16 +158,17 @@ def test_MessageMarshalCpp(self): os.rmdir('foo') except: pass - number_warnings = log_contents.count('Could not deliver the message. 
Maximum message size exceeded') + number_warnings = log_contents.count('Maximum message size exceeded') self.assertEquals(number_warnings, 2) + self.assertEqual(self.messages_passed, 101) @scatest.requireJava def test_MessageMarshalJava(self): - snk=sb.MessageSink('') + snk=sb.MessageSink('my_msg',MyMsg,self.filtering_callback) c=sb.launch('huge_msg_java', execparams={'LOGGING_CONFIG_URI':'file://'+os.getcwd()+'/logconfig.cfg'}) c.connect(snk) sb.start() - time.sleep(3) + time.sleep(5) fp = None try: fp = open('foo/bar/test.log','r') @@ -134,15 +189,18 @@ def test_MessageMarshalJava(self): os.rmdir('foo') except: pass + number_warnings = log_contents.count('Could not deliver the message. Maximum message size exceeded, trying individually.') + self.assertEquals(number_warnings, 1) number_warnings = log_contents.count('Could not deliver the message. Maximum message size exceeded') - self.assertEquals(number_warnings, 2) + self.assertEquals(number_warnings, 3) + self.assertEqual(self.messages_passed, 101) def test_MessageMarshalPython(self): - snk=sb.MessageSink('') + snk=sb.MessageSink('my_msg',MyMsg,self.filtering_callback) c=sb.launch('huge_msg_python', execparams={'LOGGING_CONFIG_URI':'file://'+os.getcwd()+'/logconfig.cfg'}) c.connect(snk) sb.start() - time.sleep(1) + time.sleep(5) fp = None try: fp = open('foo/bar/test.log','r') @@ -163,8 +221,11 @@ def test_MessageMarshalPython(self): os.rmdir('foo') except: pass - number_warnings = log_contents.count('Could not deliver the message. Maximum message size exceeded') - self.assertEquals(number_warnings, 2) + number_warnings = log_contents.count('Could not deliver the message. 
Maximum message size exceeded, trying individually') + self.assertEquals(number_warnings, 1) + number_warnings = log_contents.count('Maximum message size exceeded') + self.assertEquals(number_warnings, 3) + self.assertEqual(self.messages_passed, 101) class MessagingCompatibilityTest(scatest.CorbaTestCase): def setUp(self): @@ -386,6 +447,54 @@ def tearDown(self): # class tearDown, or failures will occur. scatest.CorbaTestCase.tearDown(self) + @scatest.requireLog4cxx + def test_EventChannelConnectionCpp(self): + self._test_EventChannelConnection("/waveforms/MessageEventTest/MessageEventTest.sad.xml.cpp") + + @scatest.requireJava + def test_EventChannelConnectionJava(self): + self._test_EventChannelConnection("/waveforms/MessageEventTest/MessageEventTest.sad.xml.java", 5) + + def test_EventChannelConnectionPy(self): + self._test_EventChannelConnection("/waveforms/MessageEventTest/MessageEventTest.sad.xml.py") + + def _test_EventChannelConnection(self, sad_file, sleep_time=2): + self.localEvent = threading.Event() + self.eventFlag = False + + self._devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml", self._domMgr) + self.assertNotEqual(self._devBooter, None) + + # rh 1.11 and forward event channels belong to the Domain... 
+ req = CF.EventChannelManager.EventRegistration( 'message_events', '') + try: + ecm = self._domMgr._get_eventChannelMgr() + creg = ecm.registerResource( req ) + appChannel = creg.channel + except: + self.assertEqual(False, True) + else: + self.assertEqual(True, True) + + # resolve the consumer for the event + consumer_admin = appChannel.for_consumers() + _proxy_supplier = consumer_admin.obtain_push_supplier() + _consumer = Consumer_i(self) + _proxy_supplier.connect_push_consumer(_consumer._this()) + + self.assertEqual(self.messages_passed, 0) + + self._domMgr.installApplication(sad_file) + appFact = self._domMgr._get_applicationFactories()[0] + self.assertNotEqual(appFact, None) + app = appFact.create(appFact._get_name(), [], []) + self.assertNotEqual(app, None) + app.start() + time.sleep(sleep_time) + + self.assertEqual(self.messages_passed, 101) + app.releaseObject() + def test_EventDevicePortConnection(self): self.localEvent = threading.Event() self.eventFlag = False @@ -402,10 +511,33 @@ def test_EventDevicePortConnection(self): components = app._get_registeredComponents() for component in components: print component.componentObject._get_identifier() - if 'DCE:b1fe6cc1-2562-4878-9a69-f191f89a6ef8' in component.componentObject._get_identifier(): + if 'MessageReceiverPy_1' in component.componentObject._get_identifier(): stuff = component.componentObject.query([]) recval = any.from_any(stuff[0].value) self.assertEquals(6, len(recval)) for val in recval: self.assertEquals('test_message' in val, True) + + def test_QueryablePortPython(self): + self._devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml", self._domMgr) + self.assertNotEqual(self._devBooter, None) + self._domMgr.installApplication("/waveforms/MessageTestPy/MessageTestPy.sad.xml") + appFact = self._domMgr._get_applicationFactories()[0] + app = appFact.create(appFact._get_name(), [], []) + for component in app._get_registeredComponents(): + if 
'MessageSenderPy_1' in component.componentObject._get_identifier(): + sender_port = component.componentObject.getPort('message_out') + elif 'MessageReceiverPy_1' in component.componentObject._get_identifier(): + receiver_port = component.componentObject.getPort('message_in') + + # There should be two connections, one to the receiver's port and + # another to the event channel + connections = sender_port._get_connections() + self.assertEqual(len(connections), 2) + for connection in connections: + if 'direct' in connection.connectionId: + self.assertTrue(connection.port._is_equivalent(receiver_port)) + else: + channel = connection.port._narrow(CosEventChannelAdmin.EventChannel) + self.assertNotEqual(channel, None) app.releaseObject() diff --git a/redhawk/src/testing/tests/test_08_MessagingCpp.py b/redhawk/src/testing/tests/test_08_MessagingCpp.py index 4793eed9d..f75310c4b 100644 --- a/redhawk/src/testing/tests/test_08_MessagingCpp.py +++ b/redhawk/src/testing/tests/test_08_MessagingCpp.py @@ -25,6 +25,7 @@ import threading import time +import CosEventChannelAdmin class EventPortConnectionsTest(scatest.CorbaTestCase): def setUp(self): @@ -96,10 +97,34 @@ def test_EventDevicePortConnectionCppOnly(self): components = app._get_registeredComponents() for component in components: print component.componentObject._get_identifier() - if 'DCE:b1fe6cc1-2562-4878-9a69-f191f89a6ef8' in component.componentObject._get_identifier(): + if 'MessageReceiverCpp_1' in component.componentObject._get_identifier(): stuff = component.componentObject.query([]) recval = any.from_any(stuff[0].value) self.assertEquals(6, len(recval)) for val in recval: self.assertEquals('test_message' in val, True) app.releaseObject() # kill producer/consumer + + def test_QueryablePortCpp(self): + self._devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml", self._domMgr) + self.assertNotEqual(self._devBooter, None) + 
self._domMgr.installApplication("/waveforms/MessageTestCpp/MessageTestCpp.sad.xml") + appFact = self._domMgr._get_applicationFactories()[0] + app = appFact.create(appFact._get_name(), [], []) + for component in app._get_registeredComponents(): + if 'MessageSenderCpp_1' in component.componentObject._get_identifier(): + sender_port = component.componentObject.getPort('message_out') + elif 'MessageReceiverCpp_1' in component.componentObject._get_identifier(): + receiver_port = component.componentObject.getPort('message_in') + + # There should be two connections, one to the receiver's port and + # another to the event channel + connections = sender_port._get_connections() + self.assertEqual(len(connections), 2) + for connection in connections: + if 'direct' in connection.connectionId: + self.assertTrue(connection.port._is_equivalent(receiver_port)) + else: + channel = connection.port._narrow(CosEventChannelAdmin.EventChannel) + self.assertNotEqual(channel, None) + app.releaseObject() diff --git a/redhawk/src/testing/tests/test_08_MessagingJava.py b/redhawk/src/testing/tests/test_08_MessagingJava.py index 28de1c66b..4565e50f9 100644 --- a/redhawk/src/testing/tests/test_08_MessagingJava.py +++ b/redhawk/src/testing/tests/test_08_MessagingJava.py @@ -25,6 +25,8 @@ import threading import time +import CosEventChannelAdmin + @scatest.requireJava class EventPortConnectionsTest(scatest.CorbaTestCase): def setUp(self): @@ -96,10 +98,34 @@ def test_EventDevicePortConnectionJavaOnly(self): time.sleep(2) for component in components: print component.componentObject._get_identifier() - if 'DCE:b1fe6cc1-2562-4878-9a69-f191f89a6ef8' in component.componentObject._get_identifier(): + if 'EventReceiveJava_1' in component.componentObject._get_identifier(): stuff = component.componentObject.query([CF.DataType("received_messages", any.to_any(None))]) recval = any.from_any(stuff[0].value) self.assertEquals(6, len(recval)) for val in recval: self.assertEquals('test_message' in val, True) 
app.releaseObject() # kill producer/consumer + + def test_QueryablePortJava(self): + self._devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml", self._domMgr) + self.assertNotEqual(self._devBooter, None) + self._domMgr.installApplication("/waveforms/MessageTestJava/MessageTestJava.sad.xml") + appFact = self._domMgr._get_applicationFactories()[0] + app = appFact.create(appFact._get_name(), [], []) + for component in app._get_registeredComponents(): + if 'EventSenderJava_1' in component.componentObject._get_identifier(): + sender_port = component.componentObject.getPort('message_out') + elif 'EventReceiveJava_1' in component.componentObject._get_identifier(): + receiver_port = component.componentObject.getPort('message_in') + + # There should be two connections, one to the receiver's port and + # another to the event channel + connections = sender_port._get_connections() + self.assertEqual(len(connections), 2) + for connection in connections: + if 'direct' in connection.connectionId: + self.assertTrue(connection.port._is_equivalent(receiver_port)) + else: + channel = connection.port._narrow(CosEventChannelAdmin.EventChannel) + self.assertNotEqual(channel, None) + app.releaseObject() diff --git a/redhawk/src/testing/tests/test_08_PropertyChangeListener.py b/redhawk/src/testing/tests/test_08_PropertyChangeListener.py index a135b457a..ee6f745fd 100644 --- a/redhawk/src/testing/tests/test_08_PropertyChangeListener.py +++ b/redhawk/src/testing/tests/test_08_PropertyChangeListener.py @@ -32,35 +32,18 @@ from ossie.events import ChannelManager from ossie.utils import redhawk from ossie.events import Subscriber +from ossie.utils import sb execDeviceNode = "/nodes/test_GPP_node/DeviceManager.dcd.xml" -class Consumer_i(CosEventComm__POA.PushConsumer): - def __init__(self, parent): - self.parent = parent - self.count = 0 - - def push(self, data): - if data: - self.parent.eventFlag = True - self.parent.localEvent.set() - 
self.count = self.count +1 - - def disconnect_push_consumer (self): - pass - - -class PropertyChangeListener_Receiver(CF__POA.PropertyChangeListener): - def __init__(self): - self.count = 0 - - def propertyChange( self, pce ) : - self.count = self.count +1 - - class PropertyChangeListenerTest(scatest.CorbaTestCase): def setUp(self): self._domBooter, self._domMgr = self.launchDomainManager() + self.dom=redhawk.attach(scatest.getTestDomainName()) + self.count = 0 + + def property_change_callback(self, event_id, registration_id, resource_id, properties, timestamp): + self.count = self.count + 1 def tearDown(self): try: @@ -88,32 +71,28 @@ def tearDown(self): # class tearDown, or failures will occur. scatest.CorbaTestCase.tearDown(self) - def test_PropertyChangeListener_CPP(self): + def _test_PropertyChangeListener(self, app_name, comp_name): self.localEvent = threading.Event() self.eventFlag = False self._devBooter, self._devMgr = self.launchDeviceManager(execDeviceNode, self._domMgr) self.assertNotEqual(self._devBooter, None) - self._domMgr.installApplication("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml") + self._domMgr.installApplication(app_name) appFact = self._domMgr._get_applicationFactories()[0] self.assertNotEqual(appFact, None) app = appFact.create(appFact._get_name(), [], []) self.assertNotEqual(app, None) self._app = app - ps=None c=None - d=redhawk.attach(scatest.getTestDomainName()) - a=d.apps[0] - c=filter( lambda c : c.name == 'PropertyChange_C1', a.comps )[0] + a=self.dom.apps[0] + c=filter( lambda c : c.name == comp_name, a.comps )[0] self.assertNotEqual(c,None) - ps = c.ref._narrow(CF.PropertySet) - self.assertNotEqual(ps,None) - # create listener interface - myl = PropertyChangeListener_Receiver() + # create listener instance + myl = sb.PropertyChangeListener(changeCallbacks={'prop1':self.property_change_callback}) t=float(0.5) - regid=ps.registerPropertyListener( myl._this(), ['prop1'],t) + 
regid=c.registerPropertyListener( myl, ['prop1'], t) app.start() time.sleep(1) @@ -127,161 +106,39 @@ def test_PropertyChangeListener_CPP(self): time.sleep(.6) # wait for listener to receive notice # now check results - self.assertEquals(myl.count,4) + self.assertEquals(self.count,4) # change unmonitored property c.prop2 = 100 time.sleep(.6) # wait for listener to receive notice # now check results - self.assertEquals(myl.count,4) + self.assertEquals(self.count,4) # unregister - ps.unregisterPropertyListener( regid ) + c.unregisterPropertyListener( regid ) c.prop1 = 100.0 time.sleep(.6) # wait for listener to receive notice # now check results, should be same... - self.assertEquals(myl.count,4) + self.assertEquals(self.count,4) self.assertRaises( CF.InvalidIdentifier, - ps.unregisterPropertyListener, regid ) - + c.unregisterPropertyListener, regid ) app.releaseObject() self._app=None - - - def test_PropertyChangeListener_PYTHON(self): - self.localEvent = threading.Event() - self.eventFlag = False - - self._devBooter, self._devMgr = self.launchDeviceManager(execDeviceNode, self._domMgr) - self.assertNotEqual(self._devBooter, None) - self._domMgr.installApplication("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml") - appFact = self._domMgr._get_applicationFactories()[0] - self.assertNotEqual(appFact, None) - app = appFact.create(appFact._get_name(), [], []) - self.assertNotEqual(app, None) - self._app=app - - ps=None - c=None - d=redhawk.attach(scatest.getTestDomainName()) - a=d.apps[0] - c=filter( lambda c : c.name == 'PropertyChange_P1', a.comps )[0] - self.assertNotEqual(c,None) - ps = c.ref._narrow(CF.PropertySet) - self.assertNotEqual(ps,None) - - # create listener interface - myl = PropertyChangeListener_Receiver() - t=float(0.5) - regid=ps.registerPropertyListener( myl._this(), ['prop1'],t) - app.start() - time.sleep(1) - - # assign 3 changed values - c.prop1 = 100.0 - time.sleep(.6) # wait for listener to receive notice - c.prop1 
= 200.0 - time.sleep(.6) # wait for listener to receive notice - c.prop1 = 300.0 - time.sleep(.6) # wait for listener to receive notice - - # now check results - self.assertEquals(myl.count,4) - - # change unmonitored property - c.prop2 = 100 - time.sleep(.6) # wait for listener to receive notice - - # now check results - self.assertEquals(myl.count,4) - - # unregister - ps.unregisterPropertyListener( regid ) - - c.prop1 = 100.0 - time.sleep(.6) # wait for listener to receive notice - # now check results, should be same... - self.assertEquals(myl.count,4) - - self.assertRaises( CF.InvalidIdentifier, - ps.unregisterPropertyListener, regid ) - - - app.releaseObject() - self._app=None + def test_PropertyChangeListener_CPP(self): + self._test_PropertyChangeListener("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml", 'PropertyChange_C1') + def test_PropertyChangeListener_PYTHON(self): + self._test_PropertyChangeListener("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml", 'PropertyChange_P1') @scatest.requireJava def test_PropertyChangeListener_JAVA(self): - self.localEvent = threading.Event() - self.eventFlag = False - - self._devBooter, self._devMgr = self.launchDeviceManager(execDeviceNode, self._domMgr) - self.assertNotEqual(self._devBooter, None) - self._domMgr.installApplication("/waveforms/PropertyChangeListener/PropertyChangeListener.sad.xml") - appFact = self._domMgr._get_applicationFactories()[0] - self.assertNotEqual(appFact, None) - app = appFact.create(appFact._get_name(), [], []) - self.assertNotEqual(app, None) - self._app=app - - ps=None - c=None - d=redhawk.attach(scatest.getTestDomainName()) - a=d.apps[0] - c=filter( lambda c : c.name == 'PropertyChange_J1', a.comps )[0] - self.assertNotEqual(c,None) - ps = c.ref._narrow(CF.PropertySet) - self.assertNotEqual(ps,None) - - # create listener interface - myl = PropertyChangeListener_Receiver() - t=float(0.5) - regid=ps.registerPropertyListener( 
myl._this(), ['prop1'],t) - - app.start() - time.sleep(1) - - # assign 3 changed values - c.prop1 = 100.0 - time.sleep(.6) # wait for listener to receive notice - c.prop1 = 200.0 - time.sleep(.6) # wait for listener to receive notice - c.prop1 = 300.0 - time.sleep(.6) # wait for listener to receive notice - - # now check results - self.assertEquals(myl.count,4) - - # change unmonitored property - c.prop2 = 100 - time.sleep(.6) # wait for listener to receive notice - - # now check results - self.assertEquals(myl.count,4) - - # unregister - ps.unregisterPropertyListener( regid ) - - c.prop1 = 100.0 - time.sleep(.6) # wait for listener to receive notice - - # now check results, should be same... - self.assertEquals(myl.count,4) - - self.assertRaises( CF.InvalidIdentifier, - ps.unregisterPropertyListener, regid ) - - - app.releaseObject() - self._app=None - + self._test_PropertyChangeListener("/waveforms/PropertyChangeListener/PropertyChangeListener.sad.xml", 'PropertyChange_J1') def test_PropertyChangeListener_APP(self): self.localEvent = threading.Event() @@ -296,10 +153,8 @@ def test_PropertyChangeListener_APP(self): self.assertNotEqual(app, None) self._app=app - ps=None c=None - d=redhawk.attach(scatest.getTestDomainName()) - a=d.apps[0] + a=self.dom.apps[0] # component with external property c=filter( lambda c : c.name == 'PropertyChange_C1', a.comps )[0] # assembly controller @@ -307,13 +162,11 @@ def test_PropertyChangeListener_APP(self): self.assertNotEqual(a,None) self.assertNotEqual(c,None) self.assertNotEqual(c2,None) - ps = a.ref._narrow(CF.PropertySet) - self.assertNotEqual(ps,None) # create listener interface - myl = PropertyChangeListener_Receiver() + myl = sb.PropertyChangeListener(defaultCallback = self.property_change_callback) t=float(0.5) - regid=ps.registerPropertyListener( myl._this(), ['prop1', 'app_prop1'],t) + regid=a.registerPropertyListener( myl, ['prop1', 'app_prop1'], t) app.start() time.sleep(1) @@ -330,7 +183,7 @@ def 
test_PropertyChangeListener_APP(self): time.sleep(.6) # wait for listener to receive notice # now check results - self.assertEquals(myl.count,8) + self.assertEquals(self.count,8) # change unmonitored property c.prop2 = 100 @@ -338,20 +191,20 @@ def test_PropertyChangeListener_APP(self): time.sleep(.6) # wait for listener to receive notice # now check results - self.assertEquals(myl.count,8) + self.assertEquals(self.count,8) # unregister - ps.unregisterPropertyListener( regid ) + a.unregisterPropertyListener( regid ) c.prop1 = 100.0 c2.prop1 = 100.0 time.sleep(.6) # wait for listener to receive notice # now check results, should be same... - self.assertEquals(myl.count,8) + self.assertEquals(self.count,8) self.assertRaises( CF.InvalidIdentifier, - ps.unregisterPropertyListener, regid ) + a.unregisterPropertyListener, regid ) app.releaseObject() self._app=None @@ -360,18 +213,21 @@ def test_PropertyChangeListener_APP(self): class PropertyChangeListenerEventTest(scatest.CorbaTestCase): def setUp(self): self._domBooter, self._domMgr = self.launchDomainManager() + self.dom=redhawk.attach(scatest.getTestDomainName()) # create listener interface - orb = CORBA.ORB_init() - self.chanMgr = ChannelManager(orb) self._app=None - # Force creation - self.channel1 = self.chanMgr.createEventChannel("TestChan", force=True) + self.channel_name = "TestChan" + self.chanMgr = self.dom._get_eventChannelMgr() + try: + self.channel1 = self.chanMgr.create(self.channel_name) + except CF.EventChannelManager.ChannelAlreadyExists: + self.channel1 = self.chanMgr.get(self.channel_name) def tearDown(self): try: if self.channel1: - self.chanMgr.destroyEventChannel("TestChan") + self.chanMgr.release(self.channel_name) except: pass @@ -401,36 +257,32 @@ def tearDown(self): scatest.CorbaTestCase.tearDown(self) - def test_PropertyChangeListener_EC_CPP(self): + def _test_PropertyChangeListener_EC_Comps(self, app_name, comp_name): self.localEvent = threading.Event() self.eventFlag = False 
self._devBooter, self._devMgr = self.launchDeviceManager(execDeviceNode, self._domMgr) self.assertNotEqual(self._devBooter, None) - self._domMgr.installApplication("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml") + self._domMgr.installApplication(app_name) appFact = self._domMgr._get_applicationFactories()[0] self.assertNotEqual(appFact, None) app = appFact.create(appFact._get_name(), [], []) self.assertNotEqual(app, None) self._app = app - ps=None c=None - d=redhawk.attach(scatest.getTestDomainName()) - a=d.apps[0] - c=filter( lambda c : c.name == 'PropertyChange_C1', a.comps )[0] + a=self.dom.apps[0] + c=filter( lambda c : c.name == comp_name, a.comps )[0] self.assertNotEqual(c,None) - ps = c.ref._narrow(CF.PropertySet) - self.assertNotEqual(ps,None) # check if channel is valid self.assertNotEqual(self.channel1, None) self.assertNotEqual(self.channel1._narrow(CosEventChannelAdmin.EventChannel), None) - sub = Subscriber( self.channel1 ) + sub = Subscriber( self.dom, self.channel_name ) t=float(0.5) - regid=ps.registerPropertyListener( self.channel1, ['prop1'],t) + regid=c.registerPropertyListener( self.channel1, ['prop1'],t) app.start() time.sleep(1) @@ -447,122 +299,24 @@ def test_PropertyChangeListener_EC_CPP(self): self.assertNotEqual(xx, None) # unregister - ps.unregisterPropertyListener( regid ) + c.unregisterPropertyListener( regid ) self.assertRaises( CF.InvalidIdentifier, - ps.unregisterPropertyListener, regid ) + c.unregisterPropertyListener, regid ) + app.stop() app.releaseObject() self._app=None - def test_PropertyChangeListener_EC_PYTHON(self): - self.localEvent = threading.Event() - self.eventFlag = False - - self._devBooter, self._devMgr = self.launchDeviceManager(execDeviceNode, self._domMgr) - self.assertNotEqual(self._devBooter, None) - self._domMgr.installApplication("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml") - appFact = self._domMgr._get_applicationFactories()[0] - 
self.assertNotEqual(appFact, None) - app = appFact.create(appFact._get_name(), [], []) - self.assertNotEqual(app, None) - self._app = app - - ps=None - c=None - d=redhawk.attach(scatest.getTestDomainName()) - a=d.apps[0] - c=filter( lambda c : c.name == 'PropertyChange_P1', a.comps )[0] - self.assertNotEqual(c,None) - ps = c.ref._narrow(CF.PropertySet) - self.assertNotEqual(ps,None) - - # check if channel is valid - self.assertNotEqual(self.channel1, None) - self.assertNotEqual(self.channel1._narrow(CosEventChannelAdmin.EventChannel), None) - - sub = Subscriber( self.channel1 ) - - t=float(0.5) - regid=ps.registerPropertyListener( self.channel1, ['prop1'],t) - app.start() - time.sleep(1) - - # assign 3 changed values - c.prop1 = 100.0 - time.sleep(.6) # wait for listener to receive notice - c.prop1 = 200.0 - time.sleep(.6) # wait for listener to receive notice - c.prop1 = 300.0 - time.sleep(.6) # wait for listener to receive notice - - for n in range(4): - xx=sub.getData() - self.assertNotEqual(xx, None) - - # unregister - ps.unregisterPropertyListener( regid ) - - self.assertRaises( CF.InvalidIdentifier, - ps.unregisterPropertyListener, regid ) + def test_PropertyChangeListener_EC_CPP(self): + self._test_PropertyChangeListener_EC_Comps("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml", 'PropertyChange_C1') - app.releaseObject() - self._app=None + def test_PropertyChangeListener_EC_PYTHON(self): + self._test_PropertyChangeListener_EC_Comps("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml", 'PropertyChange_P1') @scatest.requireJava def test_PropertyChangeListener_EC_JAVA(self): - self.localEvent = threading.Event() - self.eventFlag = False - - self._devBooter, self._devMgr = self.launchDeviceManager(execDeviceNode, self._domMgr) - self.assertNotEqual(self._devBooter, None) - self._domMgr.installApplication("/waveforms/PropertyChangeListener/PropertyChangeListener.sad.xml") - appFact = 
self._domMgr._get_applicationFactories()[0] - self.assertNotEqual(appFact, None) - app = appFact.create(appFact._get_name(), [], []) - self.assertNotEqual(app, None) - self._app = app - - ps=None - c=None - d=redhawk.attach(scatest.getTestDomainName()) - a=d.apps[0] - c=filter( lambda c : c.name == 'PropertyChange_J1', a.comps )[0] - self.assertNotEqual(c,None) - ps = c.ref._narrow(CF.PropertySet) - self.assertNotEqual(ps,None) - - # check if channel is valid - self.assertNotEqual(self.channel1, None) - self.assertNotEqual(self.channel1._narrow(CosEventChannelAdmin.EventChannel), None) - - sub = Subscriber( self.channel1 ) - - t=float(0.5) - regid=ps.registerPropertyListener( self.channel1, ['prop1'],t) - app.start() - time.sleep(1) - - # assign 3 changed values - c.prop1 = 100.0 - time.sleep(.6) # wait for listener to receive notice - c.prop1 = 200.0 - time.sleep(.6) # wait for listener to receive notice - c.prop1 = 300.0 - time.sleep(.6) # wait for listener to receive notice - - for n in range(4): - xx=sub.getData() - self.assertNotEqual(xx, None) - - # unregister - ps.unregisterPropertyListener( regid ) - - self.assertRaises( CF.InvalidIdentifier, - ps.unregisterPropertyListener, regid ) - - app.releaseObject() - self._app=None + self._test_PropertyChangeListener_EC_Comps("/waveforms/PropertyChangeListener/PropertyChangeListener.sad.xml", 'PropertyChange_J1') def test_PropertyChangeListener_EC_APP(self): self.localEvent = threading.Event() @@ -577,10 +331,8 @@ def test_PropertyChangeListener_EC_APP(self): self.assertNotEqual(app, None) self._app = app - ps=None c=None - d=redhawk.attach(scatest.getTestDomainName()) - a=d.apps[0] + a=self.dom.apps[0] # component with external property c=filter( lambda c : c.name == 'PropertyChange_C1', a.comps )[0] # assembly controller @@ -588,8 +340,6 @@ def test_PropertyChangeListener_EC_APP(self): self.assertNotEqual(a,None) self.assertNotEqual(c,None) self.assertNotEqual(c2,None) - ps = a.ref._narrow(CF.PropertySet) - 
self.assertNotEqual(ps,None) # check if channel is valid self.assertNotEqual(self.channel1, None) @@ -598,7 +348,7 @@ def test_PropertyChangeListener_EC_APP(self): sub = Subscriber( self.channel1 ) t=float(0.5) - regid=ps.registerPropertyListener( self.channel1, ['prop1', 'app_prop1'],t) + regid=a.registerPropertyListener( self.channel1, ['prop1', 'app_prop1'],t) app.start() time.sleep(1) @@ -615,10 +365,10 @@ def test_PropertyChangeListener_EC_APP(self): self.assertNotEqual(xx, None) # unregister - ps.unregisterPropertyListener( regid ) + a.unregisterPropertyListener( regid ) self.assertRaises( CF.InvalidIdentifier, - ps.unregisterPropertyListener, regid ) + a.unregisterPropertyListener, regid ) app.releaseObject() self._app=None diff --git a/redhawk/src/testing/tests/test_08_SADConnections.py b/redhawk/src/testing/tests/test_08_SADConnections.py index f0f8d8fd5..c1995fea2 100644 --- a/redhawk/src/testing/tests/test_08_SADConnections.py +++ b/redhawk/src/testing/tests/test_08_SADConnections.py @@ -97,7 +97,7 @@ def test_FindByNamingService(self): def test_FindByAbsoluteNamingService(self): from ossie.utils import sb - comp=sb.Component('PortTest') + comp = sb.launch('PortTest') orb = CORBA.ORB_init() obj = orb.resolve_initial_references("NameService") rootContext = obj._narrow(CosNaming.NamingContext) diff --git a/redhawk/src/testing/tests/test_08_SADProperties.py b/redhawk/src/testing/tests/test_08_SADProperties.py index b20db1bd8..c34d16e4d 100644 --- a/redhawk/src/testing/tests/test_08_SADProperties.py +++ b/redhawk/src/testing/tests/test_08_SADProperties.py @@ -102,11 +102,11 @@ def test_ExternalProps(self): # Configure all props = [pythonProp, cppProp] - number_props = 6 + number_props = 9 to_find = 2 if java_support: props.append(javaProp) - number_props = 7 + number_props = 10 to_find = 3 self._app.configure(props) # Make sure all were set @@ -227,19 +227,8 @@ def test_badInternalId(self): self.assertRaises(CF.ApplicationFactory.CreateApplicationError, 
appFact.create, appFact._get_name(), [], []) def test_badCompRef(self): - self.assertNotEqual(self._domMgr, None) - self.assertNotEqual(self._devMgr, None) - - sadpath = '/waveforms/ExternalProperties/ExternalPropertiesBadCompRef.sad.xml' - self._domMgr.installApplication(sadpath) - self.assertEqual(len(self._domMgr._get_applicationFactories()), 1) - appFact = self._domMgr._get_applicationFactories()[0] - # Bad compref tag in externalproperties should throw appropriate error - self.assertRaises(CF.ApplicationFactory.CreateApplicationError, appFact.create, appFact._get_name(), [], []) - - self.assertNotEqual(self._domMgr, None) - self.assertNotEqual(self._devMgr, None) + self._test_installFail('BadCompRef') def test_ExternalPropOverride(self): self.assertNotEqual(self._domMgr, None) diff --git a/redhawk/src/testing/tests/test_09_DomainPersistence.py b/redhawk/src/testing/tests/test_09_DomainPersistence.py index 9f52c477a..79f09945f 100644 --- a/redhawk/src/testing/tests/test_09_DomainPersistence.py +++ b/redhawk/src/testing/tests/test_09_DomainPersistence.py @@ -32,6 +32,7 @@ from ossie import properties from ossie.cf import StandardEvent from ossie.utils import uuid +from ossie.events import Publisher class Supplier_i(CosEventComm__POA.PushSupplier): @@ -291,6 +292,20 @@ def test_EventAppPortConnectionSIGKILL(self): app = appFact.create(appFact._get_name(), [], []) app.start() + eventChannelMgr = domMgr._get_eventChannelMgr() + connMgr = domMgr._get_connectionMgr() + initial_connections = connMgr.listConnections(100)[0] + + _consumer = Consumer_i(self) + evt_reg = CF.EventChannelManager.EventRegistration(channel_name = 'anotherChannel', reg_id = 'my_reg_id') + reg = eventChannelMgr.registerConsumer(_consumer._this(), evt_reg) + pub = Publisher(domMgr, 'anotherChannel') + + _channels = eventChannelMgr.listChannels(100)[0] + _channel_registrations = {} + for _channel in _channels: + _channel_registrations[_channel.channel_name] = _channel.reg_count + # Kill the 
domainMgr os.kill(self._nb_domMgr.pid, signal.SIGKILL) if not self.waitTermination(self._nb_domMgr, 5.0): @@ -299,40 +314,40 @@ def test_EventAppPortConnectionSIGKILL(self): # Restart the Domain Manager (which should restore the old channel) self._nb_domMgr, domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile) + current_connections = connMgr.listConnections(100)[0] + current_connection_ids = [] + for _cc in current_connections: + current_connection_ids.append(_cc.connectionRecordId) + initial_connection_ids = [] + for _cc in initial_connections: + initial_connection_ids.append(_cc.connectionRecordId) + self.assertEquals(len(current_connection_ids), len(initial_connection_ids)) + for _cc in initial_connection_ids: + self.assertTrue(_cc in current_connection_ids) + newappFact = domMgr._get_applicationFactories()[0] app2 = newappFact.create(appFact._get_name(), [], []) app2.start() - channelName = URI.stringToName("%s/%s" % (domainName, 'anotherChannel')) - try: - appChannel = self._root.resolve(channelName)._narrow(CosEventChannelAdmin.EventChannel) - except: - self.assertEqual(False, True) - else: - self.assertEqual(True, True) - - # resolve the producer for the event - supplier_admin = appChannel.for_suppliers() - _proxy_consumer = supplier_admin.obtain_push_consumer() - _supplier = Supplier_i() - _proxy_consumer.connect_push_supplier(_supplier._this()) - - # resolve the consumer for the event - consumer_admin = appChannel.for_consumers() - _proxy_supplier = consumer_admin.obtain_push_supplier() - _consumer = Consumer_i(self) - _proxy_supplier.connect_push_consumer(_consumer._this()) + appChannel = eventChannelMgr.get('anotherChannel') # a flag is raised only when two responses come back (one for each running app) - _proxy_consumer.push(any.to_any("message")) + pub.push(any.to_any("message")) self.localEvent.wait(5.0) self.assertEqual(self.eventFlag, True) + _channels = eventChannelMgr.listChannels(100)[0] + 
self.assertEquals(len(_channel_registrations), len(_channels)) + for _channel in _channels: + self.assertTrue(_channel_registrations.has_key(_channel.channel_name)) + self.assertEquals(_channel_registrations[_channel.channel_name], _channel.reg_count) + _channel_registrations[_channel.channel_name] = _channel.reg_count + self.eventFlag = False # this step tests whether the number of subscribers to the channel is restored app2.releaseObject() self.localEvent.clear() - _proxy_consumer.push(any.to_any("message")) + pub.push(any.to_any("message")) self.localEvent.wait(5.0) self.assertEqual(self.eventFlag, True) app.releaseObject() @@ -350,6 +365,20 @@ def test_EventAppPortConnectionSIGTERM(self): app = appFact.create(appFact._get_name(), [], []) app.start() + eventChannelMgr = domMgr._get_eventChannelMgr() + connMgr = domMgr._get_connectionMgr() + initial_connections = connMgr.listConnections(100)[0] + + _consumer = Consumer_i(self) + evt_reg = CF.EventChannelManager.EventRegistration(channel_name = 'anotherChannel', reg_id = 'my_reg_id') + reg = eventChannelMgr.registerConsumer(_consumer._this(), evt_reg) + pub = Publisher(domMgr, 'anotherChannel') + + _channels = eventChannelMgr.listChannels(100)[0] + _channel_registrations = {} + for _channel in _channels: + _channel_registrations[_channel.channel_name] = _channel.reg_count + # Kill the domainMgr os.kill(self._nb_domMgr.pid, signal.SIGTERM) if not self.waitTermination(self._nb_domMgr, 5.0): @@ -358,49 +387,48 @@ def test_EventAppPortConnectionSIGTERM(self): # Restart the Domain Manager (which should restore the old channel) self._nb_domMgr, domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile) + current_connections = connMgr.listConnections(100)[0] + current_connection_ids = [] + for _cc in current_connections: + current_connection_ids.append(_cc.connectionRecordId) + initial_connection_ids = [] + for _cc in initial_connections: + initial_connection_ids.append(_cc.connectionRecordId) + 
self.assertEquals(len(current_connection_ids), len(initial_connection_ids)) + for _cc in initial_connection_ids: + self.assertTrue(_cc in current_connection_ids) + newappFact = domMgr._get_applicationFactories()[0] app2 = newappFact.create(appFact._get_name(), [], []) app2.start() - channelName = URI.stringToName("%s/%s" % (domainName, 'anotherChannel')) - try: - appChannel = self._root.resolve(channelName)._narrow(CosEventChannelAdmin.EventChannel) - except: - self.assertEqual(False, True) - else: - self.assertEqual(True, True) - - # resolve the producer for the event - supplier_admin = appChannel.for_suppliers() - _proxy_consumer = supplier_admin.obtain_push_consumer() - _supplier = Supplier_i() - _proxy_consumer.connect_push_supplier(_supplier._this()) - - # resolve the consumer for the event - consumer_admin = appChannel.for_consumers() - _proxy_supplier = consumer_admin.obtain_push_supplier() - _consumer = Consumer_i(self) - _proxy_supplier.connect_push_consumer(_consumer._this()) # a flag is raised only when two responses come back (one for each running app) - _proxy_consumer.push(any.to_any("message")) + pub.push(any.to_any("message")) self.localEvent.wait(5.0) self.assertEqual(self.eventFlag, True) + _channels = eventChannelMgr.listChannels(100)[0] + self.assertEquals(len(_channel_registrations), len(_channels)) + for _channel in _channels: + self.assertTrue(_channel_registrations.has_key(_channel.channel_name)) + self.assertEquals(_channel_registrations[_channel.channel_name], _channel.reg_count) + _channel_registrations[_channel.channel_name] = _channel.reg_count + self.eventFlag = False # this step tests whether the number of subscribers to the channel is restored app2.releaseObject() self.localEvent.clear() - _proxy_consumer.push(any.to_any("message")) + pub.push(any.to_any("message")) self.localEvent.wait(5.0) self.assertEqual(self.eventFlag, True) app.releaseObject() - def test_EventAppPortConnectionSIGTERMNoPersist(self): + def 
test_EventAppPortConnectionSIGQUIT(self): self.localEvent = threading.Event() self.eventFlag = False - self._nb_domMgr, domMgr = self.launchDomainManager("--nopersist", endpoint="giop:tcp::5679", dbURI=self._dbfile) + self._nb_domMgr, domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile) self._nb_devMgr, devMgr = self.launchDeviceManager("/nodes/test_EventPortTestDevice_node/DeviceManager.dcd.xml") domainName = scatest.getTestDomainName() @@ -409,35 +437,19 @@ def test_EventAppPortConnectionSIGTERMNoPersist(self): app = appFact.create(appFact._get_name(), [], []) app.start() - # Kill the domainMgr - os.kill(self._nb_domMgr.pid, signal.SIGTERM) - if not self.waitTermination(self._nb_domMgr, 5.0): - self.fail("Domain Manager Failed to Die") - - # Restart the Domain Manager (which should restore the old channel) - self._nb_domMgr, domMgr = self.launchDomainManager("--nopersist", endpoint="giop:tcp::5679", dbURI=self._dbfile) - - newappFact = domMgr._get_applicationFactories() - self.assertEqual(len(newappFact), 0) - - apps = domMgr._get_applications() - self.assertEqual(len(apps), 0) - - devMgrs = domMgr._get_deviceManagers() - self.assertEqual(len(devMgrs), 0) - - def test_EventAppPortConnectionSIGQUIT(self): - self.localEvent = threading.Event() - self.eventFlag = False + eventChannelMgr = domMgr._get_eventChannelMgr() + connMgr = domMgr._get_connectionMgr() + initial_connections = connMgr.listConnections(100)[0] - self._nb_domMgr, domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile) - self._nb_devMgr, devMgr = self.launchDeviceManager("/nodes/test_EventPortTestDevice_node/DeviceManager.dcd.xml") + _consumer = Consumer_i(self) + evt_reg = CF.EventChannelManager.EventRegistration(channel_name = 'anotherChannel', reg_id = 'my_reg_id') + reg = eventChannelMgr.registerConsumer(_consumer._this(), evt_reg) + pub = Publisher(domMgr, 'anotherChannel') - domainName = scatest.getTestDomainName() - 
domMgr.installApplication("/waveforms/PortConnectFindByDomainFinderEvent/PortConnectFindByDomainFinderEvent.sad.xml") - appFact = domMgr._get_applicationFactories()[0] - app = appFact.create(appFact._get_name(), [], []) - app.start() + _channels = eventChannelMgr.listChannels(100)[0] + _channel_registrations = {} + for _channel in _channels: + _channel_registrations[_channel.channel_name] = _channel.reg_count # Kill the domainMgr os.kill(self._nb_domMgr.pid, signal.SIGQUIT) @@ -447,74 +459,44 @@ def test_EventAppPortConnectionSIGQUIT(self): # Restart the Domain Manager (which should restore the old channel) self._nb_domMgr, domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile) + current_connections = connMgr.listConnections(100)[0] + current_connection_ids = [] + for _cc in current_connections: + current_connection_ids.append(_cc.connectionRecordId) + initial_connection_ids = [] + for _cc in initial_connections: + initial_connection_ids.append(_cc.connectionRecordId) + self.assertEquals(len(current_connection_ids), len(initial_connection_ids)) + for _cc in initial_connection_ids: + self.assertTrue(_cc in current_connection_ids) + newappFact = domMgr._get_applicationFactories()[0] app2 = newappFact.create(appFact._get_name(), [], []) app2.start() - channelName = URI.stringToName("%s/%s" % (domainName, 'anotherChannel')) - try: - appChannel = self._root.resolve(channelName)._narrow(CosEventChannelAdmin.EventChannel) - except: - self.assertEqual(False, True) - else: - self.assertEqual(True, True) - - # resolve the producer for the event - supplier_admin = appChannel.for_suppliers() - _proxy_consumer = supplier_admin.obtain_push_consumer() - _supplier = Supplier_i() - _proxy_consumer.connect_push_supplier(_supplier._this()) - - # resolve the consumer for the event - consumer_admin = appChannel.for_consumers() - _proxy_supplier = consumer_admin.obtain_push_supplier() - _consumer = Consumer_i(self) - 
_proxy_supplier.connect_push_consumer(_consumer._this()) # a flag is raised only when two responses come back (one for each running app) - _proxy_consumer.push(any.to_any("message")) + pub.push(any.to_any("message")) self.localEvent.wait(5.0) self.assertEqual(self.eventFlag, True) + _channels = eventChannelMgr.listChannels(100)[0] + + self.assertEquals(len(_channel_registrations), len(_channels)) + for _channel in _channels: + self.assertTrue(_channel_registrations.has_key(_channel.channel_name)) + self.assertEquals(_channel_registrations[_channel.channel_name], _channel.reg_count) + _channel_registrations[_channel.channel_name] = _channel.reg_count + self.eventFlag = False # this step tests whether the number of subscribers to the channel is restored app2.releaseObject() self.localEvent.clear() - _proxy_consumer.push(any.to_any("message")) + pub.push(any.to_any("message")) self.localEvent.wait(5.0) self.assertEqual(self.eventFlag, True) app.releaseObject() - def test_EventAppPortConnectionSIGQUITNoPersist(self): - self.localEvent = threading.Event() - self.eventFlag = False - - self._nb_domMgr, domMgr = self.launchDomainManager("--nopersist", endpoint="giop:tcp::5679", dbURI=self._dbfile) - self._nb_devMgr, devMgr = self.launchDeviceManager("/nodes/test_EventPortTestDevice_node/DeviceManager.dcd.xml") - - domainName = scatest.getTestDomainName() - domMgr.installApplication("/waveforms/PortConnectFindByDomainFinderEvent/PortConnectFindByDomainFinderEvent.sad.xml") - appFact = domMgr._get_applicationFactories()[0] - app = appFact.create(appFact._get_name(), [], []) - app.start() - - # Kill the domainMgr - os.kill(self._nb_domMgr.pid, signal.SIGQUIT) - if not self.waitTermination(self._nb_domMgr, 5.0): - self.fail("Domain Manager Failed to Die") - - # Restart the Domain Manager (which should restore the old channel) - self._nb_domMgr, domMgr = self.launchDomainManager("--nopersist", endpoint="giop:tcp::5679", dbURI=self._dbfile) - - newappFact = 
domMgr._get_applicationFactories() - self.assertEqual(len(newappFact), 0) - - apps = domMgr._get_applications() - self.assertEqual(len(apps), 0) - - devMgrs = domMgr._get_deviceManagers() - self.assertEqual(len(devMgrs), 0) - def test_EventAppPortConnectionSIGINT(self): self.localEvent = threading.Event() self.eventFlag = False @@ -1002,3 +984,54 @@ def _compareAllocation(self, lhs, rhs): self.assertEqual(lhsProps, rhsProps) self.assert_(lhs.allocatedDevice._is_equivalent(rhs.allocatedDevice)) self.assert_(lhs.allocationDeviceManager._is_equivalent(rhs.allocationDeviceManager)) + + + def test_DomainAndGPPDisappear(self): + # startup domain manager, dev manager and a GPP + self._nb_domMgr, self._domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile) + self._nb_devMgr, self._devMgr = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + # launch a waveform, save off pids for later + from ossie.utils import redhawk + dom=redhawk.attach(scatest.getTestDomainName()) + self.assertNotEqual(dom, None) + + app=dom.createApplication("/waveforms/noop_waveform/noop_waveform.sad.xml") + cpids=[ int(x._pid) for x in app.comps ] + cpids.sort() + self.assertNotEqual(app,None) + self.assertNotEqual(cpids, 4) + + # Kill the domain manager, device manager, gpp + os.kill(self._nb_domMgr.pid, signal.SIGKILL) + if not self.waitTermination(self._nb_domMgr): + self.fail("Domain Manager Failed to Die") + + os.killpg(self._nb_devMgr.pid, signal.SIGKILL) + if not self.waitTermination(self._nb_devMgr): + self.fail("Device Manager Failed to Die") + + self._nb_domMgr, self._domMgr = self.launchDomainManager(endpoint="giop:tcp::5679", dbURI=self._dbfile) + self.assertNotEqual( self._domMgr, None ) + self._nb_devMgr, self._devMgr = self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + self.assertNotEqual( self._devMgr, None ) + + from ossie.utils import redhawk + for i in xrange(5): + dom=redhawk.attach(scatest.getTestDomainName()) 
+ if dom == None: + time.sleep(.25) + + self.assertNotEqual(dom, None) + self.assertEqual(len(dom.apps),1) + + newpids=[ int(x._pid) for x in app.comps ] + newpids.sort() + self.assertEqual(cpids == newpids, True) + + # terminate the app, make sure each component process is terminated + dom.apps[0].releaseObject() + cpids=[ int(x._pid) for x in app.comps ] + for p in cpids: + self.assertRaises(OSError, os.kill, p, 0 ) + diff --git a/redhawk/src/testing/tests/test_10_ComponentTermination.py b/redhawk/src/testing/tests/test_10_ComponentTermination.py new file mode 100644 index 000000000..fa3c5f4d6 --- /dev/null +++ b/redhawk/src/testing/tests/test_10_ComponentTermination.py @@ -0,0 +1,83 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +import os +import threading +from _unitTestHelpers import scatest + +from ossie.events import Subscriber +from ossie.cf import CF, StandardEvent + +class ComponentTerminationTest(scatest.CorbaTestCase): + def setUp(self): + self.launchDomainManager() + self.launchDeviceManager("/nodes/test_GPP_node/DeviceManager.dcd.xml") + + self._event = threading.Event() + self._subscriber = Subscriber(self._domainManager, 'IDM_Channel', self._idmMessageReceived) + + def tearDown(self): + # Remove the subscriber + self._subscriber.terminate() + + # Continue normal teardown + scatest.CorbaTestCase.tearDown(self) + + def _idmMessageReceived(self, message): + if message.typecode().equal(StandardEvent._tc_AbnormalComponentTerminationEventType): + self._event.set() + + def _test_UnhandledException(self, lang): + waveform_name = 'svc_fn_error_' + lang + '_w' + sad_file = os.path.join('/waveforms', waveform_name, waveform_name + '.sad.xml') + app = self._domainManager.createApplication(sad_file, waveform_name, [], []) + + try: + app.start() + except CF.Resource.StartError: + # Python in particular will throw a CORBA exception if the start() + # call has not finished when the exception handler terminates the + # component. It doesn't matter for this test, so ignore it. + pass + + self._event.wait(1) + self.failUnless(self._event.isSet(), 'No unexpected termination message received') + + def test_UnhandledExceptionCpp(self): + """ + Test that unhandled exceptions in C++ service function cause components + to terminate abnormally. + """ + self._test_UnhandledException('cpp') + + def test_UnhandledExceptionPy(self): + """ + Test that unhandled exceptions in Python service function cause + components to terminate abnormally. + """ + self._test_UnhandledException('py') + + @scatest.requireJava + def test_UnhandledExceptionJava(self): + """ + Test that unhandled exceptions in Java service function cause + components to terminate abnormally. 
+ """ + self._test_UnhandledException('java') diff --git a/redhawk/src/testing/tests/test_11_AllPropTypes.py b/redhawk/src/testing/tests/test_11_AllPropTypes.py index a5c8f9472..fdbee13a1 100644 --- a/redhawk/src/testing/tests/test_11_AllPropTypes.py +++ b/redhawk/src/testing/tests/test_11_AllPropTypes.py @@ -21,10 +21,178 @@ from omniORB import any import unittest from _unitTestHelpers import scatest -from ossie.cf import CF +from ossie.cf import CF, CF__POA from ossie.utils import redhawk from omniORB import CORBA -import struct +from ossie.utils import sb, rhtime, redhawk +import struct, time, os + +globalsdrRoot = os.environ['SDRROOT'] + +class PropertyChangeListener_Receiver(CF__POA.PropertyChangeListener): + def __init__(self): + self.rcv_event = None + + def propertyChange( self, pce ) : + self.rcv_event = pce + +class TimeTest(scatest.CorbaTestCase): + def setUp(self): + sb.setDEBUG(False) + self.test_comp = "Sandbox" + # Flagrant violation of sandbox API: if the sandbox singleton exists, + # clean up previous state and dispose of it. 
+ if sb.domainless._sandbox: + sb.domainless._sandbox.shutdown() + sb.domainless._sandbox = None + + def tearDown(self): + sb.release() + sb.setDEBUG(False) + os.environ['SDRROOT'] = globalsdrRoot + + def basetest_getTime(self, comp_name): + comp = sb.launch(comp_name) + _prop=CF.DataType(id='prop',value=any.to_any(None)) + _retval = comp.query([_prop]) + self.assertEqual(_retval[0].value._v, 'value') + self.assertEqual(len(_retval), 1) + + _retval = comp.query([]) + self.assertEqual(_retval[0].value._v, 'value') + self.assertEqual(len(_retval), 1) + + _retval = comp.query([_prop, rhtime.queryTimestamp()]) + self.assertEqual(_retval[0].value._v, 'value') + self.assertEqual(len(_retval), 2) + + myl = PropertyChangeListener_Receiver() + t=float(0.5) + regid=comp.registerPropertyListener( myl._this(), ['prop'],t) + + comp.prop = 'hello' + time.sleep(1) + + _retval = comp.query([_prop]) + self.assertEqual(_retval[0].value._v, 'hello') + self.assertEqual(myl.rcv_event.properties[0].value._v, 'hello') + + _retval = comp.query([rhtime.queryTimestamp()]) + self.assertEqual(len(_retval), 1) + self.assertEqual(_retval[0].value._v.tcstatus, 1) + _time1 = myl.rcv_event.timestamp.twsec + myl.rcv_event.timestamp.tfsec + _time2 = _retval[0].value._v.twsec + _retval[0].value._v.tfsec + between = True + if _time2 - _time1 < 0.25 or _time2 - _time1 > 0.75: + between = False + self.assertEqual(between, True) + + def test_getTimeCpp(self): + self.basetest_getTime('timeprop_cpp') + + def test_getTimePython(self): + self.basetest_getTime('timeprop_py') + + @scatest.requireJava + def test_getTimeJava(self): + self.basetest_getTime('timeprop_java') + +class UTCTimeTestWaveform(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_BasicTestDevice_node/DeviceManager.dcd.xml") + self._rhDom = redhawk.attach(scatest.getTestDomainName()) + 
self.assertEquals(len(self._rhDom._get_applications()), 0) + + def tearDown(self): + if self._app: + self._app.stop() + self._app.releaseObject() + + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + scatest.CorbaTestCase.tearDown(self) + + def basetest_Now(self, app_name): + self.basetest_Overload(app_name) + prop = CF.DataType(id='simple1970',value=any.to_any(None)) + retval = self._app.comps[0].ref.query([prop]) + self.assertEqual(retval[0].value._v.twsec, 0) + self.assertEqual(retval[0].value._v.tfsec, 0) + prop = CF.DataType(id='simpleSeqDefNow',value=any.to_any(None)) + retval = self._app.comps[0].ref.query([prop]) + self.assertEquals(len(retval[0].value._v), 1) + self.assertEquals(retval[0].value._v[0].tcstatus, 1) + self.assertNotEquals(retval[0].value._v[0].twsec, 0) + self.assertNotEquals(retval[0].value._v[0].tfsec, 0) + prop = CF.DataType(id='simpleSeqNoDef',value=any.to_any(None)) + retval = self._app.comps[0].ref.query([prop]) + self.assertEquals(len(retval[0].value._v), 0) + prop = CF.DataType(id='simpleSeq1970',value=any.to_any(None)) + retval = self._app.comps[0].ref.query([prop]) + self.assertEquals(len(retval[0].value._v), 1) + self.assertEquals(retval[0].value._v[0].tcstatus, 1) + self.assertEquals(retval[0].value._v[0].twsec, 0) + self.assertEquals(retval[0].value._v[0].tfsec, 0) + + def basetest_Overload(self, app_name): + self._app = self._rhDom.createApplication("/waveforms/"+app_name+"/"+app_name+".sad.xml") + self.assertNotEqual(self._app, None) + cur_time = rhtime.now() + app_time = self._app.comps[0].rightnow.queryValue() + _cur_time = cur_time.twsec + cur_time.tfsec + _app_time = app_time.twsec + app_time.tfsec + self.assertTrue(abs(_cur_time-_app_time)<1, True) + + def test_nowOverload(self): + self.basetest_Overload('newtime_w') + + def test_nowWaveCpp(self): + self.basetest_Now('time_cp_now_w') + + def test_nowWavePython(self): + self.basetest_Now('time_py_now_w') + + 
@scatest.requireJava + def test_nowWaveJava(self): + self.basetest_Now('time_ja_now_w') + +class UTCTimeTestSandbox(scatest.CorbaTestCase): + def setUp(self): + sb.setDEBUG(False) + # Flagrant violation of sandbox API: if the sandbox singleton exists, + # clean up previous state and dispose of it. + if sb.domainless._sandbox: + sb.domainless._sandbox.shutdown() + sb.domainless._sandbox = None + + def tearDown(self): + sb.release() + sb.setDEBUG(False) + os.environ['SDRROOT'] = globalsdrRoot + + def basetest_Now(self, comp_name): + comp = sb.launch(comp_name) + self.assertNotEqual(comp, None) + cur_time = rhtime.now() + comp_time = comp.rightnow.queryValue() + _cur_time = cur_time.twsec + cur_time.tfsec + _comp_time = comp_time.twsec + comp_time.tfsec + self.assertTrue(abs(_cur_time-_comp_time)<1, True) + prop = CF.DataType(id='simple1970',value=any.to_any(None)) + retval = comp.ref.query([prop]) + self.assertEqual(retval[0].value._v.twsec, 0) + self.assertEqual(retval[0].value._v.tfsec, 0) + + def test_nowSbCpp(self): + self.basetest_Now('time_cp_now') + + def test_nowSbPython(self): + self.basetest_Now('time_py_now') + + @scatest.requireJava + def test_nowSbJava(self): + self.basetest_Now('time_ja_now') class TestAllTypes(scatest.CorbaTestCase): def setUp(self): diff --git a/redhawk/src/testing/tests/test_11_ComplexProperties.py b/redhawk/src/testing/tests/test_11_ComplexProperties.py index 66b2bff33..5bd1a0403 100644 --- a/redhawk/src/testing/tests/test_11_ComplexProperties.py +++ b/redhawk/src/testing/tests/test_11_ComplexProperties.py @@ -21,6 +21,7 @@ from omniORB import any from _unitTestHelpers import scatest from ossie.cf import CF +from ossie.utils import sb from omniORB import CORBA import numpy from _unitTestHelpers import runtestHelpers @@ -34,92 +35,52 @@ def __init__(self, id, default, override, typecode): self.override = override self.typecode = typecode - +def _compareComplex(parent, item_1, item_2): + parent.assertEquals(item_1.real, item_2.real) + 
parent.assertEquals(item_1.imag, item_2.imag) + +def _compareStructs(parent, struct_1, struct_2): + for _struct_mem_idx in range(len(struct_2)): + if type(struct_2[_struct_mem_idx]) == list: + for idx_val in range(len(struct_2[_struct_mem_idx])): + _compareComplex(parent, struct_1[_struct_mem_idx].value._v[idx_val], struct_2[_struct_mem_idx][idx_val]) + else: + _compareComplex(parent, struct_1[_struct_mem_idx].value._v, struct_2[_struct_mem_idx]) + +def _compareComplexValues(parent, val1, val2): + if type(val2) == list: + if len(val2) != 0: + if type(val2[0]) == tuple: # sequence of structs + for _struct_seq_idx in range(len(val2)): + _compareStructs(parent, val1[_struct_seq_idx]._v, val2[_struct_seq_idx]) + else: + for idx_val in range(len(val1)): + _compareComplex(parent, val1[idx_val], val2[idx_val]) + elif type(val2) == tuple: + _compareStructs(parent, val1, val2) + else: + _compareComplex(parent, val1, val2) class _TestVector: - ''' - def test_complexBoolean(self): - testStruct = (_DataTypeTest("complexBooleanProp", - CF.complexBoolean(False, True), - CF.complexBoolean(True, False), - CF._tc_complexBoolean)) - self._runTest(testStruct) - - def test_complexULong(self): - testStruct = (_DataTypeTest("complexULongProp", - CF.complexULong(4, 5), - CF.complexULong(2, 3), - CF._tc_complexULong)) - self._runTest(testStruct) - def test_complexShort(self): - testStruct = (_DataTypeTest("complexShortProp", - CF.complexShort(4,5), - CF.complexShort(2,3), - CF._tc_complexShort)) - self._runTest(testStruct) - ''' - def test_complexFloat(self): - testStruct = (_DataTypeTest("complexFloatProp", - CF.complexFloat(4.0, 5.0), - CF.complexFloat(2.0, 3.0), - CF._tc_complexFloat)) - self._runTest(testStruct) - ''' - def test_complexOctet(self): - testStruct = (_DataTypeTest("complexOctetProp", - CF.complexOctet(4, 5), - CF.complexOctet(2, 3), - CF._tc_complexOctet)) - self._runTest(testStruct) - - def test_complexUShort(self): - testStruct = (_DataTypeTest("complexUShort", - 
CF.complexUShort(4, 5), - CF.complexUShort(2, 3), - CF._tc_complexUShort)) - self._runTest(testStruct) - - def test_complexDouble(self): - testStruct = (_DataTypeTest("complexDouble", - CF.complexDouble(4.0, 5.0), - CF.complexDouble(2.0, 3.0), - CF._tc_complexDouble)) - self._runTest(testStruct) - - def test_complexLong(self): - testStruct = (_DataTypeTest("complexLong", - CF.complexLong(4, 5), - CF.complexLong(2, 3), - CF._tc_complexLong)) - self._runTest(testStruct) + def test_complexOverrides(self): + testStruct = [(_DataTypeTest("complexBooleanProp", CF.complexBoolean(False, True), CF.complexBoolean(True, False), CF._tc_complexBoolean)), + (_DataTypeTest("complexULongProp", CF.complexULong(4, 5), CF.complexULong(2, 3), CF._tc_complexULong)), + (_DataTypeTest("complexShortProp", CF.complexShort(4,5), CF.complexShort(2,3), CF._tc_complexShort)), + (_DataTypeTest("complexFloatProp", CF.complexFloat(4.0, 5.0), CF.complexFloat(2.0, 3.0), CF._tc_complexFloat)), + (_DataTypeTest("complexOctetProp", CF.complexOctet(4, 5), CF.complexOctet(2, 3), CF._tc_complexOctet)), + (_DataTypeTest("complexUShort", CF.complexUShort(4, 5), CF.complexUShort(2, 3), CF._tc_complexUShort)), + (_DataTypeTest("complexDouble", CF.complexDouble(4.0, 5.0), CF.complexDouble(2.0, 3.0), CF._tc_complexDouble)), + (_DataTypeTest("complexLong", CF.complexLong(4, 5), CF.complexLong(2, 3), CF._tc_complexLong)), + (_DataTypeTest("complexLongLong", CF.complexLongLong(4, 5), CF.complexLongLong(2, 3), CF._tc_complexLongLong)), + (_DataTypeTest("complexULongLong", CF.complexULongLong(4, 5), CF.complexULongLong(2, 3), CF._tc_complexULongLong)), + (_DataTypeTest("complexFloatSequence", [CF.complexFloat(6, 7), CF.complexFloat(4, 5), CF.complexFloat(8, 9)], [CF.complexFloat(1, 2), CF.complexFloat(10, 20)], None)), + (_DataTypeTest("complexFloatStruct", (CF.complexFloat(6, 7), [CF.complexFloat(3, 4)]), (CF.complexFloat(6, 7), [CF.complexFloat(-5, 5), CF.complexFloat(9, -8), CF.complexFloat(-13, -24), 
CF.complexFloat(21, -22), CF.complexFloat(31, 0), CF.complexFloat(0, 431), CF.complexFloat(0, -567), CF.complexFloat(-3567, 0), CF.complexFloat(-5.25, 5.25), CF.complexFloat(9.25, -8.25)]), ['complexFloatStructMember', 'complexFloatStruct::complex_float_seq'])), + (_DataTypeTest("complexFloatStructSequence", [(CF.complexFloat(9, 4), [CF.complexFloat(6, 5)])], [(CF.complexFloat(32, 33), [CF.complexFloat(45, 55), CF.complexFloat(69, 78)]), (CF.complexFloat(42, 43), [CF.complexFloat(145, 155), CF.complexFloat(169, 178), CF.complexFloat(279, 998)])], ['complexFloatStructSequenceMemberMemember', 'complexFloatStructSequence::complex_float_seq']))] - def test_complexLongLong(self): - testStruct = (_DataTypeTest("complexLongLong", - CF.complexLongLong(4, 5), - CF.complexLongLong(2, 3), - CF._tc_complexLongLong)) self._runTest(testStruct) - def test_complexULongLong(self): - testStruct = (_DataTypeTest("complexULongLong", - CF.complexULongLong(4, 5), - CF.complexULongLong(2, 3), - CF._tc_complexULongLong)) - self._runTest(testStruct) - ''' -# def test_complexChar(self): -# testStruct = (_DataTypeTest( -# "complexCharProp", -# CF.complexChar(0, 1), -# CF.complexChar(2, 3), -# CF._tc_complexChar)) -# self._runTest(testStruct) - class SetupCommon: - def _compareComplexValues(self, val1, val2): - self.assertEquals(val1.real, val2.real) - self.assertEquals(val1.imag, val2.imag) def setUp_(self, sadpath): domBooter, self._domMgr = self.launchDomainManager() devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml") @@ -131,6 +92,7 @@ def setUp_(self, sadpath): self._app = appFact.create(appFact._get_name(), [], []) except: pass + self.assertNotEqual(self._app, None) def tearDown_(self): if self._app: @@ -147,7 +109,7 @@ def preconditions_(self): self.assertNotEqual(self._app, None, "Application not created") -class CppPropertiesSADOverridesTest(scatest.CorbaTestCase, _TestVector, SetupCommon): +class 
CppPropertiesSADOverridesLaunchTest(scatest.CorbaTestCase, _TestVector, SetupCommon): def setUp(self): SetupCommon.setUp_(self, sadpath = "/waveforms/TestComplexPropsSADOverrides/TestComplexPropsSADOverrides.sad.xml") @@ -157,17 +119,13 @@ def tearDown(self): def preconditions(self): SetupCommon.preconditions_(self) - def _runTest(self, dataTypeTest): - # Create a property structure - prop = CF.DataType(id = dataTypeTest.id, - value = CORBA.Any(dataTypeTest.typecode, - dataTypeTest.default)) - # Check the default property value via query - defaultProps = self._app.query([prop]) - self._compareComplexValues(defaultProps[0].value.value(), dataTypeTest.override) - + def _runTest(self, dataTypeTests): + for dataTypeTest in dataTypeTests: + prop = CF.DataType(id = dataTypeTest.id, value = any.to_any(None)) + defaultProps = self._app.query([prop]) + _compareComplexValues(self, defaultProps[0].value.value(), dataTypeTest.override) -class SandboxTest(scatest.CorbaTestCase, _TestVector, SetupCommon): +class CppPropertiesSADConfigureTest(scatest.CorbaTestCase, _TestVector, SetupCommon): def setUp(self): SetupCommon.setUp_(self, sadpath = "/waveforms/TestComplexPropsWaveform/TestComplexPropsWaveform.sad.xml") @@ -177,7 +135,7 @@ def tearDown(self): def preconditions(self): SetupCommon.preconditions_(self) - def _runTest(self, dataTypeTest): + def _runTest(self, dataTypeTests): ''' 1. Check the default value of the property via the query method. 2. Configure the property with an override value. 
@@ -185,63 +143,153 @@ def _runTest(self, dataTypeTest): ''' # Create a property structure - prop = CF.DataType(id = dataTypeTest.id, - value = CORBA.Any(dataTypeTest.typecode, - dataTypeTest.override)) - - # Check the default property value via query - defaultProps = self._app.query([prop]) - self._compareComplexValues(defaultProps[0].value.value(), dataTypeTest.default) - - # Call configure with the property with an override value - # then check, via query, if the configuration worked - self._app.configure([prop]) - newProps = self._app.query([prop]) - self._compareComplexValues(newProps[0].value.value(), dataTypeTest.override) - - def _queryDefaults(self, component): - defaults = {"boolean" : component.complexBooleanProp, - "ulong" : component.complexULongProp, - "short" : component.complexShortProp, - "float" : component.complexFloatProp, - "octet" : component.complexOctetProp, - "ushort" : component.complexUShort, - "double" : component.complexDouble, - "long" : component.complexLong, - "longlong" : component.complexLongLong, - "ulonglong" : component.complexULongLong} - # TODO: char - #"char" : component.complexCharProp, - #"char" : numpy.complex(0,1), - return defaults + for dataTypeTest in dataTypeTests: + if dataTypeTest.typecode == None: + _anyvalue = any.to_any(dataTypeTest.override) + elif type(dataTypeTest.typecode) == list: + if type(dataTypeTest.override) == tuple: + _val = [] + for name_idx in range(len(dataTypeTest.typecode)): + _val.append(CF.DataType(id=dataTypeTest.typecode[name_idx], value=any.to_any(dataTypeTest.override[name_idx]))) + _anyvalue = any.to_any(_val) + elif type(dataTypeTest.override) == list: + _val = [] + for _override in dataTypeTest.override: + _inner_val = [] + for name_idx in range(len(dataTypeTest.typecode)): + _inner_val.append(CF.DataType(id=dataTypeTest.typecode[name_idx], value=any.to_any(_override[name_idx]))) + _val.append(any.to_any(_inner_val)) + _anyvalue = any.to_any(_val) + else: + a=b + else: + _anyvalue = 
CORBA.Any(dataTypeTest.typecode, dataTypeTest.override) + query_prop = CF.DataType(id = dataTypeTest.id, value = any.to_any(None)) + configure_prop = CF.DataType(id = dataTypeTest.id, value = _anyvalue) + + # Check the default property value via query + defaultProps = self._app.query([query_prop]) + _compareComplexValues(self, defaultProps[0].value.value(), dataTypeTest.default) + + # Call configure with the property with an override value + # then check, via query, if the configuration worked + self._app.configure([configure_prop]) + newProps = self._app.query([query_prop]) + _compareComplexValues(self, newProps[0].value.value(), dataTypeTest.override) + +class CppPropertiesSADCreateTest(scatest.CorbaTestCase, _TestVector, SetupCommon): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml") - def test_sandboxComplexProps(self): - from ossie.utils import sb - - # values from the component PRF file - expectedDefaults = { - "boolean" : numpy.complex(False, True), - "ulong" : numpy.complex(4,5), - "short" : numpy.complex(4,5), - "float" : numpy.complex(4.,5.), - "octet" : numpy.complex(4,5), - "ushort" : numpy.complex(4,5), - "double" : numpy.complex(4.,5.), - "long" : numpy.complex(4,5), - "longlong" : numpy.complex(4,5), - "ulonglong" : numpy.complex(4,5)} + def tearDown(self): + SetupCommon.tearDown_(self) + + def preconditions(self): + SetupCommon.preconditions_(self) + def _runTest(self, dataTypeTests): + sadpath = "/waveforms/TestComplexPropsWaveform/TestComplexPropsWaveform.sad.xml" ''' - "cFloatSeq" : component.complexFloatSeq, - "cFloatStruct" : component.complexFloatStruct, - "cFloatStructSeq" : component.complexFloatStructSeq} - "cFloatSeq" : [CF.complexFloat(real=1.0, imag=0.0), - CF.complexFloat(real=1.0, imag=0.0), - CF.complexFloat(real=1.0, imag=0.0)], - "cFloatStruct" : {"complexFloatStructMember": 
CF.complexFloat(real=1.0, imag=0.0)}, - "cFloatStructSeq" : [{"complexFloatStructMember": CF.complexFloat(real=1.0, imag=0.0)}]} + 1. Create the set of properties with the override values. + 2. Create the application specifying the override values. + 3. Query each property to make sure the override value has been set. + ''' + initProps = [] + + # Create a property structure with the override values for the create call + for dataTypeTest in dataTypeTests: + if dataTypeTest.typecode == None: + _anyvalue = any.to_any(dataTypeTest.override) + elif type(dataTypeTest.typecode) == list: + if type(dataTypeTest.override) == tuple: + _val = [] + for name_idx in range(len(dataTypeTest.typecode)): + _val.append(CF.DataType(id=dataTypeTest.typecode[name_idx], value=any.to_any(dataTypeTest.override[name_idx]))) + _anyvalue = any.to_any(_val) + elif type(dataTypeTest.override) == list: + _val = [] + for _override in dataTypeTest.override: + _inner_val = [] + for name_idx in range(len(dataTypeTest.typecode)): + _inner_val.append(CF.DataType(id=dataTypeTest.typecode[name_idx], value=any.to_any(_override[name_idx]))) + _val.append(any.to_any(_inner_val)) + _anyvalue = any.to_any(_val) + else: + a=b + else: + _anyvalue = CORBA.Any(dataTypeTest.typecode, dataTypeTest.override) + initProps.append(CF.DataType(id = dataTypeTest.id, value = _anyvalue)) + + self._app = None + if self._domMgr: + try: + self._domMgr.installApplication(sadpath) + appFact = self._domMgr._get_applicationFactories()[0] + self._app = appFact.create(appFact._get_name(), initProps, []) + except: + pass + self.assertNotEqual(self._app, None) + + # Create a property structure + for dataTypeTest in dataTypeTests: + if dataTypeTest.typecode == None: + _anyvalue = any.to_any(dataTypeTest.override) + elif type(dataTypeTest.typecode) == list: + if type(dataTypeTest.override) == tuple: + _val = [] + for name_idx in range(len(dataTypeTest.typecode)): + _val.append(CF.DataType(id=dataTypeTest.typecode[name_idx], 
value=any.to_any(dataTypeTest.override[name_idx]))) + _anyvalue = any.to_any(_val) + elif type(dataTypeTest.override) == list: + _val = [] + for _override in dataTypeTest.override: + _inner_val = [] + for name_idx in range(len(dataTypeTest.typecode)): + _inner_val.append(CF.DataType(id=dataTypeTest.typecode[name_idx], value=any.to_any(_override[name_idx]))) + _val.append(any.to_any(_inner_val)) + _anyvalue = any.to_any(_val) + else: + a=b + else: + _anyvalue = CORBA.Any(dataTypeTest.typecode, dataTypeTest.override) + query_prop = CF.DataType(id = dataTypeTest.id, value = any.to_any(None)) + + # Check the property value via query is the overriden property value, not the default value + defaultProps = self._app.query([query_prop]) + _compareComplexValues(self, defaultProps[0].value.value(), dataTypeTest.override) + +class _SandboxDataTypeTest: + def __init__(self, _id, expected, typecode): + self._id = _id + self.expected = expected + self.typecode = typecode + +class SandboxTests(scatest.CorbaTestCase): + def setUp(self): + if sb.domainless._sandbox: + sb.domainless._sandbox.shutdown() + sb.domainless._sandbox = None + + def tearDown(self): + sb.domainless._getSandbox().shutdown() + + def test_sandboxComplexProps(self): + testSet = [(_SandboxDataTypeTest(["complexBooleanProp"], CF.complexBoolean(False, True), CF._tc_complexBoolean)), + (_SandboxDataTypeTest(["complexULongProp"], CF.complexULong(4, 5), CF._tc_complexULong)), + (_SandboxDataTypeTest(["complexShortProp"], CF.complexShort(4,5), CF._tc_complexShort)), + (_SandboxDataTypeTest(["complexFloatProp"], CF.complexFloat(4.0, 5.0), CF._tc_complexFloat)), + (_SandboxDataTypeTest(["complexOctetProp"], CF.complexOctet(4, 5), CF._tc_complexOctet)), + (_SandboxDataTypeTest(["complexUShort"], CF.complexUShort(4, 5), CF._tc_complexUShort)), + (_SandboxDataTypeTest(["complexDouble"], CF.complexDouble(4.0, 5.0), CF._tc_complexDouble)), + (_SandboxDataTypeTest(["complexLong"], CF.complexLong(4, 5), CF._tc_complexLong)), + 
(_SandboxDataTypeTest(["complexLongLong"], CF.complexLongLong(4, 5), CF._tc_complexLongLong)), + (_SandboxDataTypeTest(["complexULongLong"], CF.complexULongLong(4, 5), CF._tc_complexULongLong)), + (_SandboxDataTypeTest(["complexFloatSequence"], [CF.complexFloat(6, 7), CF.complexFloat(4, 5), CF.complexFloat(8, 9)], None)), + (_SandboxDataTypeTest(['complexFloatStruct', ['complexFloatStructMember', 'complexFloatStruct::complex_float_seq']], (CF.complexFloat(6, 7), [CF.complexFloat(3, 4)]), None)), + (_SandboxDataTypeTest(['complexFloatStructSequence', ['complexFloatStructSequenceMemberMemember', 'complexFloatStructSequence::complex_float_seq']], [(CF.complexFloat(9, 4), [CF.complexFloat(6, 5)])], None))] # Create an instance of the test component in all 3 languages components = {"cpp" : sb.launch("TestComplexProps", impl="cpp"), @@ -251,34 +299,74 @@ def test_sandboxComplexProps(self): sb.start() + prop_idx = {} + for idx in range(len(components['cpp']._properties)): + prop_idx[components['cpp']._properties[idx].id] = idx for language in components.keys(): - # allow for visual inspection of complex sequences - # TODO: replace this with an automated comparison - print language - print components[language].complexFloatProp - print "simple struct member" - print components[language].FloatStruct.FloatStructMember - components[language].FloatStruct.FloatStructMember = 9 - print components[language].FloatStruct.FloatStructMember - print "complex struct member" - print components[language].complexFloatStruct.complexFloatStructMember - components[language].complexFloatStruct.complexFloatStructMember = complex(9,10) - print components[language].complexFloatStruct.complexFloatStructMember - - - print components[language].complexFloatSequence - components[language].complexFloatSequence = [complex(6,7)]*3 - print components[language].complexFloatSequence - print "" - - - for componentKey in components.keys(): - # loop through all three languages and query for the default - # 
property values - defaults = self._queryDefaults(components[componentKey]) - for key in defaults.keys(): - # Loop through the default property values and compare them - # to the expected values. - self._compareComplexValues(defaults[key], expectedDefaults[key]) - - sb.domainless._cleanUpLaunchedComponents() + for _test in testSet: + _prop = components[language]._properties[prop_idx[_test._id[0]]] + _value = _prop._queryValue().value() + _compareComplexValues(self, _value, _test.expected) + components[language].releaseObject() + + def test_complexLoadSADFile(self): + testSet = [(_SandboxDataTypeTest(["complexBooleanProp"], CF.complexBoolean(False, True), CF._tc_complexBoolean)), + (_SandboxDataTypeTest(["complexULongProp"], CF.complexULong(4, 5), CF._tc_complexULong)), + (_SandboxDataTypeTest(["complexShortProp"], CF.complexShort(4,5), CF._tc_complexShort)), + (_SandboxDataTypeTest(["complexFloatProp"], CF.complexFloat(4.0, 5.0), CF._tc_complexFloat)), + (_SandboxDataTypeTest(["complexOctetProp"], CF.complexOctet(4, 5), CF._tc_complexOctet)), + (_SandboxDataTypeTest(["complexUShort"], CF.complexUShort(4, 5), CF._tc_complexUShort)), + (_SandboxDataTypeTest(["complexDouble"], CF.complexDouble(4.0, 5.0), CF._tc_complexDouble)), + (_SandboxDataTypeTest(["complexLong"], CF.complexLong(4, 5), CF._tc_complexLong)), + (_SandboxDataTypeTest(["complexLongLong"], CF.complexLongLong(4, 5), CF._tc_complexLongLong)), + (_SandboxDataTypeTest(["complexULongLong"], CF.complexULongLong(4, 5), CF._tc_complexULongLong)), + (_SandboxDataTypeTest(["complexFloatSequence"], [CF.complexFloat(6, 7), CF.complexFloat(4, 5), CF.complexFloat(8, 9)], None)), + (_SandboxDataTypeTest(['complexFloatStruct', ['complexFloatStructMember', 'complexFloatStruct::complex_float_seq']], (CF.complexFloat(6, 7), [CF.complexFloat(3, 4)]), None)), + (_SandboxDataTypeTest(['complexFloatStructSequence', ['complexFloatStructSequenceMemberMemember', 'complexFloatStructSequence::complex_float_seq']], 
[(CF.complexFloat(9, 4), [CF.complexFloat(6, 5)])], None))] + + retval = sb.loadSADFile('sdr/dom/waveforms/TestComplexPropsWaveform/TestComplexPropsWaveform.sad.xml') + self.assertEquals(retval, True) + comp_ac = sb.getComponent('TestComplexProps_1') + + sb.start() + + prop_idx = {} + for idx in range(len(comp_ac._propertySet)): + prop_idx[comp_ac._propertySet[idx].id] = idx + for _test in testSet: + if _test._id[0]=='complexBooleanProp': + continue + _prop = comp_ac._propertySet[prop_idx[_test._id[0]]] + _value = _prop._queryValue().value() + _compareComplexValues(self, _value, _test.expected) + + def test_complexLoadSADFileOverride(self): + testSet = [(_SandboxDataTypeTest(["complexBooleanProp"], CF.complexBoolean(True, False), CF._tc_complexBoolean)), + (_SandboxDataTypeTest(["complexULongProp"], CF.complexULong(2, 3), CF._tc_complexULong)), + (_SandboxDataTypeTest(["complexShortProp"], CF.complexShort(2, 3), CF._tc_complexShort)), + (_SandboxDataTypeTest(["complexFloatProp"], CF.complexFloat(2, 3), CF._tc_complexFloat)), + (_SandboxDataTypeTest(["complexOctetProp"], CF.complexOctet(2, 3), CF._tc_complexOctet)), + (_SandboxDataTypeTest(["complexUShort"], CF.complexUShort(2, 3), CF._tc_complexUShort)), + (_SandboxDataTypeTest(["complexDouble"], CF.complexDouble(2, 3), CF._tc_complexDouble)), + (_SandboxDataTypeTest(["complexLong"], CF.complexLong(2, 3), CF._tc_complexLong)), + (_SandboxDataTypeTest(["complexLongLong"], CF.complexLongLong(2, 3), CF._tc_complexLongLong)), + (_SandboxDataTypeTest(["complexULongLong"], CF.complexULongLong(2, 3), CF._tc_complexULongLong)), + (_SandboxDataTypeTest(["complexFloatSequence"], [CF.complexFloat(1, 2), CF.complexFloat(10, 20)], None)), + (_SandboxDataTypeTest(['complexFloatStruct', ['complexFloatStructMember', 'complexFloatStruct::complex_float_seq']], (CF.complexFloat(6, 7), [CF.complexFloat(-5, 5), CF.complexFloat(9, -8), CF.complexFloat(-13, -24), CF.complexFloat(21, -22), CF.complexFloat(31, 0), CF.complexFloat(0, 431), 
CF.complexFloat(0, -567), CF.complexFloat(-3567, 0), CF.complexFloat(-5.25, 5.25), CF.complexFloat(9.25, -8.25)]), None)), + (_SandboxDataTypeTest(['complexFloatStructSequence', ['complexFloatStructSequenceMemberMemember', 'complexFloatStructSequence::complex_float_seq']], [(CF.complexFloat(32, 33), [CF.complexFloat(45, 55), CF.complexFloat(69, 78)]), (CF.complexFloat(42, 43), [CF.complexFloat(145, 155), CF.complexFloat(169, 178), CF.complexFloat(279, 998)])], None))] + + retval = sb.loadSADFile('sdr/dom/waveforms/TestComplexPropsSADOverrides/TestComplexPropsSADOverrides.sad.xml') + self.assertEquals(retval, True) + comp_ac = sb.getComponent('TestComplexProps_1') + + sb.start() + + prop_idx = {} + for idx in range(len(comp_ac._propertySet)): + prop_idx[comp_ac._propertySet[idx].id] = idx + for _test in testSet: + if _test._id[0]=='complexBooleanProp': + continue + _prop = comp_ac._propertySet[prop_idx[_test._id[0]]] + _value = _prop._queryValue().value() + _compareComplexValues(self, _value, _test.expected) diff --git a/redhawk/src/testing/tests/test_11_CppProperties.py b/redhawk/src/testing/tests/test_11_CppProperties.py index 71bcfeed6..d58098357 100644 --- a/redhawk/src/testing/tests/test_11_CppProperties.py +++ b/redhawk/src/testing/tests/test_11_CppProperties.py @@ -72,6 +72,21 @@ def test_LegacyPropertyCallbacks(self): for result in self._app.runTest(1, props): self.assert_(result.value._v) + def test_UTCTime(self): + prop = self._app.query([CF.DataType('simple_utctime', any.to_any(None))]) + datetime = time.gmtime(prop[0].value.value().twsec) + self.assertEquals(datetime.tm_year,2017) + self.assertEquals(datetime.tm_mon,2) + self.assertEquals(datetime.tm_mday,1) + self.assertEquals(datetime.tm_hour,10) + self.assertEquals(datetime.tm_min,1) + self.assertEquals(datetime.tm_sec,0) + self.assertEquals(prop[0].value.value().tfsec,0.123) + self._app.configure([CF.DataType('reset_utctime', any.to_any(True))]) + prop = self._app.query([CF.DataType('simple_utctime', 
any.to_any(None))]) + now = time.time() + self.assertEquals(abs(now-(prop[0].value.value().twsec+prop[0].value.value().tfsec))<0.1,True) + def test_NilProperty(self): self.preconditions() diff --git a/redhawk/src/testing/tests/test_11_JavaProperties.py b/redhawk/src/testing/tests/test_11_JavaProperties.py index 751a83406..32ff14568 100644 --- a/redhawk/src/testing/tests/test_11_JavaProperties.py +++ b/redhawk/src/testing/tests/test_11_JavaProperties.py @@ -40,7 +40,7 @@ def tearDown(self): def test_EmptyQuery (self): results = self.comp.query([]) - self.assertEqual(len(results), 6) + self.assertEqual(len(results), 9) ids = set(r.id for r in results) expected = ("ulong_prop", @@ -738,6 +738,50 @@ def test_Property_JAVA(self): app.releaseObject() +@scatest.requireJava +class JavaUTCTimeTest(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml") + self._app = None + if self._domMgr: + try: + sadpath = "/waveforms/TestJavaProps/TestJavaProps.sad.xml" + self._domMgr.installApplication(sadpath) + appFact = self._domMgr._get_applicationFactories()[0] + self._app = appFact.create(appFact._get_name(), [], []) + except: + pass + + def tearDown(self): + if self._app: + self._app.stop() + self._app.releaseObject() + + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. 
+ scatest.CorbaTestCase.tearDown(self) + + def preconditions(self): + self.assertNotEqual(self._domMgr, None, "DomainManager not available") + self.assertNotEqual(self._devMgr, None, "DeviceManager not available") + self.assertNotEqual(self._app, None, "Application not created") + + def test_UTCTimeJava(self): + prop = self._app.query([CF.DataType('simple_utctime', any.to_any(None))]) + datetime = time.gmtime(prop[0].value.value().twsec) + self.assertEquals(datetime.tm_year,2017) + self.assertEquals(datetime.tm_mon,2) + self.assertEquals(datetime.tm_mday,1) + self.assertEquals(datetime.tm_hour,10) + self.assertEquals(datetime.tm_min,1) + self.assertEquals(datetime.tm_sec,0) + self.assertEquals(prop[0].value.value().tfsec,0.123) + self._app.configure([CF.DataType('reset_utctime', any.to_any(True))]) + prop = self._app.query([CF.DataType('simple_utctime', any.to_any(None))]) + now = time.time() + self.assertEquals(abs(now-(prop[0].value.value().twsec+prop[0].value.value().tfsec))<0.1,True) + @scatest.requireJava class JavaPropertiesReadOnly(scatest.CorbaTestCase): diff --git a/redhawk/src/testing/tests/test_11_PyProperties.py b/redhawk/src/testing/tests/test_11_PyProperties.py index ad915b108..966aced86 100644 --- a/redhawk/src/testing/tests/test_11_PyProperties.py +++ b/redhawk/src/testing/tests/test_11_PyProperties.py @@ -1156,3 +1156,46 @@ def test_readonly_sad(self): self.assertNotEqual(comp,None) readonly_prop=CF.DataType("readOnly", any.to_any("try_again")) self.assertRaises(CF.PropertySet.InvalidConfiguration, comp.configure, [ readonly_prop ] ) + +class PythonUTCTimeTest(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml") + self._app = None + if self._domMgr: + try: + sadpath = "/waveforms/TestPythonProps/TestPythonProps.sad.xml" + self._domMgr.installApplication(sadpath) + appFact = 
self._domMgr._get_applicationFactories()[0] + self._app = appFact.create(appFact._get_name(), [], []) + except: + pass + + def tearDown(self): + if self._app: + self._app.stop() + self._app.releaseObject() + + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + scatest.CorbaTestCase.tearDown(self) + + def preconditions(self): + self.assertNotEqual(self._domMgr, None, "DomainManager not available") + self.assertNotEqual(self._devMgr, None, "DeviceManager not available") + self.assertNotEqual(self._app, None, "Application not created") + + def test_UTCTimePython(self): + prop = self._app.query([CF.DataType('simple_utctime', any.to_any(None))]) + datetime = time.gmtime(prop[0].value.value().twsec) + self.assertEquals(datetime.tm_year,2017) + self.assertEquals(datetime.tm_mon,2) + self.assertEquals(datetime.tm_mday,1) + self.assertEquals(datetime.tm_hour,10) + self.assertEquals(datetime.tm_min,1) + self.assertEquals(datetime.tm_sec,0) + self.assertEquals(prop[0].value.value().tfsec,0.123) + self._app.configure([CF.DataType('reset_utctime', any.to_any(True))]) + prop = self._app.query([CF.DataType('simple_utctime', any.to_any(None))]) + now = time.time() + self.assertEquals(abs(now-(prop[0].value.value().twsec+prop[0].value.value().tfsec))<0.1,True) diff --git a/redhawk/src/testing/tests/test_13_RedhawkModule.py b/redhawk/src/testing/tests/test_13_RedhawkModule.py index 1d7d9395f..f8add8f9a 100644 --- a/redhawk/src/testing/tests/test_13_RedhawkModule.py +++ b/redhawk/src/testing/tests/test_13_RedhawkModule.py @@ -21,14 +21,87 @@ import unittest from _unitTestHelpers import scatest import time +import contextlib +import cStringIO +import tempfile +import re +import sys as _sys from omniORB import CORBA +from omniORB import any as _any from xml.dom import minidom import os as _os +import Queue +import StringIO from ossie.cf import CF from ossie.utils import redhawk from ossie.utils import type_helpers +from 
ossie.utils import rhconnection +from ossie.utils import allocations from ossie.utils import sb from ossie.utils.model import NoMatchingPorts +from ossie.events import Subscriber, Publisher +from ossie.cf import CF +import traceback + +class RedhawkModuleEventChannelTest(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + self.ecm = self._domMgr._get_eventChannelMgr() + self.channelName = 'TestChan' + try: + self.channel = self.ecm.create(self.channelName) + except CF.EventChannelManager.ChannelAlreadyExists: + pass + self.channel = self.ecm.get(self.channelName) + + def tearDown(self): + try: + self.ecm.release(self.channelName) + except CF.EventChannelManager.ChannelDoesNotExist: + pass + scatest.CorbaTestCase.tearDown(self) + + def _waitData(self, sub, timeout): + end = time.time() + timeout + while time.time() < end: + data = sub.getData() + if data: + return data._v + return None + + def test_eventChannelPull(self): + sub = Subscriber(self._domMgr, self.channelName) + pub = Publisher(self._domMgr, self.channelName) + payload = 'hello' + data = _any.to_any(payload) + pub.push(data) + rec_data = self._waitData(sub, 1.0) + self.assertEquals(rec_data, payload) + pub.terminate() + sub.terminate() + + def test_eventChannelForceDelete(self): + sub = Subscriber(self._domMgr, self.channelName) + pub = Publisher(self._domMgr, self.channelName) + payload = 'hello' + data = _any.to_any(payload) + pub.push(data) + rec_data = self._waitData(sub, 1.0) + self.assertEquals(rec_data, payload) + self.ecm.forceRelease(self.channelName) + self.assertRaises(CF.EventChannelManager.ChannelDoesNotExist, self.ecm.release, self.channelName) + + def test_eventChannelCB(self): + queue = Queue.Queue() + sub = Subscriber(self._domMgr, self.channelName, dataArrivedCB=queue.put) + pub = Publisher(self._domMgr, self.channelName) + payload = 'hello' + data = _any.to_any(payload) + pub.push(data) + rec_data = queue.get(timeout=1.0) + 
self.assertEquals(rec_data._v, payload) + pub.terminate() + sub.terminate() class RedhawkModuleTest(scatest.CorbaTestCase): def setUp(self): @@ -52,11 +125,15 @@ def preconditions(self): self.assertNotEqual(self._devMgr, None, "DeviceManager not available") def test_API_remap(self): + import _omnipy + v=int(_omnipy.__version__[0]) orig_api = dir(self._rhDom.ref) remap_api = dir(self._rhDom) not_remap = ['_NP_RepositoryId','_Object__release','__getattribute__','__getstate__','__hash__','__setattr__','__setstate__','__weakref__', '__methods__','_duplicate','_dynamic_op','_hash','_is_a','_is_equivalent','_narrow','_nil','_obj', - '__del__','__omni_obj','_release','_unchecked_narrow', '_non_existent'] + '__del__','__omni_obj','_release','_unchecked_narrow', '_non_existent', + 'retrieve_records', 'retrieve_records_by_date', 'retrieve_records_from_date' ] + if v > 3 : not_remap += ['log_level'] for entry in orig_api: if entry in not_remap: continue @@ -99,7 +176,7 @@ def test_createBadCompApplication(self): # Automatically clean up redhawk.setTrackApps(True) # Create Application from $SDRROOT path - app = self._rhDom.createApplication("/waveforms/svc_error_cpp_w/svc_error_cpp_w.sad.xml") + app = self._rhDom.createApplication("/waveforms/svc_fn_error_cpp_w/svc_fn_error_cpp_w.sad.xml") app_2 = self._rhDom.createApplication("/waveforms/svc_one_error_w/svc_one_error_w.sad.xml") self.assertNotEqual(app, None, "Application not created") self.assertEquals(len(self._rhDom._get_applications()), 2) @@ -123,8 +200,9 @@ def test_createApplication1(self): self.assertEquals(len(self._rhDom.apps), 1) # Ensure that api() works. + _destfile=StringIO.StringIO() try: - app.api() + app.api(destfile=_destfile) except: self.fail('App.api() raised an exception') @@ -177,8 +255,9 @@ def test_createApplicationNoCleanup(self): self.assertEquals(len(self._rhDom.apps), 1) # Ensure that api() works. 
+ _destfile=StringIO.StringIO() try: - app.api() + app.api(destfile=_destfile) except: self.fail('App.api() raised an exception') @@ -243,7 +322,8 @@ def test_apiHostCollocation(self): self.assertEquals(provides_ports, {}) uses_ports = object.__getattribute__(app,'_usesPortDict') self.assertEquals(uses_ports, {}) - app.api() + _destfile=StringIO.StringIO() + app.api(destfile=_destfile) provides_ports = object.__getattribute__(app,'_providesPortDict') self.assertEquals(len(provides_ports), 1) self.assertEquals(provides_ports.keys()[0], 'input') @@ -710,6 +790,142 @@ def test_connect(self): self.assertTrue(len(post) > len(pre)) + def test_connectionMgrApp(self): + """ + Tests that applications can make connections between their external ports + """ + self.launchDeviceManager('/nodes/test_PortTestDevice_node/DeviceManager.dcd.xml') + + app1 = self._rhDom.createApplication('/waveforms/PortConnectExternalPort/PortConnectExternalPort.sad.xml') + app2 = self._rhDom.createApplication('/waveforms/PortConnectExternalPort/PortConnectExternalPort.sad.xml') + + # Tally up the connections prior to making an app-to-app connection; + # the PortTest component's runTest method returns the identifiers of + # any connected Resources + pre = [] + for comp in app1.comps + app2.comps: + pre.extend(comp.runTest(0, [])) + + ep1=rhconnection.makeEndPoint(app1, 'resource_out') + ep2=rhconnection.makeEndPoint(app2, '') + cMgr = self._rhDom._get_connectionMgr() + cMgr.connect(ep1,ep2) + + # Tally up the connections to check that a new one has been made + post = [] + for comp in app1.comps + app2.comps: + post.extend(comp.runTest(0, [])) + + self.assertTrue(len(post) > len(pre)) + + def test_connectionMgrComp(self): + """ + Tests that applications can make connections between their external ports + """ + self.launchDeviceManager('/nodes/test_PortTestDevice_node/DeviceManager.dcd.xml') + + app1 = self._rhDom.createApplication('/waveforms/PortConnectExternalPort/PortConnectExternalPort.sad.xml') 
+ app2 = self._rhDom.createApplication('/waveforms/PortConnectExternalPort/PortConnectExternalPort.sad.xml') + + foundcomp=False + for _comp in app1.comps: + if _comp._id[:33] == 'PortTest1:PortConnectExternalPort': + self.assertEquals(_comp.instanceName, "PortTest1") + foundcomp = True + break + self.assertTrue(foundcomp) + + for _port in _comp.ports: + if _port.name == 'resource_out': + break + + self.assertEquals(len(_port._get_connections()), 0) + + ep1=rhconnection.makeEndPoint(_comp, 'resource_out') + print ep1 + ep2=rhconnection.makeEndPoint(app2, '') + print ep2 + cMgr = self._rhDom._get_connectionMgr() + cMgr.connect(ep1,ep2) + + self.assertEquals(len(_port._get_connections()), 1) + +class RedhawkModuleAllocationMgrTest(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/dev_alloc_node/DeviceManager.dcd.xml") + self._rhDom = redhawk.attach(scatest.getTestDomainName()) + self.am=self._rhDom._get_allocationMgr() + self.assertEquals(len(self._rhDom._get_applications()), 0) + + def tearDown(self): + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + redhawk.core._cleanUpLaunchedApps() + scatest.CorbaTestCase.tearDown(self) + # need to let event service clean up event channels...... 
+ # cycle period is 10 milliseconds + time.sleep(0.1) + redhawk.setTrackApps(False) + + def test_allocMgrSimple(self): + """ + Tests that applications can make connections between their external ports + """ + prop = allocations.createProps({'si_prop':3}) + rq=self.am.createRequest('foo',prop) + resp = self.am.allocate([rq]) + self.assertEquals(len(resp),1) + self.assertEquals(self.am.listAllocations(CF.AllocationManager.LOCAL_ALLOCATIONS, 100)[0][0].allocationID, resp[0].allocationID) + self.am.deallocate([resp[0].allocationID]) + + def test_allocMgrSimSeq(self): + """ + Tests that applications can make connections between their external ports + """ + prop = allocations.createProps({'se_prop':[1.0,2.0]}) + rq=self.am.createRequest('foo',prop) + resp = self.am.allocate([rq]) + self.assertEquals(len(resp),1) + self.am.deallocate([resp[0].allocationID]) + prop = allocations.createProps({'se_prop':[1.0,2.0]}, prf='sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.prf.xml') + rq=self.am.createRequest('foo',prop) + resp = self.am.allocate([rq]) + self.assertEquals(len(resp),1) + self.assertEquals(self.am.listAllocations(CF.AllocationManager.LOCAL_ALLOCATIONS, 100)[0][0].allocationID, resp[0].allocationID) + self.am.deallocate([resp[0].allocationID]) + + def test_allocMgrStruct(self): + """ + Tests that applications can make connections between their external ports + """ + prop = allocations.createProps({'s_prop':{'s_prop::a':'hello','s_prop::b':5}}) + rq=self.am.createRequest('foo',prop) + resp = self.am.allocate([rq]) + self.assertEquals(len(resp),1) + self.am.deallocate([resp[0].allocationID]) + prop = allocations.createProps({'s_prop':{'s_prop::a':'hello','s_prop::b':5}}, prf='sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.prf.xml') + rq=self.am.createRequest('foo',prop) + resp = self.am.allocate([rq]) + self.assertEquals(len(resp),1) + self.assertEquals(self.am.listAllocations(CF.AllocationManager.LOCAL_ALLOCATIONS, 100)[0][0].allocationID, resp[0].allocationID) + 
self.am.deallocate([resp[0].allocationID]) + + def test_allocMgrStrSeq(self): + """ + Tests that applications can make connections between their external ports + """ + prop = allocations.createProps({'sq_prop':[{'sq_prop::b':'hello','sq_prop::a':5},{'sq_prop::b':'another','sq_prop::a':7}]}) + rq=self.am.createRequest('foo',prop) + resp = self.am.allocate([rq]) + self.assertEquals(len(resp),1) + self.am.deallocate([resp[0].allocationID]) + prop = allocations.createProps({'sq_prop':[{'sq_prop::b':'hello','sq_prop::a':5},{'sq_prop::b':'another','sq_prop::a':7}]}, prf='sdr/dev/devices/dev_alloc_cpp/dev_alloc_cpp.prf.xml') + rq=self.am.createRequest('foo',prop) + resp = self.am.allocate([rq]) + self.assertEquals(len(resp),1) + self.assertEquals(self.am.listAllocations(CF.AllocationManager.LOCAL_ALLOCATIONS, 100)[0][0].allocationID, resp[0].allocationID) + self.am.deallocate([resp[0].allocationID]) class MixedRedhawkSandboxTest(scatest.CorbaTestCase): def setUp(self): @@ -734,3 +950,322 @@ def test_BadApplicationConnection(self): sink = sb.DataSink() self.assertRaises(NoMatchingPorts, app.connect, source) + + +class DomainMgrLoggingAPI(scatest.CorbaTestCase): + def setUp(self): + self.lcfg=_os.environ['OSSIEUNITTESTSLOGCONFIG'] + _os.environ['OSSIEUNITTESTSLOGCONFIG']="" + domBooter, self._domMgr = self.launchDomainManager() + self.dom = redhawk.attach(scatest.getTestDomainName()) + + def tearDown(self): + redhawk.core._cleanUpLaunchedApps() + scatest.CorbaTestCase.tearDown(self) + time.sleep(0.1) + _os.environ['OSSIEUNITTESTSLOGCONFIG']=self.lcfg + + def test123log_level(self): + """ + Tests set debug level api is working + """ + from ossie.cf import CF + self.assertNotEqual( self.dom, None ) + + self.dom.ref._set_log_level( CF.LogLevels.TRACE ) + ret=self.dom.ref._get_log_level( ) + self.assertEqual( ret, CF.LogLevels.TRACE ) + + self.dom.ref._set_log_level( CF.LogLevels.DEBUG ) + ret=self.dom.ref._get_log_level() + self.assertEqual( ret, CF.LogLevels.DEBUG ) + + 
self.dom.ref._set_log_level( CF.LogLevels.INFO ) + ret=self.dom.ref._get_log_level() + self.assertEqual( ret, CF.LogLevels.INFO ) + + self.dom.ref._set_log_level( CF.LogLevels.WARN ) + ret=self.dom.ref._get_log_level() + self.assertEqual( ret, CF.LogLevels.WARN ) + + self.dom.ref._set_log_level( CF.LogLevels.ERROR ) + ret=self.dom.ref._get_log_level() + self.assertEqual( ret, CF.LogLevels.ERROR ) + + self.dom.ref._set_log_level( CF.LogLevels.FATAL ) + ret=self.dom.ref._get_log_level() + self.assertEqual( ret, CF.LogLevels.FATAL ) + + def test_default_logconfig(self): + cfg = "log4j.rootLogger=INFO,STDOUT\n" + \ + "# Direct log messages to STDOUT\n" + \ + "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ + "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{3}:%L - %m%n\n" + + c_cfg=self.dom.ref.getLogConfig() + + ## remove extra white space + cfg=cfg.replace(" ","") + c_cfg=c_cfg.replace(" ","") + self.assertEquals( cfg, c_cfg) + + + def test_logconfig(self): + cfg = "log4j.rootLogger=ERROR,STDOUT\n" + \ + "# Direct log messages to STDOUT\n" + \ + "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ + "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n" + + self.dom.ref.setLogConfig(cfg) + + c_cfg=self.dom.ref.getLogConfig() + cfg=cfg.replace(" ","") + c_cfg=c_cfg.replace(" ","") + self.assertEquals( cfg, c_cfg) + + + def test_macro_config(self): + cfg = "log4j.rootLogger=ERROR,STDOUT\n " + \ + "# Direct log messages to STDOUT\n" + \ + "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ + "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.STDOUT.layout.ConversionPattern=@@@DOMAIN.NAME@@@\n" + + self.dom.ref.setLogConfig(cfg) + + c_cfg=self.dom.ref.getLogConfig() + + 
res=c_cfg.find(scatest.getTestDomainName()) + + self.assertNotEquals( res, -1 ) + + def test_macro_config2(self): + cfg = "@@@DOMAIN.NAME@@@" + self.dom.ref.setLogConfig(cfg) + c_cfg=self.dom.ref.getLogConfig() + res=c_cfg.find(scatest.getTestDomainName()) + self.assertNotEquals( res, -1 ) + + + + +class RedhawkStartup(scatest.CorbaTestCase): + def setUp(self): + pass + + def tearDown(self): + redhawk.base._cleanup_domain() + scatest.CorbaTestCase.tearDown(self) + import commands + try: + s,o = commands.getstatusoutput('pkill -9 -f nodeBooter ') + s,o = commands.getstatusoutput('pkill -9 -f dev/devices ') + s,o = commands.getstatusoutput('pkill -9 -f DomainManager ') + s,o = commands.getstatusoutput('pkill -9 -f DeviceManager ') + except: + pass + + def _try_kick_domain(self, logcfg, epatterns, debug_level=None, kick_device_managers=False, dev_mgrs=[], dev_mgr_levels=[] ): + + tmpfile=tempfile.mktemp() + self._rhDom = redhawk.kickDomain(domain_name=scatest.getTestDomainName(), + logfile=scatest.getSdrPath()+'/dom/logcfg/'+logcfg, + kick_device_managers=kick_device_managers, + device_managers = dev_mgrs, + stdout=tmpfile, + debug_level=debug_level, + device_managers_debug_levels = dev_mgr_levels ) + + if kick_device_managers and len(dev_mgrs)> 0 : + for devm in dev_mgrs: + try: + self.waitForDeviceManager(devm) + except: + traceback.print_exc() + pass + + time.sleep(2) + new_stdout=open(tmpfile,'r') + for k, epat in epatterns.iteritems(): + epat.setdefault('results',[]) + + for x in new_stdout.readlines(): + #print "Line -> ", x + for k, pat in epatterns.iteritems(): + for epat in pat['patterns' ]: + m=re.search( epat, x ) + if m : + #print "MATCH -> ", epat, " LINE ", x + pat['results'].append(True) + + for k,pat in epatterns.iteritems(): + if type(pat['match']) == list: + lmatch = len(pat['results']) == len(pat['match']) and pat['results'] == pat['match'] + self.assertEqual(lmatch, True ) + + if type(pat['match']) == tuple: + reslen=len(pat['results']) + 
c=pat['match'] + res=eval("'" + str(reslen) + " " + str(c[0]) + " " + str(c[1]) + "'") + self.assertTrue(res) + + if type(pat['match']) == int: + # ignore + if pat['match'] == -1 : + pass + else: + self.assertEqual( pat['match'] , len(pat['results']) ) + + @scatest.requireLog4cxx + def test_kick_trace(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ + "yes" : { 'patterns': [" TRACE ", " DEBUG ", " INFO ", " WARN ", " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="TRACE") + + @scatest.requireLog4cxx + def test_kick_trace_both(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ + "yes" : { 'patterns': [" TRACE ", " DEBUG ", " INFO ", " WARN ", " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="TRACE", + kick_device_managers=True, + dev_mgrs = [ 'test_BasicTestDevice_node' ], + dev_mgr_levels= [ "TRACE" ], + ) + + @scatest.requireLog4cxx + def test_kick_debug(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE " ], 'match': [] }, + "yes" : { 'patterns': [" DEBUG ", " INFO ", " WARN ", " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="DEBUG") + + + @scatest.requireLog4cxx + def test_kick_debug_both(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE " ], 'match': [] }, + "yes" : { 'patterns': [ " DEBUG ", " INFO ", " WARN ", " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="DEBUG", + kick_device_managers=True, + dev_mgrs = [ 'test_BasicTestDevice_node', "test_BasicTestDevice2_node" ], + dev_mgr_levels= [ "DEBUG", "DEBUG" ] + ) + + @scatest.requireLog4cxx + def test_kick_info(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE ", " DEBUG " ], 'match': [] }, + "yes" : { 'patterns': [ " INFO ", " WARN ", " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="INFO") + + @scatest.requireLog4cxx + def test_kick_info_both(self): + 
self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE ", " DEBUG " ], 'match': [] }, + "yes" : { 'patterns': [ " INFO ", " WARN ", " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="INFO", + kick_device_managers=True, + dev_mgrs = [ 'test_BasicTestDevice_node', "test_BasicTestDevice2_node" ], + dev_mgr_levels= [ "INFO", "INFO" ] + ) + + @scatest.requireLog4cxx + def test_kick_warn(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE ", " DEBUG ", " INFO ", ], 'match': [] }, + "yes" : { 'patterns': [" WARN ", " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="WARN") + + @scatest.requireLog4cxx + def test_kick_warn_both(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE ", " DEBUG " ], 'match': [] }, + "no2" : { 'patterns': [ " INFO ", ], 'match': ("<=", 12 ) }, + "yes" : { 'patterns': [ " WARN ", " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="WARN", + kick_device_managers=True, + dev_mgrs = [ 'test_BasicTestDevice_node', "test_BasicTestDevice2_node" ], + dev_mgr_levels= [ "WARN", "WARN" ] + ) + + @scatest.requireLog4cxx + def test_kick_error(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE ", " DEBUG ", " INFO ", " WARN " ], 'match': [] }, + "yes" : { 'patterns': [ " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="ERROR") + + @scatest.requireLog4cxx + def test_kick_error_both(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE ", " DEBUG " ] , 'match': [] }, + "no2" : { 'patterns': [ " INFO ", " WARN " ], 'match': ('<=', 18) }, + "yes" : { 'patterns': [ " ERROR ", " FATAL " ], 'match': -1 }, + }, + debug_level="ERROR", + kick_device_managers=True, + dev_mgrs = [ 'test_BasicTestDevice_node', "test_BasicTestDevice2_node" ], + dev_mgr_levels= [ "ERROR", "ERROR" ] + ) + + @scatest.requireLog4cxx + def 
test_kick_fatal(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE ", " DEBUG ", " INFO ", " WARN ", " ERROR " ], 'match': [] }, + "yes" : { 'patterns': [" FATAL " ], 'match': -1 }, + }, + debug_level="ERROR") + + @scatest.requireLog4cxx + def test_kick_fatal_both(self): + self._try_kick_domain('log4j.kickdomain.cfg', + epatterns={ "no" : { 'patterns': [" TRACE ", " DEBUG ", " ERROR " ], 'match': [] }, + "no2" : { 'patterns': [ " INFO ", " WARN " ], 'match': ('<=', 18 ) }, + "yes" : { 'patterns': [ " FATAL " ], 'match': -1 }, + }, + debug_level="FATAL", + kick_device_managers=True, + dev_mgrs = [ 'test_BasicTestDevice_node', "test_BasicTestDevice2_node" ], + dev_mgr_levels= [ "FATAL", "FATAL" ] + ) + + def test_kick_devmgr_path(self): + """ + Test $SDRROOT/dev relative paths for DeviceManagers in kickDomain() + """ + dom = redhawk.kickDomain(domain_name=scatest.getTestDomainName(), + kick_device_managers=True, + device_managers=['/nodes/test_GPP_node/DeviceManager.dcd.xml']) + + def check_devmgr(): + return len(dom.devMgrs) == 1 + + self.assertPredicateWithWait(check_devmgr, 'test_GPP_node did not launch') + self.assertEqual(dom.devMgrs[0].label, 'test_GPP_node') + + def test_kick_devmgr_name(self): + """ + Test using node names for DeviceManagers in kickDomain() + """ + dom = redhawk.kickDomain(domain_name=scatest.getTestDomainName(), + kick_device_managers=True, + device_managers=['test_GPP_node']) + + def check_devmgr(): + return len(dom.devMgrs) == 1 + + self.assertPredicateWithWait(check_devmgr, 'test_GPP_node did not launch') + self.assertEqual(dom.devMgrs[0].label, 'test_GPP_node') diff --git a/redhawk/src/testing/tests/test_13_SDDS.py b/redhawk/src/testing/tests/test_13_SDDS.py new file mode 100644 index 000000000..4d9c14a95 --- /dev/null +++ b/redhawk/src/testing/tests/test_13_SDDS.py @@ -0,0 +1,677 @@ +#!/usr/bin/env python +# +# This file is protected by Copyright. 
Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. +# + +import unittest +import os +import sys +import time +import calendar +import contextlib +import cStringIO +import binascii +import struct +import re + +from ossie.utils.sdds import * + +@contextlib.contextmanager +def stdout_redirect(where): + sys.stdout = where + try: + yield where + finally: + sys.stdout = sys.__stdout__ + +class Test_SDDS_Time(unittest.TestCase): + + def setUp(self): + self.tfile = os.tmpfile() + self.cur_year = datetime.date.today().year + self.cur_year_str = str(self.cur_year) + + def _run_test(self, fmt_key): + conv=conversions[fmt_key] + config.strConfig(logcfg+conv[0]) + self.logger=logging.getLogger('') + self.console=self.logger.handlers[0] + self.console.stream=self.tfile + + pval=time.strftime(conv[1]) + + # + self.logger.info('test1') + self.tfile.seek(0) + logline=self.tfile.read() + logline=logline.strip() + if len(conv) > 2: + logline = logline.split(conv[2])[0] + pval = pval.split(conv[2])[0] + self.assertEquals( pval, logline) + + + def test_startofyear(self): + sdds_soy = Time.startOfYear() + + # calculate start of year + soy = datetime.datetime(*(time.strptime(self.cur_year_str+"-01-01 00:00:00", + "%Y-%m-%d %H:%M:%S")[0:6])) + 
soy_time=calendar.timegm(soy.timetuple()) + self.assertEqual( sdds_soy, soy_time ) + + sdds_time = Time() + sdds_time.setFromTime(soy_time) + # calculate start of year + self.assertEqual( sdds_time.gmtime(), time.gmtime(soy_time) ) + + + def test_init(self): + tod=time.time() + sdds_time = Time() + sdds_time.setFromTime(tod) + + # test current time of day + self.assertEqual( sdds_time.seconds(), tod ) + + # test current time of day struct + self.assertEqual( sdds_time.gmtime(), time.gmtime(tod)) + + # set parts + sdds_time.set( 1234, 5678 ) + self.assertEqual( sdds_time.picoTicks(), 1234 ) + self.assertEqual( sdds_time.picoTicksFractional(), 5678 ) + + # set partial + sdds_time.setFromPartial( 4, .001 ) + self.assertEqual( sdds_time.picoTicks(), (4000000000*4) + long(4000000000*0.001) ) + self.assertEqual( sdds_time.picoTicksFractional(), 0 ) + + + def test_add(self): + soy = Time.startOfYear() + + sdds_time = Time() + sdds_time.setFromTime(soy) + + # add 1 second + sdds_time = sdds_time + 1 + sdds_time_str = str(sdds_time) + + # calculate start of year + match_soy_str = self.cur_year_str+":01:01::00:00:01.000000" + self.assertEqual( sdds_time_str, match_soy_str ) + + # add 59 second + sdds_time = sdds_time + 59 + sdds_time_str = str(sdds_time) + + # + match_soy_str = self.cur_year_str+":01:01::00:01:00.000000" + self.assertEqual( sdds_time_str, match_soy_str ) + + # add 2 minutes + sdds_time = sdds_time + 120 + sdds_time_str = str(sdds_time) + + # + match_soy_str = self.cur_year_str+":01:01::00:03:00.000000" + self.assertEqual( sdds_time_str, match_soy_str ) + + # add 2 hours + sdds_time = sdds_time + (60*60*2) + sdds_time_str = str(sdds_time) + + # + match_soy_str = self.cur_year_str+":01:01::02:03:00.000000" + self.assertEqual( sdds_time_str, match_soy_str ) + + + def test_subtract(self): + soy = Time.startOfYear() + + sdds_time = Time() + sdds_time.setFromTime(soy) + + # add 2 hours + sdds_time = sdds_time + (60*60*2) + sdds_time_str = str(sdds_time) + + # set 
match + match_soy_str = self.cur_year_str+":01:01::02:00:00.000000" + self.assertEqual( sdds_time_str, match_soy_str ) + + # subtract 10 seconds + sdds_time = sdds_time -10 + sdds_time_str = str(sdds_time) + + # set match + match_soy_str = self.cur_year_str+":01:01::01:59:50.000000" + self.assertEqual( sdds_time_str, match_soy_str ) + + # subtract 9 minutes + sdds_time = sdds_time -(9*60) + sdds_time_str = str(sdds_time) + + # set match + match_soy_str = self.cur_year_str+":01:01::01:50:50.000000" + self.assertEqual( sdds_time_str, match_soy_str ) + + + +class Test_SDDS_Packet(unittest.TestCase): + + def setUp(self): + self.tfile = os.tmpfile() + self.cur_year = datetime.date.today().year + self.cur_year_str = str(self.cur_year) + + + def test_format_identifier_api(self): + + # get default format identifer.. sf=1, sos=1, dm=1 bps=8 all others 0 + res=binascii.a2b_hex('C108') + formatid = format_identifier() + self.assertEqual( formatid.asString(), res ) + + # assign from values sf=1, sos=1, dm =4, bps=16 + formatid= format_identifier.from_buffer_copy('\xC4\x10') + self.assertEqual( formatid.sf, 1 ) + self.assertEqual( formatid.sos, 1 ) + self.assertEqual( formatid.dm, 4 ) + self.assertEqual( formatid.bps, 16 ) + + # assign 32 bit value and get back + formatid= format_identifier() + formatid.set_bps(32) + self.assertEqual( formatid.bps, 31 ) + res=formatid.get_bps() + self.assertEqual( res, 32 ) + + # assign data mode... 
+ formatid.set_dmode( [ 1,1,1 ] ) + self.assertEqual( formatid.dm, 7 ) + + formatid.set_dmode( 5 ) + self.assertEqual( formatid.dm, 5 ) + + + def test_format_identifier_packet(self): + pkt = sdds_pkt.sdds_packet() + + pkt.set_standardformat(True) + self.assertEquals(pkt.get_standardformat(),True) + + pkt.set_startofsequence(True) + self.assertEquals(pkt.get_startofsequence(),True) + + pkt.set_paritypacket(True) + self.assertEquals(pkt.get_paritypacket(),True) + + pkt.set_spectralsense(True) + self.assertEquals(pkt.get_spectralsense(),True) + + pkt.set_originalformat(True) + self.assertEquals(pkt.get_originalformat(),True) + + pkt.set_complex(True) + self.assertEquals(pkt.get_complex(),True) + + pkt.set_vw(True) + self.assertEquals(pkt.get_vw(),True) + + for x in [ 2, 4, 8, 12 ]: + pkt.set_bps(x) + self.assertEquals(pkt.get_bps(),x) + + for x in [ 0,1,2,5,6,7]: + pkt.set_dmode(x) + self.assertEquals(pkt.get_dmode(),x) + + + + def test_frame_sequence(self): + + # get default frame sequence == 0 + res=binascii.a2b_hex('0000') + fsn = frame_sequence() + self.assertEqual( fsn.asString(), res ) + + # test big endian format for number + seq=256 + res=struct.pack("!H",seq) + fsn.seq = seq + self.assertEqual( fsn.asString(), res ) + + # add one... + fsn.inc() + seq +=1 + res=struct.pack("!H",seq) + self.assertEqual( fsn.asString(), res ) + + # set for rollover + seq =65535 + fsn.seq = seq + res=struct.pack("!H",seq) + self.assertEqual( fsn.asString(), res ) + + # set for rollover + fsn.inc() + res=struct.pack("!H",0) + self.assertEqual( fsn.asString(), res ) + + + def test_frame_sequence_packet(self): + pkt = sdds_pkt.sdds_packet() + res=binascii.a2b_hex('0000') + self.assertEqual( pkt.header.fsn.asString(), res ) + + # test big endian format for number + seq=256 + res=struct.pack("!H",seq) + pkt.set_fsn(seq) + self.assertEqual( pkt.header.fsn.asString(), res ) + self.assertEqual( pkt.get_fsn(), seq ) + + # add one... 
+ seq +=1 + res=struct.pack("!H",seq) + pkt.inc_fsn() + self.assertEqual( pkt.header.fsn.asString(), res ) + self.assertEqual( pkt.get_fsn(), seq ) + + # set for rollover + seq =65535 + res=struct.pack("!H",seq) + pkt.set_fsn(seq) + self.assertEqual( pkt.header.fsn.asString(), res ) + self.assertEqual( pkt.get_fsn(), seq ) + + # set for rollover + res=struct.pack("!H",0) + pkt.inc_fsn() + self.assertEqual( pkt.header.fsn.asString(), res ) + self.assertEqual( pkt.get_fsn(), 0 ) + + + def test_msptr_data(self): + + # get default frame sequence == 0 + msptr_=0 + msdelta_=0 + res=struct.pack("!HH",msptr_, msdelta_) + msptr = msptr_data() + self.assertEqual( msptr.asString(), res ) + + # test big endian format for number + msptr_=256 + msdelta_=256 + res=struct.pack("!HH",msptr_, msdelta_) + msptr.msptr=msptr_ + msptr.msdelta=msdelta_ + self.assertEqual( msptr.asString(), res ) + + # set max value + msptr_=2047 + msdelta_=65535 + res=struct.pack("!HH",msptr_, msdelta_) + msptr.msptr=msptr_ + msptr.msdelta=msdelta_ + self.assertEqual( msptr.asString(), res ) + + + def test_msptr_packet(self): + pkt = sdds_pkt.sdds_packet() + + # get default frame sequence == 0 + msptr_=0 + msdelta_=0 + res=struct.pack("!HH",msptr_, msdelta_) + pkt.set_msptr(msptr_) + pkt.set_msdelta(msdelta_) + self.assertEqual( pkt.header.ttag.info.asString(), res ) + self.assertEqual( pkt.get_msptr(), msptr_ ) + self.assertEqual( pkt.get_msdelta(), msdelta_ ) + + # test big endian format for number + msptr_=256 + msdelta_=256 + res=struct.pack("!HH",msptr_, msdelta_) + pkt.set_msptr(msptr_) + pkt.set_msdelta(msdelta_) + self.assertEqual( pkt.header.ttag.info.asString(), res ) + self.assertEqual( pkt.get_msptr(), msptr_ ) + self.assertEqual( pkt.get_msdelta(), msdelta_ ) + + # set max value + msptr_=2047 + msdelta_=65535 + res=struct.pack("!HH",msptr_, msdelta_) + pkt.set_msptr(msptr_) + pkt.set_msdelta(msdelta_) + self.assertEqual( pkt.header.ttag.info.asString(), res ) + self.assertEqual( 
pkt.get_msptr(), msptr_ ) + self.assertEqual( pkt.get_msdelta(), msdelta_ ) + + + def test_ttag_info_struct(self): + + # get default + res=binascii.a2b_hex('00000000') + ttag_info = ttag_info_struct() + self.assertEqual( ttag_info.asString(), res ) + + # test msv + res=binascii.a2b_hex('80000000') + ttag_info.set_msv() + self.assertEqual( ttag_info.asString(), res ) + + res=binascii.a2b_hex('00000000') + ttag_info.set_msv(False) + self.assertEqual( ttag_info.asString(), res ) + + # test ttv + res=binascii.a2b_hex('40000000') + ttag_info.set_ttv() + self.assertEqual( ttag_info.asString(), res ) + + res=binascii.a2b_hex('00000000') + ttag_info.set_ttv(False) + self.assertEqual( ttag_info.asString(), res ) + + # test sscv + res=binascii.a2b_hex('20000000') + ttag_info.set_sscv() + self.assertEqual( ttag_info.asString(), res ) + + res=binascii.a2b_hex('00000000') + ttag_info.set_sscv(False) + self.assertEqual( ttag_info.asString(), res ) + + # test pi + res=binascii.a2b_hex('10000000') + ttag_info.set_pi() + self.assertEqual( ttag_info.asString(), res ) + + res=binascii.a2b_hex('00000000') + ttag_info.set_pi(False) + self.assertEqual( ttag_info.asString(), res ) + + # test peo + res=binascii.a2b_hex('08000000') + ttag_info.set_peo(True) + self.assertEqual( ttag_info.asString(), res ) + + res=binascii.a2b_hex('00000000') + ttag_info.set_peo(False) + self.assertEqual( ttag_info.asString(), res ) + + + def test_ttag_info_packet(self): + pkt = sdds_pkt.sdds_packet() + + res=binascii.a2b_hex('00000000') + self.assertEqual( pkt.header.ttag.info.asString(), res ) + self.assertEqual( pkt.get_msv(), 0 ) + + res=binascii.a2b_hex('80000000') + pkt.set_msv() + self.assertEqual( pkt.header.ttag.info.asString(), res ) + self.assertEqual( pkt.get_msv(), 1 ) + pkt.set_msv(False) + + res=binascii.a2b_hex('40000000') + pkt.set_ttv() + self.assertEqual( pkt.header.ttag.info.asString(), res ) + self.assertEqual( pkt.get_ttv(), 1 ) + pkt.set_ttv(False) + + res=binascii.a2b_hex('20000000') 
+ pkt.set_sscv() + self.assertEqual( pkt.header.ttag.info.asString(), res ) + self.assertEqual( pkt.get_sscv(), 1 ) + pkt.set_sscv(False) + + + def test_ttag_values(self): + ttag_=0 + ttage_=0 + res=struct.pack("!QI", ttag_, ttage_) + ttag_val = ttag_values() + self.assertEqual( ttag_val.asString(), res ) + + # test big endian format for number + ttag_= 4294967296 + ttage_= 8388608 + res=struct.pack("!QI", ttag_, ttage_) + ttag_val.ttag=ttag_ + ttag_val.ttage=ttage_ + self.assertEqual( ttag_val.asString(), res ) + + def test_ttag_values_packet(self): + pkt = sdds_pkt.sdds_packet() + + ttag_=0 + ttage_=0 + sddstime=Time() + sddstime.set(ttag_,ttage_) + res=struct.pack("!QI", ttag_, ttage_) + pkt.set_time( ttag_, ttage_) + self.assertEqual( pkt.header.ttag.tstamp.asString(), res ) + self.assertEqual( pkt.get_SDDSTime(), sddstime ) + pkt.set_SDDSTime( sddstime ) + self.assertEqual( pkt.get_SDDSTime(), sddstime ) + + ttag_= 4294967296 + ttage_= 8388608 + sddstime=Time() + sddstime.set(ttag_,ttage_) + res=struct.pack("!QI", ttag_, ttage_) + res=struct.pack("!QI", ttag_, ttage_) + pkt.set_time( ttag_, ttage_) + self.assertEqual( pkt.header.ttag.tstamp.asString(), res ) + self.assertEqual( pkt.get_SDDSTime(), sddstime ) + pkt.set_SDDSTime( sddstime ) + self.assertEqual( pkt.get_SDDSTime(), sddstime ) + + def test_ttag_info(self): + msptr_=0 + msdelta_=0 + ttag_=0 + ttage_=0 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val = ttag_info() + self.assertEqual( ttag_val.asString(), res ) + + # test big endian format for number + ttag_= 4294967296 + ttage_= 8388608 + msptr_=256 + msdelta_=256 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val.info.msptr.msptr=msptr_ + ttag_val.info.msptr.msdelta=msdelta_ + ttag_val.tstamp.ttag=ttag_ + ttag_val.tstamp.ttage=ttage_ + self.assertEqual( ttag_val.asString(), res ) + + def test_ttag_info_msv(self): + msptr_=0 + msdelta_=0 + ttag_=0 + ttage_=0 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, 
ttage_) + ttag_val = ttag_info() + self.assertEqual( ttag_val.asString(), res ) + + # test big endian format for number + ttag_= 4294967296 + ttage_= 8388608 + msptr_=2047 + msdelta_=256 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val.info.msptr.msptr=msptr_ + ttag_val.info.msptr.msdelta=msdelta_ + ttag_val.tstamp.ttag=ttag_ + ttag_val.tstamp.ttage=ttage_ + self.assertEqual( ttag_val.asString(), res ) + + ttag_val.info.info.set_msv() + + res=struct.pack("!QI", ttag_, ttage_) + # first bit is msv + tinfo=binascii.a2b_hex('87FF0100') + res=tinfo+res + self.assertEqual( ttag_val.asString(), res ) + + + def test_ttag_info_ttv(self): + msptr_=0 + msdelta_=0 + ttag_=0 + ttage_=0 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val = ttag_info() + self.assertEqual( ttag_val.asString(), res ) + + # test big endian format for number + ttag_= 4294967296 + ttage_= 8388608 + msptr_=2047 + msdelta_=256 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val.info.msptr.msptr=msptr_ + ttag_val.info.msptr.msdelta=msdelta_ + ttag_val.tstamp.ttag=ttag_ + ttag_val.tstamp.ttage=ttage_ + self.assertEqual( ttag_val.asString(), res ) + + ttag_val.info.info.set_ttv() + + res=struct.pack("!QI", ttag_, ttage_) + # first bit is msv + tinfo=binascii.a2b_hex('47FF0100') + res=tinfo+res + self.assertEqual( ttag_val.asString(), res ) + + + def test_ttag_info_sscv(self): + msptr_=0 + msdelta_=0 + ttag_=0 + ttage_=0 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val = ttag_info() + self.assertEqual( ttag_val.asString(), res ) + + # test big endian format for number + ttag_= 4294967296 + ttage_= 8388608 + msptr_=2047 + msdelta_=256 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val.info.msptr.msptr=msptr_ + ttag_val.info.msptr.msdelta=msdelta_ + ttag_val.tstamp.ttag=ttag_ + ttag_val.tstamp.ttage=ttage_ + self.assertEqual( ttag_val.asString(), res ) + + ttag_val.info.info.set_sscv() + + 
res=struct.pack("!QI", ttag_, ttage_) + # first bit is msv + tinfo=binascii.a2b_hex('27FF0100') + res=tinfo+res + self.assertEqual( ttag_val.asString(), res ) + + + def test_ttag_info_all_bits(self): + msptr_=0 + msdelta_=0 + ttag_=0 + ttage_=0 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val = ttag_info() + self.assertEqual( ttag_val.asString(), res ) + + # test big endian format for number + ttag_= 4294967296 + ttage_= 8388608 + msptr_=2047 + msdelta_=256 + res=struct.pack("!HHQI", msptr_, msdelta_, ttag_, ttage_) + ttag_val.info.msptr.msptr=msptr_ + ttag_val.info.msptr.msdelta=msdelta_ + ttag_val.tstamp.ttag=ttag_ + ttag_val.tstamp.ttage=ttage_ + self.assertEqual( ttag_val.asString(), res ) + + ttag_val = ttag_info() + ttag_= 4294967296 + ttage_= 8388608 + msptr_=2047 + msdelta_=256 + ttag_val.tstamp.ttag=ttag_ + ttag_val.tstamp.ttage=ttage_ + ttag_val.info.msptr.msptr=msptr_ + ttag_val.info.msptr.msdelta=msdelta_ + ttag_val.info.info.set_msv() + ttag_val.info.info.set_ttv() + ttag_val.info.info.set_sscv() + res=struct.pack("!QI", ttag_, ttage_) + tinfo=binascii.a2b_hex('E7FF0100') + res=tinfo+res + self.assertEqual( ttag_val.asString(), res ) + + def test_ssc_info_struct(self): + + ssc = sdds_pkt.ssc_info_struct() + self.assertEqual(ssc.get_freq(),0) + self.assertEqual(ssc.get_dfdt(),0) + + ssc.set_freq(5000000) + self.assertEqual(ssc.get_freq(),5000000) + + ssc.set_dfdt(.15) + self.assertAlmostEqual(ssc.get_dfdt(),.15, places=3) + + def test_ssc_info_header(self): + + ssc = sdds_pkt.sdds_header() + self.assertEqual(ssc.get_freq(),0) + self.assertEqual(ssc.get_dfdt(),0) + + ssc.set_freq(5000000) + self.assertEqual(ssc.get_freq(),5000000) + + ssc.set_dfdt(.15) + self.assertAlmostEqual(ssc.get_dfdt(),.15, places=3) + + def test_ssc_info_packet(self): + + ssc = sdds_pkt.sdds_packet() + self.assertEqual(ssc.get_freq(),0) + self.assertEqual(ssc.get_dfdt(),0) + + ssc.set_freq(5000000) + self.assertEqual(ssc.get_freq(),5000000) + + 
ssc.set_dfdt(.15) + self.assertAlmostEqual(ssc.get_dfdt(),.15, places=3) + diff --git a/redhawk/src/testing/tests/test_13_TestSB.py b/redhawk/src/testing/tests/test_13_TestSB.py index 4d4b9e759..737b6e935 100644 --- a/redhawk/src/testing/tests/test_13_TestSB.py +++ b/redhawk/src/testing/tests/test_13_TestSB.py @@ -18,25 +18,34 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # -import unittest -from _unitTestHelpers import scatest -from ossie.cf import CF -from ossie.cf import StandardEvent -from omniORB import CORBA, any import os import Queue -from ossie.utils import sb -from ossie.utils import type_helpers -from ossie import properties as _properties -import threading -globalsdrRoot = os.environ['SDRROOT'] +import unittest import sys import commands import cStringIO import time import copy +import threading +import warnings +import subprocess +import commands import struct -import ossie.utils.bulkio.bulkio_helpers as _bulkio_helpers +import tempfile + +from omniORB import CORBA, any, tcInternal + +from ossie import properties +from ossie.cf import CF, StandardEvent +from ossie.utils import sb, type_helpers +from ossie.utils.bulkio import bulkio_helpers +from ossie.events import ChannelManager, Subscriber, Publisher +from ossie.utils.bulkio import bulkio_data_helpers + +from _unitTestHelpers import scatest, runtestHelpers +import traceback + +globalsdrRoot = os.environ['SDRROOT'] try: from bulkio.bulkioInterfaces import BULKIO except: @@ -60,6 +69,234 @@ def _initSourceAndSink(dataFormat): return source, sink + +def compareKeywordLists( a, b ): + for keyA, keyB in zip(a, b): + if keyA.id != keyB.id: + return False + if keyA.value._t != keyB.value._t: + if isinstance(keyA.value._t,tcInternal.TypeCode_sequence): + if keyA.value._t.content_type() != keyB.value._t.content_type(): + return False + else: + return False + if keyA.value._v != keyB.value._v: + return False + return True + +@scatest.requireJava +class 
InteractiveTestJava(scatest.CorbaTestCase): + def setUp(self): + self.message = "Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain" + + def tearDown(self): + pass + + def test_NoInteractiveJavaService(self): + status, output=commands.getstatusoutput('sdr/dev/services/BasicService_java/java/startJava.sh -i') + self.assertNotEquals(output.find(self.message),-1) + + def test_NoInteractiveJavaDevice(self): + status, output=commands.getstatusoutput('sdr/dev/devices/BasicTestDevice_java/java/startJava.sh -i') + self.assertNotEquals(output.find(self.message),-1) + + def test_NoInteractiveJavaComponent(self): + status, output=commands.getstatusoutput('sdr/dom/components/ECM_JAVA/java/startJava.sh -i') + self.assertNotEquals(output.find(self.message),-1) + +class InteractiveTestPython(scatest.CorbaTestCase): + def setUp(self): + self.message = "Interactive mode (-i) no longer supported. Please use the sandbox to run Components/Devices/Services outside the scope of a Domain" + + def tearDown(self): + pass + + def test_NoInteractivePythonService(self): + status, output=commands.getstatusoutput('sdr/dev/services/S1/python/S1.py -i') + self.assertNotEquals(output.find(self.message),-1) + + def test_NoInteractivePythonDevice(self): + status, output=commands.getstatusoutput('sdr/dev/devices/BasicTestDevice/BasicTestDevice.py -i') + self.assertNotEquals(output.find(self.message),-1) + + def test_NoInteractivePythonComponent(self): + status, output=commands.getstatusoutput('sdr/dom/components/ECM_PY/python/ECM_PY.py -i') + self.assertNotEquals(output.find(self.message),-1) + +class InteractiveTestCpp(scatest.CorbaTestCase): + def setUp(self): + self.message = "Interactive mode (-i) no longer supported. 
Please use the sandbox to run Components/Devices/Services outside the scope of a Domain" + + def tearDown(self): + pass + + def test_NoInteractiveCppService(self): + status, output=commands.getstatusoutput('sdr/dev/services/BasicService_cpp/cpp/BasicService_cpp -i') + self.assertNotEquals(output.find(self.message),-1) + + def test_NoInteractiveCppDevice(self): + status, output=commands.getstatusoutput('sdr/dev/devices/cpp_dev/cpp/cpp_dev -i') + self.assertNotEquals(output.find(self.message),-1) + + def test_NoInteractiveCppComponent(self): + status, output=commands.getstatusoutput('sdr/dom/components/ECM_CPP/cpp/ECM_CPP -i') + self.assertNotEquals(output.find(self.message),-1) + +def wait_on_data(sink, number_timestamps, timeout=1): + begin_time = time.time() + estimate = sink.getDataEstimate() + while estimate.num_timestamps != number_timestamps: + time.sleep(0.1) + estimate = sink.getDataEstimate() + if time.time() - begin_time > timeout: + break + +def wait_for_eos(sink, timeout=10): + begin_time = time.time() + while not sink.eos(): + time.sleep(0.1) + if time.time() - begin_time > timeout: + break + +class SBEventChannelTest(scatest.CorbaTestCase): + def setUp(self): + orb = CORBA.ORB_init() + self.chanMgr = ChannelManager(orb) + # Force creation + self.channel = self.chanMgr.createEventChannel("TestChan", force=True) + sb.setDEBUG(False) + + def tearDown(self): + try: + if self.channel: + self.chanMgr.destroyEventChannel("TestChan") + except: + pass + sb.release() + sb.setDEBUG(False) + os.environ['SDRROOT'] = globalsdrRoot + + def _waitData(self, sub, timeout): + end = time.time() + timeout + while time.time() < end: + data = sub.getData() + if data: + return data._v + return None + + def test_PublishSubscribePull(self): + sub = Subscriber( self.channel ) + pub = Publisher( self.channel ) + payload = 'hello' + data = any.to_any(payload) + pub.push(data) + rec_data = self._waitData(sub, 1.0) + self.assertEquals(rec_data, payload) + pub.terminate() + 
sub.terminate() + + def test_PublishSubscribeCB(self): + queue = Queue.Queue() + sub = Subscriber(self.channel, dataArrivedCB=queue.put) + pub = Publisher(self.channel) + payload = 'hello' + data = any.to_any(payload) + pub.push(data) + rec_data = queue.get(timeout=1.0) + self.assertEquals(rec_data._v, payload) + pub.terminate() + sub.terminate() + +@scatest.requireLog4cxx +class SBStdOutTest(scatest.CorbaTestCase): + def setUp(self): + sb.setDEBUG(False) + self.test_comp = "Sandbox" + # Flagrant violation of sandbox API: if the sandbox singleton exists, + # clean up previous state and dispose of it. + if sb.domainless._sandbox: + sb.domainless._sandbox.shutdown() + sb.domainless._sandbox = None + + def tearDown(self): + sb.release() + sb.setDEBUG(False) + os.environ['SDRROOT'] = globalsdrRoot + try: + os.remove(self.tmpfile) + except: + pass + + def test_debugCmdExec(self): + self.tmpfile=tempfile.mktemp() + fp_tmpfile=open(self.tmpfile, 'w') + comp = sb.launch('sdr/dom/components/C2/C2.spd.xml', execparams={'DEBUG_LEVEL':5}, stdout=fp_tmpfile) + sb.start() + time.sleep(0.4) + fp_tmpfile.close() + new_stdout=open(self.tmpfile,'r') + stdout_contents=new_stdout.read() + self.assertTrue('serviceFunction() example log message - TRACE' in stdout_contents) + self.assertTrue('TRACE C2_1.system.Resource' in stdout_contents) + self.assertTrue('serviceFunction() example log message - DEBUG' in stdout_contents) + new_stdout.close() + + def test_debugDevCmdExec(self): + self.tmpfile=tempfile.mktemp() + fp_tmpfile=open(self.tmpfile, 'w') + comp = sb.launch('sdr/dev/devices/devcpp/devcpp.spd.xml', execparams={'DEBUG_LEVEL':5}, stdout=fp_tmpfile) + sb.start() + time.sleep(0.4) + fp_tmpfile.close() + new_stdout=open(self.tmpfile,'r') + stdout_contents=new_stdout.read() + self.assertTrue('serviceFunction() example TRACE log message' in stdout_contents) + self.assertTrue('TRACE devcpp_1.system.Device' in stdout_contents) + self.assertTrue('serviceFunction() example DEBUG log 
message' in stdout_contents) + new_stdout.close() + + def test_debugCmdProp(self): + self.tmpfile=tempfile.mktemp() + fp_tmpfile=open(self.tmpfile, 'w') + comp = sb.launch('sdr/dom/components/C2/C2.spd.xml', properties={'DEBUG_LEVEL':5}, stdout=fp_tmpfile) + sb.start() + time.sleep(0.4) + fp_tmpfile.close() + new_stdout=open(self.tmpfile,'r') + stdout_contents=new_stdout.read() + self.assertTrue('serviceFunction() example log message - TRACE' in stdout_contents) + self.assertTrue('TRACE C2_1.system.Resource' in stdout_contents) + self.assertTrue('serviceFunction() example log message - DEBUG' in stdout_contents) + new_stdout.close() + + def test_debugCmdExecNoMsg(self): + self.tmpfile=tempfile.mktemp() + fp_tmpfile=open(self.tmpfile, 'w') + comp = sb.launch('sdr/dom/components/C2/C2.spd.xml', execparams={'DEBUG_LEVEL':4}, stdout=fp_tmpfile) + sb.start() + time.sleep(0.4) + fp_tmpfile.close() + new_stdout=open(self.tmpfile,'r') + stdout_contents=new_stdout.read() + self.assertFalse('serviceFunction() example log message - TRACE' in stdout_contents) + self.assertFalse('TRACE C2_1.system.Resource' in stdout_contents) + self.assertTrue('serviceFunction() example log message - DEBUG' in stdout_contents) + new_stdout.close() + + def test_debugCmdPropNoMsg(self): + self.tmpfile=tempfile.mktemp() + fp_tmpfile=open(self.tmpfile, 'w') + comp = sb.launch('sdr/dom/components/C2/C2.spd.xml', properties={'DEBUG_LEVEL':4}, stdout=fp_tmpfile) + sb.start() + time.sleep(0.4) + fp_tmpfile.close() + new_stdout=open(self.tmpfile,'r') + stdout_contents=new_stdout.read() + self.assertFalse('serviceFunction() example log message - TRACE' in stdout_contents) + self.assertFalse('TRACE C2_1.system.Resource' in stdout_contents) + self.assertTrue('serviceFunction() example log message - DEBUG' in stdout_contents) + new_stdout.close() + class SBTestTest(scatest.CorbaTestCase): def setUp(self): sb.setDEBUG(False) @@ -74,7 +311,7 @@ def assertComponentCount(self, count): 
self.assertEquals(len(sb.domainless._getSandbox().getComponents()), count) def tearDown(self): - sb.domainless._getSandbox().shutdown() + sb.release() sb.setDEBUG(False) os.environ['SDRROOT'] = globalsdrRoot @@ -123,7 +360,7 @@ def test_softpkgDepDouble(self): def test_pid(self): a = sb.launch('comp_src') - status,output = commands.getstatusoutput('ps -ww -f | grep comp_src') + status,output = commands.getstatusoutput('ps -ww -f | grep comp_src ') lines = output.split('\n') for line in lines: if 'IOR' in line: @@ -131,6 +368,16 @@ def test_pid(self): _pid = line.split()[1] self.assertEquals(int(_pid), a._pid) + def test_cleanHeap(self): + a = sb.launch('alloc_shm') + ch_pid = a._sandbox._getComponentHost()._pid + self.assertTrue(os.path.isfile('/dev/shm/heap-'+str(ch_pid))) + os.kill(ch_pid, 9) + begin = time.time() + while time.time()-begin < 1 and os.path.isfile('/dev/shm/heap-'+str(ch_pid)): + time.sleep(0.1) + self.assertFalse(os.path.isfile('/dev/shm/heap-'+str(ch_pid))) + def test_doubleNamedConnection(self): a = sb.launch('comp_src') b = sb.launch('comp_snk') @@ -171,7 +418,7 @@ def test_componentInit(self): # Make sure only one instance name and refid can be used comp = sb.launch(self.test_comp, "comp") - comp.api() + comp.api(destfile=sys.stdout) refid = comp._refid self.assertRaises(ValueError, sb.launch, self.test_comp, "comp") self.assertRaises(ValueError, sb.launch, self.test_comp, "new_comp", refid) @@ -210,7 +457,7 @@ def test_LogServiceFunctionException(self): except: pass - self.assertTrue('ERROR:svc_fn_error' in log_contents) + self.assertTrue('test exception in process()' in log_contents) def test_propertyInitialization(self): """ @@ -223,16 +470,27 @@ def test_propertyInitialization(self): self.assertFalse('cmdline' in comp.initialize_props) comp.releaseObject() - # Test with (correct) overrides + # Test with overrides comp = sb.launch('sdr/dom/components/property_init/property_init.spd.xml', - execparams={'cmdline':'override'}, 
configure={'initial':'override'}) + properties={'cmdline':'override', 'initial':'override'}) self.assertFalse('initial' in comp.cmdline_args) self.assertFalse('cmdline' in comp.initialize_props) self.assertEquals('override', comp.cmdline) self.assertEquals('override', comp.initial) comp.releaseObject() - # Test with misplaced command line property + # Test with overrides in deprecated 'execparams' and 'configure' arguments + with warnings.catch_warnings(): + warnings.simplefilter('ignore', DeprecationWarning) + comp = sb.launch('sdr/dom/components/property_init/property_init.spd.xml', + execparams={'cmdline':'override'}, configure={'initial':'override'}) + self.assertFalse('initial' in comp.cmdline_args) + self.assertFalse('cmdline' in comp.initialize_props) + self.assertEquals('override', comp.cmdline) + self.assertEquals('override', comp.initial) + comp.releaseObject() + + # Test with misplaced command line property in deprecated 'configure' argument comp = sb.launch('sdr/dom/components/property_init/property_init.spd.xml', configure={'cmdline':'override'}) self.assertFalse('initial' in comp.cmdline_args) @@ -240,18 +498,57 @@ def test_propertyInitialization(self): self.assertEquals('override', comp.cmdline) comp.releaseObject() - # A non-command line property in the wrong override should throw an exception - self.assertRaises(ValueError, sb.launch, 'sdr/dom/components/property_init/property_init.spd.xml', - execparams={'initial':'override'}) + def test_writeOnly(self): + dev = sb.launch('writeonly_cpp') + try: + print dev.foo + self.assertTrue(False) + except Exception, e: + self.assertEquals(e.args[0], 'Could not perform query, "foo" is a writeonly property') + try: + print dev.foo_seq + self.assertTrue(False) + except Exception, e: + self.assertEquals(e.args[0], 'Could not perform query, "foo_seq" is a writeonly property') + try: + print dev.foo_struct + self.assertTrue(False) + except Exception, e: + self.assertEquals(e.args[0], 'Could not perform query, 
"foo_struct" is a writeonly property') + try: + print dev.foo_struct_seq + self.assertTrue(False) + except Exception, e: + self.assertEquals(e.args[0], 'Could not perform query, "foo_struct_seq" is a writeonly property') def test_zeroLengthSeqStruct(self): """ Tests for the correct initialization of 'property' kind properties based on whether command line is set, and overrides via launch(). """ - # First, test with defaults - comp = sb.launch('sdr/dom/components/zero_length/zero_length.spd.xml') + comp = sb.launch('sdr/dom/components/zero_length/zero_length.spd.xml', impl='cpp') + self.assertNotEqual(comp, None) + prop = comp.query([CF.DataType('mystruct', any.to_any(None))]) + found = False + for p in prop: + if p.id == 'mystruct': + val = p.value.value() + for v in val: + if v.id == 'mystruct::mysimpleseq': + found = len(v.value.value()) == 0 + self.assertTrue(found) + + comp = sb.launch('sdr/dom/components/zero_length/zero_length.spd.xml', impl='python') self.assertNotEqual(comp, None) + prop = comp.query([CF.DataType('mystruct', any.to_any(None))]) + found = False + for p in prop: + if p.id == 'mystruct': + val = p.value.value() + for v in val: + if v.id == 'mystruct::mysimpleseq': + found = len(v.value.value()) == 0 + self.assertTrue(found) def test_nestedSoftPkgDeps(self): cwd = os.getcwd() @@ -296,7 +593,7 @@ def initValues(self, comp): def test_simpleComp(self): comp = sb.launch(self.test_comp) - comp.api() + comp.api(destfile=sys.stdout) # Check the init values self.initValues(comp) @@ -457,7 +754,7 @@ def test_simpleComp(self): def test_illegalPropertyNames(self): comp = sb.launch(self.test_comp) - comp.api() + comp.api(destfile=sys.stdout) self.initValues(comp) @@ -502,6 +799,10 @@ def test_loadSADFile(self): comp_ac = sb.getComponent('ticket_462_ac_1') self.assertNotEquals(comp_ac, None) comp = sb.getComponent('ticket_462_1') + comp_id = comp._get_identifier() + self.assertEquals(len(comp_id.split(':')), 2) + self.assertEquals(comp_id.split(':')[0], 
'ticket_462_1') + self.assertEquals(comp_id.split(':')[1], 'ticket_462_w') self.assertNotEquals(comp, None) self.assertEquals(comp_ac.my_simple, "foo") self.assertEquals(comp_ac.my_seq, ["initial value"]) @@ -509,6 +810,58 @@ def test_loadSADFile(self): self.assertEquals(comp.over_simple, "override") self.assertEquals(comp.over_struct_seq, [{'a_word': 'something', 'a_number': 1}]) + def test_connectPortSADFile(self): + retval = sb.loadSADFile('sdr/dom/waveforms/PortConnectProvidesPort/PortConnectProvidesPort.sad.xml') + sad=sb.generateSADXML('hello') + + uses_string = '\n \n @__PORTNAME__@\n \n \n' + uses_string = uses_string.replace('@__PORTNAME__@', 'resource_out') + uses_string = uses_string.replace('@__COMPONENTINSTANCE__@', 'DCE:5faf296f-3193-49cc-8751-f8a64b315fdf') + + provides_string = '\n \n @__PORTNAME__@\n \n \n' + provides_string = provides_string.replace('@__PORTNAME__@', 'resource_in') + provides_string = provides_string.replace('@__COMPONENTINSTANCE__@', 'DCE:12ab27fb-01bd-4189-8d1d-0043b87c4f74') + + self.assertNotEqual(sad.find(uses_string), -1) + self.assertNotEqual(sad.find(provides_string), -1) + self.assertEquals(sad.find('DCE:DCE'), -1) + + def test_connectSupportedInterfaceSADFile(self): + retval = sb.loadSADFile('sdr/dom/waveforms/PortConnectComponentSupportedInterface/PortConnectComponentSupportedInterface.sad.xml') + sad=sb.generateSADXML('hello') + + uses_string = '\n \n @__PORTNAME__@\n \n \n' + uses_string = uses_string.replace('@__PORTNAME__@', 'resource_out') + uses_string = uses_string.replace('@__COMPONENTINSTANCE__@', 'DCE:5faf296f-3193-49cc-8751-f8a64b315fdf') + + provides_string = '\n \n @__PORTINTERFACE__@\n \n \n' + provides_string = provides_string.replace('@__PORTINTERFACE__@', 'IDL:CF/Resource:1.0') + provides_string = provides_string.replace('@__COMPONENTINSTANCE__@', 'DCE:12ab27fb-01bd-4189-8d1d-0043b87c4f74') + + self.assertNotEqual(sad.find(uses_string), -1) + self.assertNotEqual(sad.find(provides_string), -1) + 
self.assertEquals(sad.find('DCE:DCE'), -1) + + def test_connectSandbox(self): + src=sb.launch('PortTest') + snk=sb.launch('PortTest') + src.connect(snk, usesPortName='resource_out') + sad=sb.generateSADXML('hello') + + uses_string = '\n \n @__PORTNAME__@\n \n \n' + uses_string = uses_string.replace('@__PORTNAME__@', 'resource_out') + uses_string = uses_string.replace('@__COMPONENTINSTANCE__@', src._id) + + provides_string = '\n \n @__PORTINTERFACE__@\n \n \n' + provides_string = provides_string.replace('@__PORTINTERFACE__@', 'IDL:CF/Resource:1.0') + provides_string = provides_string.replace('@__COMPONENTINSTANCE__@', snk._id) + + non_colon_connectionid = ') failure") @@ -1420,6 +1773,32 @@ def test_MessageSource(self): self.assertEqual(len(comp.received_messages), 1) self.assertEqual(comp.received_messages[0], "test_message,0.0,'first'") + def test_BasicSharedComponent(self): + """ + Test that two shared library components launched from the sandbox have + the same process ID. + """ + comp1 = sb.launch('BasicShared') + comp2 = sb.launch('BasicShared') + self.assertEqual(int(comp1.pid), int(comp2.pid)) + + def test_NotSharedComponent(self): + """ + Test that forcing a shared library component to run in a non-shared + context reports a different process ID. + """ + comp1 = sb.launch('BasicShared') + comp2 = sb.launch('BasicShared', shared=False) + self.assertNotEqual(int(comp1.pid), int(comp2.pid)) + + +class Test_DataSDDSSource(unittest.TestCase): + + def test_CreateSDDSSource(self): + source = sb.DataSourceSDDS() + self.assertNotEquals(source, None) + + class BulkioTest(unittest.TestCase): XMLDATA = """ @@ -1511,7 +1890,7 @@ def isComplex(x) : return type(x) == type(complex()) if len(filter(isComplex, originalData)): # in this case, the DataSink will return bulkio complex data # convert the originalData to the bulkio data format. 
- originalData = _bulkio_helpers.pythonComplexListToBulkioComplex(originalData) + originalData = bulkio_helpers.pythonComplexListToBulkioComplex(originalData) # make sure the mode flag was automatically set self.assertEquals(sink.sri().mode, True) @@ -1570,8 +1949,6 @@ def test_DataSourceAndSink(self): dataFormat = format) def test_DataSinkBadTimeStamp(self): - if 'bulkio.bulkioInterfaces.BULKIO' not in sys.modules: - return datasink = sb.DataSink() port=datasink.getPort('shortIn') sb.start() @@ -1702,6 +2079,7 @@ def test_DataSourceChunkedTimeStamp(self): break (_data, _tstamps) = sink.getData(tstamps=True) xdelta = sink.sri().xdelta + print _tstamps self.assertEquals(_tstamps[0][1].twsec, _startTime) self.assertEquals(_tstamps[1][1].twsec, _tstamps[1][0]*sink.sri().xdelta+_startTime) self.assertEquals(_tstamps[2][1].twsec, _tstamps[2][0]*sink.sri().xdelta+_startTime) @@ -1743,7 +2121,7 @@ def test_DataSourceTimeStamp(self): sink = sb.DataSink() source.connect(sink, usesPortName='floatOut') sb.start() - + # test default sample rate _srcData = [1,2,3,4] source.push(_srcData) @@ -1831,6 +2209,312 @@ def test_DataSourceTimeStamp(self): _round_time = int(round(_orig_time*10))/10.0 self.assertEquals(_round_time, toffset+len(_srcData)/_sampleRate) + + def test_DataSourceTimeStampParam(self): + """ + Verify that the time stamp param is honored + """ + _timeout = 1 + _startTime = 10 + _sampleRate = 1.0 + source = sb.DataSource(startTime=_startTime) + sink = sb.DataSink() + source.connect(sink, usesPortName='floatOut') + sb.start() + + # test default sample rate + _srcData = [1,2,3,4] + source.push(_srcData) + source.push(_srcData) + estimate = sink.getDataEstimate() + begin_time = time.time() + while estimate.num_timestamps != 2: + time.sleep(0.1) + estimate = sink.getDataEstimate() + if time.time() - begin_time > _timeout: + break + (_data, _tstamps) = sink.getData(tstamps=True) + self.assertEquals(len(_data), len(_srcData)*2) + self.assertEquals(sink.sri().xdelta, 1) + 
self.assertEquals(_tstamps[0][1].twsec, _startTime) + self.assertEquals(_tstamps[1][1].twsec, _startTime+len(_srcData)) + + _ts = sb.createTimeStamp() + begin_time = _ts.twsec+_ts.tfsec + _toffset =begin_time + source.push(_srcData, ts=_ts) + source.push(_srcData) + estimate = sink.getDataEstimate() + while estimate.num_timestamps != 2: + time.sleep(0.1) + estimate = sink.getDataEstimate() + if time.time() - begin_time > _timeout: + break + (_data, _tstamps) = sink.getData(tstamps=True) + self.assertEquals(len(_data), len(_srcData)*2) + self.assertEquals(sink.sri().xdelta, 1/_sampleRate) + _pkt_time = _tstamps[0][1].twsec+_tstamps[0][1].tfsec + _rnd_pkt_time = int(round(_pkt_time*10))/10.0 + _rnd_toffset = int(round(_toffset*10))/10.0 + self.assertEquals(_rnd_pkt_time,_rnd_toffset ) + _pkt_time = _tstamps[1][1].twsec+_tstamps[1][1].tfsec + _rnd_pkt_time = int(round(_pkt_time*10))/10.0 + _rnd_toffset = int(round( (_toffset+len(_srcData)/_sampleRate) *10))/10.0 + self.assertEquals(_rnd_pkt_time,_rnd_toffset ) + + + # test modified sample rate + _sampleRate = 10.0 + _ts = sb.createTimeStamp() + begin_time = _ts.twsec+_ts.tfsec + _toffset =begin_time + source.push(_srcData,sampleRate=_sampleRate, ts=_ts) + source.push(_srcData) + estimate = sink.getDataEstimate() + while estimate.num_timestamps != 2: + time.sleep(0.1) + estimate = sink.getDataEstimate() + if time.time() - begin_time > _timeout: + break + (_data, _tstamps) = sink.getData(tstamps=True) + self.assertEquals(len(_data), len(_srcData)*2) + self.assertEquals(sink.sri().xdelta, 1/_sampleRate) + _pkt_time = _tstamps[0][1].twsec+_tstamps[0][1].tfsec + _rnd_pkt_time = int(round(_pkt_time*10))/10.0 + _rnd_toffset = int(round(_toffset*10))/10.0 + self.assertEquals(_rnd_pkt_time,_rnd_toffset ) + _pkt_time = _tstamps[1][1].twsec+_tstamps[1][1].tfsec + _rnd_pkt_time = int(round(_pkt_time*10))/10.0 + _rnd_toffset = int(round( (_toffset+len(_srcData)/_sampleRate) *10))/10.0 + self.assertEquals(_rnd_pkt_time,_rnd_toffset 
) + + + # test modified sample rate + _sampleRate=5.0 + _sri=source.sri() + _sri.xdelta = 1.0/_sampleRate + _ts = sb.createTimeStamp() + begin_time = _ts.twsec+_ts.tfsec + _toffset =begin_time + source.push(_srcData,sri=_sri, ts=_ts) + source.push(_srcData) + estimate = sink.getDataEstimate() + while estimate.num_timestamps != 2: + time.sleep(0.1) + estimate = sink.getDataEstimate() + if time.time() - begin_time > _timeout: + break + (_data, _tstamps) = sink.getData(tstamps=True) + self.assertEquals(len(_data), len(_srcData)*2) + self.assertEquals(sink.sri().xdelta, 1/_sampleRate) + _pkt_time = _tstamps[0][1].twsec+_tstamps[0][1].tfsec + _rnd_pkt_time = int(round(_pkt_time*10))/10.0 + _rnd_toffset = int(round(_toffset*10))/10.0 + self.assertEquals(_rnd_pkt_time,_rnd_toffset ) + _pkt_time = _tstamps[1][1].twsec+_tstamps[1][1].tfsec + _rnd_pkt_time = int(round(_pkt_time*10))/10.0 + _rnd_toffset = int(round( (_toffset+len(_srcData)/_sampleRate) *10))/10.0 + self.assertEquals(_rnd_pkt_time,_rnd_toffset ) + + def _fileSourceThrottle(self, _file, rate): + fp=open(_file, 'r') + contents = fp.read() + fp.close() + source = sb.FileSource(_file, dataFormat='octet', sampleRate=rate, throttle=True) + sink = sb.DataSink() + source.connect(sink) + time_estimate = len(contents)/float(rate) + sb.start() + begin_time = time.time() + wait_for_eos(sink) + time_diff = time.time()-begin_time + self.assertTrue(time_difftime_estimate*0.9) + sb.stop() + + def test_FileSourceThrottle(self): + infile = os.path.join(sb.getSDRROOT(), 'dom/mgr/DomainManager.spd.xml') + self._fileSourceThrottle(infile, 1000) + self._fileSourceThrottle(infile, 1500) + + def test_DataSourceThrottle(self): + src = sb.DataSource(dataFormat='float', throttle=True) + snk = sb.DataSink() + src.connect(snk) + sb.start() + _sampleRate = 500 + _dataLength = 100 + time_estimate = (3.0*_dataLength)/(_sampleRate) + begin_time = time.time() + src.push([float(x) for x in range(100)],sampleRate=_sampleRate) + 
src.push([float(x) for x in range(100)],sampleRate=_sampleRate) + src.push([float(x) for x in range(100)],sampleRate=_sampleRate) + wait_on_data(snk, 3, 5) + end_time = time.time() + time_diff = end_time-begin_time + self.assertTrue(time_difftime_estimate*0.9) + + data=snk.getData() + self.assertEquals(len(data), 3.0*_dataLength) + + _sampleRate = 300 + _dataLength = 100 + time_estimate = (3.0*_dataLength)/(_sampleRate) + begin_time = time.time() + src.push([float(x) for x in range(100)],sampleRate=_sampleRate) + src.push([float(x) for x in range(100)],sampleRate=_sampleRate) + src.push([float(x) for x in range(100)],sampleRate=_sampleRate) + wait_on_data(snk, 3, 5) + end_time = time.time() + time_diff = end_time-begin_time + self.assertTrue(time_difftime_estimate*0.9) + + data=snk.getData() + self.assertEquals(len(data), 3.0*_dataLength) + + class customSink(bulkio_data_helpers.ArraySink): + def __init__(self, porttype): + bulkio_data_helpers.ArraySink.__init__(self, porttype) + + def pushSRI(self, H): + _H = H + _H.xdelta = H.xdelta * 2 + self.sri = _H + + def test_CustomDataSink(self): + src = sb.DataSource(dataFormat='float') + snk = sb.DataSink(sinkClass=self.customSink) + src.connect(snk) + sb.start() + src.push([1,2,3,4,5],sampleRate=100) + src.push([1,2,3,4,5],sampleRate=1000) + src.push([1,2,3,4,5],sampleRate=10000) + wait_on_data(snk, 3) + data=snk.getData(tstamps=True) + self.assertEquals(snk._sink.sri.xdelta, 0.0002) + + def test_DataSourceSRI(self): + _timeout = 1 + _startTime = 10 + source = sb.DataSource(startTime=_startTime) + sink = sb.DataSink() + source.connect(sink, usesPortName='floatOut') + sb.start() + + # get an sri + _sri = source.sri() + + sid = 'test-sri-1' + _sri.streamID=sid + _sri.xdelta = 0.1234 + + # push samples down stream, with custom sri + _srcData = [1,2,3,4] + source.push(_srcData, sri=_sri ) + wait_on_data(sink, 1) + data=sink.getData() + rsri=sink.sri() + self.assertEquals(rsri.streamID, sid ) + 
self.assertAlmostEquals(rsri.xdelta, 0.1234) + + # add keywords as a param + kws=[] + kws.append(sb.SRIKeyword('kw1',1000,'long')) + kws.append(sb.SRIKeyword('kw2',12456.0,'float')) + kws.append(sb.SRIKeyword('kw3',16,'short')) + kws.append(sb.SRIKeyword('kw4', 200,'octet')) + kws.append(sb.SRIKeyword('kw5','this is a test','string')) + kws.append(sb.SRIKeyword('kw6',[1,2],'[short]')) + + expectedType = properties.getTypeCode('short') + expectedTypeCode = tcInternal.createTypeCode((tcInternal.tv_sequence, expectedType._d, 0)) + kw6 = CORBA.Any(expectedTypeCode, [1,2]) + + matchkws=[ CF.DataType(id='kw1', value=CORBA.Any(CORBA.TC_long, 1000)), + CF.DataType(id='kw2', value=CORBA.Any(CORBA.TC_float, 12456.0)), + CF.DataType(id='kw3', value=CORBA.Any(CORBA.TC_short, 16)), + CF.DataType(id='kw4', value=CORBA.Any(CORBA.TC_octet, 200)), + CF.DataType(id='kw5', value=CORBA.Any(CORBA.TC_string, 'this is a test')), + CF.DataType(id='kw6', value=kw6) + ] + _srcData = [1,2,3,4] + source.push(_srcData, SRIKeywords=kws ) + begin_time = time.time() + wait_on_data(sink, 1) + data=sink.getData() + rsri=sink.sri() + self.assertEquals(rsri.streamID, sid ) + self.assertAlmostEquals(rsri.xdelta, 0.1234) + self.assertEqual(True, compareKeywordLists( rsri.keywords, matchkws) ) + + # Repeat, making sure that a second push with keywords does not fail + source.push(_srcData, SRIKeywords=kws) + wait_on_data(sink, 1) + data=sink.getData() + self.assertTrue(data) + + # add new keywords to sri + matchkws=[ CF.DataType(id='kw1-1', value=CORBA.Any(CORBA.TC_long, 1000)), + CF.DataType(id='kw2-1', value=CORBA.Any(CORBA.TC_float, 12456.0)), + CF.DataType(id='kw3-1', value=CORBA.Any(CORBA.TC_short, 16)), + CF.DataType(id='kw4-1', value=CORBA.Any(CORBA.TC_octet, 200)), + CF.DataType(id='kw5-1', value=CORBA.Any(CORBA.TC_string, 'this is a test')) + ] + _sri.keywords=copy.copy(matchkws) + _srcData = [1,2,3,4] + source.push(_srcData, sri=_sri ) + wait_on_data(sink, 1) + data=sink.getData() + 
rsri=sink.sri() + self.assertEquals(rsri.streamID, sid ) + self.assertAlmostEquals(rsri.xdelta, 0.1234) + self.assertEqual(True, compareKeywordLists( rsri.keywords, matchkws) ) + + # try pushing using the same sri object with changing attributes + _sri = sb.createSRI() + _sri.streamID=sid + _srcData = [1,2,3,4] + source.push(_srcData, sri=_sri ) + wait_on_data(sink, 1) + data=sink.getData() + rsri=sink.sri() + self.assertEquals(rsri.streamID, sid ) + + _sri.streamID='anewsri' + _srcData = [1,2,3,4] + source.push(_srcData, sri=_sri ) + wait_on_data(sink, 1) + data=sink.getData() + rsri=sink.sri() + self.assertEquals(rsri.streamID, 'anewsri' ) + + _sri.mode=1 + _srcData = [1,2,3,4] + source.push(_srcData, sri=_sri ) + wait_on_data(sink, 1) + data=sink.getData() + rsri=sink.sri() + self.assertEquals(rsri.mode, 1 ) + + _sri.mode=0 + _srcData = [1,2,3,4] + source.push(_srcData, sri=_sri ) + wait_on_data(sink, 1) + data=sink.getData() + rsri=sink.sri() + self.assertEquals(rsri.mode, 0 ) + + _sri.hversion=100 + _srcData = [1,2,3,4] + source.push(_srcData, sri=_sri ) + wait_on_data(sink, 1) + data=sink.getData() + rsri=sink.sri() + self.assertEquals(rsri.hversion, 100 ) + + def test_DataSinkSubsize(self): src=sb.DataSource(dataFormat='short',subsize=5) snk=sb.DataSink() @@ -1860,6 +2544,23 @@ def test_SubsizeComplex(self): self.assertEqual(len(recData),_frames/2) self.assertEqual(len(recData[0]),_subsize*2) + def test_SubsizeComplexNoEOS(self): + # Test interleaved-to-complex + _subsize = 10 + _frames = 4 + inData = range(_subsize * _frames) + src=sb.DataSource(dataFormat='short',subsize=_subsize) + snk=sb.DataSink() + sb.start() + src.connect(snk) + src.push(inData,complexData=True) + + wait_on_data(snk, 1) + recData = snk.getData() + + self.assertEqual(len(recData),_frames/2) + self.assertEqual(len(recData[0]),_subsize*2) + def test_DataSinkChar(self): src=sb.DataSource(dataFormat='char') snk=sb.DataSink() @@ -1999,15 +2700,9 @@ def test_DataSinkOctetSignedData(self): 
class MessagePortTest(scatest.CorbaTestCase): def setUp(self): sb.setDEBUG(True) - self.test_comp = "Sandbox" - # Flagrant violation of sandbox API: if the sandbox singleton exists, - # clean up previous state and dispose of it. - if sb.domainless._sandbox: - sb.domainless._sandbox.shutdown() - sb.domainless._sandbox = None def tearDown(self): - sb.domainless._getSandbox().shutdown() + sb.release() sb.setDEBUG(False) os.environ['SDRROOT'] = globalsdrRoot @@ -2020,7 +2715,7 @@ def __init__(self, cond): self.count=0 def msgCallback(self, id, msg): - self.msg = _properties.prop_to_dict(msg) + self.msg = properties.prop_to_dict(msg) self.count = self.count + 1 self.cond.acquire() self.cond.notify() @@ -2038,11 +2733,10 @@ def wait_for_msg(cond, timeout=2.0): msrc = sb.MessageSource() cond = threading.Condition() mcb = MCB(cond) - msink = sb.MessageSink( messageCallback=mcb.msgCallback ) + msink = sb.MessageSink(messageCallback=mcb.msgCallback, storeMessages = True) msrc.connect(msink) # Simple messages come across properties list which translates into the following # {'sb_struct': {'sb': 'testing 1'}} - msrc.sendMessage("testing 1") wait_for_msg(cond) @@ -2052,6 +2746,10 @@ def wait_for_msg(cond, timeout=2.0): wait_for_msg(cond) msg = mcb.msg['sb_struct']['sb'] self.assertEquals( msg, "testing 2") + rcv_msg = msink.getMessages() + self.assertEquals(len(rcv_msg), 1) + self.assertEquals(rcv_msg[0], mcb.msg) + self.assertEquals(len(msink.getMessages()), 0) sb.stop() # terminate this sink object @@ -2077,6 +2775,20 @@ def wait_for_msg(cond, timeout=2.0): wait_for_msg(cond) msg = mcb.msg['sb_struct']['sb'] self.assertEquals( msg, "testing 5") + self.assertEquals(len(msink.getMessages()), 0) + sb.stop() + + # terminate this sink object + msink.releaseObject() + + # create new sink and connect to source + msink = sb.MessageSink(messageCallback=None, storeMessages = True) + msrc.connect(msink) + sb.start() + msrc.sendMessage("testing 4") + msrc.sendMessage("testing 5") + 
time.sleep(2) + self.assertEquals(len(msink.getMessages()), 2) sb.stop() # reset receiver and cycle sandbox state @@ -2097,7 +2809,7 @@ def __init__(self, cond): def msgCallback(self, id, msg): - self.msg = _properties.prop_to_dict(msg) + self.msg = properties.prop_to_dict(msg) self.count = self.count + 1 self.cond.acquire() self.cond.notify() diff --git a/redhawk/src/testing/tests/test_14_bluefile.py b/redhawk/src/testing/tests/test_14_bluefile.py index d9f3d8fdc..743c2a6ca 100644 --- a/redhawk/src/testing/tests/test_14_bluefile.py +++ b/redhawk/src/testing/tests/test_14_bluefile.py @@ -389,8 +389,35 @@ def test_FileSinkTimecode(self): time_out = bluefile_helpers.j1950_to_unix(hdr['timecode']) self.assertAlmostEqual(time_in, time_out,5) + def _test_FileSinkOctet(self, format): + filename = self._tempfileName('sink_octet_' + format.lower()) -class BlueFileHelpersKW(BlueFileHelpers): + source = sb.DataSource(dataFormat='octet') + sink = sb.FileSink(filename, midasFile=True) + source.connect(sink) + sb.start() + + # Push a 256-element ramp (the maximum range of octet) + indata = range(256) + isComplex = bool(format[0] == 'C') + source.push(indata, complexData=isComplex, EOS=True) + sink.waitForEOS() + + # Check the BLUE file format matches + hdr, data = bluefile.read(filename) + self.assertEqual(format, hdr['format']) + + # Have to "cast" the data to unsigned 8-bit, since 'B' is a signed type + # (as are all BLUE formats), and flattening the array re-interleaves + # complex data + outdata = data.view(numpy.uint8).reshape(-1) + self.assertTrue(numpy.array_equal(indata, outdata), msg="Format '%s' %s != %s" % (format, indata, outdata)) + + def test_FileSinkOctet(self): + self._test_FileSinkOctet('SB') + + def test_FileSinkOctetComplex(self): + self._test_FileSinkOctet('CB') def test_keywords_retrieval_int(self): filename='bf-kw-test.out' diff --git a/redhawk/src/testing/tests/test_15_LoggingConfig.py b/redhawk/src/testing/tests/test_15_LoggingConfig.py index 
f1b269d8c..7958c0bf4 100644 --- a/redhawk/src/testing/tests/test_15_LoggingConfig.py +++ b/redhawk/src/testing/tests/test_15_LoggingConfig.py @@ -18,14 +18,175 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # +import time +import os import unittest from _unitTestHelpers import scatest from ossie.cf import CF from omniORB import URI import CosNaming import CosEventChannelAdmin -from ossie.utils import sb -import os +from ossie.utils import sb, redhawk +import os, time +import contextlib +from ossie.utils import redhawk + +@contextlib.contextmanager +def stdout_redirect(where): + sys.stdout = where + try: + yield where + finally: + sys.stdout = sys.__stdout__ + +@scatest.requireLog4cxx +class CppDomainEventLoggingConfig(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml") + self._rhDom = redhawk.attach(scatest.getTestDomainName()) + self.app = self._rhDom.createApplication("/waveforms/TestCppProps/TestCppProps.sad.xml") + + def tearDown(self): + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + redhawk.core._cleanUpLaunchedApps() + scatest.CorbaTestCase.tearDown(self) + # need to let event service clean up event channels...... 
+ # cycle period is 10 milliseconds + time.sleep(0.1) + + def test_cpp_event_appender_create_channel(self): + cfg = "log4j.rootLogger=ERROR,STDOUT,pse\n" + \ + "# Direct log messages to STDOUT \n" + \ + "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ + "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.STDOUT.layout.ConversionPattern=@@@COMPONENT.NAME@@@\n" + \ + "# Direct log messages to event channel\n" + \ + "log4j.appender.pse=org.ossie.logging.RH_LogEventAppender\n" + \ + "log4j.appender.pse.name_context="+scatest.getTestDomainName()+"\n" + \ + "log4j.appender.pse.event_channel=TEST_EVT_CH1\n" + \ + "log4j.appender.pse.producer_id=PRODUCER1\n" + \ + "log4j.appender.pse.producer_name=THE BIG CHEESE\n" + \ + "log4j.appender.pse.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.pse.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n\n" + + comp = self.app.comps[0] + comp.ref.setLogConfig(cfg) + comp.ref.start() + comp.ref.stop() + clist,citer = self._rhDom._get_eventChannelMgr().listChannels(5) + reg_count = -1 + for _c in clist: + if _c.channel_name == 'TEST_EVT_CH1': + reg_count = _c.reg_count + self.assertEquals(reg_count, 2) # both the instance and static root loggers + self.app.releaseObject() + clist,citer = self._rhDom._get_eventChannelMgr().listChannels(5) + reg_count = -1 + for _c in clist: + if _c.channel_name == 'TEST_EVT_CH1': + reg_count = _c.reg_count + self.assertEquals(reg_count, 0) + +class PyDomainEventLoggingConfig(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml") + self._rhDom = redhawk.attach(scatest.getTestDomainName()) + self.app = self._rhDom.createApplication("/waveforms/TestPythonProps/TestPythonProps.sad.xml") + + def tearDown(self): + # Do all application shutdown before calling the base class 
tearDown, + # or failures will probably occur. + redhawk.core._cleanUpLaunchedApps() + scatest.CorbaTestCase.tearDown(self) + # need to let event service clean up event channels...... + # cycle period is 10 milliseconds + time.sleep(0.1) + + def test_py_event_appender_create_channel(self): + cfg = "log4j.rootLogger=ERROR,STDOUT,pse\n" + \ + "# Direct log messages to STDOUT \n" + \ + "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ + "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.STDOUT.layout.ConversionPattern=@@@COMPONENT.NAME@@@\n" + \ + "# Direct log messages to event channel\n" + \ + "log4j.appender.pse=org.ossie.logging.RH_LogEventAppender\n" + \ + "log4j.appender.pse.name_context="+scatest.getTestDomainName()+"\n" + \ + "log4j.appender.pse.event_channel=TEST_EVT_CH1\n" + \ + "log4j.appender.pse.producer_id=PRODUCER1\n" + \ + "log4j.appender.pse.producer_name=THE BIG CHEESE\n" + \ + "log4j.appender.pse.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.pse.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n\n" + + comp = self.app.comps[0] + comp.ref.setLogConfig(cfg) + comp.ref.start() + comp.ref.stop() + clist,citer = self._rhDom._get_eventChannelMgr().listChannels(5) + reg_count = -1 + for _c in clist: + if _c.channel_name == 'TEST_EVT_CH1': + reg_count = _c.reg_count + self.assertEquals(reg_count, 1) + self.app.releaseObject() + clist,citer = self._rhDom._get_eventChannelMgr().listChannels(5) + reg_count = -1 + for _c in clist: + if _c.channel_name == 'TEST_EVT_CH1': + reg_count = _c.reg_count + self.assertEquals(reg_count, 0) + +@scatest.requireJava +class JavaDomainEventLoggingConfig(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml") + self._rhDom = redhawk.attach(scatest.getTestDomainName()) + self.app = 
self._rhDom.createApplication("/waveforms/TestJavaProps/TestJavaProps.sad.xml") + + def tearDown(self): + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + redhawk.core._cleanUpLaunchedApps() + scatest.CorbaTestCase.tearDown(self) + # need to let event service clean up event channels...... + # cycle period is 10 milliseconds + time.sleep(0.1) + + def test_java_event_appender_create_channel(self): + cfg = "log4j.rootLogger=ERROR,STDOUT,pse\n" + \ + "# Direct log messages to STDOUT \n" + \ + "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ + "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.STDOUT.layout.ConversionPattern=@@@COMPONENT.NAME@@@\n" + \ + "# Direct log messages to event channel\n" + \ + "log4j.appender.pse=org.ossie.logging.RH_LogEventAppender\n" + \ + "log4j.appender.pse.name_context="+scatest.getTestDomainName()+"\n" + \ + "log4j.appender.pse.event_channel=TEST_EVT_CH1\n" + \ + "log4j.appender.pse.producer_id=PRODUCER1\n" + \ + "log4j.appender.pse.producer_name=THE BIG CHEESE\n" + \ + "log4j.appender.pse.layout=org.apache.log4j.PatternLayout\n" + \ + "log4j.appender.pse.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n\n" + + comp = self.app.comps[0] + comp.ref.setLogConfig(cfg) + comp.ref.start() + comp.ref.stop() + clist,citer = self._rhDom._get_eventChannelMgr().listChannels(5) + reg_count = -1 + for _c in clist: + if _c.channel_name == 'TEST_EVT_CH1': + reg_count = _c.reg_count + self.assertEquals(reg_count, 1) + self.app.releaseObject() + clist,citer = self._rhDom._get_eventChannelMgr().listChannels(5) + reg_count = -1 + for _c in clist: + if _c.channel_name == 'TEST_EVT_CH1': + reg_count = _c.reg_count + self.assertEquals(reg_count, 0) @scatest.requireLog4cxx class CppLoggingConfig(scatest.CorbaTestCase): @@ -33,9 +194,31 @@ def setUp(self): self.cname = "TestLoggingAPI" self.comp = sb.launch(self.cname) + def 
_try_config_test(self, logcfg, epattern, foundTest=None ): + + with stdout_redirect(cStringIO.StringIO()) as new_stdout: + ossie.utils.log4py.config.strConfig(logcfg,None) + + new_stdout.seek(0) + found = [] + epats=[] + if type(epattern) == str: + epats.append(epattern) + else: + epats = epattern + if foundTest == None: + foundTest = len(epats)*[True] + for x in new_stdout.readlines(): + for epat in epats: + m=re.search( epat, x ) + if m : + found.append( True ) + + self.assertEqual(found, foundTest ) def tearDown(self): self.comp.releaseObject() + sb.release() # Try to clean up the event channel, if it was created context = None @@ -89,7 +272,7 @@ def test_default_logconfig(self): "# Direct log messages to STDOUT\n" + \ "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ - "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n" + "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{3}:%L - %m%n\n" c_cfg=self.comp.ref.getLogConfig() @@ -166,6 +349,7 @@ def setUp(self): def tearDown(self): self.comp.releaseObject() + sb.release() # Do all application shutdown before calling the base class tearDown, # or failures will probably occur. @@ -200,7 +384,7 @@ def test_default_logconfig(self): "# Direct log messages to STDOUT\n" + \ "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ - "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n" + "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{3}:%L - %m%n\n" c_cfg=self.comp.ref.getLogConfig() cfg=cfg.replace(" ","") @@ -274,6 +458,7 @@ def setUp(self): def tearDown(self): if self.comp: self.comp.releaseObject() + sb.release() # Do all application shutdown before calling the base class tearDown, # or failures will probably occur. 
@@ -347,14 +532,165 @@ def test_comp_macro_directories_config_java(self): pass self.assertNotEquals(fp, None) +class TokenLoggingConfig(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager(loggingURI='file://'+os.getcwd()+'/macro_config.cfg') + self.devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml", loggingURI='file://'+os.getcwd()+'/macro_config.cfg') + self._rhDom = redhawk.attach(scatest.getTestDomainName()) + + def tearDown(self): + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + redhawk.core._cleanUpLaunchedApps() + scatest.CorbaTestCase.tearDown(self) + try: + os.remove('sdr/foo/bar/test.log') + pass + except: + pass + try: + os.rmdir('sdr/foo/bar') + except: + pass + try: + os.rmdir('sdr/foo') + except: + pass + # need to let event service clean up event channels...... + # cycle period is 10 milliseconds + time.sleep(0.1) + + @scatest.requireLog4cxx + def test_token_devmgr(self): + found_devmgr = None + for devmgr in self._rhDom.devMgrs: + if devmgr._instanceName == 'ExecutableDevice_node': + found_devmgr = devmgr + break + logstr = '|||WAVEFORM.NO_INST|||'+found_devmgr._instanceName+'|||'+self._rhDom.name+'-'+os.uname()[1]+'-'+found_devmgr._instanceName+'_'+str(self.devBooter.pid) + fp = None + try: + fp = open('sdr/foo/bar/test.log','r') + except: + pass + self.assertNotEquals(fp, None) + logfile_content = fp.readlines() + found_line = None + for line in logfile_content: + if logstr in line: + if not 'DeviceManager.parsers' in line: + continue + found_line = line + break + self.assertNotEquals(found_line, None) + self.assertNotEquals(found_line.find(logstr), -1) + self.assertNotEquals(found_line.find('DeviceManager.parsers'), -1) + + @scatest.requireLog4cxx + def test_token_config_dev_cpp(self): + found_devmgr = None + for devmgr in self._rhDom.devMgrs: + if devmgr._instanceName == 
'ExecutableDevice_node': + found_devmgr = devmgr + break + self.assertNotEquals(found_devmgr, None) + found_dev = None + for dev in found_devmgr.devs: + if dev.name == 'ExecutableDevice': + found_dev = dev + break + self.assertNotEquals(found_dev, None) + logcfg = found_dev.getLogConfig() + logstr = '|||WAVEFORM.NO_INST|||'+found_devmgr._instanceName+'|||'+self._rhDom.name+'-'+os.uname()[1]+'-'+found_devmgr._instanceName+'_'+str(self.devBooter.pid) + self.assertNotEquals(logcfg.find(logstr), -1) + + def test_token_config_dev_py(self): + self.devBooter_2, self._devMgr_2 = self.launchDeviceManager("/nodes/py_dev_n/DeviceManager.dcd.xml", loggingURI='file://'+os.getcwd()+'/macro_config.cfg') + found_devmgr = None + for devmgr in self._rhDom.devMgrs: + if devmgr._instanceName == 'py_dev_n': + found_devmgr = devmgr + break + self.assertNotEquals(found_devmgr, None) + found_dev = None + for dev in found_devmgr.devs: + if dev.name == 'py_dev': + found_dev = dev + break + self.assertNotEquals(found_dev, None) + logcfg = found_dev.getLogConfig() + logstr = '|||WAVEFORM.NO_INST|||'+found_devmgr._instanceName+'|||'+self._rhDom.name+'-'+os.uname()[1]+'-'+found_devmgr._instanceName+'_'+str(self.devBooter_2.pid) + self.assertNotEquals(logcfg.find(logstr), -1) + + @scatest.requireJava + def test_token_config_dev_java(self): + self.devBooter_2, self._devMgr_2 = self.launchDeviceManager("/nodes/java_dev_n/DeviceManager.dcd.xml", loggingURI='file://'+os.getcwd()+'/macro_config.cfg') + found_devmgr = None + for devmgr in self._rhDom.devMgrs: + if devmgr._instanceName == 'java_dev_n': + found_devmgr = devmgr + break + self.assertNotEquals(found_devmgr, None) + found_dev = None + for dev in found_devmgr.devs: + if dev.name == 'java_dev': + found_dev = dev + break + self.assertNotEquals(found_dev, None) + begin = time.time() + logcfg = None + while time.time()-begin < 1 or not logcfg: + logcfg = found_dev.getLogConfig() + self.assertNotEquals(logcfg, None) + logstr = 
'|||WAVEFORM.NO_INST|||'+found_devmgr._instanceName+'|||'+self._rhDom.name+'-'+os.uname()[1]+'-'+found_devmgr._instanceName+'_'+str(self.devBooter_2.pid) + self.assertNotEquals(logcfg.find(logstr), -1) + + @scatest.requireLog4cxx + def test_token_config_comp_cpp(self): + app = self._rhDom.createApplication("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml") + found_comp = None + for comp in app.comps: + if comp.name == 'PropertyChange_C1': + found_comp = comp + break + self.assertNotEquals(found_comp, None) + logcfg = found_comp.getLogConfig() + logstr = '|||'+app._instanceName+'|||DEV_MGR.NO_NAME|||DEV_MGR.NO_INST' + self.assertNotEquals(logcfg.find(logstr), -1) + + def test_token_config_comp_py(self): + app = self._rhDom.createApplication("/waveforms/PropertyChangeListenerNoJava/PropertyChangeListenerNoJava.sad.xml") + found_comp = None + for comp in app.comps: + if comp.name == 'PropertyChange_P1': + found_comp = comp + break + self.assertNotEquals(found_comp, None) + logcfg = found_comp.getLogConfig() + logstr = '|||'+app._instanceName+'|||DEV_MGR.NO_NAME|||DEV_MGR.NO_INST' + self.assertNotEquals(logcfg.find(logstr), -1) + + @scatest.requireJava + def test_token_config_comp_java(self): + app = self._rhDom.createApplication("/waveforms/PropertyChangeListener/PropertyChangeListener.sad.xml") + found_comp = None + for comp in app.comps: + if comp.name == 'PropertyChange_J1': + found_comp = comp + break + self.assertNotEquals(found_comp, None) + logcfg = found_comp.getLogConfig() + logstr = '|||'+app._instanceName+'|||DEV_MGR.NO_NAME|||DEV_MGR.NO_INST' + self.assertNotEquals(logcfg.find(logstr), -1) class PythonLoggingConfig(scatest.CorbaTestCase): def setUp(self): self.cname = "TestLoggingAPI" - self.comp = sb.launch(self.cname, impl="python" ) - + self.comp = sb.launch(self.cname, impl="python", instanceName="TestLoggingAPI_1" ) + def tearDown(self): self.comp.releaseObject() + sb.release() # Do all application shutdown before calling 
the base class tearDown, # or failures will probably occur. @@ -390,7 +726,7 @@ def test_default_logconfig(self): "# Direct log messages to STDOUT\n" + \ "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ - "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n" + "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n\n" c_cfg=self.comp.ref.getLogConfig() cfg=cfg.replace(" ","") @@ -450,7 +786,6 @@ def test_comp_log_event_appender(self): def test_log_callback(self): - self.comp = sb.launch(self.cname, impl="python", instanceName="TestLoggingAPI_1" ) cfg = "log4j.rootLogger=ERROR,STDOUT,pse\n" + \ "# Direct log messages to STDOUT \n" + \ "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ @@ -497,6 +832,7 @@ def setUp(self): def tearDown(self): self.comp.releaseObject() + sb.release() # Do all application shutdown before calling the base class tearDown, # or failures will probably occur. 
@@ -533,7 +869,7 @@ def test_default_logconfig(self): "# Direct log messages to STDOUT\n" + \ "log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender\n" + \ "log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout\n" + \ - "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n" + "log4j.appender.STDOUT.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c:%L - %m%n\n" c_cfg=self.comp.ref.getLogConfig() cfg=cfg.replace(" ","") @@ -632,7 +968,34 @@ def test_log_callback(self): c_cfg=self.comp.ref.setLogConfig(cfg) self.assertEquals( self.comp.new_log_cfg, exp_cfg) +class DomainTestLogEventAppender(scatest.CorbaTestCase): + def setUp(self): + self.stderr_filename = 'stderr.out' + self.output_file = open(self.stderr_filename,'w') + nb, self._domMgr = self.launchDomainManager(stderr=self.output_file) + nb, self._devMgr = self.launchDeviceManager('/nodes/test_PortTestDevice_node/DeviceManager.dcd.xml') + self.dom=redhawk.attach(self._domMgr._get_name() ) + fp = open('loggers/syncappender/log4j.appender', 'r') + self.logconfig = fp.read() + fp.close() + + def tearDown(self): + scatest.CorbaTestCase.tearDown(self) + try: + self.output_file.close() + except: + pass + try: + os.remove(self.stderr_filename) + except: + pass + def test_logeventappenderDomainManager(self): + self.dom.setLogConfig(self.logconfig) + fp = open(self.stderr_filename, 'r') + contents = fp.read() + fp.close() + self.assertEquals(len(contents), 0) class LoggingConfigCategory(scatest.CorbaTestCase): def setUp(self): @@ -642,6 +1005,7 @@ def setUp(self): def tearDown(self): if self.comp: self.comp.releaseObject() + sb.release() # Do all application shutdown before calling the base class tearDown, # or failures will probably occur. 
@@ -656,9 +1020,7 @@ def _test_LoggingCategory(self): log4j.appender.stdout.Target=System.out\n \ log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n \ log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n \ - log4j.category.TestLoggingAPI_i=INFO,stdout\n \ - log4j.category.TestLoggingAPI.java.TestLoggingAPI_base=INFO,stdout\n \ - log4j.category.TestLoggingAPI=INFO,stdout\n\n' + log4j.category.TestLoggingAPI_1=INFO,stdout\n\n' self.comp.setLogConfig(x) lvl=self.comp.log_level() @@ -673,9 +1035,7 @@ def _test_LoggingCategory(self): log4j.appender.stdout.Target=System.out\n \ log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n \ log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n\n \ - log4j.category.TestLoggingAPI_i=TRACE,stdout\n \ - log4j.category.TestLoggingAPI.java.TestLoggingAPI_base=TRACE,stdout\n \ - log4j.category.TestLoggingAPI=TRACE,stdout\n\n' + log4j.category.TestLoggingAPI_1=TRACE,stdout\n\n' self.comp.setLogConfig(y) lvl=self.comp.log_level() self.assertEquals( proj, lvl ) diff --git a/redhawk/src/testing/tests/test_16_ConnectionManager.py b/redhawk/src/testing/tests/test_16_ConnectionManager.py index df2a441ae..eb9683dba 100644 --- a/redhawk/src/testing/tests/test_16_ConnectionManager.py +++ b/redhawk/src/testing/tests/test_16_ConnectionManager.py @@ -24,6 +24,7 @@ from ossie.cf import CF from ossie.cf import ExtendedCF from ossie import properties +from ossie.utils import redhawk, rhconnection class ConnectionManagerTest(scatest.CorbaTestCase): def setUp(self): @@ -194,18 +195,21 @@ def setUp(self): super(ConnectionManagerTestRedhawkUtils,self).setUp() nb, self._domMgr = self.launchDomainManager() nb, self._devMgr = self.launchDeviceManager('/nodes/test_PortTestDevice_node/DeviceManager.dcd.xml') - self._connMgr = self._domMgr._get_connectionMgr() # Device IDs taken from the DCD self.devId1 = 'DCE:322fb9b2-de57-42a2-ad03-217bcb244262' self.devId2 = 
'DCE:47dc45d8-19b5-4b7e-bcd4-b165babe5b84' - from ossie.utils import redhawk - d=redhawk.Domain(self._domMgr._get_name() ) - self.assertNotEqual(d,None) - + self.dom=redhawk.attach(self._domMgr._get_name() ) + for dev in self.dom.devices: + if dev._get_identifier() == self.devId1: + self.dev1 = dev + if dev._get_identifier() == self.devId2: + self.dev2 = dev + self.assertNotEqual(self.dom,None) + # The DCD has one connection, verify that it is listed - self.cm = d.getConnectionMgr() + self.cm = self.dom.getConnectionMgr() self.assertNotEqual(self.cm, None ) def tearDown(self): @@ -255,7 +259,56 @@ def test_redhawkutils_DeviceConnections(self): # get connnections list clist, citer = self.cm.listConnections() - self.assertEqual(clist, [] ) + self.assertEqual(clist, [] ) + + conn=citer.next_one() + self.assertEqual(conn.usesEndpoint.portName, 'resource_out') + conn=citer.next_one() + self.assertEqual(conn.usesEndpoint.portName, 'resource_out') + conn=citer.next_one() + self.assertEqual(conn, None) + + + # The connection should have been resolved immediately + self.assertTrue(connection.connected) + self.assertEqual(connection.usesEndpoint.portName, 'resource_out') + self.assertEqual(connection.providesEndpoint.portName, '') + + # Verify that the connection was really made + connections = connection.usesEndpoint.endpointObject._get_connections() + self.assertTrue(len(connections) == 1) + self.assertEqual(connections[0].connectionId, 'test_connection') + self.assertEqual(connections[0].port._get_identifier(), self.devId1) + + # Break the connection and make sure the connection went away + self.cm.disconnect(connectionReportId) + connections = self.cm.connections + self.assertEqual(len(connections), 1) + + def test_connect_DeviceConnections(self): + + # The DCD has one connection, verify that it is listed + connections = self.cm.connections + self.assertEqual(len(connections), 1) + + # Use the first device's resource_out port as the uses endpoint + uses = 
rhconnection.makeEndPoint(self.dev2, 'resource_out') + + # Use the device itself as the provides endpoint + provides = rhconnection.makeEndPoint( self.dev1, '' ) + + # Create a new connection with a known ID + connectionReportId = self.cm.connect(uses, provides, 'test_environment', 'test_connection') + + # Make sure the new connection is listed + connections = self.cm.connections + self.assertEqual(len(connections), 2) + connection = self._findConnection(connections, 'test_connection') + self.assertFalse(connection is None) + + # get connnections list + clist, citer = self.cm.listConnections() + self.assertEqual(clist, [] ) conn=citer.next_one() self.assertEqual(conn.usesEndpoint.portName, 'resource_out') @@ -285,21 +338,65 @@ def test_redhawkutils_DeviceConnections(self): def test_redhawkutils_ComponentConnections(self): app_src = self._domMgr.createApplication('/waveforms/comp_src_w/comp_src_w.sad.xml', 'src_app', [], []) app_snk = self._domMgr.createApplication('/waveforms/comp_snk_w/comp_snk_w.sad.xml', 'snk_app', [], []) - uses = self.cm.componentEndPoint( 'comp_src_1:'+app_src._get_identifier(), 'dataFloat_out') - provides = self.cm.componentEndPoint( 'comp_snk_1:'+app_snk._get_identifier(), 'dataFloat_in') + uses = self.cm.componentEndPoint( 'comp_src_1:'+app_src._get_identifier(), 'output') + provides = self.cm.componentEndPoint( 'comp_snk_1:'+app_snk._get_identifier(), 'input') connectionReportId = self.cm.connect(uses, provides, 'test_environment', 'test_connection') connections = self.cm.connections self.assertEqual(len(connections), 2) connection = self._findConnection(connections,'test_connection') self.assertFalse(connection is None) self.assertTrue(connection.connected) - self.assertEqual(connection.usesEndpoint.portName, 'dataFloat_out') - self.assertEqual(connection.providesEndpoint.portName, 'dataFloat_in') + self.assertEqual(connection.usesEndpoint.portName, 'output') + self.assertEqual(connection.providesEndpoint.portName, 'input') connections = 
connection.usesEndpoint.endpointObject._narrow(ExtendedCF.QueryablePort)._get_connections() self.assertTrue(len(connections) == 1) self.assertEqual(connections[0].connectionId, 'test_connection') - - + + def test_connect_ComponentConnections(self): + app_src = self.dom.createApplication('/waveforms/comp_src_w/comp_src_w.sad.xml', 'src_app', [], []) + app_snk = self.dom.createApplication('/waveforms/comp_snk_w/comp_snk_w.sad.xml', 'snk_app', [], []) + comp_src = app_src.comps[0] + comp_snk = app_snk.comps[0] + + uses = rhconnection.makeEndPoint( comp_src, 'output') + provides = rhconnection.makeEndPoint( comp_snk, 'input') + connectionReportId = self.cm.connect(uses, provides, 'test_environment', 'test_connection') + connections = self.cm.connections + self.assertEqual(len(connections), 2) + connection = self._findConnection(connections,'test_connection') + self.assertFalse(connection is None) + self.assertTrue(connection.connected) + self.assertEqual(connection.usesEndpoint.portName, 'output') + self.assertEqual(connection.providesEndpoint.portName, 'input') + connections = connection.usesEndpoint.endpointObject._narrow(ExtendedCF.QueryablePort)._get_connections() + out_connections = comp_src.ports[0].ref._get_connections() + self.assertTrue(connections[0].port._is_equivalent(out_connections[0].port)) + self.assertTrue(len(connections) == 1) + self.assertEqual(connections[0].connectionId, 'test_connection') + + def test_connect_ServicesConnections(self): + app_src = self.dom.createApplication('/waveforms/svc_connect/svc_connect.sad.xml', 'svc_connect', [], []) + nb, self._devMgrSvc = self.launchDeviceManager('/nodes/test_BasicService_node/DeviceManager.dcd.xml') + comp_src = app_src.comps[0] + for svc in self.dom.services: + if svc.name == 'BasicService1': + break + + uses = rhconnection.makeEndPoint( comp_src, 'output') + provides = rhconnection.makeEndPoint( svc, '') + connectionReportId = self.cm.connect(uses, provides, 'test_environment', 'test_connection') + 
connections = self.cm.connections + self.assertEqual(len(connections), 2) + connection = self._findConnection(connections,'test_connection') + self.assertFalse(connection is None) + self.assertTrue(connection.connected) + self.assertEqual(connection.usesEndpoint.portName, 'output') + connections = connection.usesEndpoint.endpointObject._narrow(ExtendedCF.QueryablePort)._get_connections() + out_connections = comp_src.ports[0].ref._get_connections() + self.assertTrue(connections[0].port._is_equivalent(out_connections[0].port)) + self.assertTrue(len(connections) == 1) + self.assertEqual(connections[0].connectionId, 'test_connection') + def test_redhawkutils_ApplicationConnections(self): app = self._createApp('/waveforms/PortConnectExternalPortRename/PortConnectExternalPortRename.sad.xml') @@ -332,6 +429,39 @@ def test_redhawkutils_ApplicationConnections(self): self.assertEqual(len(connections), 1) app.releaseObject() + + def test_connect_ApplicationConnections(self): + app = self.dom.createApplication('/waveforms/PortConnectExternalPortRename/PortConnectExternalPortRename.sad.xml') + + # Connect the application's external uses port to the first device's + # provides port + uses = rhconnection.makeEndPoint( app, 'rename_resource_out') + provides = rhconnection.makeEndPoint( self.dev1, 'resource_in') + connectionReportId = self.cm.connect(uses, provides, 'test_environment', 'test_connection') + + # Make sure the new connection is listed + connections = self.cm.connections + self.assertEqual(len(connections), 2) + connection = self._findConnection(connections,'test_connection') + self.assertFalse(connection is None) + + # The connection should have been resolved immediately + self.assertTrue(connection.connected) + self.assertEqual(connection.usesEndpoint.portName, 'rename_resource_out') + self.assertEqual(connection.providesEndpoint.portName, 'resource_in') + + # Verify that the connection was really made + connections = 
connection.usesEndpoint.endpointObject._get_connections() + self.assertTrue(len(connections) == 1) + self.assertEqual(connections[0].connectionId, 'test_connection') + self.assertEqual(connections[0].port._get_identifier(), self.devId1+'/resource_in') + + # Break the connection and make sure the connection went away + self.cm.disconnect(connectionReportId) + connections = self.cm.connections + self.assertEqual(len(connections), 1) + + app.releaseObject() def test_redhawkutils_DeferredConnections(self): app = self._createApp('/waveforms/PortConnectExternalPortRename/PortConnectExternalPortRename.sad.xml') diff --git a/redhawk/src/testing/tests/test_16_HierarchicalLogging.py b/redhawk/src/testing/tests/test_16_HierarchicalLogging.py new file mode 100644 index 000000000..06c07b2e9 --- /dev/null +++ b/redhawk/src/testing/tests/test_16_HierarchicalLogging.py @@ -0,0 +1,1023 @@ +# +# This file is protected by Copyright. Please refer to the COPYRIGHT file +# distributed with this source distribution. +# +# This file is part of REDHAWK core. +# +# REDHAWK core is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see http://www.gnu.org/licenses/. 
+# + +from _unitTestHelpers import scatest +from ossie.utils import sb, redhawk +import unittest, contextlib, time, os +from ossie.cf import CF +import tempfile + +def killDomain(name): + pids = [pid for pid in os.listdir('/proc') if pid.isdigit()] + for pid in pids: + try: + cmdline=open(os.path.join('/proc', pid, 'cmdline'), 'rb').read() + if 'DOMAIN_NAME' in cmdline: + if name in cmdline: + os.kill(int(pid), 2) + except: + pass + +@scatest.requireLog4cxx +class CppHierarchicalDomainLogging(scatest.CorbaTestCase): + def setUp(self): + domBooter, self._domMgr = self.launchDomainManager() + devBooter, self._devMgr = self.launchDeviceManager("/nodes/test_ExecutableDevice_node/DeviceManager.dcd.xml") + self._rhDom = redhawk.attach(scatest.getTestDomainName()) + self.assertEquals(len(self._rhDom._get_applications()), 0) + + def tearDown(self): + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + redhawk.core._cleanUpLaunchedApps() + scatest.CorbaTestCase.tearDown(self) + try: + os.remove('sdr/dom/waveforms/logger_overload_w/tmp.sad.xml') + except: + pass + try: + os.remove('sdr/dom/waveforms/logger_config/tmp.sad.xml') + except: + pass + # need to let event service clean up event channels + # cycle period is 10 milliseconds + time.sleep(0.1) + redhawk.setTrackApps(False) + + def test_logconfiguri_application(self): + self.cname = "logger" + # Automatically clean up + redhawk.setTrackApps(True) + # Create Application from $SDRROOT path + app_1 = self._rhDom.createApplication("/waveforms/logger_w/logger_w.sad.xml", initConfiguration={'LOGGING_CONFIG_URI':'file://'+os.getcwd()+'/high_thresh.cfg'}) + self.assertEquals(app_1.getLogLevel('logger_1'), 30000) + self.assertEquals(app_1.getLogLevel('logger_2'), 30000) + loggers_1 = app_1.getNamedLoggers() + app_2 = self._rhDom.createApplication("/waveforms/logger_w/logger_w.sad.xml") + loggers_2 = app_2.getNamedLoggers() + self.assertEquals(app_1.getLogLevel('logger_1'), 
30000) + self.assertEquals(app_1.getLogLevel('logger_2'), 30000) + self.assertEquals(app_2.getLogLevel('logger_1'), 40000) + self.assertEquals(app_2.getLogLevel('logger_2'), 40000) + + def test_logconfiguri_overload(self): + self.cname = "logger" + # Automatically clean up + redhawk.setTrackApps(True) + # Create Application from $SDRROOT path + fp = open('sdr/dom/waveforms/logger_overload_w/logger_overload_w.sad.xml','r') + sad_contents = fp.read() + fp.close() + sad_contents = sad_contents.replace('@@@CWD@@@', os.getcwd()) + fp = open('sdr/dom/waveforms/logger_overload_w/tmp.sad.xml','w') + fp.write(sad_contents) + fp.close() + app_1 = self._rhDom.createApplication("/waveforms/logger_overload_w/tmp.sad.xml") + self.assertEquals(app_1.getLogLevel('logger_2'), 30000) + loggers_1 = app_1.getNamedLoggers() + app_2 = self._rhDom.createApplication("/waveforms/logger_w/logger_w.sad.xml") + loggers_2 = app_2.getNamedLoggers() + self.assertEquals(app_1.getLogLevel('logger_2'), 30000) + self.assertEquals(app_2.getLogLevel('logger_1'), 40000) + + def test_loggingconfig(self): + self.cname = "logger" + fp=open('./runtest.props','r') + runtest_props = fp.read() + fp.close() + fp=open('./high_thresh.cfg','r') + high_thresh_cfg = fp.read() + fp.close() + # Automatically clean up + redhawk.setTrackApps(True) + # Create Application from $SDRROOT path + fp = open('sdr/dom/waveforms/logger_config/logger_config.sad.xml','r') + sad_contents = fp.read() + fp.close() + sad_contents = sad_contents.replace('@@@CWD@@@', os.getcwd()) + fp = open('sdr/dom/waveforms/logger_config/tmp.sad.xml','w') + fp.write(sad_contents) + fp.close() + app_1 = self._rhDom.createApplication("/waveforms/logger_config/tmp.sad.xml") + self.assertEquals(app_1.getLogLevel('logger_1'), 0) + self.assertEquals(app_1.getLogLevel('logger_2'), 50000) + logger_1 = -1 + logger_2 = -1 + for comp_idx in range(len(app_1.comps)): + if app_1.comps[comp_idx].instanceName == 'logger_1': + logger_1 = comp_idx + break + if 
logger_1 == 0: + logger_2 = 1 + if logger_1 == 1: + logger_2 = 0 + self.assertNotEqual(logger_1, -1) + self.assertNotEqual(logger_2, -1) + self.assertEquals(app_1.comps[logger_1].getLogConfig(), high_thresh_cfg) + self.assertEquals(app_1.comps[logger_2].getLogConfig(), runtest_props) + + loggers_1 = app_1.getNamedLoggers() + app_2 = self._rhDom.createApplication("/waveforms/logger_config/tmp.sad.xml", initConfiguration={'LOGGING_CONFIG_URI':'file://'+os.getcwd()+'/high_thresh.cfg'}) + loggers_2 = app_2.getNamedLoggers() + self.assertEquals(app_2.getLogLevel('logger_1'), 30000) + self.assertEquals(app_2.getLogLevel('logger_2'), 30000) + self.assertEquals(app_2.comps[logger_1].getLogConfig(), high_thresh_cfg) + self.assertEquals(app_2.comps[logger_2].getLogConfig(), high_thresh_cfg) + app_1.start() + time.sleep(1.5) + + def test_application_cpp_access(self): + self.cname = "logger" + self.applicationAccess("/waveforms/logger_w/logger_w.sad.xml") + + def test_application_py_access(self): + self.cname = "logger_py" + self.applicationAccess("/waveforms/logger_py_w/logger_py_w.sad.xml") + + @scatest.requireJava + def test_application_java_access(self): + self.cname = "logger_java" + self.applicationAccess("/waveforms/logger_java_w/logger_java_w.sad.xml") + + def applicationAccess(self, sadfile): + # Automatically clean up + redhawk.setTrackApps(True) + # Create Application from $SDRROOT path + app = self._rhDom.createApplication(sadfile) + loggers = app.getNamedLoggers() + + orig_loggers = {} + orig_loggers[self.cname+'_1'] = app.getLogLevel(self.cname+'_1') + orig_loggers[self.cname+'_1.lower'] = app.getLogLevel(self.cname+'_1.lower') + orig_loggers[self.cname+'_1.namespace.lower'] = app.getLogLevel(self.cname+'_1.namespace.lower') + orig_loggers[self.cname+'_1.user.more_stuff'] = app.getLogLevel(self.cname+'_1.user.more_stuff') + orig_loggers[self.cname+'_1.user.some_stuff'] = app.getLogLevel(self.cname+'_1.user.some_stuff') + + self.assertTrue(self.cname+'_1' in 
loggers) + self.assertTrue(self.cname+'_1.lower' in loggers) + self.assertTrue(self.cname+'_1.namespace.lower' in loggers) + self.assertTrue(self.cname+'_1.system.PortSupplier' in loggers) + self.assertTrue(self.cname+'_1.system.PropertySet' in loggers) + self.assertTrue(self.cname+'_1.system.Resource' in loggers) + self.assertTrue(self.cname+'_1.user.more_stuff' in loggers) + self.assertTrue(self.cname+'_1.user.some_stuff' in loggers) + + self.assertTrue(self.cname+'_2' in loggers) + self.assertTrue(self.cname+'_2.lower' in loggers) + self.assertTrue(self.cname+'_2.namespace.lower' in loggers) + self.assertTrue(self.cname+'_2.system.PortSupplier' in loggers) + self.assertTrue(self.cname+'_2.system.PropertySet' in loggers) + self.assertTrue(self.cname+'_2.system.Resource' in loggers) + self.assertTrue(self.cname+'_2.user.more_stuff' in loggers) + self.assertTrue(self.cname+'_2.user.some_stuff' in loggers) + + self.assertRaises(CF.UnknownIdentifier, app.setLogLevel, self.cname+'_1.foo', 'all') + self.assertRaises(CF.UnknownIdentifier, app.getLogLevel, self.cname+'_1.foo') + + app.setLogLevel(self.cname+'_1', 'all') + self.assertEquals(app.getLogLevel(self.cname+'_1'), CF.LogLevels.ALL) + self.assertEquals(app.getLogLevel(self.cname+'_1.lower'), CF.LogLevels.ALL) + self.assertEquals(app.getLogLevel(self.cname+'_1.namespace.lower'), CF.LogLevels.ALL) + self.assertEquals(app.getLogLevel(self.cname+'_1.user.more_stuff'), CF.LogLevels.ALL) + self.assertEquals(app.getLogLevel(self.cname+'_1.user.some_stuff'), CF.LogLevels.ALL) + app.setLogLevel(self.cname+'_1', 'off') + self.assertEquals(app.getLogLevel(self.cname+'_1'), CF.LogLevels.OFF) + self.assertEquals(app.getLogLevel(self.cname+'_1.lower'), CF.LogLevels.OFF) + self.assertEquals(app.getLogLevel(self.cname+'_1.namespace.lower'), CF.LogLevels.OFF) + self.assertEquals(app.getLogLevel(self.cname+'_1.user.more_stuff'), CF.LogLevels.OFF) + self.assertEquals(app.getLogLevel(self.cname+'_1.user.some_stuff'), 
CF.LogLevels.OFF) + + # break the level inheritance + app.setLogLevel(self.cname+'_1.user', 'trace') + app.setLogLevel(self.cname+'_1', 'all') + self.assertEquals(app.getLogLevel(self.cname+'_1'), CF.LogLevels.ALL) + self.assertEquals(app.getLogLevel(self.cname+'_1.lower'), CF.LogLevels.ALL) + self.assertEquals(app.getLogLevel(self.cname+'_1.namespace.lower'), CF.LogLevels.ALL) + self.assertEquals(app.getLogLevel(self.cname+'_1.user.more_stuff'), CF.LogLevels.TRACE) + self.assertEquals(app.getLogLevel(self.cname+'_1.user.some_stuff'), CF.LogLevels.TRACE) + + # set the log with a value rather than the string + app.setLogLevel(self.cname+'_1', CF.LogLevels.DEBUG) + self.assertEquals(app.getLogLevel(self.cname+'_1'), CF.LogLevels.DEBUG) + self.assertEquals(app.getLogLevel(self.cname+'_1.lower'), CF.LogLevels.DEBUG) + self.assertEquals(app.getLogLevel(self.cname+'_1.namespace.lower'), CF.LogLevels.DEBUG) + self.assertEquals(app.getLogLevel(self.cname+'_1.user.more_stuff'), CF.LogLevels.TRACE) + self.assertEquals(app.getLogLevel(self.cname+'_1.user.some_stuff'), CF.LogLevels.TRACE) + + app.resetLog() + self.assertEquals(orig_loggers[self.cname+'_1'], app.getLogLevel(self.cname+'_1')) + self.assertEquals(orig_loggers[self.cname+'_1.lower'], app.getLogLevel(self.cname+'_1.lower')) + self.assertEquals(orig_loggers[self.cname+'_1.namespace.lower'], app.getLogLevel(self.cname+'_1.namespace.lower')) + self.assertEquals(orig_loggers[self.cname+'_1.user.more_stuff'], app.getLogLevel(self.cname+'_1.user.more_stuff')) + self.assertEquals(orig_loggers[self.cname+'_1.user.some_stuff'], app.getLogLevel(self.cname+'_1.user.some_stuff')) + + # verify that inheritance is re-established + app.setLogLevel(self.cname+'_1', 'all') + self.assertEquals(CF.LogLevels.ALL, app.getLogLevel(self.cname+'_1')) + self.assertEquals(CF.LogLevels.ALL, app.getLogLevel(self.cname+'_1.lower')) + self.assertEquals(CF.LogLevels.ALL, app.getLogLevel(self.cname+'_1.namespace.lower')) + 
self.assertEquals(CF.LogLevels.ALL, app.getLogLevel(self.cname+'_1.user.more_stuff')) + self.assertEquals(CF.LogLevels.ALL, app.getLogLevel(self.cname+'_1.user.some_stuff')) + +@scatest.requireLog4cxx +class ApplicationDomainLogging(scatest.CorbaTestCase): + def setUp(self): + self.tmpfile=tempfile.mktemp() + devmgrs = ['test_ExecutableDevice_node'] + self._rhDom = redhawk.kickDomain(domain_name=scatest.getTestDomainName(), + kick_device_managers=True, + device_managers = devmgrs, + stdout=self.tmpfile, + detached=False) + + try: + self.waitForDeviceManager(devmgrs[0]) + except: + traceback.print_exc() + pass + self.assertEquals(len(self._rhDom._get_applications()), 0) + + def tearDown(self): + # Do all application shutdown before calling the base class tearDown, + # or failures will probably occur. + redhawk.core._cleanUpLaunchedApps() + scatest.CorbaTestCase.tearDown(self) + try: + os.remove(self.tmpfile) + except: + pass + + try: + self._rhDom.terminate() + except: + pass + killDomain(scatest.getTestDomainName()) + # need to let event service clean up event channels + # cycle period is 10 milliseconds + time.sleep(0.1) + + def test_domain_hierarchy_all(self): + self._rhDom.setLogLevel('DomainManager', 'all') + self._rhDom.devMgrs[0].setLogLevel('DeviceManager', 'all') + props = self._rhDom.query([]) + props = self._rhDom.devMgrs[0].query([]) + fp = open(self.tmpfile, 'r') + output=fp.read() + fp.close() + self.assertTrue('DomainManager.PropertySet' in output) + self.assertTrue('DeviceManager.PropertySet' in output) + + def test_domain_hierarchy(self): + self._rhDom.setLogLevel('DomainManager', 'info') + self._rhDom.devMgrs[0].setLogLevel('DeviceManager', 'info') + props = self._rhDom.query([]) + props = self._rhDom.devMgrs[0].query([]) + fp = open(self.tmpfile, 'r') + output=fp.read() + fp.close() + self.assertFalse('DomainManager.PropertySet' in output) + self.assertFalse('DeviceManager.PropertySet' in output) + + def application_default_log(self, appname, 
compname): + app = self._rhDom.createApplication(appname) + app.setLogLevel(compname+'_1.user.more_stuff', 'all') + app.start() + begin_time = time.time() + while time.time()-begin_time < 1.5: + fp = open(self.tmpfile, 'r') + output=fp.read() + fp.close() + if output.find(compname+'_1.user.more_stuff') != -1: + break + time.sleep(0.1) + app.stop() + self.assertNotEqual(output.find(compname+'_1.user.more_stuff'), -1) + + def test_application_default_log_cpp(self): + self.application_default_log("/waveforms/logger_w/logger_w.sad.xml", 'logger') + + def test_application_default_log_py(self): + self.application_default_log("/waveforms/logger_py_w/logger_py_w.sad.xml", 'logger_py') + + @scatest.requireJava + def test_application_default_log_java(self): + self.application_default_log("/waveforms/logger_java_w/logger_java_w.sad.xml", 'logger_java') + + +def all_log_levels(_obj): + _obj.comp = sb.launch(_obj.cname, properties={'LOGGING_CONFIG_URI':'file://'+os.getcwd()+'/high_thresh.cfg'}) + _obj.comp.start() + # make sure that all the named loggers appear + loggers = _obj.comp.getNamedLoggers() + _obj.assertTrue(_obj.cname+'_1' in loggers) + _obj.assertTrue(_obj.cname+'_1.lower' in loggers) + _obj.assertTrue(_obj.cname+'_1.lower.second.first' in loggers) + _obj.assertTrue(_obj.cname+'_1.lower.third' in loggers) + _obj.assertTrue(_obj.cname+'_1.namespace.lower' in loggers) + _obj.assertTrue(_obj.cname+'_1.user.more_stuff' in loggers) + _obj.assertTrue(_obj.cname+'_1.user.some_stuff' in loggers) + _obj.assertTrue(_obj.cname+'_1.system.PortSupplier' in loggers) + _obj.assertTrue(_obj.cname+'_1.system.PropertySet' in loggers) + _obj.assertTrue(_obj.cname+'_1.system.Resource' in loggers) + for logger in loggers: + _obj.assertTrue(_obj.cname+'_1' in logger) + + # verify that the logger level is inherited + _obj.comp.setLogLevel(_obj.cname+'_1', 'all') + time.sleep(0.5) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1'), CF.LogLevels.ALL) + 
_obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.lower'), CF.LogLevels.ALL) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.lower.second.first'), CF.LogLevels.ALL) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.lower.third'), CF.LogLevels.ALL) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.namespace.lower'), CF.LogLevels.ALL) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.user.more_stuff'), CF.LogLevels.ALL) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.user.some_stuff'), CF.LogLevels.ALL) + _obj.comp.setLogLevel(_obj.cname+'_1', 'off') + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1'), CF.LogLevels.OFF) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.lower'), CF.LogLevels.OFF) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.namespace.lower'), CF.LogLevels.OFF) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.user.more_stuff'), CF.LogLevels.OFF) + _obj.assertEquals(_obj.comp.getLogLevel(_obj.cname+'_1.user.some_stuff'), CF.LogLevels.OFF) + + # make sure that the log content is correct + content = _obj.readLogFile('foo/bar/test.log') + find_1 = content.find('message from _log') + find_2 = content.find('message from baseline_1_logger') + find_3 = content.find('message from baseline_2_logger') + find_4 = content.find('message from namespaced_logger') + find_5 = content.find('message from basetree_logger') + _obj.assertTrue(find_1 -#include -#include -#include -#include -#include -#include -#include "RH_LogEventAppender.h" - -using namespace log4cxx; -using namespace log4cxx::helpers; - -#define _LL_DEBUG( msg ) \ - { std::ostringstream __os; __os << msg; LogLog::debug(__os.str()); __os.str(""); } - -#define _LLS_DEBUG( os, msg ) \ - os << msg; LogLog::debug(os.str()); os.str(""); - -// Register this class with log4cxx -IMPLEMENT_LOG4CXX_OBJECT(RH_LogEventAppender) - -RH_LogEventAppender::RH_LogEventAppender(): -channelName("LOG_CHANNEL"), - nameContext(""), - 
prodId("RESOURCE.ID"), - prodName("RESOURCE.Name"), - prodFQN("RESOURCE.FQN"), - _channelName(""), - _nameContext(""), - _reconnect_retries(10), - _reconnect_delay(10) -{ - -} - - -RH_LogEventAppender::~RH_LogEventAppender() {} - - -void RH_LogEventAppender::setOption(const LogString& option, const LogString& value) { - - if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("EVENT_CHANNEL"), LOG4CXX_STR("event_channel"))) { - synchronized sync(mutex); - channelName = value; - } - else if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("NAME_CONTEXT"), LOG4CXX_STR("name_context"))) { - synchronized sync(mutex); - nameContext = value; - } - else if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("PRODUCER_ID"), LOG4CXX_STR("producer_id"))) { - synchronized sync(mutex); - prodId = value; - } - else if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("PRODUCER_NAME"), LOG4CXX_STR("producer_name"))) { - synchronized sync(mutex); - prodName = value; - } - else if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("PRODUCER_FQN"), LOG4CXX_STR("producer_fqn"))) { - synchronized sync(mutex); - prodFQN = value; - } - else if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("ARGV"), LOG4CXX_STR("argv"))) { - synchronized sync(mutex); - _args = value; - } - else if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("RETRIES"), LOG4CXX_STR("retries"))) { - synchronized sync(mutex); - int newRetry = StringHelper::toInt(value); - if ( newRetry > 0 ) { - _reconnect_retries = newRetry; - } - } - else if(StringHelper::equalsIgnoreCase(option, LOG4CXX_STR("RETRY_DELAY"), LOG4CXX_STR("retry_delay"))) { - synchronized sync(mutex); - int newDelay = StringHelper::toInt(value); - if ( newDelay > 0 ) { - _reconnect_delay = newDelay; - } - } - else { - AppenderSkeleton::setOption(option, value); - } -} - - -void RH_LogEventAppender::activateOptions(Pool& p) { - - synchronized sync(mutex); - std::ostringstream os; - _LLS_DEBUG( os, "RH_LogEventAppender: CH:" << channelName ); - 
_LLS_DEBUG( os, "RH_LogEventAppender: NameContext:" << nameContext ); - _LLS_DEBUG( os, "RH_LogEventAppender: Retries:" << _reconnect_retries); - _LLS_DEBUG( os, "RH_LogEventAppender: RetryDelay:" << _reconnect_delay); - - if ( _channelName != channelName && channelName != "" ) { - LOG4CXX_ENCODE_CHAR(t, channelName ); - _channelName = t; - LOG4CXX_ENCODE_CHAR(t2, nameContext ); - _nameContext = t2; - connect_(); - } - - AppenderSkeleton::activateOptions(p); - -} - - -void RH_LogEventAppender::append(const spi::LoggingEventPtr& event, Pool& p){ - if ( this->layout == NULL ) { - LOG4CXX_ENCODE_CHAR(nameStr, name); - std::string msg("No Layout set for the appender named [ "); - msg.append(nameStr); - msg.append(" ]."); - - LOG4CXX_DECODE_CHAR(msgL, msg); - errorHandler->error(msgL); - return; - } - - log4cxx::LogString fMsg; - - this->layout->format(fMsg, event, p); - - LOG4CXX_ENCODE_CHAR(fMsgStr, fMsg); - - // This is the message structure for a Redhawk logging event - CF::LogEvent rh_event; - LOG4CXX_ENCODE_CHAR(t1,prodId); - rh_event.producerId = CORBA::string_dup(t1.c_str()); - LOG4CXX_ENCODE_CHAR(t2,prodName); - rh_event.producerName = CORBA::string_dup(t2.c_str()); - LOG4CXX_ENCODE_CHAR(t3,prodFQN); - rh_event.producerName_fqn = CORBA::string_dup(t3.c_str()); - - CORBA::Long level=CF::LogLevels::FATAL; - if ( event->getLevel() == log4cxx::Level::getError() ) - level=CF::LogLevels::ERROR; - if ( event->getLevel() == log4cxx::Level::getWarn() ) - level=CF::LogLevels::WARN; - if ( event->getLevel() == log4cxx::Level::getInfo() ) - level=CF::LogLevels::INFO; - if ( event->getLevel() == log4cxx::Level::getDebug() ) - level=CF::LogLevels::DEBUG; - if ( event->getLevel() == log4cxx::Level::getTrace() ) - level=CF::LogLevels::TRACE; - if ( event->getLevel() == log4cxx::Level::getAll() ) - level=CF::LogLevels::ALL; - rh_event.level = level; - - //timeStamp in LoggingEventPtr is in microseconds - //need to convert to seconds for rh_event - rh_event.timeStamp = 
event->getTimeStamp()/1000000; - rh_event.msg = CORBA::string_dup(fMsg.c_str()); - - // push log message to the event channel - if ( _event_channel ) { - if ( _event_channel->push(rh_event) != 0 ) { - _LL_DEBUG( "RH_LogEventAppender::append EVENT CHANNEL, PUSH OPERATION FAILED."); - } - -} - -void RH_LogEventAppender::close() -{ - _LL_DEBUG( "RH_LogEventAppender::close START"); - if ( closed ) return; - _event_channel.reset(); - closed=true; - _LL_DEBUG( "RH_LogEventAppender::close END"); -} - - -int RH_LogEventAppender::connect_() { - - int retval = 0; - if ( _orb == NULL ) { - // RESOLVE need to parse args to list of strings.. - /* - LOG4CXX_ENCODE_CHAR(t,_args); - int largc=args.size(); - - char *largv[args.size()]; - char **pp; - ArgList::iterator ii; - int jj; - - for( pp=largv, ii=args.begin(); ii != args.end(); ii++, pp++ ) { - *pp = &((*ii)[0]); - } - */ - int largc=0; - char **largv=NULL; - try { - _orb = corba::OrbContext::Init(largc, largv ); - } - catch(...) { - retval=1; - } - - } - - _event_channel.reset(); - std::ostringstream os; - _LLS_DEBUG( os, "RH_LogEventAppender::connect Create PushEventSupplier" << _channelName ); - corba::PushEventSupplier *pes=new corba::PushEventSupplier( _orb, - _channelName, - _nameContext, - _reconnect_retries, - _reconnect_delay ); - if (pes != NULL ) { - _LLS_DEBUG( os, "RH_LogEventAppender::connect Create PushEventSupplier Created." ); - _event_channel.reset(pes); - } - - return retval; -} diff --git a/redhawk/src/tools/LogEventAppender/RH_LogEventAppender.h b/redhawk/src/tools/LogEventAppender/RH_LogEventAppender.h deleted file mode 100644 index 8a0fa49fd..000000000 --- a/redhawk/src/tools/LogEventAppender/RH_LogEventAppender.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK core. 
- * - * REDHAWK core is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef RH_LogEvent_APPENDER_H -#define RH_LogEvent_APPENDER_H -#include -#include -#include -#include -#include -#include -#include "corba.h" - -namespace log4cxx -{ - -class RH_LogEventAppender : public AppenderSkeleton -{ - public: - DECLARE_LOG4CXX_OBJECT(RH_LogEventAppender) - - BEGIN_LOG4CXX_CAST_MAP() - LOG4CXX_CAST_ENTRY(RH_LogEventAppender) - LOG4CXX_CAST_ENTRY_CHAIN(AppenderSkeleton) - END_LOG4CXX_CAST_MAP() - - - RH_LogEventAppender(); - virtual ~RH_LogEventAppender(); - - // - // Called by log4cxx internals to process options - // - void setOption(const LogString& option, const LogString& value); - - void activateOptions( log4cxx::helpers::Pool& p); - - // This method is called by the AppenderSkeleton#doAppend method - void append(const spi::LoggingEventPtr& event, log4cxx::helpers::Pool& p); - - void close(); - - bool isClosed() const { return closed; } - - bool requiresLayout() const { return true; } - - private: - - typedef boost::shared_ptr< corba::PushEventSupplier > PushEventSupplierPtr; - - std::vector< std::string > ArgList; - - // - // perform connect operation to establish a corba context - // - int connect_(); - - // - // Command line arguments used to configure corba util methods - // - LogString _args; - - // - // handle to corba context - // - corba::OrbPtr _orb; - - 
// - // channel name - // - LogString channelName; - - // - // naming context - // - LogString nameContext; - - // - // Producer Identifier - // - LogString prodId; - - // - // Producer Name - // - LogString prodName; - - // - // Producer FQN - fully qualified domain name for resource - // - LogString prodFQN; - - // - // channel name, shadow variable - // - LogString _channelName; - - // - // naming context, shadow variable - // - LogString _nameContext; - - // - // Handle to requested channel, might want to make this a vector... and this class a singleton - // - PushEventSupplierPtr _event_channel; - - // number of times to retry before calling it quits.. reset after each successfull connection ( -1 try forever ) - int _reconnect_retries; - - // number of milliseconds to delay before retrying to connect to CORBA resoure - int _reconnect_delay; - - // prevent copy and assignment statements - RH_LogEventAppender(const RH_LogEventAppender&); - RH_LogEventAppender& operator=(const RH_LogEventAppender&); - - }; - -}; // end of namespace -#endif diff --git a/redhawk/src/tools/LogEventAppender/XXXX b/redhawk/src/tools/LogEventAppender/XXXX deleted file mode 100644 index 26b0fb91e..000000000 --- a/redhawk/src/tools/LogEventAppender/XXXX +++ /dev/null @@ -1,278 +0,0 @@ - - - // - // GetEventChannel - // - // Will first lookup an event channel given the value of the name parameter... 
it will try to resolve the - // name using different event channel resolution methods: - // 1) resolve using naming services's resolve_str method - // 2) resolve if channel defined with InitRef method and resolve_initial_reference method - // 3) resolve as corbaname corbaname::#channelname - // 4) resolve with corbaloc - // - // If channel was not found and create==true then create the channel from the EventChannelFactory - // - CosEventChannelAdmin::EventChannel_ptr GetEventChannel ( const std::string& name, - const bool create=false, - const std::string &host="localhost" ); - - - CosEventChannelAdmin::EventChannel_ptr GetEventChannel ( const std::string& name, - const std::string& ns_context, - const bool create=false, - const std::string &host="localhost" ); - - - // - // CreateEventChannel - // - // Create an EventChannel within the current ORB context, once created bind to the same name.... - // - CosEventChannelAdmin::EventChannel_ptr CreateEventChannel( const std::string& name, - ossie::corba::NS_ACTION action=ossie::corba::NS_BIND ); - - CosEventChannelAdmin::EventChannel_ptr CreateEventChannel( const std::string& name, - const std::string& ns_context, - ossie::corba:::NS_ACTION action=ossie::corba::NS_BIND ); - // - // PushEventConsumer - // - // This class will perform the subscription portion of the a publisher/subscriber pattern - // over a CORBA EventChannel. 
If the Channel does not exist it will try to create - // and register the channel in the NamingService - // - // - class PushEventConsumer { - - public: - // - // Callback interface when data arrives event happens - // - typedef void (*DataArrivedCallbackFn)( const CORBA::Any &data ); - - // - // Interface definition that will be notified when data arrives on a EventChannel - // - class DataArrivedListener { - - public: - virtual void operator() ( const CORBA::Any &data ) = 0; - virtual ~DataArrivedListener() {}; - - }; - - /** - * Allow for member functions to receive connect/disconnect notifications - */ - template - class MemberDataArrivedListener : public DataArrivedListener - { - public: - typedef boost::shared_ptr< MemberDataArrivedListener< T > > SPtr; - - typedef void (T::*MemberFn)( const CORBA::Any &data ); - - static SPtr Create( T &target, MemberFn func ){ - return SPtr( new MemberDataArrivedListener(target, func ) ); - }; - - virtual void operator() ( const CORBA::Any &data ) - { - (target_.*func_)(data); - } - - // Only allow PropertySet_impl to instantiate this class. 
- MemberDataArrivedListener ( T& target, MemberFn func) : - target_(target), - func_(func) - { - } - private: - T& target_; - MemberFn func_; - }; - - /** - * Wrap Callback functions as ConnectionEventListener objects - */ - class StaticDataArrivedListener : public DataArrivedListener - { - public: - virtual void operator() ( const CORBA::Any &data ) - { - (*func_)(data); - } - - StaticDataArrivedListener ( DataArrivedCallbackFn func) : - func_(func) - { - } - - private: - - DataArrivedCallbackFn func_; - }; - - // - // Define base class for consumers - // - - typedef POA_CosEventComm::PushConsumer Consumer; - typedef CosEventComm::PushConsumer_var Consumer_var; - typedef CosEventComm::PushConsumer_ptr Consumer_ptr; - - - // - // Create the context for a PushEvent Supplier for a CORBA EventService - // - // @param channelName event channel name to subscribe to - // @param consumer actual consumer object that receives pushed data - // @param retries number of retries to perform when trying to establish subscriber interface (-1 tries forever) - // @param retry_wait number of millisecs to wait between retries - PushEventConsumer( const std::string &channelName, - Consumer* consumer, - const int retries=10, - const int retry_wait=10 ); - - PushEventConsumer( const std::string &channelName, - const std::string &ncName, - Consumer* consumer, - const int retries=10, - const int retry_wait=10 ); - - PushEventConsumer( const std::string &channelName, - const int retries=10, - const int retry_wait=10 ); - - PushEventConsumer( const std::string &channelName, - const std::string &ncName, - const int retries=10, - const int retry_wait=10 ); - // - // DTOR - // - virtual ~PushEventConsumer(); - - // - // - // - const Consumer *getConsumer() { return consumer; }; - - // - // Attach/detach sequence does not work for some reason. 
- // -#if 0 - Consumer *setConsumer( Consumer *newConsumer ) { - detach(); - consumer = newConsumer; - attach(); - } - - void attach(); - void dettach(); -#endif - - // - // Attach callback listener when data arrives to Consumer object - // - template< typename T > inline - void setDataArrivedListener(T &target, void (T::*func)( const CORBA::Any &data ) ) { - dataArrivedCB = boost::make_shared< MemberDataArrivedListener< T > >( boost::ref(target), func ); - }; - - template< typename T > inline - void setDataArrivedListener(T *target, void (T::*func)( const CORBA::Any &data ) ) { - dataArrivedCB = boost::make_shared< MemberDataArrivedListener< T > >( boost::ref(*target), func ); - }; - - void setDataArrivedListener( DataArrivedListener *newListener ); - void setDataArrivedListener( DataArrivedCallbackFn newListener ); - - protected: - - // - // CallbackConsumer - // - class CallbackConsumer : public Consumer { - public: - virtual ~CallbackConsumer() {}; - virtual void push( const CORBA::Any &data ) { - if ( parent.dataArrivedCB ) { - try{ - (*parent.dataArrivedCB)( data ); - } - catch(...){ - } - } - - }; - virtual void disconnect_push_consumer () {} ; - - private: - friend class PushEventConsumer; - - CallbackConsumer ( PushEventConsumer &parent) : - parent(parent) - { - } ; - - protected: - PushEventConsumer &parent; - - }; - - friend class CallbackConsumer; - - // - // Channel name - // - std::string name; - - // - // Naming context where channel is bound - // - std::string nc_name; - - // - // handle to the EventChannel - // - CosEventChannelAdmin::EventChannel_var channel; - - // - // Get Supplier Admin interface - // - CosEventChannelAdmin::ConsumerAdmin_var consumer_admin; - - // - // Get proxy supplier that is providing the data - // - CosEventChannelAdmin::ProxyPushSupplier_var proxy_for_supplier; - - // - // Push Consumer - // - Consumer *consumer; - - // - // PushConsumer Callback... 
- // - // Used by default Consumer object to call registered callback - // - boost::shared_ptr< DataArrivedListener > dataArrivedCB; - - // - // number of retries to perform (-1 == try forever) - // - int retries; - - // - // number of milliseconds to wait between retry operations - // - int retry_wait; - - private: - - void _init( ); - - - }; // end of PushEventConsumer diff --git a/redhawk/src/tools/LogEventAppender/corba.cpp b/redhawk/src/tools/LogEventAppender/corba.cpp deleted file mode 100644 index ca212d41c..000000000 --- a/redhawk/src/tools/LogEventAppender/corba.cpp +++ /dev/null @@ -1,1485 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK core. - * - * REDHAWK core is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "corba.h" -#include "logdebug.h" - -namespace corba { - - // - // used for boost shared pointer instantion when user - // supplied callback is provided - // - struct null_deleter - { - void operator()(void const *) const - { - } - }; - - - - ////////////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////////////// - - // - // - // Orb - Singleton class methods and declarations - // - - OrbPtr OrbContext::_singleton; - - bool OrbContext::_share = true; - - // - // Terminate - // - // Get the current execution context for interfacing with the ORB. - // resolve NamingService, rootPOA, POAManager, POACurrent, and omniINSPOA - // - void OrbContext::Terminate( bool forceShutdown ) { - if ( forceShutdown || _share == false ) { - if ( CORBA::is_nil(_singleton->orb) == false ) { - //_singleton->orb->shutdown(true); - _singleton->orb->destroy(); - } - } - _singleton.reset(); - } - - - // - // Init - // - OrbPtr OrbContext::Init( ) { - return Init(0,NULL); - } - - // - // Init - // - // Get the current execution context for interfacing with the ORB. 
- // resolve NamingService, rootPOA, POAManager, POACurrent, and omniINSPOA - // - OrbPtr OrbContext::Init( int argc, char **argv, const char* options[][2], bool share ) { - - int retval=1; - const char *action=""; - _share = share; - try { - _singleton = boost::shared_ptr< OrbContext >( new OrbContext() ); - OrbContext &corba_ctx = *_singleton; - corba_ctx.orb = CORBA::ORB_init(argc,argv, "omniORB4", options); - - corba_ctx.namingServiceCtx=CosNaming::NamingContextExt::_nil(); - corba_ctx.rootPOA=PortableServer::POA::_nil(); - - CORBA::Object_var obj; - action="resolve initial reference 'RootPOA'"; - LNDEBUG( "ORB", action ); - obj=corba_ctx.orb->resolve_initial_references("RootPOA"); - corba_ctx.rootPOA =PortableServer::POA::_narrow(obj); - if(CORBA::is_nil(corba_ctx.rootPOA)) - throw CORBA::OBJECT_NOT_EXIST(); - - action="activate the RootPOA's POAManager"; - LNDEBUG( "ORB", action ); - corba_ctx.poaManager =corba_ctx.rootPOA->the_POAManager(); - corba_ctx.poaManager->activate(); - - action="resolve initial reference 'NameService'"; - LNDEBUG( "ORB", action ); - obj=corba_ctx.orb->resolve_initial_references("NameService"); - corba_ctx.namingService = CosNaming::NamingContext::_narrow(obj); - if(CORBA::is_nil(corba_ctx.namingService)) - throw CORBA::OBJECT_NOT_EXIST(); - - action="resolve initial reference 'NameServiceExt'"; - LNDEBUG( "ORB", action ); - obj=corba_ctx.orb->resolve_initial_references("NameService"); - corba_ctx.namingServiceCtx = CosNaming::NamingContextExt::_narrow(obj); - if(CORBA::is_nil(corba_ctx.namingServiceCtx)) - throw CORBA::OBJECT_NOT_EXIST(); - - retval=0; - } - catch(CORBA::ORB::InvalidName& ex) { // resolve_initial_references - LNERROR( "ORB", "Failed to "< listRootContext( ) { - return listContext( OrbContext::Inst()->namingServiceCtx,"" ); - } - - - std::vector listContext(const CosNaming::NamingContext_ptr ctx, const std::string &dname ) { - CosNaming::BindingIterator_var bi; - CosNaming::BindingList_var bl; - 
CosNaming::Binding_var b; - const CORBA::ULong CHUNK = 0; - - //std::cout << " DIR:" << dname << std::endl; - std::vector t; - try{ - ctx->list(CHUNK, bl, bi); - while ( CORBA::is_nil(bi) == false && bi->next_one(b) ) { - //std::cout << " Convert Name " << std::endl; - CORBA::String_var s = CORBA::string_dup(b->binding_name[0].id); - std::string bname = s.in(); - if ( b->binding_type == CosNaming::nobject ) { - std::string n = dname; - n = n + "/" + bname; - //std::cout << " OBJ:" << n << std::endl; - t.push_back( n ); - } - else if ( b->binding_type == CosNaming::ncontext ) { - std::vector< std::string > slist; - CORBA::Object_ptr obj=ctx->resolve( b->binding_name ); - if ( CORBA::is_nil(obj) == false ) { - CosNaming::NamingContext_ptr nc= CosNaming::NamingContext::_narrow(obj); - std::string sdir=dname; - sdir = sdir+"/"+ bname; - slist = listContext( nc, sdir ); - t.insert(t.end(), slist.begin(), slist.end() ); - } - } - } - - } - catch(...) { - //std::cout << " Ut ohhhhh.... something bad happened" << std::endl; - } - - return t; - } - - - // - // Create a naming context from root directory - // - int CreateNamingContext( OrbPtr orb, const std::string &namingContext ) { - int retval=1; - CosNaming::NamingContext_ptr ctx = CreateNamingContextPath( orb, namingContext ); - if(!CORBA::is_nil(ctx)) retval=0; - return retval; - } - - - CosNaming::NamingContext_ptr CreateNamingContextPath( OrbPtr orb, const std::string &namingContext) { - - LNDEBUG( "CreateNamingContextPath", " NamingContext: " << namingContext ); - CosNaming::Name cname = str2name(namingContext.c_str()); - CosNaming::NamingContext_ptr ctx = CosNaming::NamingContext::_nil(); - CosNaming::NamingContext_ptr naming_ctx = orb->namingService; - - if(!CORBA::is_nil(orb->namingService) ) { - - try { - CosNaming::Name n; - n.length(1); - - // Drill down through contexts. 
- for(CORBA::ULong i=0; i<(cname.length()); ++i) { - n[0]=cname[i]; - try { - naming_ctx=naming_ctx->bind_new_context(n); - } - catch(CosNaming::NamingContext::AlreadyBound&) { - CORBA::Object_var obj2 =naming_ctx->resolve(n); - naming_ctx=CosNaming::NamingContext::_narrow(obj2); - } - // One of the context names is already bound to an object. Bail out! - if(CORBA::is_nil(naming_ctx)) - return ctx; - } - - ctx = naming_ctx; - } catch( const CORBA::Exception& ex) { - LNERROR( "CreateNamingContextPath", " CORBA " << ex._name() << " exception during, bind context:" << namingContext ); - } - } - return ctx; - } - - - - CosNaming::NamingContext_ptr ResolveNamingContextPath( OrbPtr orb, const std::string &namingContext ) { - - LNDEBUG( "ResolveNamingContextPath", " NamingContext: " << namingContext ); - CosNaming::Name cname = str2name(namingContext.c_str()); - CosNaming::NamingContext_ptr ctx = CosNaming::NamingContext::_nil(); - CosNaming::NamingContext_ptr naming_ctx = orb->namingService; - - if(!CORBA::is_nil(orb->namingService) ) { - - try { - CosNaming::Name n; - n.length(1); - - // Drill down through contexts. - for(CORBA::ULong i=0; i<(cname.length()); ++i) { - n[0]=cname[i]; - try { - naming_ctx->bind_context(n, naming_ctx ); - } - catch(CosNaming::NamingContext::AlreadyBound&) { - CORBA::Object_var obj2 =naming_ctx->resolve(n); - naming_ctx=CosNaming::NamingContext::_narrow(obj2); - } - - // One of the context names is already bound to an object. Bail out! 
- if(CORBA::is_nil(naming_ctx)) - return ctx; - } - - ctx = naming_ctx; - } catch( const CORBA::Exception& ex) { - LNERROR( "ResolveNamingContextPath", " CORBA " << ex._name() << " exception during, bind context:" << namingContext ); - } - } - return ctx; - } - - - - // - // Create a naming context from root directory - // - int DeleteNamingContext( OrbPtr orb, const std::string &namingContext ) { - LNDEBUG( "DeleteNamingContext", " NamingContext: " << namingContext ); - int retval=1; - CosNaming::Name cname = str2name(namingContext.c_str()); - if(!CORBA::is_nil(orb->namingService) ) { - try { - - CORBA::Object_var obj = orb->namingService->resolve(cname); - CosNaming::NamingContext_var context - = CosNaming::NamingContext::_narrow(obj); - - if (CORBA::is_nil(context)) return 1; - - context->destroy(); - - orb->namingService->unbind(cname); - - retval = 0; - } catch(CosNaming::NamingContext::NotFound& ex) { - LNWARN( "DeleteNamingContext", " Not Found :" << namingContext ); - } catch(CosNaming::NamingContext::CannotProceed & ex) { - LNWARN( "DeleteNamingContext", " CannotProceed :" << namingContext ); - } catch(CosNaming::NamingContext::InvalidName & ex) { - LNWARN( "DeleteNamingContext", " InvalidName :" << namingContext ); - } catch( const CORBA::Exception& ex) { - LNERROR( "DeleteNamingContext", " CORBA " << ex._name() << " exception during, bind context:" << namingContext ); - } - } - return retval; - } - - - - int DeleteNamingContextPath( OrbPtr orb, const std::string &namingContext ) { - - LNDEBUG( "DeleteNamingContextPath", " NamingContext: " << namingContext ); - int retval=0; - CosNaming::Name cname = str2name(namingContext.c_str()); - std::vector< std::pair< CosNaming::NamingContext_ptr, CosNaming::Name > > nc_list; - CosNaming::NamingContext_ptr naming_ctx = orb->namingService; - if(!CORBA::is_nil(orb->namingService) ) { - - try { - CosNaming::Name n; - n.length(1); - - // Drill down through contexts. 
- for(CORBA::ULong i=0; i<(cname.length()); ++i) { - n[0]=cname[i]; - try { - naming_ctx->bind_context(n, naming_ctx ); - } - catch(CosNaming::NamingContext::AlreadyBound&) { - CORBA::Object_var obj2 =naming_ctx->resolve(n); - naming_ctx=CosNaming::NamingContext::_narrow(obj2); - } - - // One of the context names is already bound to an object. Bail out! - if(CORBA::is_nil(naming_ctx)) - return -1; - - CosNaming::Name tname; - tname.length(i+1); - for(CORBA::ULong a=0; a( naming_ctx, tname ) ); - } - std::vector< std::pair< CosNaming::NamingContext_ptr, CosNaming::Name > >::reverse_iterator r=nc_list.rbegin(); - for ( ; r != nc_list.rend(); ++r ) { - try { - r->first->destroy(); - orb->namingService->unbind( r->second ); - } catch(CosNaming::NamingContext::NotFound& ex) { - LNWARN( "DeleteNamingContextPath", " Not Found :" << namingContext ); - retval=1; - break; - } catch(CosNaming::NamingContext::CannotProceed & ex) { - LNWARN( "DeleteNamingContextPath", " CannotProceed :" << namingContext ); - retval=1; - break; - } catch(CosNaming::NamingContext::InvalidName & ex) { - LNWARN( "DeleteNamingContextPath", " InvalidName :" << namingContext ); - retval=1; - break; - } catch( const CORBA::Exception& ex) { - LNERROR( "DeleteNamingContextPath", " CORBA " << ex._name() << " exception during, bind context:" << namingContext ); - retval=1; - break; - } - } - } catch( const CORBA::Exception& ex) { - LNERROR( "DeleteNamingContextPath", " CORBA " << ex._name() << " exception during, bind context:" << namingContext ); - } - } - return retval; - }; - - // - // Unbind from a namingcontext - // - int Unbind( const std::string &name , CosNaming::NamingContext_ptr namingContext ) { - - LNDEBUG( "Unbind", " Name: " << name ); - int retval=1; - try { - if(!CORBA::is_nil(namingContext) ) { - CosNaming::Name cname = str2name(name.c_str()); - namingContext->unbind(cname); - retval=0; - } - } - catch(CosNaming::NamingContext::NotFound &ex) { - LNWARN( "Unbind", " NameContext : Name 
NotFound "); - retval=0; - } - catch(CosNaming::NamingContext::CannotProceed &ex) { - LNERROR( "Unbind", " NameContext : CannotProceed "); - } - catch(CosNaming::NamingContext::InvalidName &ex) { - LNERROR( "Unbind", " NameContext : InvalidName "); - } - catch (const CORBA::Exception& ex) { - LNERROR( "Unbind", " CORBA " << ex._name() << " exception during unbind operation, name:" << name ); - } - - return retval; - - } - - - - // - // Unbind object from naming service - // - int Unbind( OrbPtr orb, const std::string &name , const std::string &namingContext ) { - - LNDEBUG( "Unbind", " NamingContext: <" << namingContext << "> Name:" << name ); - int retval=1; - CosNaming::Name cname = str2name(namingContext.c_str()); - CosNaming::NamingContext_var ctx = CosNaming::NamingContext::_nil(); - if(!CORBA::is_nil(orb->namingService) ) { - try { - if ( namingContext == "" ) { - LNDEBUG( "Unbind", " Use Root NamingContext "); - ctx = orb->namingService; - } - else { - LNDEBUG( "Bind", " LOOK UP NamingContext: " << namingContext ); - orb->namingService->bind_context( cname, ctx ); - } - LNDEBUG( "Unbind", " DIR: <" << namingContext << "> Name:" << name ); - return Unbind( name, ctx ); - } catch(CosNaming::NamingContext::AlreadyBound& ex) { - LNDEBUG( "Unbind", " Already Bound NamingContext : " << namingContext ); - CORBA::Object_var tmp = orb->namingService->resolve(cname); - ctx = CosNaming::NamingContext::_narrow(tmp); - LNDEBUG( "Unbind", " DIR: <" << namingContext << "> Name:" << name ); - return Unbind( name, ctx ); - } catch( const CORBA::Exception& ex) { - LNERROR( "Unbind", " CORBA " << ex._name() << " exception during, bind context:" << namingContext ); - } - - } - return retval; - }; - - - // - // Bind to naming context id = name, kind="" - // - int Bind(const std::string &name, CORBA::Object_ptr obj, CosNaming::NamingContext_ptr namingContext ) { - - LNDEBUG( "Bind", " created event channel " << name ); - int retval=1; - try { - if(!CORBA::is_nil(namingContext) ) 
{ - CosNaming::Name cname = str2name(name.c_str()); - try{ - LNDEBUG( "Bind", "Attempt to Bind, Name:" << name ); - namingContext->bind(cname, obj); - LNDEBUG( "Bind", "SUCCESS, for Name:" << name ); - retval=0; - } catch(CosNaming::NamingContext::AlreadyBound& ex) { - LNDEBUG( "Bind", "Already Bound, Name:" << name ); - namingContext->rebind(cname, obj); - retval=0; - } - } - } catch (const CORBA::Exception& ex) { - LNERROR( "Bind", " CORBA " << ex._name() << " exception during bind operation, name:" << name ); - } - - return retval; - }; - - - // - // Bind to naming context id = name, kind="" - // - int Bind( OrbPtr orb, const std::string &name, CORBA::Object_ptr obj, const std::string &namingContext, bool create_nc ) { - - LNDEBUG( "Bind", " NamingContext: " << namingContext << " Name:" << name ); - int retval=1; - CosNaming::Name cname = str2name(namingContext.c_str()); - CosNaming::NamingContext_ptr ctx = CosNaming::NamingContext::_nil(); - if(!CORBA::is_nil(orb->namingService) ) { - try { - if ( namingContext == "" ) { - LNDEBUG( "Bind", " Use Root NamingContext "); - ctx = orb->namingService; - } - else { - if ( create_nc ) { - LNDEBUG( "Bind", " Create NamingContext Path" << namingContext ); - ctx = CreateNamingContextPath(orb, namingContext); - } - else { - LNDEBUG( "Bind", " LOOK UP NamingContext " << namingContext ); - orb->namingService->bind( cname, ctx ); - } - } - - } catch(CosNaming::NamingContext::AlreadyBound& ex) { - LNDEBUG( "Bind", " Already Bound NamingContext : " << namingContext ); - CORBA::Object_var tmp = orb->namingService->resolve(cname); - ctx = CosNaming::NamingContext::_narrow(tmp); - - } catch( const CORBA::Exception& ex) { - LNERROR( "Bind", " CORBA " << ex._name() << " exception during, bind context:" << namingContext ); - } - - if ( !CORBA::is_nil(ctx) ) { - LNDEBUG( "Bind", " DIR:" << namingContext << " Name:" << name ); - return Bind( name, obj, ctx ); - } - } - return retval; - }; - - - - - 
//////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////// - - // - // EventChannel Convenience Methods - // - - // - // GetEventChannel - // - // Will first lookup an event channel given the value of the name parameter... it will try to resolve the - // name using different event channel resolution methods: - // -) resolve if channel defined with InitRef method and resolve_initial_reference method - // -) resolve as corbaname corbaname::#channelname - // -) resolve with corbaloc - // - // If channel was not found and create==true then create the channel from the EventChannelFactory - // - CosEventChannelAdmin::EventChannel_ptr GetEventChannel ( corba::OrbPtr &orb, - const std::string& name, - const bool create, - const std::string &host ) { - LNDEBUG("GetEventChannel", " : NamingService look up, Channel " << name ); - - // return value if no event channel was found or error occured - CosEventChannelAdmin::EventChannel_var event_channel = CosEventChannelAdmin::EventChannel::_nil(); - - // - // Look up event channel - // if no channel is found then try to lookup using InitRef - // if no channel is found then try to lookup using corbaname method - // if no channel is found then try to lookup using corbaloc method. 
- // - // if all options fail then return nil if create== false - // - - bool found=false; - std::string tname; - std::string nc_name(""); - - // - // try to resolve using channel name as InitRef and resolve_initial_references - // - try { - if ( found == false ) { - LNDEBUG( "GetEventChannel", " : Trying InitRef Lookup " << name ); - CORBA::Object_var obj = orb->orb->resolve_initial_references(name.c_str()); - event_channel = CosEventChannelAdmin::EventChannel::_narrow(obj); - found =true; - LNDEBUG( "GetEventChannel", " : FOUND EXISTING, Channel " << name ); - } - }catch (const CORBA::Exception& e) { - LNWARN( "GetEventChannel", " Unable to lookup with InitRef:" << name << ", CORBA RETURNED(" << e._name() << ")" ); - } - - - // - // try to resolve with corbaname and string_to_object method - // - try { - std::ostringstream os; - if ( found == false ) { - if ( name.find("corbaname") == std::string::npos ) { - if ( nc_name != "" ) - os << "corbaname:rir:#"<< nc_name << "/" << name; - else - os << "corbaname:rir:#"<< name; - } - else - os << name; - tname=os.str(); - LNDEBUG( "GetEventChannel", " : Trying corbaname resolution " << tname ); - CORBA::Object_var obj = obj=orb->orb->string_to_object(tname.c_str()); - event_channel = CosEventChannelAdmin::EventChannel::_narrow(obj); - found =true; - LNDEBUG( "GetEventChannel", " : FOUND EXISTING, Channel " << tname ); - } - - }catch (const CORBA::Exception& e) { - LNWARN( "GetEventChannel", " Unable to lookup with corbaname: URI:" << tname << ", CORBA RETURNED(" << e._name() << ")"); - } - - - // - // try to resolve with corbaloc method and string_to_object method - // - try { - if ( found == false ) { - std::ostringstream os; - // - // last gasp... 
try the corbaloc method...corbaloc::host:11169/ - // - os << "corbaloc::"<orb->string_to_object(tname.c_str()); - if ( !CORBA::is_nil(obj) ) { - event_channel = CosEventChannelAdmin::EventChannel::_narrow(obj); - found = true; - LNDEBUG( "GetEventChannel", " : FOUND EXISTING, Channel " << tname ); - } - else { - LNDEBUG( "GetEventChannel", " : SEARCH FOR Channel " << tname << " FAILED"); - } - } - }catch (const CORBA::Exception& e) { - LNWARN( "GetEventChannel", " Unable to lookup with corbaloc URI:" << tname << ", CORBA RETURNED(" << e._name() << ")" ); - } - - try{ - if ( !found && create ) { - - LNDEBUG( "GetEventChannel", " CREATE NEW CHANNEL " << name ); - event_channel = CreateEventChannel( orb, name ); - if ( !CORBA::is_nil(event_channel) ) - LNINFO( "GetEventChannel", " --- CREATED NEW CHANNEL ---" << name ); - } - } catch (const CORBA::Exception& e) { - LNERROR( "GetEventChannel", " CORBA (" << e._name() << ") during event creation, channel " << name ); - } - - return event_channel._retn(); - } - - - CosEventChannelAdmin::EventChannel_ptr GetEventChannel ( corba::OrbPtr &orb, - const std::string& name, - const std::string &nc_name, - const bool create, - const std::string &host ) - { - - // return value if no event channel was found or error occured - CosEventChannelAdmin::EventChannel_var event_channel = CosEventChannelAdmin::EventChannel::_nil(); - - // - // Look up event channel in NamingService from root context... - // if lookup fails then return nil if create== false, - // else try and create a new EventChannel with name and nc_name - // - - bool found=false; - std::string tname; - - try { - // - // Lookup in NamingService... 
- // - LNDEBUG("GetEventChannel", " : NamingService look up, NC<"< Channel " << name ); - std::string cname=name; - if ( nc_name != "" ) - cname=nc_name+"/"+name; - - LNDEBUG("GetEventChannel", " : NamingService look up : " << cname ); - CORBA::Object_var obj = orb->namingServiceCtx->resolve_str(cname.c_str()); - event_channel = CosEventChannelAdmin::EventChannel::_narrow(obj); - - LNDEBUG("GetEventChannel", " : FOUND EXISTING, Channel NC<"< Channel " << name ); - found = true; - } catch (const CosNaming::NamingContext::NotFound&) { - LNWARN("GetEventChannel", " Unable to resolve event channel (" << name << ") in NamingService..." ); - } catch (const CORBA::Exception& e) { - LNERROR("GetEventChannel", " CORBA (" << e._name() << ") exception during event channel look up, CH:" << name ); - } - - - try{ - if ( !found && create ) { - - LNDEBUG( "GetEventChannel", " CREATE NEW CHANNEL " << name ); - event_channel = CreateEventChannel( orb, name, nc_name ); - if ( !CORBA::is_nil(event_channel) ) - LNINFO( "GetEventChannel", " --- CREATED NEW CHANNEL ---" << name ); - } - } catch (const CORBA::Exception& e) { - LNERROR( "GetEventChannel", " CORBA (" << e._name() << ") during event creation, channel " << name ); - } - - return event_channel._retn(); - } - - // - // CreateEventChannel - // - // @param orb context of the orb we are associated with - // @param name human readable path to the event channel being requested - // @parm bind bind the channel name to the object in the NamingService if channel was created - // - CosEventChannelAdmin::EventChannel_ptr CreateEventChannel ( corba::OrbPtr &orb, - const std::string& name, - corba::NS_ACTION action ) { - return CreateEventChannel( orb, name, "", action ); - } - - CosEventChannelAdmin::EventChannel_ptr CreateEventChannel ( corba::OrbPtr &orb, - const std::string& name, - const std::string &nc_name, - corba::NS_ACTION action ) - { - - CosEventChannelAdmin::EventChannel_var event_channel = 
CosEventChannelAdmin::EventChannel::_nil(); - omniEvents::EventChannelFactory_var event_channel_factory = GetEventChannelFactory( orb ); - - LNDEBUG( "CreateEventChannel", " Request to create event channel:" << name << " bind action:" << action ); - - if (CORBA::is_nil(event_channel_factory)) { - LNERROR( "CreateEventChannel", "CHANNEL(CREATE): Could not find EventChannelFactory" ); - return event_channel._retn(); - } - - CosLifeCycle::Key key; - key.length (1); - key[0].id = CORBA::string_dup("EventChannel"); - key[0].kind = CORBA::string_dup("object interface"); - - LNDEBUG( "CreateEventChannel", " action - event channel factory api" ); - if(!event_channel_factory->supports(key)) - { - LNWARN( "CreateEventChannel", " EventChannelFactory does not support Event Channel Interface!" ); - return event_channel._retn(); - } - - // - // Our EventChannels will always be created with InsName - // - LNINFO( "CreateEventChannel", " Request to create event channel:" << name.c_str() << " bind action:" << action ); - CosLifeCycle::Criteria criteria; - criteria.length(2); - criteria[0].name=CORBA::string_dup("InsName"); - criteria[0].value<<=name.c_str(); - criteria[1].name=CORBA::string_dup("CyclePeriod_ns"); - criteria[1].value<<=(CORBA::ULong)10; - - // - // Create Event Channel Object. 
- // - LNDEBUG( "CreateEventChannel", " action - create EventChannel object" ); - - CORBA::Object_var obj; - try { - obj =event_channel_factory->create_object(key, criteria); - } - catch (CosLifeCycle::CannotMeetCriteria& ex) /* create_object() */ { - LNERROR( "CreateEventChannel", "create_object failed, channel: " << name << " reason: CannotMeetCriteria " ); - } - catch (CosLifeCycle::InvalidCriteria& ex) /* create_object() */ { - LNERROR( "CreateEventChannel", "create_object failed, channel: " << name << " reason: InvalidCriteria " ); - if(ex.invalid_criteria.length()>0) { - int j; - for ( j=0; (unsigned int)j < ex.invalid_criteria.length(); j++ ) { - LNERROR( "CreateEventChannel", "--- Criteria Name: " << ex.invalid_criteria[j].name ); - CORBA::ULong xx; - ex.invalid_criteria[j].value >>= xx; - LNERROR( "CreateEventChannel", "--- Criteria Value : " << xx ); - } - } - } - catch( CORBA::Exception &ex ) { - LNERROR( "CreateEventChannel", " create_object failed, channel:" << name << " reason: corba exception" ); - } - - if (CORBA::is_nil(obj)) { - LNERROR( "CreateEventChannel", " Factory failed to create channel: " << name ); - return event_channel._retn(); - } - - try { - LNDEBUG( "CreateEventChannel", " action - Narrow EventChannel" ); - event_channel = CosEventChannelAdmin::EventChannel::_narrow(obj); - } - catch( CORBA::Exception &ex ) { - LNERROR( "CreateEventChannel", " Failed Narrow to EventChannel for:" << name ); - } - LNDEBUG( "CreateEventChannel", " created event channel " << name ); - try { - - if(!CORBA::is_nil(orb->namingService) && ( action == NS_BIND) ) { - Bind(orb, name, event_channel.in(), nc_name, true ); - } - - } - catch (const CosLifeCycle::InvalidCriteria& ex) { - LNERROR( "CreateEventChannel", " CHANNEL: Invalid Criteria: " << ex._name() << " - for creating event channel " << name ); - } catch (const CORBA::Exception& ex) { - LNERROR( "CreateEventChannel", " CHANNEL: CORBA " << ex._name() << " exception during create operation, CHANNEL:" << 
name ); - } - - - LNDEBUG( "CreateEventChannel", " completed create event channel : " << name ); - return event_channel._retn(); - }; - - - // - // DeleteEventChannel - // - // @param orb context of the orb we are associated with - // @param name name of the event channel to delete - // @parm unbind perform an unbind operation with the NamingService if channel was deleted - // - // @returns 0 operation passed no issues or errors - // @returns > 0 operation passed but issues were found but not a failure - // @returns < 0 operation failed due to execeptions from the orb. - // - // - // - int DeleteEventChannel ( corba::OrbPtr &orb, const std::string& name, corba::NS_ACTION action ) { - int retval = 0; - - // return value if no event channel was found or error occured - CosEventChannelAdmin::EventChannel_var event_channel = CosEventChannelAdmin::EventChannel::_nil(); - - event_channel = corba::GetEventChannel( orb, name, false ); - if ( CORBA::is_nil(event_channel) == true ) { - LNDEBUG( "DeleteEventChannel", " Cannot find event channel name " << name << " to object, try to remove from naming context." 
); - if ( ( action == NS_UNBIND) && Unbind( orb, name ) == 0 ) { - LNINFO( "DeleteEventChannel", "Deregister EventChannel with the NamingService, CHANNEL:" << name ); - } - retval=-1; - return retval; - } - - try { - event_channel->destroy(); - LNINFO( "DeleteEventChannel", " Deleted event channel, CHANNEL: " << name ); - } - catch(CORBA::SystemException& ex) { - // this will happen if channel is destroyed but - LNWARN( "DeleteEventChannel", " System exception occured, ex " << ex._name() ); - retval=-1; - } - catch(CORBA::Exception& ex) { - LNWARN( "DeleteEventChannel", " CORBA exception occured, ex " << ex._name() ); - retval=-1; - } - - try { - if( (action == NS_UNBIND) && Unbind( orb, name ) == 0 ) { - LNINFO( "DeleteEventChannel", "Deregister EventChannel with the NamingService, CHANNEL:" << name ); - } - - } catch(CosNaming::NamingContext::InvalidName& ex) { - LNWARN( "DeleteEventChannel", " Invalid name to unbind, name: " << name ); - } - catch(CosNaming::NamingContext::NotFound& ex) { // resolve - LNWARN( "DeleteEventChannel", " Name not found, name: " << name ); - } - catch(CosNaming::NamingContext::CannotProceed& ex) { // resolve - LNERROR( "DeleteEventChannel", " Cannot Process error, name: " << name ); - retval=-1; - } - - return retval; - } - - int DeleteEventChannel ( corba::OrbPtr &orb, - const std::string& name, - const std::string& nc_name, - corba::NS_ACTION action ) - { - - int retval = 0; - - // return value if no event channel was found or error occured - CosEventChannelAdmin::EventChannel_var event_channel = CosEventChannelAdmin::EventChannel::_nil(); - - event_channel = corba::GetEventChannel( orb, name, nc_name, false ); - if ( CORBA::is_nil(event_channel) == true ) { - LNDEBUG( "DeleteEventChannel", " Cannot find event channel name " << name << " to object, try to remove from naming context." 
); - if ( ( action == NS_UNBIND) && Unbind( orb, name, nc_name ) == 0 ) { - LNINFO( "DeleteEventChannel", "Deregister EventChannel with the NamingService, CHANNEL:" << name ); - } - retval=-1; - return retval; - } - - try { - event_channel->destroy(); - LNINFO( "DeleteEventChannel", " Deleted event channel, CHANNEL: " << name ); - } - catch(CORBA::SystemException& ex) { - // this will happen if channel is destroyed but - LNWARN( "DeleteEventChannel", " System exception occured, ex " << ex._name() ); - retval=-1; - } - catch(CORBA::Exception& ex) { - LNWARN( "DeleteEventChannel", " CORBA exception occured, ex " << ex._name() ); - retval=-1; - } - - try { - if( (action == NS_UNBIND) && Unbind( orb, name, nc_name ) == 0 ) { - LNINFO( "DeleteEventChannel", "Deregister EventChannel with the NamingService, CHANNEL:" << name ); - } - - } catch(CosNaming::NamingContext::InvalidName& ex) { - LNWARN( "DeleteEventChannel", " Invalid name to unbind, name: " << name ); - } - catch(CosNaming::NamingContext::NotFound& ex) { // resolve - LNWARN( "DeleteEventChannel", " Name not found, name: " << name ); - } - catch(CosNaming::NamingContext::CannotProceed& ex) { // resolve - LNERROR( "DeleteEventChannel", " Cannot Process error, name: " << name ); - retval=-1; - } - - return retval; - } - - - // - // (Taken from eventc.cc) - // - // - omniEvents::EventChannelFactory_ptr GetEventChannelFactory ( corba::OrbPtr &orb ) { - - CORBA::Object_var ecf_obj; - omniEvents::EventChannelFactory_var ecf = omniEvents::EventChannelFactory::_nil(); - LNDEBUG( "GetEventChannelFactory", " Look up EventChannelFactory" ); - try { - //ecf_obj = orb.namingServiceCtx->resolve_str(str2name("EventChannelFactory")); - ecf_obj = orb->namingServiceCtx->resolve_str("EventChannelFactory"); - if (!CORBA::is_nil(ecf_obj)) { - LNDEBUG( "GetEventChannelFactory", " Narrow object to EventChannelFactory" ); - ecf = omniEvents::EventChannelFactory::_narrow(ecf_obj); - LNDEBUG( "GetEventChannelFactory", " Narrowed to ... 
EventChannelFactory" ); - } - } catch (const CosNaming::NamingContext::NotFound&) { - LNWARN( "GetEventChannelFactory", " No naming service entry for 'EventChannelFactory'" ); - } catch (const CORBA::Exception& e) { - LNWARN( "GetEventChannelFactory", " CORBA " << e._name() << ", exception looking up EventChannelFactory." ); - } - return ecf._retn(); - - } - - //////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////// - - // - // PushEventSupplier class implementation - // - PushEventSupplier::PushEventSupplier( corba::OrbPtr &orb, - const std::string &channelName, - Supplier *inSupplier, - int retries, - int retry_wait ) : - name(channelName), - nc_name(""), - supplier(inSupplier), - retries(retries), - retry_wait(retry_wait) - { - _init(orb); - } - - - PushEventSupplier::PushEventSupplier( corba::OrbPtr &orb, - const std::string &channelName, - const std::string &ncName, - Supplier *inSupplier, - int retries, - int retry_wait ) : - name(channelName), - nc_name(ncName), - supplier(inSupplier), - retries(retries), - retry_wait(retry_wait) - { - _init(orb); - } - - PushEventSupplier::PushEventSupplier( corba::OrbPtr &orb, - const std::string &channelName, - int retries, - int retry_wait ) : - name(channelName), - nc_name(""), - supplier(NULL), - retries(retries), - retry_wait(retry_wait) - { - _init(orb); - } - - PushEventSupplier::PushEventSupplier( corba::OrbPtr &orb, - const std::string &channelName, - const std::string &ncName, - int retries, - int retry_wait ): - name(channelName), - nc_name(ncName), - supplier(NULL), - retries(retries), - retry_wait(retry_wait) - { - _init(orb); - } - - void PushEventSupplier::_init( corba::OrbPtr &orb ) - { - LNDEBUG("PushEventSupplier", " GetEventChannel " << name ); - channel = corba::GetEventChannel( orb, name, nc_name, true ); - - if ( CORBA::is_nil(channel) == true ) { - LNERROR("PushEventSupplier", " Channel resource not available, channel " << 
name ); - return; - } - - int tries=retries; - do - { - try { - supplier_admin = channel->for_suppliers (); - break; - } - catch (CORBA::COMM_FAILURE& ex) { - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while ( tries ); - - if ( CORBA::is_nil(supplier_admin) ) return; - - LNDEBUG("PushEventSupplier", "Obtained SupplierAdmin." ); - - tries=retries; - do { - try { - proxy_for_consumer = supplier_admin->obtain_push_consumer (); - break; - } - catch (CORBA::COMM_FAILURE& ex) { - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while ( tries ); - - LNDEBUG("PushEventSupplier", "Obtained ProxyPushConsumer." ); - if ( CORBA::is_nil(proxy_for_consumer) ) return; - - if ( supplier == NULL ) { - LNDEBUG("PushEventSupplier", "Create Local Supplier Object." ); - supplier = new corba::PushEventSupplier::Supplier(); - } - - CosEventComm::PushSupplier_var sptr =CosEventComm::PushSupplier::_nil(); - sptr = supplier->_this(); - - // now attach supplier to the proxy - do { - try { - proxy_for_consumer->connect_push_supplier(sptr.in()); - } - catch (CORBA::BAD_PARAM& ex) { - LNERROR("PushEventSupplier", "Caught BAD_PARAM " ); - break; - } - catch (CosEventChannelAdmin::AlreadyConnected& ex) { - break; - } - catch (CORBA::COMM_FAILURE& ex) { - LNERROR("PushEventSupplier", "Caught COMM_FAILURE Exception " << - "connecting Push Supplier! Retrying..." ); - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while ( tries ); - - - LNDEBUG("PushEventSupplier", "Connected Push Supplier." ); - - }; - - PushEventSupplier::~PushEventSupplier( ) { - - LNDEBUG("PushEventSupplier", "DTOR - START." 
); - - int tries = retries; - if ( CORBA::is_nil(proxy_for_consumer) == false ) { - // Disconnect - retrying on Comms Failure. - do { - try { - proxy_for_consumer->disconnect_push_consumer(); - break; - } - catch (CORBA::COMM_FAILURE& ex) { - LNERROR("PushEventSupplier", "Caught COMM_FAILURE Exception " - << "disconnecting Push Supplier! Retrying..." ); - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while(tries); - LNDEBUG("PushEventSupplier", "ProxyPushConsumer disconnected." ); - - } - - if ( supplier ) { - supplier->_remove_ref(); - } - - LNDEBUG("PushEventSupplier", "DTOR - END." ); - - } - - //////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////// - - // - // PushEventConsumer class implementation - // - PushEventConsumer::PushEventConsumer( corba::OrbPtr &orb, - const std::string &channelName, - Consumer *inConsumer, - const int retries, - const int retry_wait ): - name(channelName), - nc_name(""), - consumer(inConsumer), - retries(retries), - retry_wait(retry_wait) - { - _init(orb); - } - - PushEventConsumer::PushEventConsumer( corba::OrbPtr &orb, - const std::string &channelName, - const std::string &ncName, - Consumer *inConsumer, - const int retries, - const int retry_wait ): - name(channelName), - nc_name(ncName), - consumer(inConsumer), - retries(retries), - retry_wait(retry_wait) - { - _init(orb); - } - - - PushEventConsumer::PushEventConsumer( corba::OrbPtr &orb, - const std::string &channelName, - const int retries, - const int retry_wait ): - name(channelName), - nc_name(""), - consumer(NULL), - retries(retries), - retry_wait(retry_wait) - { - _init(orb); - } - - - PushEventConsumer::PushEventConsumer( corba::OrbPtr &orb, - const std::string &channelName, - const std::string &ncName, - const int retries, - const int retry_wait ): - name(channelName), 
- nc_name(ncName), - consumer(NULL), - retries(retries), - retry_wait(retry_wait) - { - _init(orb); - } - - - void PushEventConsumer::_init( corba::OrbPtr &orb ) - { - LNDEBUG("PushEventConsumer", " GetEventChannel " << name ); - try { - channel = corba::GetEventChannel( orb, name, nc_name, true ); - } - catch(...){ - LNERROR("PushEventConsumer", " Channel " << name ); - return; - } - - if ( CORBA::is_nil(channel) == true ) { - LNERROR("PushEventConsumer", " Channel resource not available, channel " << name ); - return; - } - - int tries=retries; - do - { - try { - consumer_admin = channel->for_consumers (); - break; - } - catch (CORBA::COMM_FAILURE& ex) { - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while ( tries ); - - if ( CORBA::is_nil(consumer_admin) ) return; - - LNDEBUG("PushEventConsumer", "Obtained ConsumerAdmin." ); - - tries=retries; - do { - try { - proxy_for_supplier = consumer_admin->obtain_push_supplier (); - break; - } - catch (CORBA::COMM_FAILURE& ex) { - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while ( tries ); - - LNDEBUG("PushEventConsumer", "Obtained ProxyPushConsumer." 
); - if ( CORBA::is_nil(proxy_for_supplier) ) return; - - if ( consumer == NULL ) { - consumer = new corba::PushEventConsumer::CallbackConsumer(*this); - } - if ( consumer == NULL ) return; - CosEventComm::PushConsumer_var sptr =CosEventComm::PushConsumer::_nil(); - sptr = consumer->_this(); - - // now attach supplier to the proxy - do { - try { - // connect the the consumer object to the supplier's proxy - proxy_for_supplier->connect_push_consumer(sptr.in()); - } - catch (CORBA::BAD_PARAM& ex) { - LNERROR("PushEventConsumer", "Caught BAD_PARAM " ); - break; - } - catch (CosEventChannelAdmin::AlreadyConnected& ex) { - break; - } - catch (CORBA::COMM_FAILURE& ex) { - LNERROR("PushEventConsumer", "Caught COMM_FAILURE Exception " << - "connecting Push Consumer! Retrying..." ); - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while ( tries ); - - - LNDEBUG("PushEventConsumer", "Connected Push Consumer." ); - - }; - - PushEventConsumer::~PushEventConsumer( ) { - - LNDEBUG("PushEventConsumer", "DTOR - START." ); - int tries = retries; - if ( CORBA::is_nil(proxy_for_supplier) == false ) { - // Disconnect - retrying on Comms Failure. - do { - try { - proxy_for_supplier->disconnect_push_supplier(); - break; - } - catch (CORBA::COMM_FAILURE& ex) { - LNERROR("PushEventConsumer", "Caught COMM_FAILURE Exception " - << "disconnecting Push Consumer! Retrying..." ); - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while(tries); - LNDEBUG("PushEventConsumer", "ProxyPushSupplier disconnected." ); - - } - - if ( consumer ) { - consumer->_remove_ref(); - } - - LNDEBUG("PushEventConsumer", "DTOR - END." ); - } - -#if 0 - void PushEventConsumer::detach() { - - LNDEBUG("PushEventConsumer", "DETTACH - START." 
); - int tries = retries; - if ( CORBA::is_nil(proxy_for_supplier) == false ) { - // Disconnect - retrying on Comms Failure. - do { - try { - proxy_for_supplier->disconnect_push_supplier(); - break; - } - catch (CORBA::COMM_FAILURE& ex) { - LNERROR("PushEventConsumer", "Caught COMM_FAILURE Exception " - << "disconnecting Push Consumer! Retrying..." ); - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while(tries); - LNDEBUG("PushEventConsumer", "ProxyPushSupplier disconnected." ); - - } - else { - LNDEBUG("PushEventConsumer", "DETTACH - ProxyForSupplier is Nil." ); - } - } - - - - void PushEventConsumer::attach() { - - LNDEBUG("PushEventConsumer", "ATTACH - START." ); - if ( consumer == NULL ) return; - - LNDEBUG("PushEventConsumer", "Register Consumer." ); - CosEventComm::PushConsumer_var sptr = consumer->_this(); - int tries = retries; - - if ( CORBA::is_nil(proxy_for_supplier) == false ) { - // now attach supplier to the proxy - do { - try { - // connect the the consumer object to the supplier's proxy - proxy_for_supplier->connect_push_consumer(sptr.in()); - } - catch (CORBA::BAD_PARAM& ex) { - LNERROR("PushEventConsumer", "Caught BAD_PARAM " ); - break; - } - catch (CosEventChannelAdmin::AlreadyConnected& ex) { - LNDEBUG("PushEventConsumer", "ATTACH - Already Connected Consumer." ); - break; - } - catch (CORBA::COMM_FAILURE& ex) { - LNERROR("PushEventConsumer", "Caught COMM_FAILURE Exception " << - "connecting Push Consumer! Retrying..." ); - } - if ( retry_wait > 0 ) { - boost::this_thread::sleep( boost::posix_time::microseconds( retry_wait*1000 ) ); - } else { - boost::this_thread::yield(); - } - tries--; - } while ( tries ); - - } - else { - LNDEBUG("PushEventConsumer", "ATTACH - ProxyForSupplier is Nil." 
); - } - } - -#endif - - void PushEventConsumer::setDataArrivedListener( DataArrivedListener *newListener ) { - dataArrivedCB = boost::shared_ptr< DataArrivedListener >(newListener, null_deleter()); - } - - void PushEventConsumer::setDataArrivedListener( DataArrivedCallbackFn newListener ) { - dataArrivedCB = boost::make_shared< StaticDataArrivedListener >( newListener ); - } - - - -}; - - diff --git a/redhawk/src/tools/LogEventAppender/corba.h b/redhawk/src/tools/LogEventAppender/corba.h deleted file mode 100644 index bc12d52c7..000000000 --- a/redhawk/src/tools/LogEventAppender/corba.h +++ /dev/null @@ -1,616 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK core. - * - * REDHAWK core is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. 
- */ -#ifndef _EVENT_APPENDER_CORBA_H -#define _EVENT_APPENDER_CORBA_H -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - - namespace corba - { - - // naming service actions - enum NS_ACTION { NS_NOBIND=0, NS_BIND=1, NS_REBIND=2, NS_UNBIND=3 }; - - struct OrbContext; - - typedef boost::shared_ptr< OrbContext > OrbPtr; - - // - // OrbContext - // - // Context for access to ORB and common CORBA services - // - struct OrbContext { - - // orb instantiation - CORBA::ORB_var orb; - - // root POA for to handle object requests - PortableServer::POA_var rootPOA; - - // use for activating objects - PortableServer::POAManager_var poaManager; - - // handle to naming service - CosNaming::NamingContext_var namingService; - - // handle to naming service - CosNaming::NamingContextExt_var namingServiceCtx; - - virtual ~OrbContext() {}; - - // - // establish singleton context for ORB ( rootPOA, root NamingService, root activator ) - // - static OrbPtr Init( int argc, char **argv, const char *options[][2]=NULL, bool share=true ); - - static OrbPtr Init(); // setup orb context using /etc/omniORB.cfg - - static OrbPtr Inst( ) { return _singleton; }; - - static void Terminate( bool forceShutdown = false ); - - private: - - OrbContext() {}; - - static OrbPtr _singleton; - - static bool _share; - - }; - - // - // Naming Methods - // - - // - // return a CosNaming sequence for the stringified name - // - CosNaming::Name *stringToName( std::string &name ); - - // - // return a CosNaming sequence for the stringified name - // - CosNaming::Name str2name(const char* namestr); - - - // - std::vector listRootContext(); - std::vector listContext( const CosNaming::NamingContext_ptr ctx, const std::string &dname=""); - - - int CreateNamingContext( OrbPtr orb, const std::string &name ); - CosNaming::NamingContext_ptr CreateNamingContextPath( OrbPtr orb, const std::string &nc ); - CosNaming::NamingContext_ptr ResolveNamingContextPath( 
OrbPtr orb, const std::string &nc ); - int DeleteNamingContext( OrbPtr orb, const std::string &name ); - int DeleteNamingContextPath( OrbPtr orb, const std::string &name ); - int Bind( const std::string &name, CORBA::Object_ptr obj, CosNaming::NamingContext_ptr namingContext ); - int Bind( OrbPtr orb, const std::string &name, CORBA::Object_ptr obj, const std::string &dir="", const bool create_nc=false ); - - int Unbind( const std::string &name, CosNaming::NamingContext_ptr namingContext ); - int Unbind( OrbPtr orb, const std::string &name, const std::string &namingContext="" ); - - // - // - // Event Channel Methods - // - // - - - // - // Look up the EventChannelFactory for the current ORB context - // - omniEvents::EventChannelFactory_ptr GetEventChannelFactory( corba::OrbPtr &orb ); - - // - // GetEventChannel - // - // Lookup an EventChannel using resolve references routines, (resolve, corbaname, corbaloc) - // if not found and create == true - // create EventChannel in omniEvents, there is no binding to the name service - // - CosEventChannelAdmin::EventChannel_ptr GetEventChannel ( corba::OrbPtr & orb, - const std::string& name, - const bool create=false, - const std::string &host="localhost" ); - - // - // GetEventChannel - // - // Lookup an EventChannel using the omniNames service - // if not found and create == true - // create EventChannel in omniEvents, bind the event channel to nc_name/name - // - CosEventChannelAdmin::EventChannel_ptr GetEventChannel ( corba::OrbPtr & orb, - const std::string& name, - const std::string& ns_context, - const bool create=false, - const std::string &host="localhost" ); - // - // CreateEventChannel - // - // Create an EventChannel within the current ORB context, once created bind to the same name.... 
- // - CosEventChannelAdmin::EventChannel_ptr CreateEventChannel( corba::OrbPtr & orb, - const std::string& name, - NS_ACTION action=corba::NS_BIND ); - - CosEventChannelAdmin::EventChannel_ptr CreateEventChannel( corba::OrbPtr & orb, - const std::string& name, - const std::string& ns_context, - NS_ACTION action=corba::NS_BIND ); - - // - // Delete Event Channel - // - int DeleteEventChannel( corba::OrbPtr& orb, - const std::string& name, - NS_ACTION action=NS_UNBIND); - - int DeleteEventChannel( corba::OrbPtr& orb, - const std::string& name, - const std::string& nc_name, - NS_ACTION action=NS_UNBIND); - - - // - // PushEventSupplier - // - // This class will perform the publication portion of the a publisher/subscriber pattern - // over a CORBA EventChannel. If the Channel does not exist it will try to create - // and register the channel in the NamingService - // - // - class PushEventSupplier { - - public: - - // - // - // - class Supplier : virtual public POA_CosEventComm::PushSupplier { - public: - Supplier () {} ; - virtual ~Supplier() {}; - virtual void disconnect_push_supplier () {} ; - }; - - - // - // Create the context for a PushEvent Supplier for a CORBA EventService - // - // @param orb reference to corba context and major services - // @param channelName event channel name to subscribe to or create - // @param supplier actual supplier object that pushes information to event channel - // @param retries number of retries to perform when trying to establish publisher interface - // @param retry_wait number of millisecs to wait between retries - PushEventSupplier( corba::OrbPtr &orb, - const std::string &channelName, - Supplier *supplier, - const int retries=10, - const int retry_wait=10 ); - - - - PushEventSupplier( corba::OrbPtr &orb, - const std::string &channelName, - const std::string &ncName, - Supplier *supplier, - const int retries=10, - const int retry_wait=10 ); - - - // - // Create the context for a PushEvent Supplier for a CORBA EventService, 
uses internal Supplier object - // to perform push event operation - // - // @param orb reference to corba context and major services - // @param channelName event channel name to subscribe to or create - // @param retries number of retries to perform when trying to establish publisher interface - // @param retry_wait number of millisecs to wait between retries - PushEventSupplier( corba::OrbPtr &orb, - const std::string &channelName, - const int retries=10, - const int retry_wait=10 ); - - - PushEventSupplier( corba::OrbPtr &orb, - const std::string &channelName, - const std::string &ncName, - const int retries=10, - const int retry_wait=10 ); - - // - // - // - virtual ~PushEventSupplier(); - - // - // Publish a CORBA Any object or a specific object subscribers.... - // - template< typename T > - int push( T &msg ) { - int retval=0; - try { - CORBA::Any data; - data <<= msg; - if (!CORBA::is_nil(proxy_for_consumer)) { - proxy_for_consumer->push(data); - } - else{ - retval=-1; - } - } - catch( CORBA::Exception& ex) { - retval=-1; - } - return retval; - } - - template< typename T > - int push( T *msg ) { - int retval=0; - try { - CORBA::Any data; - data <<= msg; - if (!CORBA::is_nil(proxy_for_consumer)) { - proxy_for_consumer->push(data); - } - else{ - retval=-1; - } - } - catch( CORBA::Exception& ex) { - retval=-1; - } - return retval; - } - - int push( CORBA::Any &data ) { - int retval=0; - try { - if (!CORBA::is_nil(proxy_for_consumer)) { - proxy_for_consumer->push(data); - } - else{ - retval=-1; - } - } - catch( CORBA::Exception& ex) { - retval=-1; - } - return retval; - - return 0; - } - - - const Supplier *getSupplier() { return supplier; }; - - protected: - - // - // Channel name - // - std::string name; - - // - // Context where name is located - // - std::string nc_name; - - // - // handle to the EventChannel - // - CosEventChannelAdmin::EventChannel_var channel; - - // - // Get Supplier Admin interface - // - CosEventChannelAdmin::SupplierAdmin_var 
supplier_admin; - - // - // Get proxy consumer - // - CosEventChannelAdmin::ProxyPushConsumer_var proxy_for_consumer; - - // - // Push Supplier... - // - Supplier *supplier; - - // - // number of retries to perform (-1 == try forever) - // - int retries; - - // - // number of milliseconds to wait between retry operations - // - int retry_wait; - - - private: - void _init( corba::OrbPtr &orb ); - - }; - - // - // PushEventConsumer - // - // This class will perform the subscription portion of the a publisher/subscriber pattern - // over a CORBA EventChannel. If the Channel does not exist it will try to create - // and register the channel in the NamingService - // - // - class PushEventConsumer { - - public: - // - // Callback interface used by BULKIO Ports when connect/disconnect event happens - // - typedef void (*DataArrivedCallbackFn)( const CORBA::Any &data ); - - // - // Interface definition that will be notified when data arrives on a EventChannel - // - class DataArrivedListener { - - public: - virtual void operator() ( const CORBA::Any &data ) = 0; - virtual ~DataArrivedListener() {}; - - }; - - /* - * Allow for member functions to receive connect/disconnect notifications - */ - template - class MemberDataArrivedListener : public DataArrivedListener - { - public: - typedef boost::shared_ptr< MemberDataArrivedListener< T > > SPtr; - - typedef void (T::*MemberFn)( const CORBA::Any &data ); - - static SPtr Create( T &target, MemberFn func ){ - return SPtr( new MemberDataArrivedListener(target, func ) ); - }; - - virtual void operator() ( const CORBA::Any &data ) - { - (target_.*func_)(data); - } - - // Only allow PropertySet_impl to instantiate this class. 
- MemberDataArrivedListener ( T& target, MemberFn func) : - target_(target), - func_(func) - { - } - private: - T& target_; - MemberFn func_; - }; - - /* - * Wrap Callback functions as ConnectionEventListener objects - */ - class StaticDataArrivedListener : public DataArrivedListener - { - public: - virtual void operator() ( const CORBA::Any &data ) - { - (*func_)(data); - } - - StaticDataArrivedListener ( DataArrivedCallbackFn func) : - func_(func) - { - } - - private: - - DataArrivedCallbackFn func_; - }; - - // - // Define base class for consumers - // - - typedef POA_CosEventComm::PushConsumer Consumer; - typedef CosEventComm::PushConsumer_var Consumer_var; - typedef CosEventComm::PushConsumer_ptr Consumer_ptr; - - - // - // Create the context for a PushEvent Supplier for a CORBA EventService - // - // @param orb reference to corba context and major services - // @param channelName event channel name to subscribe to - // @param consumer actual consumer object that receives pushed data - // @param retries number of retries to perform when trying to establish subscriber interface (-1 tries forever) - // @param retry_wait number of millisecs to wait between retries - PushEventConsumer( corba::OrbPtr & orb, - const std::string &channelName, - Consumer* consumer, - const int retries=10, - const int retry_wait=10 ); - - PushEventConsumer( corba::OrbPtr & orb, - const std::string &channelName, - const std::string &ncName, - Consumer* consumer, - const int retries=10, - const int retry_wait=10 ); - - PushEventConsumer( corba::OrbPtr & orb, - const std::string &channelName, - const int retries=10, - const int retry_wait=10 ); - - PushEventConsumer( corba::OrbPtr & orb, - const std::string &channelName, - const std::string &ncName, - const int retries=10, - const int retry_wait=10 ); - // - // DTOR - // - virtual ~PushEventConsumer(); - - // - // - // - const Consumer *getConsumer() { return consumer; }; - - // - // Attach/detach sequence does not work for some reason. 
- // -#if 0 - Consumer *setConsumer( Consumer *newConsumer ) { - detach(); - consumer = newConsumer; - attach(); - } - - void attach(); - void dettach(); -#endif - - // - // Attach callback listener when data arrives to Consumer object - // - template< typename T > inline - void setDataArrivedListener(T &target, void (T::*func)( const CORBA::Any &data ) ) { - dataArrivedCB = boost::make_shared< MemberDataArrivedListener< T > >( boost::ref(target), func ); - }; - - template< typename T > inline - void setDataArrivedListener(T *target, void (T::*func)( const CORBA::Any &data ) ) { - dataArrivedCB = boost::make_shared< MemberDataArrivedListener< T > >( boost::ref(*target), func ); - }; - - void setDataArrivedListener( DataArrivedListener *newListener ); - void setDataArrivedListener( DataArrivedCallbackFn newListener ); - - protected: - - // - // CallbackConsumer - // - class CallbackConsumer : public Consumer { - public: - virtual ~CallbackConsumer() {}; - virtual void push( const CORBA::Any &data ) { - if ( parent.dataArrivedCB ) { - try{ - (*parent.dataArrivedCB)( data ); - } - catch(...){ - } - } - - }; - virtual void disconnect_push_consumer () {} ; - - private: - friend class PushEventConsumer; - - CallbackConsumer ( PushEventConsumer &parent) : - parent(parent) - { - } ; - - protected: - PushEventConsumer &parent; - - }; - - friend class CallbackConsumer; - - // - // Channel name - // - std::string name; - - // - // Naming context where channel is bound - // - std::string nc_name; - - // - // handle to the EventChannel - // - CosEventChannelAdmin::EventChannel_var channel; - - // - // Get Supplier Admin interface - // - CosEventChannelAdmin::ConsumerAdmin_var consumer_admin; - - // - // Get proxy supplier that is providing the data - // - CosEventChannelAdmin::ProxyPushSupplier_var proxy_for_supplier; - - // - // Push Consumer - // - Consumer *consumer; - - // - // PushConsumer Callback... 
- // - // Used by default Consumer object to call registered callback - // - boost::shared_ptr< DataArrivedListener > dataArrivedCB; - - // - // number of retries to perform (-1 == try forever) - // - int retries; - - // - // number of milliseconds to wait between retry operations - // - int retry_wait; - - private: - void _init( corba::OrbPtr &orb ); - - - }; // end of PushEventConsumer - - }; // end of corba namespace - - -#endif diff --git a/redhawk/src/tools/LogEventAppender/logdebug.h b/redhawk/src/tools/LogEventAppender/logdebug.h deleted file mode 100644 index ace1bf9ed..000000000 --- a/redhawk/src/tools/LogEventAppender/logdebug.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * This file is protected by Copyright. Please refer to the COPYRIGHT file - * distributed with this source distribution. - * - * This file is part of REDHAWK core. - * - * REDHAWK core is free software: you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by the - * Free Software Foundation, either version 3 of the License, or (at your - * option) any later version. - * - * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see http://www.gnu.org/licenses/. - */ -#ifndef _LL_DEBUG_H -#define _LL_DEBUG_H - -//#define DEBUG_ON -//#define STDOUT_ON - -#ifdef DEBUG_ON - -// set -DLIB_ON when compiling library for debug mode...do no use log4 logging.... 
-#ifdef STDOUT_ON -#include -extern std::ostream & _logger_; -#define LTRACE( exp ) _logger_ << "TRACE " << exp << std::endl -#define LDEBUG( exp ) _logger_ << "DEBUG " << exp << std::endl -#define LINFO( exp ) _logger_ << "INFO " << exp << std::endl -#define LWARN( exp ) _logger_ << "WARN " << exp << std::endl -#define LERROR( exp ) _logger_ << "ERROR " << exp << std::endl -#define LFATAL( exp ) _logger_ << "FATAL " << exp << std::endl - -#define LNTRACE( lname, exp ) _logger_ << "TRACE " << lname << " " << exp << std::endl -#define LNDEBUG( lname, exp ) _logger_ << "DEBUG " << lname << " " << exp << std::endl -#define LNINFO( lname, exp ) _logger_ << "INFO " << lname << " " << exp << std::endl -#define LNWARN( lname, exp ) _logger_ << "WARN " << lname << " " << exp << std::endl -#define LNERROR( lname, exp ) _logger_ << "ERROR " << lname << " " << exp << std::endl -#define LNFATAL( lname, exp ) _logger_ << "FATAL " << lname << " " << exp << std::endl - -#define LOGGER_CFG( name ) \ - std::ostream &_logger_ = std::cout; - -#define LOGGER_END( name ) - -#else - -// set when compiling library for test mode... OK to use log4 logging.... 
-#include -#include -extern log4cxx::LoggerPtr _logger_; -#define LTRACE( expression ) LOG4CXX_TRACE( _logger_, expression ) -#define LDEBUG( expression ) LOG4CXX_DEBUG( _logger_ , expression ) -#define LINFO( expression ) LOG4CXX_INFO( _logger_, expression ) -#define LWARN( expression ) LOG4CXX_WARN( _logger_, expression ) -#define LERROR( expression ) LOG4CXX_ERROR( _logger_, expression ) -#define LFATAL( expression ) LOG4CXX_FATAL( _logger_, expression ) - -#define LNTRACE( lname, expression ) LOG4CXX_TRACE( log4cxx::Logger::getLogger(lname), expression ) -#define LNDEBUG( lname, expression ) LOG4CXX_DEBUG( log4cxx::Logger::getLogger(lname), expression ) -#define LNINFO( lname, expression ) LOG4CXX_INFO( log4cxx::Logger::getLogger(lname), expression ) -#define LNWARN( lname, expression ) LOG4CXX_WARN( log4cxx::Logger::getLogger(lname), expression ) -#define LNERROR( lname, expression ) LOG4CXX_ERROR( log4cxx::Logger::getLogger(lname), expression ) -#define LNFATAL( lname, expression ) LOG4CXX_FATAL( log4cxx::Logger::getLogger(lname), expression ) - -#define LOGGER_CFG( name ) \ - log4cxx::LoggerPtr _logger_ = log4cxx::Logger::getLogger(name); - -#define LOGGER_END( name ) \ - log4cxx::LogManager::shutdown(); - -#endif - -#else - -typedef void LOGGER; -// turn off debug code -#define LTRACE( exp ) -#define LDEBUG( exp ) -#define LINFO( exp ) -#define LWARN( exp ) -#define LERROR( exp ) -#define LFATAL( exp ) -#define LNTRACE( lname, exp ) -#define LNDEBUG( lname, exp ) -#define LNINFO( lname, exp ) -#define LNWARN( lname, exp ) -#define LNERROR( lname, exp ) -#define LNFATAL( lname, exp ) -#define LOGGER_CFG( name ) -#define LOGGER_END( name ) - -#endif - -#endif diff --git a/redhawk/src/tools/Makefile.am b/redhawk/src/tools/Makefile.am index e29739ae0..96a079d69 100644 --- a/redhawk/src/tools/Makefile.am +++ b/redhawk/src/tools/Makefile.am @@ -19,3 +19,14 @@ # bin_SCRIPTS = redhawk-softpkg +bin_PROGRAMS = redhawk-shminfo redhawk-shmclean + +OSSIE_LIBS = 
$(top_builddir)/base/framework/libossiecf.la $(top_builddir)/base/framework/idl/libossieidl.la + +redhawk_shminfo_SOURCES = src/shminfo.cpp src/ShmVisitor.cpp src/ShmVisitor.h +redhawk_shminfo_CXXFLAGS = -Wall $(OSSIE_CFLAGS) +redhawk_shminfo_LDFLAGS = $(OSSIE_LIBS) + +redhawk_shmclean_SOURCES = src/shmclean.cpp src/ShmVisitor.cpp src/ShmVisitor.h +redhawk_shmclean_CXXFLAGS = -Wall $(OSSIE_CFLAGS) +redhawk_shmclean_LDFLAGS = $(OSSIE_LIBS) -lrt diff --git a/redhawk/src/tools/src/ShmVisitor.cpp b/redhawk/src/tools/src/ShmVisitor.cpp new file mode 100644 index 000000000..db2343024 --- /dev/null +++ b/redhawk/src/tools/src/ShmVisitor.cpp @@ -0,0 +1,59 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#include "ShmVisitor.h" + +#include +#include + +#include + +using redhawk::shm::SuperblockFile; + +void ShmVisitor::visit() +{ + std::string shmdir = redhawk::shm::getSystemPath(); + DIR* dir = opendir(shmdir.c_str()); + if (!dir) { + throw std::runtime_error("could not open shm filesystem " + shmdir); + } + + while (struct dirent* entry = readdir(dir)) { + const std::string filename = entry->d_name; + if ((filename == ".") || (filename == "..")) { + continue; + } + + if (SuperblockFile::IsSuperblockFile(filename)) { + SuperblockFile file(filename); + try { + file.open(false); + } catch (const std::exception& exc) { + heapException(filename, exc); + continue; + } + visitHeap(file); + } else { + visitFile(filename); + } + } + + closedir(dir); +} diff --git a/redhawk/src/tools/src/ShmVisitor.h b/redhawk/src/tools/src/ShmVisitor.h new file mode 100644 index 000000000..44d41d5c7 --- /dev/null +++ b/redhawk/src/tools/src/ShmVisitor.h @@ -0,0 +1,50 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. 
+ */ + +#ifndef SHMVISITOR_H +#define SHMVISITOR_H + +#include + +#include + +class ShmVisitor +{ +public: + virtual ~ShmVisitor() + { + } + + void visit(); + + virtual void visitFile(const std::string& filename) + { + } + + virtual void visitHeap(redhawk::shm::SuperblockFile& heap) + { + } + + virtual void heapException(const std::string& name, const std::exception& exc) + { + } +}; + +#endif // SHMVISITOR_H diff --git a/redhawk/src/tools/src/shmclean.cpp b/redhawk/src/tools/src/shmclean.cpp new file mode 100644 index 000000000..9b135c9bf --- /dev/null +++ b/redhawk/src/tools/src/shmclean.cpp @@ -0,0 +1,182 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include "ShmVisitor.h" + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +using redhawk::shm::SuperblockFile; + +namespace { + static std::string executable; + + static void usage() + { + std::cout << "Usage: " << executable << " [OPTION]... [FILE]..." << std::endl; + std::cout << "Remove orphaned REDHAWK heaps and shared memory files." 
<< std::endl; + std::cout << std::endl; + std::cout << " -f, --force remove the named file regardless of type" << std::endl; + std::cout << " -h, --help display this help and exit" << std::endl; + std::cout << " --version output version information and exit" << std::endl; + std::cout << std::endl; + std::cout << "When no file is given, all accessible REDHAWK heaps are scanned and any" << std::endl; + std::cout << "orphaned heap is removed. File names are relative to " << redhawk::shm::getSystemPath() << "." << std::endl; + std::cout << std::endl; + std::cout << "A heap is considered orphaned if its creating process is no longer alive." << std::endl; + std::cout << "The shared memory file is unlinked; however, if any active processes have" << std::endl; + std::cout << "the memory mapped, the shared memory is not freed until they exit." << std::endl; + std::cout << std::endl; + std::cout << "By default, active heaps and non-REDHAWK shared memory files are not" << std::endl; + std::cout << "removed. Use the --force (-f) option to remove these files." 
<< std::endl; + } +} + +class Clean : public ShmVisitor +{ +public: + Clean() : + _force(false) + { + } + + void setForce(bool force) + { + _force = force; + } + + bool cleanFile(const std::string& filename) + { + if (_force) { + if (shm_unlink(filename.c_str())) { + perror(filename.c_str()); + return false; + } + return true; + } + + if (!SuperblockFile::IsSuperblockFile(filename)) { + std::cerr << filename << " is not a heap, use -f to force removal" << std::endl; + return false; + } + + SuperblockFile heap(filename); + try { + heap.open(false); + } catch (const std::exception& exc) { + std::cerr << filename << " could not be opened: " << exc.what() << std::endl; + return false; + } + + if (!heap.isOrphaned()) { + std::cerr << filename << " is in use, use -f to force removal" << std::endl; + return false; + } + + _unlink(heap); + return true; + } + + virtual void visitHeap(SuperblockFile& heap) + { + if (!heap.isOrphaned()) { + return; + } + _unlink(heap); + } + +private: + void _unlink(SuperblockFile& heap) + { + std::cout << "unlinking " << heap.name() << std::endl; + try { + heap.file().unlink(); + } catch (const std::exception& exc) { + std::cerr << "error: " << exc.what() << std::endl; + } + } + + bool _force; +}; + +int main(int argc, char* argv[]) +{ + // Save the executable name for output, removing any paths + executable = argv[0]; + std::string::size_type pos = executable.rfind('/'); + if (pos != std::string::npos) { + executable.erase(0, pos + 1); + } + + struct option long_options[] = { + { "help", no_argument, 0, 'h' }, + { "version", no_argument, 0, 'V' }, + { "force", no_argument, 0, 'f' }, + { 0, 0, 0, 0 } + }; + + int opt; + Clean clean; + while ((opt = getopt_long(argc, argv, "hf", long_options, NULL)) != -1) { + switch (opt) { + case 'h': + usage(); + return 0; + case 'V': + std::cout << executable << " version " << VERSION << std::endl; + return 0; + case 'f': + clean.setForce(true); + break; + default: + std::cerr << "Try `" << executable << 
" --help` for more information." << std::endl; + return -1; + } + }; + + if (optind < argc) { + int errors = 0; + for (int arg = optind; arg < argc; ++arg) { + if (!clean.cleanFile(argv[arg])) { + errors++; + } + } + if (errors) { + return -1; + } + } else { + try { + clean.visit(); + } catch (const std::exception& exc) { + std::cerr << exc.what() << std::endl; + return -1; + } + } + return 0; +} diff --git a/redhawk/src/tools/src/shminfo.cpp b/redhawk/src/tools/src/shminfo.cpp new file mode 100644 index 000000000..aad711780 --- /dev/null +++ b/redhawk/src/tools/src/shminfo.cpp @@ -0,0 +1,366 @@ +/* + * This file is protected by Copyright. Please refer to the COPYRIGHT file + * distributed with this source distribution. + * + * This file is part of REDHAWK core. + * + * REDHAWK core is free software: you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by the + * Free Software Foundation, either version 3 of the License, or (at your + * option) any later version. + * + * REDHAWK core is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see http://www.gnu.org/licenses/. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "ShmVisitor.h" + +using redhawk::shm::SuperblockFile; + +namespace { + static std::string executable; + + static void usage() + { + std::cout << "Usage: " << executable << " [OPTION]..." << std::endl; + std::cout << "Display status of shared memory file system and REDHAWK heaps." 
<< std::endl; + std::cout << std::endl; + std::cout << " -h, --help display this help and exit" << std::endl; + std::cout << " -a, --all include non-REDHAWK heap shared memory files" << std::endl; + std::cout << " -l do not look up user and group names" << std::endl; + std::cout << " --format=FORMAT display sizes in FORMAT (default 'auto')" << std::endl; + std::cout << " --version output version information and exit" << std::endl; + std::cout << std::endl; + std::cout << "FORMAT may be one of the following:" << std::endl; + std::cout << " auto use human readable sizes (e.g., 4K, 2.4G)" << std::endl; + std::cout << " b bytes" << std::endl; + std::cout << " k kilobytes" << std::endl; + } + + static std::string percent(float percent) + { + std::ostringstream oss; + oss << std::fixed << std::setprecision(1) << percent << "%"; + return oss.str(); + } +} + +class SizeFormatter +{ +public: + enum Format { + BYTES, + KILOBYTES, + AUTO + }; + + explicit SizeFormatter(Format format=AUTO) : + _format(format) + { + } + + static Format Parse(const std::string& strval) + { + std::string format_str; + std::transform(strval.begin(), strval.end(), std::back_inserter(format_str), ::tolower); + if (format_str == "auto") { + return AUTO; + } else if (format_str == "k") { + return KILOBYTES; + } else if (format_str == "b") { + return BYTES; + } else { + throw std::invalid_argument("invalid format '" + strval + "'"); + } + } + + void setFormat(Format format) + { + _format = format; + } + + void setFormat(const std::string& strval) + { + setFormat(SizeFormatter::Parse(strval)); + } + + std::string operator() (size_t size) + { + std::ostringstream oss; + switch (_format) { + case KILOBYTES: + oss << (size/1024) << "K"; + break; + case AUTO: + _toHumanReadable(oss, size); + break; + case BYTES: + default: + oss << size; + break; + } + return oss.str(); + } + +private: + void _toHumanReadable(std::ostream& oss, size_t size) + { + const char* suffix[] = { "", "K", "M", "G", "T", 0 }; + 
double val = size; + int pow = 0; + while ((val >= 1024.0) && (suffix[pow+1])) { + val /= 1024.0; + pow++; + } + if (pow == 0) { + oss << val; + } else { + oss << std::fixed << std::setprecision(1) << val << suffix[pow]; + } + } + + Format _format; +}; + +class Info : public ShmVisitor +{ +public: + Info() : + _all(false), + _showNames(true), + _format() + { + } + + void setDisplayAll(bool all) + { + _all = all; + } + + void setDisplayUserNames(bool display) + { + _showNames = display; + } + + void setSizeFormatter(SizeFormatter& format) + { + _format = format; + } + + virtual void visitHeap(SuperblockFile& heap) + { + std::cout << std::endl << heap.name() << std::endl;; + std::cout << " type: REDHAWK heap" << std::endl; + // Note: file size and allocated size are always the same for heaps + std::cout << " file size: " << _format(heap.file().size()) << std::endl; + + SuperblockFile::Statistics stats = heap.getStatistics(); + float used_pct = stats.used * 100.0 / stats.size; + std::cout << " heap size: " << _format(stats.size) << std::endl; + std::cout << " heap used: " << _format(stats.used) << " (" << percent(used_pct) << ")" << std::endl; + + pid_t creator = heap.creator(); + std::cout << " creator: " << creator << std::endl; + std::cout << " orphaned: " << std::boolalpha << heap.isOrphaned() << std::endl; + std::cout << " refcount: " << heap.refcount() << std::endl; + + displayFileStats(heap.name(), false); + } + + virtual void visitFile(const std::string& name) + { + if (!_all) { + return; + } + + std::cout << std::endl << name << std::endl;; + std::cout << " type: other" << std::endl; + + displayFileStats(name, true); + } + + virtual void heapException(const std::string& name, const std::exception& exc) + { + std::cout << std::endl << name << std::endl;; + std::cout << " type: REDHAWK heap (unreadable)" << std::endl; + + displayFileStats(name, true); + + std::cerr << " error: " << exc.what() << std::endl; + } + +protected: + void displayFileStats(const 
std::string& name, bool showSize) + { + const std::string shm_path = redhawk::shm::getSystemPath(); + std::string path = shm_path + "/" + name; + struct stat status; + if (stat(path.c_str(), &status)) { + return; + } + if (showSize) { + std::cout << " file size: " << _format(status.st_size) << std::endl; + std::cout << " allocated: " << _format(status.st_blocks * 512) << std::endl; + } + std::cout << " user: " << _getUserName(status.st_uid) << std::endl; + std::cout << " group: " << _getGroupName(status.st_gid) << std::endl; + int mode = status.st_mode & (S_IRWXU|S_IRWXG|S_IRWXO); + std::cout << " mode: " << std::oct << mode << std::endl; + } + + std::string _getUserName(uid_t uid) + { + UserTable::iterator existing = _userNames.find(uid); + if (existing != _userNames.end()) { + return existing->second; + } + + std::string name; + + // Unless it's disabled, look up the user entry + if (_showNames) { + struct passwd* user = getpwuid(uid); + if (user) { + name = user->pw_name; + } + } + + // Use the ID + if (name.empty()) { + std::ostringstream oss; + oss << uid; + name = oss.str(); + } + + _userNames[uid] = name; + return name; + } + + std::string _getGroupName(gid_t gid) + { + GroupTable::iterator existing = _groupNames.find(gid); + if (existing != _groupNames.end()) { + return existing->second; + } + + std::string name; + + // Unless it's disabled, look up the group entry + if (_showNames) { + struct group* grp = getgrgid(gid); + if (grp) { + name = grp->gr_name; + } + } + + // Use the ID + if (name.empty()) { + std::ostringstream oss; + oss << gid; + name = oss.str(); + } + + _groupNames[gid] = name; + return name; + } + + bool _all; + bool _showNames; + SizeFormatter _format; + + typedef std::map UserTable; + UserTable _userNames; + + typedef std::map GroupTable; + GroupTable _groupNames; +}; + +int main(int argc, char* argv[]) +{ + // Save the executable name for output, removing any paths + executable = argv[0]; + std::string::size_type pos = 
executable.rfind('/'); + if (pos != std::string::npos) { + executable.erase(0, pos + 1); + } + + struct option long_options[] = { + { "format", required_argument, 0, 'f' }, + { "help", no_argument, 0, 'h' }, + { "all", no_argument, 0, 'a' }, + { "version", no_argument, 0, 'V' }, + { 0, 0, 0, 0 } + }; + + Info info; + SizeFormatter format; + + int opt; + while ((opt = getopt_long(argc, argv, "ahl", long_options, NULL)) != -1) { + switch (opt) { + case 'a': + info.setDisplayAll(true); + break; + case 'f': + try { + format.setFormat(optarg); + } catch (const std::exception& exc) { + std::cerr << exc.what() << std::endl; + return -1; + } + break; + case 'l': + info.setDisplayUserNames(false); + break; + case 'h': + usage(); + return 0; + case 'V': + std::cout << executable << " version " << VERSION << std::endl; + return 0; + default: + std::cerr << "Try `" << executable << " --help` for more information." << std::endl; + return -1; + } + }; + + if (optind < argc) { + std::cerr << "WARNING: arguments ignored" << std::endl; + } + + std::cout << redhawk::shm::getSystemPath() << std::endl; + size_t total_mem = redhawk::shm::getSystemTotalMemory(); + size_t free_mem = redhawk::shm::getSystemFreeMemory(); + float free_pct = free_mem * 100.0 / total_mem; + std::cout << " size: " << format(total_mem) << std::endl; + std::cout << " free: " << format(free_mem) << " (" << percent(free_pct) << ")" << std::endl; + + info.setSizeFormatter(format); + try { + info.visit(); + } catch (const std::exception& exc) { + std::cerr << exc.what() << std::endl; + return -1; + } + return 0; +} diff --git a/redhawk/src/xml/dtd/deviceconfiguration.dtd b/redhawk/src/xml/dtd/deviceconfiguration.dtd index afaadab95..6c971aef1 100644 --- a/redhawk/src/xml/dtd/deviceconfiguration.dtd +++ b/redhawk/src/xml/dtd/deviceconfiguration.dtd @@ -87,9 +87,11 @@ with this program. If not, see http://www.gnu.org/licenses/. ,componentproperties? ,affinity? ,loggingconfig? + ,deployerrequires? 
)> + id ID #REQUIRED + startorder CDATA #IMPLIED> @@ -104,6 +106,10 @@ with this program. If not, see http://www.gnu.org/licenses/. | structsequenceref )+ > + + + + + + + + + + + ( componentplacement+ + , usesdeviceref* + , reservation* + )> + + + + + + + @@ -263,6 +286,13 @@ with this program. If not, see http://www.gnu.org/licenses/. propid CDATA #REQUIRED externalpropid CDATA #IMPLIED> + + + diff --git a/redhawk/src/xml/xsd/dcd.xsd b/redhawk/src/xml/xsd/dcd.xsd index 46111a6fd..63dd13f0d 100644 --- a/redhawk/src/xml/xsd/dcd.xsd +++ b/redhawk/src/xml/xsd/dcd.xsd @@ -192,12 +192,14 @@ usagename element needs to be unique for each service type. + The componentinstantiation‘s id attribute is a DCE UUID that uniquely identifier the component. + @@ -221,6 +223,17 @@ usagename element needs to be unique for each service type. + + + + + + + + + + + diff --git a/redhawk/src/xml/xsd/prf.xsd b/redhawk/src/xml/xsd/prf.xsd index 414edae7f..1034f4e6d 100644 --- a/redhawk/src/xml/xsd/prf.xsd +++ b/redhawk/src/xml/xsd/prf.xsd @@ -156,6 +156,7 @@ defined. + diff --git a/redhawk/src/xml/xsd/sad.xsd b/redhawk/src/xml/xsd/sad.xsd index ad8569f32..b9fc4247b 100644 --- a/redhawk/src/xml/xsd/sad.xsd +++ b/redhawk/src/xml/xsd/sad.xsd @@ -35,6 +35,7 @@ with this program. If not, see http://www.gnu.org/licenses/. + @@ -79,6 +80,7 @@ with this program. If not, see http://www.gnu.org/licenses/. + @@ -102,6 +104,17 @@ with this program. If not, see http://www.gnu.org/licenses/. + + + + + + + + + + + @@ -195,13 +208,42 @@ with this program. If not, see http://www.gnu.org/licenses/. - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -286,6 +328,15 @@ with this program. If not, see http://www.gnu.org/licenses/. 
+ + + + + + + + + diff --git a/throughput/.gitignore b/throughput/.gitignore index 2a17fee6e..5d20faf25 100644 --- a/throughput/.gitignore +++ b/throughput/.gitignore @@ -1,5 +1,12 @@ +*.csv +*.la +*.lo *.m4 +*.o +*.pyc +*.so .deps/ +.libs/ autom4te.cache/ Makefile Makefile.in @@ -7,6 +14,7 @@ config.* configure depcomp install-sh +libtool ltmain.sh missing streams/bulkio/reader/cpp/reader @@ -20,5 +28,3 @@ streams/corba/reader streams/corba/writer streams/raw/reader streams/raw/writer -*.o -*.csv diff --git a/throughput/Makefile.am b/throughput/Makefile.am index 40ef0173d..3fcb49e53 100644 --- a/throughput/Makefile.am +++ b/throughput/Makefile.am @@ -17,6 +17,6 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # -ACLOCAL_AMFLAGS = -I ${OSSIEHOME}/share/aclocal/ossie +ACLOCAL_AMFLAGS = -I m4 -I ${OSSIEHOME}/share/aclocal/ossie SUBDIRS = streams/raw streams/corba streams/bulkio/reader/cpp streams/bulkio/writer/cpp diff --git a/throughput/benchmark/csv.py b/throughput/benchmark/csv.py index 1090114fa..1059529af 100644 --- a/throughput/benchmark/csv.py +++ b/throughput/benchmark/csv.py @@ -31,7 +31,7 @@ def add_field(self, key, header=None): self.fields.append((key, header)) def test_started(self, name, **kw): - filename = '%s-%d.csv' % (name.lower(), os.getpid()) + filename = '%s-%d.csv' % (self.make_filename(name), os.getpid()) self.file = open(filename, 'w') print >>self.file, ','.join(title for name, title in self.fields) @@ -40,3 +40,8 @@ def sample_added(self, **stats): def test_complete(self, **kw): self.file.close() + + def make_filename(self, name): + name = name.lower() + name = name.replace('(', '').replace(')', '') + return '-'.join(name.split()) diff --git a/throughput/common/timing.h b/throughput/common/timing.h new file mode 100644 index 000000000..1f32ec5d0 --- /dev/null +++ b/throughput/common/timing.h @@ -0,0 +1,13 @@ +#ifndef TIMING_H +#define TIMING_H + 
+#include + +inline double get_time() +{ + struct timespec now; + clock_gettime(CLOCK_MONOTONIC, &now); + return now.tv_sec + now.tv_nsec*1e-9; +} + +#endif diff --git a/throughput/configure.ac b/throughput/configure.ac index 2b07db0cc..99e4f62da 100644 --- a/throughput/configure.ac +++ b/throughput/configure.ac @@ -19,6 +19,8 @@ # AC_INIT([throughput], 1.0.0) AM_INIT_AUTOMAKE([foreign]) +AC_CONFIG_MACRO_DIR([m4]) +LT_INIT([dlopen disable-static]) AC_PROG_CC AC_PROG_CXX @@ -41,6 +43,8 @@ AX_BOOST_SYSTEM AX_BOOST_THREAD AX_BOOST_REGEX +CPPFLAGS="$CPPFLAGS -I\$(top_srcdir)/common" + AC_CONFIG_FILES([Makefile \ streams/corba/Makefile \ streams/raw/Makefile \ diff --git a/bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/m4/.gitignore b/throughput/m4/.gitignore similarity index 100% rename from bulkioInterfaces/libsrc/testing/components/sri_changed_cpp/cpp/m4/.gitignore rename to throughput/m4/.gitignore diff --git a/throughput/redhawk-benchmark b/throughput/redhawk-benchmark index eef5040bc..4751468ee 100755 --- a/throughput/redhawk-benchmark +++ b/throughput/redhawk-benchmark @@ -277,6 +277,8 @@ class TransferSizeTest(BenchmarkTest): sample = {'time': now-start, 'rate': current_rate, 'size': transfer_size, + 'send_time': stream.send_time(), + 'recv_time': stream.recv_time(), 'write_cpu': writer['cpu'] * sys_cpu, 'write_rss': writer['rss'], 'write_majflt': writer['majflt'], @@ -317,6 +319,7 @@ if __name__ == '__main__': window_size = 5 tolerance = 0.1 nogui = False + interfaces = ['Raw', 'CORBA', 'BulkIO'] opts, args = getopt.getopt(sys.argv[1:], 'hw:t:d:', ['help', 'transport=', 'numa-distance=', 'no-gui']) for key, value in opts: @@ -335,13 +338,18 @@ if __name__ == '__main__': elif key == '--no-gui': nogui = True - interface_list = ('raw', 'corba', 'bulkio') + interface_list = ('raw', 'corba', 'bulkio-corba', 'bulkio-shm', 'bulkio-local') interfaces = [] for arg in args: - if not arg.lower() in interface_list: + name = arg.lower() + if name == 
'bulkio': + for name in interface_list: + if name.startswith('bulkio'): + interfaces.append(name) + elif not name in interface_list: raise SystemExit("unknown interface '%s'" % arg) else: - interfaces.append(arg.lower()) + interfaces.append(name) if not interfaces: interfaces = interface_list @@ -361,6 +369,8 @@ if __name__ == '__main__': csv.add_field('time', 'time(s)') csv.add_field('rate', 'rate(Bps)') csv.add_field('size', 'transfer size(B)') + csv.add_field('send_time', 'average send call(s)') + csv.add_field('recv_time', 'average recv call(s)') csv.add_field('write_cpu', 'writer cpu(%)') csv.add_field('write_rss', 'writer rss') csv.add_field('write_majflt', 'writer major faults') @@ -398,9 +408,18 @@ if __name__ == '__main__': elif interface == 'corba': name = 'CORBA' factory = corba.factory(transport) - elif interface == 'bulkio': - name = 'BulkIO' - factory = bulkio.factory(transport) + elif interface == 'bulkio-corba': + os.environ['BULKIO_SHM'] = 'disable' + name = 'BulkIO (CORBA)' + factory = bulkio.factory(transport, local=False) + elif interface == 'bulkio-shm': + if 'BULKIO_SHM' in os.environ: + del os.environ['BULKIO_SHM'] + name = 'BulkIO (shm)' + factory = bulkio.factory(transport, local=False) + elif interface == 'bulkio-local': + name = 'BulkIO (local)' + factory = bulkio.factory(transport, local=True) numa_policy = numa.NumaPolicy(numa_distance) diff --git a/throughput/streams/bulkio/__init__.py b/throughput/streams/bulkio/__init__.py index cdd017fe6..1d7ba3985 100644 --- a/throughput/streams/bulkio/__init__.py +++ b/throughput/streams/bulkio/__init__.py @@ -18,12 +18,19 @@ # along with this program. If not, see http://www.gnu.org/licenses/. 
# import os +try: + from ossie.utils import sb + from ossie.utils.sandbox.debugger import Debugger +except ImportError: + # Fallback for testing raw and CORBA without BulkIO + sb = None + Debugger = object __all__ = ('factory') PATH = os.path.dirname(__file__) -class NumaLauncher(object): +class NumaLauncher(Debugger): def __init__(self, policy): self.policy = policy @@ -40,13 +47,18 @@ def wrap(self, command, arguments): command = self.policy([command] + arguments) return command[0], command[1:] + def name(self): + return 'numactl' + class BulkioStream(object): - def __init__(self, format, numa_policy): + def __init__(self, format, numa_policy, shared): launcher = NumaLauncher(numa_policy) - self.writer = sb.launch(os.path.join(PATH, 'writer/writer.spd.xml'), debugger=launcher) - self.reader = sb.launch(os.path.join(PATH, 'reader/reader.spd.xml'), debugger=launcher) + self.shared = shared + self.writer = sb.launch(os.path.join(PATH, 'writer/writer.spd.xml'), debugger=launcher, shared=self.shared) + self.reader = sb.launch(os.path.join(PATH, 'reader/reader.spd.xml'), debugger=launcher, shared=self.shared) self.writer.connect(self.reader) + self.container = sb.domainless._getSandbox()._getComponentHost() def start(self): sb.start() @@ -55,33 +67,51 @@ def stop(self): sb.stop() def get_reader(self): - return self.reader._process.pid() + if self.shared: + return self.container._process.pid() + else: + return self.reader._process.pid() def get_writer(self): - return self.writer._process.pid() + if self.shared: + return self.container._process.pid() + else: + return self.writer._process.pid() def transfer_size(self, size): - self.writer.transfer_length = size + self.writer.transfer_length = int(size) def received(self): return int(self.reader.received) + def send_time(self): + return float(self.writer.average_time) + + def recv_time(self): + return float(self.reader.average_time) + def terminate(self): - self.writer.releaseObject() - self.reader.releaseObject() + 
sb.release() -class BulkioStreamFactory(object): +class BulkioCorbaFactory(object): def __init__(self, transport): configfile = 'config/omniORB-%s.cfg' % transport os.environ['OMNIORB_CONFIG'] = os.path.join(PATH, configfile) - from ossie.utils import sb - globals()['sb'] = sb def create(self, format, numa_policy): - return BulkioStream(format, numa_policy) + return BulkioStream(format, numa_policy, False) def cleanup(self): pass -def factory(transport): - return BulkioStreamFactory(transport) +class BulkioLocalFactory(object): + def create(self, format, numa_policy): + return BulkioStream(format, numa_policy, True) + +def factory(transport, local): + if sb is None: + raise ImportError('BulkIO is not available') + if local: + return BulkioLocalFactory() + else: + return BulkioCorbaFactory(transport) diff --git a/throughput/streams/bulkio/reader/cpp/.md5sums b/throughput/streams/bulkio/reader/cpp/.md5sums index 70ce0fc2f..54398b64b 100644 --- a/throughput/streams/bulkio/reader/cpp/.md5sums +++ b/throughput/streams/bulkio/reader/cpp/.md5sums @@ -1,10 +1,10 @@ -ebdea83f12424c8e125a20cf9fddb60d reader_base.cpp -f2bb62c7e2ca437dd8e3e549cdbbd98e main.cpp +bb780ee6cc14bbe61373f5da8b83cdc4 reader_base.cpp +a93fd9c0203f708c0bccb34cf8b02360 main.cpp 8bfcd22353c3a57fee561ad86ee2a56b reconf -7babbadc243db5a457c46cb3953f7465 configure.ac -a8bc574b27dc23d05cb5632e36afa093 Makefile.am +994527613365ab24d8f01fa45a2c6399 configure.ac +e6c2f53ac60077a6b5bfe2d7df24b1c1 Makefile.am 2aefaeaafd472e72e6f06082f92b1322 Makefile.am.ide 09d75d4d9a1858282f95a52ec7e70552 reader.h -3fa8f27c69574d2ef8db1cab3b21f41a reader_base.h +24dad4cedd3684f44f41b9857dae6681 reader_base.h b87651ea3565402473fb088004430fbf build.sh bbc766fb5d35f240b41015c3e999e6ac reader.cpp diff --git a/throughput/streams/bulkio/reader/cpp/Makefile.am b/throughput/streams/bulkio/reader/cpp/Makefile.am index df8c6c8fe..4cb479698 100644 --- a/throughput/streams/bulkio/reader/cpp/Makefile.am +++ 
b/throughput/streams/bulkio/reader/cpp/Makefile.am @@ -18,15 +18,24 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # ossieName = reader -bindir = $(prefix)/dom/components/reader/cpp/ -bin_PROGRAMS = reader +libdir = $(prefix)/dom/components/reader/cpp +lib_LTLIBRARIES = reader.la -xmldir = $(prefix)/dom/components/reader/ +xmldir = $(prefix)/dom/components/reader dist_xml_DATA = ../reader.scd.xml ../reader.prf.xml ../reader.spd.xml ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie AUTOMAKE_OPTIONS = subdir-objects +.PHONY: convenience-link clean-convenience-link +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : reader.la + @ln -fs .libs/reader.so + +clean-convenience-link: + @rm -f reader.so distclean-local: rm -rf m4 @@ -42,13 +51,12 @@ distclean-local: rm -f missing rm -rf .deps - # Sources, libraries and library directories are auto-included from a file # generated by the REDHAWK IDE. You can remove/modify the following lines if # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide -reader_SOURCES = $(redhawk_SOURCES_auto) -reader_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) -reader_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -reader_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) +reader_la_SOURCES = $(redhawk_SOURCES_auto) +reader_la_LIBADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +reader_la_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +reader_la_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) diff --git a/throughput/streams/bulkio/reader/cpp/main.cpp b/throughput/streams/bulkio/reader/cpp/main.cpp index 06fb8ab52..8564681e6 100644 --- a/throughput/streams/bulkio/reader/cpp/main.cpp +++ b/throughput/streams/bulkio/reader/cpp/main.cpp @@ -21,10 +21,10 @@ #include "ossie/ossieSupport.h" #include "reader.h" -int main(int argc, char* argv[]) -{ - reader_i* reader_servant; - Component::start_component(reader_servant, argc, argv); - return 0; +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new reader_i(uuid.c_str(), identifier.c_str()); + } } diff --git a/throughput/streams/bulkio/reader/cpp/reader.cpp b/throughput/streams/bulkio/reader/cpp/reader.cpp index 3ada042c5..093fdcb0b 100644 --- a/throughput/streams/bulkio/reader/cpp/reader.cpp +++ b/throughput/streams/bulkio/reader/cpp/reader.cpp @@ -26,19 +26,65 @@ **************************************************************************/ +#include + #include "reader.h" +class OctetPort : public bulkio::InOctetPort +{ +public: + OctetPort(const 
std::string& name) : + bulkio::InOctetPort(name), + lastPacketSize(0), + packetCount(0), + totalTime(0.0), + averageTime(0.0) + { + } + + virtual void pushPacket(const CF::OctetSequence& data, const BULKIO::PrecisionUTCTime& T, CORBA::Boolean EOS, const char* streamID) + { + double start = get_time(); + bulkio::InOctetPort::pushPacket(data, T, EOS, streamID); + double end = get_time(); + + if (lastPacketSize != data.length()) { + lastPacketSize = data.length(); + packetCount = 1; + totalTime = end - start; + averageTime = totalTime; + } else { + packetCount++; + totalTime += end - start; + averageTime = totalTime / packetCount; + } + } + + double getAverageTime() + { + return averageTime; + } + +private: + size_t lastPacketSize; + size_t packetCount; + double totalTime; + double averageTime; +}; + PREPARE_LOGGING(reader_i) reader_i::reader_i(const char *uuid, const char *label) : reader_base(uuid, label) { // Avoid placing constructor code here. Instead, use the "constructor" function. - + dataOctet_in = new OctetPort("dataOctet_in"); + addPort("dataOctet_in", dataOctet_in); } reader_i::~reader_i() { + delete dataOctet_in; } void reader_i::constructor() @@ -46,6 +92,12 @@ void reader_i::constructor() /*********************************************************************************** This is the RH constructor. 
All properties are properly initialized before this function is called ***********************************************************************************/ + setPropertyQueryImpl(average_time, this, &reader_i::get_average_time); +} + +double reader_i::get_average_time() +{ + return dataOctet_in->getAverageTime(); } /*********************************************************************************************** diff --git a/throughput/streams/bulkio/reader/cpp/reader.h b/throughput/streams/bulkio/reader/cpp/reader.h index b4018d827..959258dd6 100644 --- a/throughput/streams/bulkio/reader/cpp/reader.h +++ b/throughput/streams/bulkio/reader/cpp/reader.h @@ -22,6 +22,8 @@ #include "reader_base.h" +class OctetPort; + class reader_i : public reader_base { ENABLE_LOGGING @@ -32,6 +34,11 @@ class reader_i : public reader_base void constructor(); int serviceFunction(); + + private: + OctetPort* dataOctet_in; + + double get_average_time(); }; #endif // READER_I_IMPL_H diff --git a/throughput/streams/bulkio/reader/cpp/reader_base.cpp b/throughput/streams/bulkio/reader/cpp/reader_base.cpp index 10a207663..7ffebc1d0 100644 --- a/throughput/streams/bulkio/reader/cpp/reader_base.cpp +++ b/throughput/streams/bulkio/reader/cpp/reader_base.cpp @@ -33,6 +33,8 @@ reader_base::reader_base(const char *uuid, const char *label) : Component(uuid, label), ThreadedComponent() { + setThreadName(label); + loadProperties(); dataOctet_in = new bulkio::InOctetPort("dataOctet_in"); @@ -86,6 +88,15 @@ void reader_base::loadProperties() "external", "property"); + addProperty(average_time, + 0.0, + "average_time", + "", + "readonly", + "", + "external", + "property"); + } diff --git a/throughput/streams/bulkio/reader/cpp/reader_base.h b/throughput/streams/bulkio/reader/cpp/reader_base.h index 04a81cdbd..5ef55bdde 100644 --- a/throughput/streams/bulkio/reader/cpp/reader_base.h +++ b/throughput/streams/bulkio/reader/cpp/reader_base.h @@ -44,6 +44,8 @@ class reader_base : public Component, protected 
ThreadedComponent // Member variables exposed as properties /// Property: received CORBA::ULongLong received; + /// Property: average_time + double average_time; // Ports /// Port: dataOctet_in diff --git a/throughput/streams/bulkio/reader/reader.prf.xml b/throughput/streams/bulkio/reader/reader.prf.xml index 79a1c1eee..3c88458aa 100644 --- a/throughput/streams/bulkio/reader/reader.prf.xml +++ b/throughput/streams/bulkio/reader/reader.prf.xml @@ -25,4 +25,9 @@ with this program. If not, see http://www.gnu.org/licenses/. + + 0.0 + + + diff --git a/throughput/streams/bulkio/reader/reader.spd.xml b/throughput/streams/bulkio/reader/reader.spd.xml index c76ee4afe..5d4883692 100644 --- a/throughput/streams/bulkio/reader/reader.spd.xml +++ b/throughput/streams/bulkio/reader/reader.spd.xml @@ -32,9 +32,9 @@ with this program. If not, see http://www.gnu.org/licenses/. The implementation contains descriptive information about the template for a software component. - - - cpp/reader + + + cpp/reader.so diff --git a/throughput/streams/bulkio/writer/cpp/.md5sums b/throughput/streams/bulkio/writer/cpp/.md5sums index 93c62aa70..a7d41c697 100644 --- a/throughput/streams/bulkio/writer/cpp/.md5sums +++ b/throughput/streams/bulkio/writer/cpp/.md5sums @@ -1,10 +1,10 @@ -04765722f6bd272eadc240fa48f2caec main.cpp +dff373b5aa0cad57441e0dd60ed03b6f main.cpp b87651ea3565402473fb088004430fbf build.sh 8bfcd22353c3a57fee561ad86ee2a56b reconf -9bed716ae031bab9a729cbd74246aefd writer_base.h -68964fc9bfc7dc13cd670a7b711b1b43 configure.ac +9e5d33596cdced0a9983169fbcb50fef writer_base.h +9957cb9059fc17084dcb83614b0c8301 configure.ac cf250777e1af3cb1c0606423e85a800f writer.cpp -a46d6351333bed1378280bc9c1331f82 Makefile.am +2eb5eac4936add3eb4c5430a5b476eb1 Makefile.am 2697fc91ff0865583b7f248454c0c1b2 Makefile.am.ide f905c5c2f46f79a675d8f3e10d7c528a writer.h -4182ca94f290d82ff4edfcd39c59651d writer_base.cpp +f01dd717a4833099b79af1604b39d280 writer_base.cpp diff --git 
a/throughput/streams/bulkio/writer/cpp/Makefile.am b/throughput/streams/bulkio/writer/cpp/Makefile.am index 5c829281b..6e7c71503 100644 --- a/throughput/streams/bulkio/writer/cpp/Makefile.am +++ b/throughput/streams/bulkio/writer/cpp/Makefile.am @@ -18,15 +18,24 @@ # along with this program. If not, see http://www.gnu.org/licenses/. # ossieName = writer -bindir = $(prefix)/dom/components/writer/cpp/ -bin_PROGRAMS = writer +libdir = $(prefix)/dom/components/writer/cpp +lib_LTLIBRARIES = writer.la -xmldir = $(prefix)/dom/components/writer/ +xmldir = $(prefix)/dom/components/writer dist_xml_DATA = ../writer.scd.xml ../writer.prf.xml ../writer.spd.xml ACLOCAL_AMFLAGS = -I m4 -I${OSSIEHOME}/share/aclocal/ossie AUTOMAKE_OPTIONS = subdir-objects +.PHONY: convenience-link clean-convenience-link +all-local : convenience-link +clean-local : clean-convenience-link + +convenience-link : writer.la + @ln -fs .libs/writer.so + +clean-convenience-link: + @rm -f writer.so distclean-local: rm -rf m4 @@ -47,8 +56,8 @@ distclean-local: # generated by the REDHAWK IDE. You can remove/modify the following lines if # you wish to manually control these options. 
include $(srcdir)/Makefile.am.ide -writer_SOURCES = $(redhawk_SOURCES_auto) -writer_LDADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) -writer_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) -writer_LDFLAGS = -Wall $(redhawk_LDFLAGS_auto) +writer_la_SOURCES = $(redhawk_SOURCES_auto) +writer_la_LIBADD = $(SOFTPKG_LIBS) $(PROJECTDEPS_LIBS) $(BOOST_LDFLAGS) $(BOOST_THREAD_LIB) $(BOOST_REGEX_LIB) $(BOOST_SYSTEM_LIB) $(INTERFACEDEPS_LIBS) $(redhawk_LDADD_auto) +writer_la_CXXFLAGS = -Wall $(SOFTPKG_CFLAGS) $(PROJECTDEPS_CFLAGS) $(BOOST_CPPFLAGS) $(INTERFACEDEPS_CFLAGS) $(redhawk_INCLUDES_auto) +writer_la_LDFLAGS = -shared -module -export-dynamic -export-symbols-regex 'make_component' -avoid-version $(redhawk_LDFLAGS_auto) diff --git a/throughput/streams/bulkio/writer/cpp/main.cpp b/throughput/streams/bulkio/writer/cpp/main.cpp index 8e28ed5a3..c2a4ba206 100644 --- a/throughput/streams/bulkio/writer/cpp/main.cpp +++ b/throughput/streams/bulkio/writer/cpp/main.cpp @@ -21,10 +21,10 @@ #include "ossie/ossieSupport.h" #include "writer.h" -int main(int argc, char* argv[]) -{ - writer_i* writer_servant; - Component::start_component(writer_servant, argc, argv); - return 0; +extern "C" { + Resource_impl* make_component(const std::string& uuid, const std::string& identifier) + { + return new writer_i(uuid.c_str(), identifier.c_str()); + } } diff --git a/throughput/streams/bulkio/writer/cpp/writer.cpp b/throughput/streams/bulkio/writer/cpp/writer.cpp index a3244204b..ef93179f1 100644 --- a/throughput/streams/bulkio/writer/cpp/writer.cpp +++ b/throughput/streams/bulkio/writer/cpp/writer.cpp @@ -26,12 +26,16 @@ **************************************************************************/ +#include + #include "writer.h" -PREPARE_LOGGING(writer_i) +PREPARE_LOGGING(writer_i); writer_i::writer_i(const 
char *uuid, const char *label) : - writer_base(uuid, label) + writer_base(uuid, label), + lastSize(0), + totalSeconds(0.0) { // Avoid placing constructor code here. Instead, use the "constructor" function. @@ -220,12 +224,20 @@ void writer_i::constructor() ************************************************************************************************/ int writer_i::serviceFunction() { - size_t buffer_size = transfer_length; - if (buffer.size() != buffer_size) { - buffer.resize(buffer_size); + redhawk::buffer buffer(transfer_length); + if (buffer.size() != lastSize) { + total_packets = 0; + totalSeconds = 0.0; + lastSize = buffer.size(); } + double start = get_time(); stream.write(buffer, bulkio::time::utils::now()); + double end = get_time(); + + total_packets++; + totalSeconds += end-start; + average_time = totalSeconds/total_packets; return NORMAL; } diff --git a/throughput/streams/bulkio/writer/cpp/writer.h b/throughput/streams/bulkio/writer/cpp/writer.h index 5e96047e5..66202c212 100644 --- a/throughput/streams/bulkio/writer/cpp/writer.h +++ b/throughput/streams/bulkio/writer/cpp/writer.h @@ -36,6 +36,9 @@ class writer_i : public writer_base private: std::vector buffer; bulkio::OutOctetStream stream; + + size_t lastSize; + double totalSeconds; }; #endif // WRITER_I_IMPL_H diff --git a/throughput/streams/bulkio/writer/cpp/writer_base.cpp b/throughput/streams/bulkio/writer/cpp/writer_base.cpp index 6cafe95e5..42d0913f4 100644 --- a/throughput/streams/bulkio/writer/cpp/writer_base.cpp +++ b/throughput/streams/bulkio/writer/cpp/writer_base.cpp @@ -33,6 +33,8 @@ writer_base::writer_base(const char *uuid, const char *label) : Component(uuid, label), ThreadedComponent() { + setThreadName(label); + loadProperties(); dataOctet_out = new bulkio::OutOctetPort("dataOctet_out"); @@ -86,6 +88,24 @@ void writer_base::loadProperties() "external", "property"); + addProperty(total_packets, + 0, + "total_packets", + "", + "readonly", + "", + "external", + "property"); + + 
addProperty(average_time, + 0.0, + "average_time", + "", + "readonly", + "", + "external", + "property"); + } diff --git a/throughput/streams/bulkio/writer/cpp/writer_base.h b/throughput/streams/bulkio/writer/cpp/writer_base.h index 92a039124..a700e0e42 100644 --- a/throughput/streams/bulkio/writer/cpp/writer_base.h +++ b/throughput/streams/bulkio/writer/cpp/writer_base.h @@ -44,6 +44,10 @@ class writer_base : public Component, protected ThreadedComponent // Member variables exposed as properties /// Property: transfer_length CORBA::ULong transfer_length; + /// Property: total_packets + CORBA::ULong total_packets; + /// Property: average_time + double average_time; // Ports /// Port: dataOctet_out diff --git a/throughput/streams/bulkio/writer/writer.prf.xml b/throughput/streams/bulkio/writer/writer.prf.xml index 396a88ab4..e258d2f62 100644 --- a/throughput/streams/bulkio/writer/writer.prf.xml +++ b/throughput/streams/bulkio/writer/writer.prf.xml @@ -25,4 +25,14 @@ with this program. If not, see http://www.gnu.org/licenses/. + + 0 + + + + + 0.0 + + + diff --git a/throughput/streams/bulkio/writer/writer.spd.xml b/throughput/streams/bulkio/writer/writer.spd.xml index a9bf69bf2..47927bb1a 100644 --- a/throughput/streams/bulkio/writer/writer.spd.xml +++ b/throughput/streams/bulkio/writer/writer.spd.xml @@ -32,9 +32,9 @@ with this program. If not, see http://www.gnu.org/licenses/. The implementation contains descriptive information about the template for a software component. 
- - - cpp/writer + + + cpp/writer.so diff --git a/throughput/streams/corba/__init__.py b/throughput/streams/corba/__init__.py index 2ab549d14..a761578e9 100644 --- a/throughput/streams/corba/__init__.py +++ b/throughput/streams/corba/__init__.py @@ -51,11 +51,17 @@ def get_writer(self): return self.writer_proc.pid def transfer_size(self, size): - self.writer.transfer_length(size) + self.writer.transfer_length(int(size)) def received(self): return self.reader.received() + def send_time(self): + return self.writer._get_average_time() + + def recv_time(self): + return self.reader._get_average_time() + def terminate(self): self.reader_proc.terminate() self.writer_proc.terminate() diff --git a/throughput/streams/corba/idl/rawdata.idl b/throughput/streams/corba/idl/rawdata.idl index 09ef9fc74..d449f9c9b 100644 --- a/throughput/streams/corba/idl/rawdata.idl +++ b/throughput/streams/corba/idl/rawdata.idl @@ -27,6 +27,7 @@ module rawdata { void push_short(in short_sequence data); void push_float(in float_sequence data); long long received(); + readonly attribute double average_time; }; interface writer { @@ -34,5 +35,6 @@ module rawdata { void transfer_length(in long length); void start(); void stop(); + readonly attribute double average_time; }; }; diff --git a/throughput/streams/corba/reader.cpp b/throughput/streams/corba/reader.cpp index 7b6ccbe59..f3f2d331e 100644 --- a/throughput/streams/corba/reader.cpp +++ b/throughput/streams/corba/reader.cpp @@ -22,6 +22,7 @@ #include +#include #include #include "rawdata.h" @@ -29,14 +30,21 @@ class Reader : public virtual POA_rawdata::reader { public: Reader() : - _received(0) + _received(0), + _lastPacketSize(0), + _packetCount(0), + _totalTime(0.0), + _averageTime(0.0) { } void push_octet(const rawdata::octet_sequence& data) { + double start = get_time(); _received += data.length(); _deleter.deallocate_array(const_cast(data).get_buffer(1)); + double end = get_time(); + record_time(end-start, data.length()); } void 
push_short(const rawdata::short_sequence& data) @@ -56,9 +64,32 @@ class Reader : public virtual POA_rawdata::reader { return _received; } + double average_time() + { + return _averageTime; + } + + void record_time(double elapsed, size_t length) { + if (_lastPacketSize != length) { + _lastPacketSize = length; + _packetCount = 1; + _totalTime = elapsed; + _averageTime = _totalTime; + } else { + _packetCount++; + _totalTime += elapsed; + _averageTime = _totalTime / _packetCount; + } + } + private: threaded_deleter _deleter; size_t _received; + + size_t _lastPacketSize; + size_t _packetCount; + double _totalTime; + double _averageTime; }; int main (int argc, char* argv[]) diff --git a/throughput/streams/corba/writer.cpp b/throughput/streams/corba/writer.cpp index 3a6677972..220544287 100644 --- a/throughput/streams/corba/writer.cpp +++ b/throughput/streams/corba/writer.cpp @@ -21,6 +21,8 @@ #include +#include + #include "rawdata.h" class Writer : public virtual POA_rawdata::writer { @@ -28,7 +30,10 @@ class Writer : public virtual POA_rawdata::writer { Writer() : _thread(0), _running(true), - _length(1024) + _length(1024), + _totalPackets(0), + _totalSeconds(0.0), + _averageTime(0.0) { _thread = new omni_thread(&Writer::thread_start, this); } @@ -42,6 +47,9 @@ class Writer : public virtual POA_rawdata::writer { void transfer_length(CORBA::Long length) { _length = length; + _totalPackets = 0; + _totalSeconds = 0.0; + _averageTime = 0.0; } void start() @@ -54,6 +62,11 @@ class Writer : public virtual POA_rawdata::writer { _running = false; } + double average_time() + { + return _averageTime; + } + private: void thread_run() { @@ -82,11 +95,19 @@ class Writer : public virtual POA_rawdata::writer { if (data.length() != _length) { data.length(_length); } + + double start = get_time(); _reader->push_octet(data); + double end = get_time(); + + _totalPackets++; + _totalSeconds += end-start; + _averageTime = _totalSeconds/_totalPackets; } } } + static void* thread_start(void* 
arg) { Writer* writer = (Writer*)arg; @@ -99,6 +120,10 @@ class Writer : public virtual POA_rawdata::writer { volatile bool _running; std::string _format; int _length; + + size_t _totalPackets; + double _totalSeconds; + double _averageTime; }; int main (int argc, char* argv[]) diff --git a/throughput/streams/raw/__init__.py b/throughput/streams/raw/__init__.py index 8f607057e..8a1965beb 100644 --- a/throughput/streams/raw/__init__.py +++ b/throughput/streams/raw/__init__.py @@ -29,12 +29,14 @@ class control(object): def __init__(self, transfer_size): fd, self.filename = tempfile.mkstemp() - os.ftruncate(fd, 12) - self.buf = mmap.mmap(fd, 12, mmap.MAP_SHARED, mmap.PROT_WRITE) + os.ftruncate(fd, 20) + self.buf = mmap.mmap(fd, 20, mmap.MAP_SHARED, mmap.PROT_WRITE) os.close(fd) self.total_bytes = ctypes.c_uint64.from_buffer(self.buf) self.total_bytes.value = 0 - self.transfer_size = ctypes.c_uint32.from_buffer(self.buf, 8) + self.average_time = ctypes.c_double.from_buffer(self.buf, 8) + self.average_time.value = 0.0 + self.transfer_size = ctypes.c_uint32.from_buffer(self.buf, 16) self.transfer_size.value = transfer_size def __del__(self): @@ -65,12 +67,18 @@ def get_writer(self): return self.writer_proc.pid def transfer_size(self, size): - self.writer_control.transfer_size.value = size - self.reader_control.transfer_size.value = size + self.writer_control.transfer_size.value = int(size) + self.reader_control.transfer_size.value = int(size) def received(self): return self.reader_control.total_bytes.value + def send_time(self): + return self.writer_control.average_time.value + + def recv_time(self): + return self.reader_control.average_time.value + def terminate(self): # Assuming stop() was already called, the reader and writer should have # already exited diff --git a/throughput/streams/raw/control.h b/throughput/streams/raw/control.h index 744b79212..b61c27c42 100644 --- a/throughput/streams/raw/control.h +++ b/throughput/streams/raw/control.h @@ -26,6 +26,7 @@ struct 
control { volatile uint64_t total_bytes; + volatile double average_time; volatile uint32_t transfer_size; }; diff --git a/throughput/streams/raw/reader.cpp b/throughput/streams/raw/reader.cpp index ebd5923c9..47b5952f6 100644 --- a/throughput/streams/raw/reader.cpp +++ b/throughput/streams/raw/reader.cpp @@ -33,6 +33,7 @@ #include +#include #include #include "control.h" @@ -125,19 +126,37 @@ int main(int argc, const char* argv[]) control* state = open_control(argv[3]); + size_t total_packets = 0; + double total_seconds = 0.0; + size_t last_size = 0; + ssize_t count = 0; while (true) { + double start = get_time(); + size_t buffer_size = 0; if (read(fd, &buffer_size, sizeof(buffer_size)) < sizeof(buffer_size)) { break; } + if (buffer_size != last_size) { + last_size = buffer_size; + total_packets = 0; + total_seconds = 0.0; + state->average_time = 0.0; + } + char* buffer = new char[buffer_size]; size_t pass = read_buffer(fd, buffer, buffer_size); deleter.deallocate_array(buffer); if (pass == 0) { break; } + double end = get_time(); + + total_packets++; + total_seconds += end - start; + state->average_time = total_seconds / total_packets; state->total_bytes += pass; } diff --git a/throughput/streams/raw/writer.cpp b/throughput/streams/raw/writer.cpp index 9c80f0b96..ef7fdb2a8 100644 --- a/throughput/streams/raw/writer.cpp +++ b/throughput/streams/raw/writer.cpp @@ -29,6 +29,8 @@ #include #include +#include + #include "control.h" static volatile bool running = true; @@ -109,13 +111,24 @@ int main(int argc, const char* argv[]) char temp; std::cin.get(temp); + size_t total_packets = 0; + double total_seconds = 0.0; + while (running) { size_t buffer_size = state->transfer_size; if (buffer_size != buffer.size()) { buffer.resize(buffer_size); + total_packets = 0; + total_seconds = 0.0; + state->average_time = 0.0; } + double start = get_time(); write(fd, &buffer_size, sizeof(buffer_size)); ssize_t pass = write(fd, &buffer[0], buffer.size()); + double end = get_time(); + 
total_packets++; + total_seconds += end - start; + state->average_time = total_seconds / total_packets; state->total_bytes += pass; } diff --git a/throughput/tools/speedometer.py b/throughput/tools/speedometer.py index fd3da4bf5..513b7c100 100644 --- a/throughput/tools/speedometer.py +++ b/throughput/tools/speedometer.py @@ -51,6 +51,15 @@ def __init__(self, period): self.line, = self.axes.plot(self.x, self.y) self.axes.set_xlim(xmax=period) + self.figure.subplots_adjust(bottom=0.2) + + sliderax = self.figure.add_axes([0.2, 0.05, 0.6, 0.05]) + min_size = 1024 + max_size = 8*1024*1024 + default_size = 1*1024*1024 + self.slider = matplotlib.widgets.Slider(sliderax, 'Transfer size', min_size, max_size, + default_size, '%.0f') + self.figure.show() def sample_added(self, **stats): @@ -182,6 +191,7 @@ def run(self, name, stream): test = ThroughputTest(poll_time, run_time) + import matplotlib from matplotlib import pyplot display = Speedometer(run_time) test.add_idle_task(display.update) @@ -197,6 +207,7 @@ def run(self, name, stream): numa_policy = numa.NumaPolicy(numa_distance) stream = factory.create('octet', numa_policy.next()) + display.slider.on_changed(stream.transfer_size) try: stream.transfer_size(transfer_size) test.run(interface, stream)